==> mtail-3.0.0~rc24.1/.circleci/config.yml <==
---
# common-build names an anchor which contains all the reusable build
# instructions.
common-build: &common-build
working_directory: /go/src/github.com/google/mtail
steps:
- checkout
- run: make --debug smoke
- run: mkdir -p test-results
- run: mkdir -p workspace
- run: go get gotest.tools/gotestsum
- run:
name: Run tests and coverage with race detector
command: gotestsum --junitfile test-results/test-output.xml -- -race -coverprofile=workspace/${CIRCLE_JOB}.coverprofile --covermode=atomic -tags=integration -v -timeout=20m ./...
no_output_timeout: 20m
- persist_to_workspace:
root: workspace
paths:
- "*.coverprofile"
- store_test_results:
path: test-results
- store_artifacts:
path: test-results
# go* names anchors which define the Go versions to build with, merging the
# common-build alias above.
go1_11: &go1_11
<<: *common-build
docker:
- image: circleci/golang:1.11
go1_10: &go1_10
<<: *common-build
docker:
- image: circleci/golang:1.10
# concurrency* names anchors which define the concurrency level to run
# tests with.
concurrency_4: &concurrency_4
environment:
GOMAXPROCS: 4
concurrency_2: &concurrency_2
environment:
GOMAXPROCS: 2
concurrency_1: &concurrency_1
environment:
GOMAXPROCS: 1
# The main circle-ci configuration.
version: 2
jobs:
# The main build steps are the cross-product of concurrency and Go versions.
build-go1.11-4:
<<: [*go1_11, *concurrency_4]
build-go1.11-2:
<<: [*go1_11, *concurrency_2]
build-go1.11-1:
<<: [*go1_11, *concurrency_1]
build-go1.10-4:
<<: [*go1_10, *concurrency_4]
build-go1.10-2:
<<: [*go1_10, *concurrency_2]
build-go1.10-1:
<<: [*go1_10, *concurrency_1]
# Coverage collects all the coverage reports from each build and merges them,
# before sending off to Coveralls.
coverage:
working_directory: /go/src/github.com/google/mtail
environment:
COVERALLS_TOKEN: AQTCw3bl6FDphbQcMMkgVZBIunz5r3H8b
docker:
- image: circleci/golang:1.11
steps:
- checkout
- attach_workspace:
at: /tmp/workspace
- run:
name: Generate coverage
command: |
ls -lR /tmp/workspace
go get github.com/sozorogami/gover
gover /tmp/workspace coverprofile
make --debug coverage.html upload_to_coveralls
- store_artifacts:
path: coverprofile
destination: coverage
- store_artifacts:
path: coverage.html
destination: coverage
delivery:
working_directory: /go/src/github.com/google/mtail
docker:
- image: circleci/golang:1.11
steps:
- checkout
- attach_workspace:
at: /tmp/workspace
- run: make --debug crossbuild
- run: sha1sum build/*
- store_artifacts:
path: build/
- run:
name: Upload binaries to GitHub Release
# GITHUB_TOKEN is a secret from the CircleCI environment
command: |
go get github.com/tcnksm/ghr
ghr -t $GITHUB_TOKEN -u $CIRCLE_PROJECT_USERNAME -r $CIRCLE_PROJECT_REPONAME --replace ${CIRCLE_TAG?} build/
tag-filter: &tag-filter
filters:
tags:
only: /^v.*/
# Using workflows to sequence each of our builds in parallel, and coverage
# depending on all of them. These two lists need to be updated when the inputs
# of the build matrix change.
workflows:
version: 2
build_and_deliver:
jobs:
- build-go1.11-4:
<<: *tag-filter
- build-go1.11-2:
<<: *tag-filter
requires:
- build-go1.11-4
- build-go1.11-1:
<<: *tag-filter
requires:
- build-go1.11-4
- build-go1.10-4:
<<: *tag-filter
requires:
- build-go1.11-4
- build-go1.10-2:
<<: *tag-filter
requires:
- build-go1.11-4
- build-go1.10-1:
<<: *tag-filter
requires:
- build-go1.11-4
- coverage:
<<: *tag-filter
requires:
- build-go1.11-4
- build-go1.11-2
- build-go1.11-1
- build-go1.10-4
- build-go1.10-2
- build-go1.10-1
- delivery:
requires:
- coverage
filters:
tags:
only: /^v.*/
# Explicitly disable all branches, otherwise we match on every
# commit.
branches:
ignore: /.*/
==> mtail-3.0.0~rc24.1/.gitlab-ci.yml <==
image: golang:1.8
stages:
- test
- build
before_script:
- mkdir ${CI_PROJECT_DIR}/build
- mkdir -p ${GOPATH}/src/github.com/google/
- ln -s $(pwd) ${GOPATH}/src/github.com/google/mtail
- cd ${GOPATH}/src/github.com/google/mtail
test:
stage: test
allow_failure: true
script:
- make install_deps
- make test
build:
stage: build
script:
- PREFIX=${CI_PROJECT_DIR}/build make install
artifacts:
expire_in: 1 week
when: on_success
paths:
- build
==> mtail-3.0.0~rc24.1/.golangci.yml <==
service:
prepare:
- make install_deps
run:
tests: true
build-tags:
- integration
linters-settings:
govet:
check-shadowing: true
linters:
enable-all: true
disable:
- maligned
- megacheck
- lll
- gocyclo
- unparam
# Not sure what this is telling me yet.
- scopelint
# How dare you tell me not to use inits.
- gochecknoinits
# Flags are fine, as are test tables.
- gochecknoglobals
issues:
max-per-linter: 0
max-same: 0
exclude-use-default: true
exclude:
# # Captured by errcheck.
# - '^(G104|G204):'
# # Very commonly not checked.
# - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked'
# #- 'exported method (.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos) should have comment or be unexported'
# If you liked it you shoulda put a gofix on it.
- 'composite literal uses unkeyed fields'
# I like shadowing err
- 'declaration of "err" shadows declaration'
# #- 'bad syntax for struct tag key'
# #- 'bad syntax for struct tag pair'
# goyacc generated error in three locations
- 'this value of `mtailDollar.* is never used'
==> mtail-3.0.0~rc24.1/CODE_OF_CONDUCT.md <==
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at jaq@google.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
==> mtail-3.0.0~rc24.1/CONTRIBUTING.md <==
Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose.
Please read the [style guide](docs/style.md) for tips on the project coding
guidelines.
### Response Time
This repository is maintained as a best effort service.
Response times to issues and PRs may vary with the availability of the
maintainers. We appreciate your patience.
PRs with unit tests will be merged promptly. All other requests (issues and
PRs) may take longer to be responded to.
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the Software Grant and Corporate Contributor License Agreement.
==> mtail-3.0.0~rc24.1/Dockerfile <==
FROM golang:1.10.1-alpine3.7 AS builder
RUN apk add --update git make
WORKDIR /go/src/github.com/google/mtail
COPY . /go/src/github.com/google/mtail
RUN make install_deps && make install
FROM alpine:3.7
ARG version=0.0.0-local
ARG build_date=unknown
ARG commit_hash=unknown
ARG vcs_url=unknown
ARG vcs_branch=unknown
EXPOSE 3903
ENTRYPOINT ["/usr/bin/mtail"]
LABEL org.label-schema.vendor='Google' \
org.label-schema.name='mtail' \
org.label-schema.description='extract whitebox monitoring data from application logs for collection in a timeseries database' \
org.label-schema.usage='https://github.com/google/mtail/blob/master/docs/Programming-Guide.md' \
org.label-schema.url='https://github.com/google/mtail' \
org.label-schema.vcs-url=$vcs_url \
org.label-schema.vcs-branch=$vcs_branch \
org.label-schema.vcs-ref=$commit_hash \
org.label-schema.version=$version \
org.label-schema.schema-version='1.0' \
org.label-schema.build-date=$build_date
COPY --from=builder /go/bin/mtail /usr/bin/mtail
==> mtail-3.0.0~rc24.1/ISSUE_TEMPLATE.md <==
Thanks for discovering a problem in `mtail`!
When reporting bugs in mtail behaviour, please be as detailed as possible; describe the problem, what you wanted to have happen, and what you observed instead.
If your problem is with the way an `mtail` program is behaving, please attach or include inline any mtail programs that demonstrate the bug, any log files that mtail was processing, and the observed output.
If your problem is with `mtail`, please include the commandline you started it with, and the INFO log.
See also [Reporting a problem](https://github.com/google/mtail/blob/master/docs/Troubleshooting.md#reporting-a-problem).
Thanks!
==> mtail-3.0.0~rc24.1/LICENSE <==
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==> mtail-3.0.0~rc24.1/Makefile <==
# Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# Build these.
TARGETS = mtail mgen mdot
# Install them here
PREFIX ?= usr/local
# Place to store dependencies.
DEPDIR = .d
$(DEPDIR):
install -d $(DEPDIR)
# This rule finds all non-standard-library dependencies of each target and emits them to a makefile include.
# Thanks mrtazz: https://unwiredcouch.com/2016/05/31/go-make.html
MAKEDEPEND = echo "$@: $$(go list -f '{{if not .Standard}}{{.Dir}}{{end}}' $$(go list -f '{{ join .Deps "\n" }}' $<) | sed -e 's@$$@/*.go@' | tr "\n" " " )" > $(DEPDIR)/$@.d
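# For illustration only (the paths are hypothetical), a generated
# $(DEPDIR)/mtail.d would contain a single line like:
#   mtail: /go/src/github.com/golang/glog/*.go /go/src/github.com/google/mtail/internal/mtail/*.go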
# This rule allows the dependencies to not exist yet, for the first run.
$(DEPDIR)/%.d: ;
.PRECIOUS: $(DEPDIR)/%.d
# This instruction loads any dependency includes for our targets.
-include $(patsubst %,$(DEPDIR)/%.d,$(TARGETS))
# Set the timeout for tests run under the race detector.
timeout := 10m
ifeq ($(TRAVIS),true)
timeout := 20m
endif
ifeq ($(CIRCLECI),true)
timeout := 20m
endif
# Let the benchmarks run for a long time. The timeout is for the total time of
# all benchmarks, not per bench.
benchtimeout := 20m
# `go get` is always verbose; set UPGRADE=y to also pass -u and upgrade
# dependencies.
GOGETFLAGS = -v
ifeq ($(UPGRADE),y)
GOGETFLAGS += -u
endif
GOFILES=$(shell find . -name '*.go' -a ! -name '*_test.go')
GOTESTFILES=$(shell find . -name '*_test.go')
GOGENFILES=internal/vm/parser/parser.go\
internal/mtail/logo.ico.go
CLEANFILES+=\
internal/vm/parser/parser.go\
internal/vm/parser/y.output\
internal/mtail/logo.ico.go\
internal/mtail/logo.ico\
# A place to install tool dependencies.
BIN = $(GOPATH)/bin
TOGO = $(BIN)/togo
$(TOGO):
go get $(UPGRADE) -v github.com/flazz/togo
GOYACC = $(BIN)/goyacc
$(GOYACC):
go get $(UPGRADE) -v golang.org/x/tools/cmd/goyacc
GOFUZZBUILD = $(BIN)/go-fuzz-build
$(GOFUZZBUILD):
go get $(UPGRADE) -v github.com/dvyukov/go-fuzz/go-fuzz-build
GOFUZZ = $(BIN)/go-fuzz
$(GOFUZZ):
go get $(UPGRADE) -v github.com/dvyukov/go-fuzz/go-fuzz
GOVERALLS = $(BIN)/goveralls
$(GOVERALLS):
go get $(UPGRADE) -v github.com/mattn/goveralls
GOX = $(BIN)/gox
$(GOX):
go get github.com/mitchellh/gox
all: $(TARGETS)
.PHONY: clean covclean crossclean
clean: covclean crossclean
rm -f $(CLEANFILES) .*dep-stamp
covclean:
rm -f *.coverprofile coverage.html $(COVERPROFILES)
crossclean:
rm -rf build
.PHONY: lint
lint:
golangci-lint run ./...
version := $(shell git describe --tags --always --dirty)
revision := $(shell git rev-parse HEAD)
release := $(shell git describe --tags | cut -d"-" -f 1,2)
GO_LDFLAGS := "-X main.Version=${version} -X main.Revision=${revision}"
# Very specific static pattern rule to only do this for commandline targets.
# Each commandline must be in a 'main.go' in their respective directory. The
# MAKEDEPEND rule generates a list of dependencies for the next make run -- the
# first time, the rule executes because the target doesn't exist; subsequent
# runs read the recorded dependencies and rebuild iff they change.
$(TARGETS): %: cmd/%/main.go $(DEPDIR)/%.d | .dep-stamp
$(MAKEDEPEND)
go build -ldflags $(GO_LDFLAGS) -o $@ $<
internal/vm/parser/parser.go: internal/vm/parser/parser.y | $(GOYACC)
go generate -x ./$(@D)
internal/mtail/logo.ico: logo.png
/usr/bin/convert $< -define icon:auto-resize=64,48,32,16 $@ || touch $@
internal/mtail/logo.ico.go: | internal/mtail/logo.ico $(TOGO)
$(TOGO) -pkg mtail -name logoFavicon -input internal/mtail/logo.ico
###
## Install rules
#
# Would subst all $(TARGETS) except other binaries are just for development.
INSTALLED_TARGETS = $(PREFIX)/bin/mtail
.PHONY: install
install: $(INSTALLED_TARGETS)
$(PREFIX)/bin/%: %
install -d $(@D)
install -m 755 $< $@
GOX_OSARCH ?= "linux/amd64 windows/amd64 darwin/amd64"
#GOX_OSARCH := ""
.PHONY: crossbuild
crossbuild: $(GOFILES) $(GOGENFILES) | $(GOX) .dep-stamp
mkdir -p build
gox --output="./build/mtail_${release}_{{.OS}}_{{.Arch}}" -osarch=$(GOX_OSARCH) -ldflags $(GO_LDFLAGS) ./cmd/mtail
.PHONY: test check
check test: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | $(LOGO_GO) .dep-stamp
go test -timeout 10s ./...
.PHONY: testrace
testrace: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | $(LOGO_GO) .dep-stamp
go test -timeout ${timeout} -race -v -tags=integration ./...
.PHONY: smoke
smoke: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | .dep-stamp
go test -timeout 1s -test.short ./...
.PHONY: bench
bench: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | .dep-stamp
go test -tags=integration -bench=. -timeout=${benchtimeout} -run=XXX ./...
.PHONY: bench_cpu
bench_cpu: | .dep-stamp
go test -tags=integration -bench=. -run=XXX -timeout=${benchtimeout} -cpuprofile=cpu.out
.PHONY: bench_mem
bench_mem: | .dep-stamp
go test -tags=integration -bench=. -run=XXX -timeout=${benchtimeout} -memprofile=mem.out
.PHONY: recbench
recbench: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | .dep-stamp
go test -bench=. -run=XXX --record_benchmark ./...
.PHONY: regtest
regtest: | .dep-stamp
go test -v -tags=integration -timeout=${timeout} ./...
PACKAGES := $(shell go list -f '{{.Dir}}' ./... | grep -v /vendor/ | grep -v /cmd/ | sed -e "s@$$(pwd)@.@")
.PHONY: testall
testall: testrace bench regtest
IMPORTS := $(shell go list -f '{{join .Imports "\n"}}' ./... | sort | uniq | grep -v mtail)
TESTIMPORTS := $(shell go list -f '{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v mtail)
## make u a container
.PHONY: container
container: Dockerfile
docker build -t mtail \
--build-arg version=${version} \
--build-arg commit_hash=${revision} \
--build-arg build_date=$(shell date -Iseconds --utc) \
.
# Append the bin subdirs of every element of the GOPATH list to PATH, so we can find goyacc.
empty :=
space := $(empty) $(empty)
export PATH := $(PATH):$(subst $(space),:,$(patsubst %,%/bin,$(subst :, ,$(GOPATH))))
###
## Fuzz testing
#
vm-fuzz.zip: $(GOFILES) | $(GOFUZZBUILD)
$(GOFUZZBUILD) github.com/google/mtail/internal/vm
.PHONY: fuzz
fuzz: vm-fuzz.zip | $(GOFUZZ)
# rm -rf workdir
mkdir -p workdir/corpus
cp examples/*.mtail workdir/corpus
$(GOFUZZ) -bin=vm-fuzz.zip -workdir=workdir
###
## dependency section
#
.PHONY: install_deps
install_deps: .dep-stamp
.dep-stamp: internal/vm/parser/parser.go
@echo "Install all dependencies, ensuring they're updated"
go get $(UPGRADE) -v $(IMPORTS)
go get $(UPGRADE) -v $(TESTIMPORTS)
touch $@
###
## Coverage
#
.PHONY: coverage covrep
coverage: coverprofile
coverprofile: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | $(LOGO_GO) .dep-stamp
go test -v -covermode=count -coverprofile=$@ -tags=integration -timeout=${timeout} $(PACKAGES)
coverage.html: coverprofile
go tool cover -html=$< -o $@
covrep: coverage.html
xdg-open $<
ifeq ($(CIRCLECI),true)
COVERALLS_SERVICE := circle-ci
endif
ifeq ($(TRAVIS),true)
COVERALLS_SERVICE := travis-ci
endif
.PHONY: upload_to_coveralls
upload_to_coveralls: | coverprofile $(GOVERALLS)
$(GOVERALLS) -coverprofile=coverprofile -service=$(COVERALLS_SERVICE)
==> mtail-3.0.0~rc24.1/README.md <==
# mtail - extract whitebox monitoring data from application logs for collection into a timeseries database
[](http://godoc.org/github.com/google/mtail)
[](https://circleci.com/gh/google/mtail)
[](https://coveralls.io/github/google/mtail?branch=master)
[](https://goreportcard.com/report/github.com/google/mtail)
[](https://golangci.com/r/github.com/google/mtail)
`mtail` is a tool for extracting metrics from application logs to be exported
into a timeseries database or timeseries calculator for alerting and
dashboarding.
It fills a monitoring niche by being the glue between applications that do not
export their own internal state (other than via logs) and existing monitoring
systems, such that system operators do not need to patch those applications to
instrument them or write custom extraction code for every such application.
The extraction is controlled by [mtail programs](docs/Programming-Guide.md)
which define patterns and actions:
# simple line counter
counter line_count
/$/ {
line_count++
}
Metrics are exported for scraping by a collector as JSON or Prometheus format
over HTTP, or can be periodically sent to a collectd, StatsD, or Graphite
collector socket.
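For example, assuming a default installation listening on port 3903, the
Prometheus-format export can be fetched with:

```
curl http://localhost:3903/metrics
```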
Read the [programming guide](docs/Programming-Guide.md) if you want to learn how
to write mtail programs.
Mailing list: https://groups.google.com/forum/#!forum/mtail-users
## Installation
There are various ways of installing **mtail**.
### Precompiled binaries
Precompiled binaries for released versions are available in the
[Releases page](https://github.com/google/mtail/releases) on Github. Using the
latest production release binary is the recommended way of installing **mtail**.
Windows, OSX and Linux binaries are available.
### Building from source
To build `mtail` from the source code yourself you need to have a working Go
environment with version 1.9 or greater installed. `mtail` is `go get`able and
`go install`able from this repository, but it is best to use the Makefile to
build it.
```
go get -u github.com/google/mtail
cd $GOPATH/src/github.com/google/mtail
make install
```
If you develop the compiler you will need some additional tools
like `goyacc` to be able to rebuild the parser.
See the [Build instructions](docs/Building.md) for more details.
A `Dockerfile` is included in this repository for local development as an
alternative to installing Go in your environment, and takes care of all the
build dependency installation, if you don't care for that.
## Deployment
`mtail` works best when it is paired with a timeseries-based calculator and
alerting tool, like [Prometheus](http://prometheus.io).
> So what you do is you take the metrics from the log files and
> you bring them down to the monitoring system?
[It deals with the instrumentation so the engineers don't have
to!](http://www.imdb.com/title/tt0151804/quotes/qt0386890) It has the
extraction skills! It is good at dealing with log files!!
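As a sketch (the job name and target here are illustrative, not prescriptive),
a Prometheus scrape configuration for a default `mtail` install might look
like:

```yaml
scrape_configs:
  - job_name: mtail                  # illustrative job name
    static_configs:
      - targets: ['localhost:3903']  # mtail's default HTTP port
```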
## Read More
Full documentation at http://google.github.io/mtail/
Read more about writing `mtail` programs:
* [Programming Guide](docs/Programming-Guide.md)
* [Language Reference](docs/Language.md)
* [Metrics](docs/Metrics.md)
* [Managing internal state](docs/state.md)
* [Testing your programs](docs/Testing.md)
Read more about hacking on `mtail`
* [Building from source](docs/Building.md)
* [Contributing](CONTRIBUTING.md)
* [Style](docs/style.md)
Read more about deploying `mtail` and your programs in a monitoring environment
* [Deploying](docs/Deploying.md)
* [Interoperability](docs/Interoperability.md) with other systems
* [Troubleshooting](docs/Troubleshooting.md)
* [FAQ](docs/faq.md)
After that, if you have any questions, please email (and optionally join) the mailing list: https://groups.google.com/forum/#!forum/mtail-users or [file a new issue](https://github.com/google/mtail/issues/new).
==> mtail-3.0.0~rc24.1/TODO <==
standard library, search path
refactor fs and notify into single interface
no trailing newline in parser test, requires changes to expr stmt
parse tree/ast testing? - expected AST as result from parse/check instead of
merely getting a result
mapping between progs and logs to reduce wasted processing - issue #35
bytecode like
[{push 1} {push 0} {cmp 1} {jm 6} {push 0} {jmp 7} {push 1} {jnm 13}
{setmatched false} {mload 0} {dload 0} {inc } {setmatched true}]
can be expressed as
[{push 1} {push 0} {cmp 1} {jm 9} {setmatched false} {mload 0} {dload 0} {inc
} {setmatched true}]
but jnm 13 is from the condExpr and the previous is from a comparison binary
expr; an optimizer is needed to collapse the bytecode to understand that
cmp, jm, push, jump, push, jnm in sequence like so is the same as a cmp, jm
and we need to worry about the jump table too
reversed casts: s2i,i2s pairs as well
count stack size and preallocate stack
-> counts of push/pop per instruction
-> test to keep p/p counts updated
: seems like a lot of work for not much return
Run and upload benchmarks to https://perfdata.golang.org/ from circleci
# Won't do
X Use capture group references to feed back to declaring regular expression,
X noting unused caprefs,
X possibly flipping back to noncapturing (and renumbering the caprefs?)
X -> unlikely to implement, probably won't impact regexp speed
When using a const by itself as a match condition, we get warnings about
the capture group names not existing.
const A /(?P<a>.*)/
A {
x[$a]++
}
... => $a not defined in scope.
Can't define string constants, like const STRPTIME_FORMAT "Jan _2"
Multiline const can't start with a newline, must be const FOO // +\n...
Can't chain two matches in same expression like getfilename() =~ 'name' &&
EXPR_RE because $0 is redefined
Can't set the timestamp in one line and reuse it in another; must use the
caching state metric pattern, hidden gauge time.
Get a list of non-stdlib deps
go list -f "{{if not .Standard}}{{.ImportPath}}{{end}}" $(go list -f '{{join .Deps "\n"}}' ./...)
Request joining (request coalescing) for the log-watcher when sending updates about files, in case
one's already in the queue. Maybe helps with fsnotify overflow, and the
duplicate notify problem from poll-only mode?
Programs may not use mtail_ as a metric prefix.
==> mtail-3.0.0~rc24.1/benchmark_results.csv <==
1350190388,1,4,examples/sftp.em,500,118000,3.165639s,6.331278,236,37.27525469581339,26.82744915254237
1350190385,1,4,examples/rsyncd.em,100,23500,1.79889s,17.9889,235,13.063611449282613,76.54851063829787
1350190383,1,4,examples/linecount.em,50000,50000,2.356123s,0.047122,1,21.221302962536335,47.12246
1359593792,1,4,examples/dhcpd.em,1,50000,8.55385s,8553.85,50000,5.845321112715327,171.077
1359593784,1,4,examples/sftp.em,200,47200,1.516004s,7.58002,236,31.13448249476914,32.11872881355932
1359593782,1,4,examples/rsyncd.em,100,23500,2.167435s,21.67435,235,10.842308996578904,92.23127659574467
1359593779,1,4,examples/linecount.em,50000,50000,2.695952s,0.053919,1,18.546324266900893,53.91904
1378745369,1,4,examples/dhcpd.em,1,50000,8.342115s,8342.115,50000,5.993683855952598,166.8423
1378745360,1,4,examples/sftp.em,500,118000,3.574926s,7.149852,236,33.00767624280894,30.295983050847457
1378745356,1,4,examples/rsyncd.em,100,23500,1.769277s,17.69277,235,13.28226162438103,75.2883829787234
1378745354,1,4,examples/linecount.em,50000,50000,2.569769s,0.051395,1,19.457001777202542,51.39538
mtail-3.0.0~rc24.1/cmd/ 0000775 0000000 0000000 00000000000 13435446430 0014421 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/cmd/mdot/ 0000775 0000000 0000000 00000000000 13435446430 0015364 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/cmd/mdot/main.go 0000664 0000000 0000000 00000011615 13435446430 0016643 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
/*
Command mdot turns an mtail program AST into a graphviz graph on standard output.
To use, run it like (assuming your shell is in the same directory as this file)
go run github.com/google/mtail/cmd/mdot --prog ../../examples/dhcpd.mtail | xdot -
or
go run github.com/google/mtail/cmd/mdot --prog ../../examples/dhcpd.mtail --http_port 8080
to view the dot output visit http://localhost:8080
You'll need the graphviz `dot' command installed.
*/
package main
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/vm/ast"
"github.com/google/mtail/internal/vm/checker"
"github.com/google/mtail/internal/vm/parser"
)
var (
prog = flag.String("prog", "", "Name of the program source to parse.")
httpPort = flag.String("http_port", "", "Port number to run HTTP server on.")
)
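// dotter walks an mtail AST and emits it as a graphviz digraph: each AST node
// becomes a graph node, with an edge drawn from its parent.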
type dotter struct {
w io.Writer
id int
parentID []int // id of the parent node
}
func (d *dotter) nextID() int {
d.id++
return d.id
}
func (d *dotter) emitNode(id int, node ast.Node) {
attrs := map[string]string{
"label": strings.Split(fmt.Sprintf("%T", node), ".")[1] + "\n",
"shape": "box",
"style": "filled",
"tooltip": node.Type().String(),
}
switch n := node.(type) {
case *ast.VarDecl, *ast.DecoDecl:
attrs["fillcolor"] = "lightgreen"
switch n := n.(type) {
case *ast.VarDecl:
attrs["label"] += fmt.Sprintf("%s %s", n.Kind, n.Name)
case *ast.DecoDecl:
attrs["label"] += n.Name
}
case *ast.IdTerm, *ast.CaprefTerm:
attrs["fillcolor"] = "pink"
attrs["shape"] = "ellipse"
switch n := n.(type) {
case *ast.IdTerm:
attrs["label"] += n.Name
case *ast.CaprefTerm:
attrs["label"] += fmt.Sprintf("$%s", n.Name)
}
case *ast.IntLit, *ast.FloatLit, *ast.PatternLit, *ast.StringLit:
attrs["fillcolor"] = "pink"
attrs["shape"] = "ellipse"
switch n := n.(type) {
case *ast.IntLit:
attrs["label"] += fmt.Sprintf("%d", n.I)
case *ast.FloatLit:
attrs["label"] += fmt.Sprintf("%g", n.F)
case *ast.PatternLit:
attrs["label"] += fmt.Sprintf("/%s/", n.Pattern)
case *ast.StringLit:
attrs["label"] += n.Text
}
case *ast.IndexedExpr, *ast.BinaryExpr, *ast.UnaryExpr, *ast.PatternExpr, *ast.BuiltinExpr:
attrs["fillcolor"] = "lightblue"
switch n := n.(type) {
case *ast.BinaryExpr:
attrs["label"] += parser.Kind(n.Op).String()
case *ast.UnaryExpr:
attrs["label"] += parser.Kind(n.Op).String()
case *ast.BuiltinExpr:
attrs["label"] += n.Name
}
}
pos := node.Pos()
if pos != nil {
attrs["xlabel"] = pos.String()
}
fmt.Fprintf(d.w, "n%d [", id)
for k, v := range attrs {
fmt.Fprintf(d.w, "%s=\"%s\" ", k, v)
}
fmt.Fprintf(d.w, "]\n")
}
func (d *dotter) emitLine(src, dst int) {
fmt.Fprintf(d.w, "n%d -> n%d\n", src, dst)
}
func (d *dotter) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) {
id := d.nextID()
d.emitNode(id, node)
if len(d.parentID) > 0 {
parentID := d.parentID[len(d.parentID)-1]
d.emitLine(parentID, id)
}
d.parentID = append(d.parentID, id)
return d, node
}
func (d *dotter) VisitAfter(node ast.Node) ast.Node {
d.parentID = d.parentID[:len(d.parentID)-1]
return node
}
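// makeDot parses and typechecks the mtail program in the named file, then
// walks its AST to write a graphviz digraph to w.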
func makeDot(name string, w io.Writer) error {
f, err := os.Open(name)
if err != nil {
return err
}
n, err := parser.Parse(name, f)
if err != nil {
return err
}
n, err = checker.Check(n)
if err != nil {
return err
}
fmt.Fprintf(w, "digraph \"%s\" {\n", *prog)
dot := &dotter{w: w}
ast.Walk(dot, n)
fmt.Fprintf(w, "}\n")
return nil
}
func main() {
flag.Parse()
if *prog == "" {
glog.Exitf("No -prog given")
}
if *httpPort == "" {
glog.Exit(makeDot(*prog, os.Stdout))
}
http.HandleFunc("/",
func(w http.ResponseWriter, r *http.Request) {
dot := exec.Command("dot", "-Tsvg")
in, err := dot.StdinPipe()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
out, err := dot.StdoutPipe()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = dot.Start()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = makeDot(*prog, in)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = in.Close()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Add("Content-type", "image/svg+xml")
w.WriteHeader(http.StatusOK)
_, err = io.Copy(w, out)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
err = dot.Wait()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
})
http.HandleFunc("/favicon.ico", mtail.FaviconHandler)
glog.Info(http.ListenAndServe(fmt.Sprintf(":%s", *httpPort), nil))
}
==> mtail-3.0.0~rc24.1/cmd/mgen/main.go <==
// Copyright 2013 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Command mgen generates mtail programs for fuzz testing by following a simple grammar.
package main
import (
crand "crypto/rand"
"flag"
"fmt"
"math/big"
mrand "math/rand"
)
var (
useCryptoRand = flag.Bool("use_crypto_rand", false, "Use crypto/rand instead of math/rand")
randSeed = flag.Int64("rand_seed", 1, "Seed to use for math.rand.")
minIterations = flag.Int64("min_iterations", 5000, "Minimum number of iterations before stopping program generation.")
)
type node struct {
alts [][]string
term string
}
var table = map[string]node{
"start": {[][]string{{"stmt_list"}}, ""},
"stmt_list": {[][]string{{""}, {"stmt_list", "stmt"}}, ""},
"stmt": {[][]string{
{"cond", "{", "stmt_list", "}"},
{"expr"},
{"decl"},
{"def_spec"},
{"deco_spec"},
{"next"},
{"const", "ID", "pattern_expr"}}, ""},
"expr": {[][]string{{"assign_expr"}}, ""},
"assign_expr": {[][]string{{"rel_expr"}, {"unary_expr", "=", "rel_expr"}, {"unary_expr", "+=", "rel_expr"}}, ""},
"rel_expr": {[][]string{{"additive_expr"}, {"additive_expr", "relop", "additive_expr"}}, ""},
"relop": {[][]string{{"<"}, {">"}, {"<="}, {">="}, {"=="}, {"!="}}, ""},
"additive_expr": {[][]string{{"unary_expr"}, {"additive_expr", "+", "unary_expr"}, {"additive_expr", "-", "unary_expr"}}, ""},
"unary_expr": {[][]string{{"postfix_expr"}, {"BUILTIN", "(", "arg_expr_list", ")"}}, ""},
"arg_expr_list": {[][]string{{""}, {"assign_expr"}, {"arg_expr_list", ",", "assign_expr"}}, ""},
"postfix_expr": {[][]string{{"primary_expr"}, {"postfix_expr", "++"}, {"postfix_expr", "[", "expr", "]"}}, ""},
"primary_expr": {[][]string{{"ID"}, {"CAPREF"}, {"STRING"}, {"(", "expr", ")"}, {"NUMERIC"}}, ""},
"cond": {[][]string{{"pattern_expr"}, {"rel_expr"}}, ""},
"pattern_expr": {[][]string{{"REGEX"}, {"pattern_expr", "+", "REGEX"}, {"pattern_expr", "+", "ID"}}, ""},
"decl": {[][]string{{"hide_spec", "type_spec", "declarator"}}, ""},
"hide_spec": {[][]string{{""}, {"hidden"}}, ""},
"declarator": {[][]string{{"declarator", "by_spec"}, {"declarator", "as_spec"}, {"ID"}, {"STRING"}}, ""},
"type_spec": {[][]string{{"counter"}, {"gauge"}}, ""},
"by_spec": {[][]string{{"by", "by_expr_list"}}, ""},
"by_expr_list": {[][]string{{"ID"}, {"STRING"}, {"by_expr_list", ",", "ID"}, {"by_expr_list", ",", "STRING"}}, ""},
"as_spec": {[][]string{{"as", "STRING"}}, ""},
"def_spec": {[][]string{{"def", "ID", "{", "stmt_list", "}"}}, ""},
"deco_spec": {[][]string{{"deco", "{", "stmt_list", "}"}}, ""},
"BUILTIN": {[][]string{{"strptime"}, {"timestamp"}, {"len"}, {"tolower"}}, ""},
"CAPREF": {[][]string{}, "$1"},
"REGEX": {[][]string{}, "/foo/"},
"STRING": {[][]string{}, "\"bar\""},
"ID": {[][]string{}, "quux"},
"NUMERIC": {[][]string{}, "37"},
}
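// emitter prints the generated tokens received on c, wrapping output lines at
// approximately 80 columns.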
func emitter(c chan string) {
var l int
for w := range c {
if w == "\n" {
fmt.Println()
}
if w == "" {
continue
}
if l+len(w)+1 >= 80 {
fmt.Println()
fmt.Print(w)
l = len(w)
} else {
if l != 0 {
w = " " + w
}
l += len(w)
fmt.Print(w)
}
}
}
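// rand returns a random int in [0, n), drawn from crypto/rand when the
// -use_crypto_rand flag is set, and from math/rand otherwise.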
func rand(n int) (r int) {
if *useCryptoRand {
a, _ := crand.Int(crand.Reader, big.NewInt(int64(n)))
r = int(a.Int64())
} else {
r = mrand.Intn(n)
}
return
}
func main() {
flag.Parse()
mrand.Seed(*randSeed)
c := make(chan string, 1)
go emitter(c)
runs := *minIterations
// Initial state
var states = []string{"start"}
// While the state stack is not empty
for len(states) > 0 && runs > 0 {
// Pop the next state
state := states[len(states)-1]
states = states[:len(states)-1]
//fmt.Println("state", state, "states", states)
// Look for the state transition
if n, ok := table[state]; ok {
// If there are state transition alternatives
//fmt.Println("n", n)
if len(n.alts) > 0 {
// Pick a state transition at random
a := rand(len(n.alts))
//fmt.Println("a", a, n.alts[a], len(n.alts[a]))
// Push the states picked onto the stack (in reverse order)
for i := 0; i < len(n.alts[a]); i++ {
//fmt.Println("i", i, n.alts[a][len(n.alts[a])-i-1])
states = append(states, n.alts[a][len(n.alts[a])-i-1])
}
//fmt.Println("states", states)
} else {
// If there is a terminal, emit it
//fmt.Println("(term)", state, n.term)
c <- n.term
}
} else {
// If the state doesn't exist in the table, treat it as a terminal, and emit it.
//fmt.Println("(state)", state, state)
c <- state
}
runs--
}
c <- "\n"
}
==> mtail-3.0.0~rc24.1/cmd/mtail/main.go <==
// Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package main
import (
"flag"
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/watcher"
)
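// seqStringFlag is a flag.Value that accumulates comma-separated values, and
// may be given multiple times on the commandline.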
type seqStringFlag []string
func (f *seqStringFlag) String() string {
return fmt.Sprint(*f)
}
func (f *seqStringFlag) Set(value string) error {
for _, v := range strings.Split(value, ",") {
*f = append(*f, v)
}
return nil
}
var logs seqStringFlag
var (
port = flag.String("port", "3903", "HTTP port to listen on.")
address = flag.String("address", "", "Host or IP address on which to bind HTTP listener")
progs = flag.String("progs", "", "Name of the directory containing mtail programs")
version = flag.Bool("version", false, "Print mtail version information.")
// Compiler behaviour flags
oneShot = flag.Bool("one_shot", false, "Compile the programs, then read the contents of the provided logs from start until EOF, print the values of the metrics store and exit. This is a debugging flag only, not for production use.")
compileOnly = flag.Bool("compile_only", false, "Compile programs only, do not load the virtual machine.")
dumpAst = flag.Bool("dump_ast", false, "Dump AST of programs after parse (to INFO log).")
dumpAstTypes = flag.Bool("dump_ast_types", false, "Dump AST of programs with type annotation after typecheck (to INFO log).")
dumpBytecode = flag.Bool("dump_bytecode", false, "Dump bytecode of programs (to INFO log).")
// VM Runtime behaviour flags
syslogUseCurrentYear = flag.Bool("syslog_use_current_year", true, "Patch yearless timestamps with the present year.")
overrideTimezone = flag.String("override_timezone", "", "If set, use the provided timezone in timestamp conversion, instead of UTC.")
emitProgLabel = flag.Bool("emit_prog_label", true, "Emit the 'prog' label in variable exports.")
// Ops flags
pollInterval = flag.Duration("poll_interval", 0, "Set the interval to poll all log files for data; must be positive, or zero to disable polling. With polling mode, only the files found at mtail startup will be polled.")
disableFsnotify = flag.Bool("disable_fsnotify", false, "EXPERIMENTAL: When enabled no fsnotify watcher is created, and mtail falls back to polling mode only. Only the files known at program startup will be polled.")
expiredMetricGcTickInterval = flag.Duration("expired_metrics_gc_interval", time.Hour, "interval between expired metric garbage collection runs")
staleLogGcTickInterval = flag.Duration("stale_log_gc_interval", time.Hour, "interval between stale log garbage collection runs")
// Debugging flags
blockProfileRate = flag.Int("block_profile_rate", 0, "Nanoseconds of block time before goroutine blocking events reported. 0 turns off. See https://golang.org/pkg/runtime/#SetBlockProfileRate")
mutexProfileFraction = flag.Int("mutex_profile_fraction", 0, "Fraction of mutex contention events reported. 0 turns off. See http://golang.org/pkg/runtime/#SetMutexProfileFraction")
)
func init() {
flag.Var(&logs, "logs", "List of log files to monitor, separated by commas. This flag may be specified multiple times.")
}
var (
// Version and Revision are supplied by the linker
Version string
Revision string
GoVersion = runtime.Version()
)
func buildInfo() string {
return fmt.Sprintf("mtail version %s git revision %s go version %s go arch %s go os %s", Version, Revision, GoVersion, runtime.GOARCH, runtime.GOOS)
}
func main() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "%s\n", buildInfo())
fmt.Fprintf(os.Stderr, "\nUsage:\n")
flag.PrintDefaults()
}
flag.Parse()
if *version {
fmt.Println(buildInfo())
os.Exit(1)
}
glog.Info(buildInfo())
glog.Infof("Commandline: %q", os.Args)
loc, err := time.LoadLocation(*overrideTimezone)
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't parse timezone %q: %s", *overrideTimezone, err)
os.Exit(1)
}
if *blockProfileRate > 0 {
glog.Infof("Setting block profile rate to %d", *blockProfileRate)
runtime.SetBlockProfileRate(*blockProfileRate)
}
if *mutexProfileFraction > 0 {
glog.Infof("Setting mutex profile fraction to %d", *mutexProfileFraction)
runtime.SetMutexProfileFraction(*mutexProfileFraction)
}
if *progs == "" {
glog.Exitf("mtail requires programs that in instruct it how to extract metrics from logs; please use the flag -progs to specify the directory containing the programs.")
}
if !(*dumpBytecode || *dumpAst || *dumpAstTypes || *compileOnly) {
if len(logs) == 0 {
glog.Exitf("mtail requires the names of logs to follow in order to extract logs from them; please use the flag -logs one or more times to specify glob patterns describing these logs.")
}
}
w, err := watcher.NewLogWatcher(*pollInterval, !*disableFsnotify)
if err != nil {
glog.Exitf("Failure to create log watcher: %s", err)
}
opts := []func(*mtail.Server) error{
mtail.ProgramPath(*progs),
mtail.LogPathPatterns(logs...),
mtail.BindAddress(*address, *port),
mtail.BuildInfo(buildInfo()),
mtail.OverrideLocation(loc),
mtail.ExpiredMetricGcTickInterval(*expiredMetricGcTickInterval),
mtail.StaleLogGcTickInterval(*staleLogGcTickInterval),
}
if *oneShot {
opts = append(opts, mtail.OneShot)
}
if *compileOnly {
opts = append(opts, mtail.CompileOnly)
}
if *dumpAst {
opts = append(opts, mtail.DumpAst)
}
if *dumpAstTypes {
opts = append(opts, mtail.DumpAstTypes)
}
if *dumpBytecode {
opts = append(opts, mtail.DumpBytecode)
}
if *syslogUseCurrentYear {
opts = append(opts, mtail.SyslogUseCurrentYear)
}
if !*emitProgLabel {
opts = append(opts, mtail.OmitProgLabel)
}
m, err := mtail.New(metrics.NewStore(), w, opts...)
if err != nil {
glog.Error(err)
os.Exit(1)
}
err = m.Run()
if err != nil {
glog.Error(err)
os.Exit(1)
}
}
==> mtail-3.0.0~rc24.1/docs/Building.md <==
# Building mtail
mtail is implemented in [Go](http://golang.org).
You will need to install Go 1.7 or higher.
## Go
[Clone](http://github.com/google/mtail) the source from GitHub into your `$GOPATH`. If you don't have a `$GOPATH`, see the next section.
```
go get -u github.com/google/mtail
cd $GOPATH/src/github.com/google/mtail
make
```
### For Go First-Timers
An excellent starting guide for people new to Go entirely is here: https://github.com/alco/gostart
If you want to skip the guide, these two references are short but to the point
on setting up the `$GOPATH` workspace:
* https://github.com/golang/go/wiki/SettingGOPATH
* https://github.com/golang/go/wiki/GOPATH#repository-integration-and-creating-go-gettable-projects
Finally, https://golang.org/doc/code.html is the original Go project
documentation for the philosophy on Go workspaces.
#### No Really, What is the TLDR
Put `export GOPATH=$HOME/go` in your `~/.profile`.
```
export GOPATH=$HOME/go
mkdir -p $GOPATH/src
```
then return to the build instructions above.
### Building
Unlike the recommendation for Go projects, `mtail` uses a `Makefile` to build the source. This ensures the generated code is up to date and that the binary is tagged with release information.
(You don't have to use the Makefile and can try `go install github.com/google/mtail/cmd/mtail` directly, but if you have problems you'll be asked for the release information.)
Having fetched the source, use `make` from the top of the source tree. This will install all the dependencies, and then build `mtail`. This assumes that your Go environment is already set up -- see above for hints on setting it up.
The resulting binary will be in `$GOPATH/bin`.
The unit tests can be run with `make test`, which invokes `go test`. The slower race-detector tests can be run with `make testrace`.
### Cross-compilation
The `Makefile` has a `crossbuild` target for building on different platforms. By default it builds for a few `amd64` targets:
```
% make crossbuild
mkdir -p build
gox --output="./build/mtail_v3.0.0-rc10_{{.OS}}_{{.Arch}}" -osarch="linux/amd64 windows/amd64 darwin/amd64" -ldflags "-X main.Version=v3.0.0-rc10-72-gcbea8a8 -X main.Revision=cbea8a810942be1129d58c37b27a55987a384776"
Number of parallel builds: 3
--> linux/amd64: github.com/google/mtail
--> darwin/amd64: github.com/google/mtail
--> windows/amd64: github.com/google/mtail
```
but you can override it with the environment variable `GOX_OSARCH` like so:
```
% make GOX_OSARCH=linux/arm crossbuild
mkdir -p build
gox --output="./build/mtail_v3.0.0-rc10_{{.OS}}_{{.Arch}}" -osarch="linux/arm" -ldflags "-X main.Version=v3.0.0-rc10-72-gcbea8a8 -X main.Revision=cbea8a810942be1129d58c37b27a55987a384776"
Number of parallel builds: 3
--> linux/arm: github.com/google/mtail
```
## No Go
You can still build and develop **mtail** with Docker.
```
docker build -t mtail .
docker run -it --rm mtail --help
```
**mtail** is not much use without a configuration file or logs to parse; you will need to mount a path containing them into the container, like so:
```
docker run -it --rm -v examples/linecount.mtail:/progs/linecount.mtail -v /var/log:/logs mtail -logtostderr -one_shot -progs /progs/linecount.mtail -logs /logs/messages.log
```
Or, via Docker Compose, e.g. with this `docker-compose.yml` snippet:
```yaml
services:
mtail:
image: mtail
command:
- -logtostderr
- -one_shot
- -progs
- /progs/linecount.mtail
- -logs
- /logs/messages.log
volumes:
- type: bind
source: /var/log
target: /logs
read_only: true
- type: bind
source: examples/linecount.mtail
target: /progs/linecount.mtail
```
## Contributing
Please use `gofmt` to format your code before committing. Emacs' go-mode has a lovely [gofmt-before-save](http://golang.org/misc/emacs/go-mode.el) function.
## Troubleshooting
If `make` gives you the following error:
```
../github.com/google/mtail/vm/lexer.go:28: too many errors
```
Then run `make` in that dependency's directory, and then run `make` again in `mtail`, like so:
```
cd ../github.com/google/mtail
make
cd -
make
```
mtail-3.0.0~rc24.1/docs/Deploying.md 0000664 0000000 0000000 00000014002 13435446430 0017057 0 ustar 00root root 0000000 0000000 # Introduction
mtail is intended to run as one instance per machine, serving as monitoring glue for multiple applications running on that machine. It runs one or more programs in a 1:1 mapping to those client applications.
## Configuration Overview
mtail is configured through commandline flags.
The `--help` flag will print a list of flags for configuring `mtail`.
(Flags may be prefixed with either `-` or `--`)
Basic flags necessary to start `mtail`:
* `--logs` is a comma-separated list of filenames to extract from, but can also be used multiple times, and each filename can be a [glob pattern](http://godoc.org/path/filepath#Match). Named pipes can be read from when passed as a filename to this flag. An example invocation follows this list.
* `--progs` is a directory path containing [mtail programs](Language.md). Programs must have the `.mtail` suffix.
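For example, a hypothetical invocation combining these flags (the paths are illustrative):
```
mtail --progs /etc/mtail --logs '/var/log/app/*.log' --logs /var/log/app/events.fifo
```
The glob is quoted so that the shell passes it through unexpanded, letting `mtail` match against it rather than receiving a fixed list of files from the shell at startup.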
mtail runs an HTTP server on port 3903, which can be changed with the `--port` flag.
# Details
## Launching mtail
```
mtail --progs /etc/mtail --logs /var/log/syslog --logs /var/log/ntp/peerstats
```
`mtail` will start to read the specified logs from their current end-of-file,
and read new updates appended to these logs as they arrive. It will attempt to
correctly handle log files that have been rotated by renaming or symlink
changes.
### Getting the logs in
Use `--logs` multiple times to pass in glob patterns that match the logs you
want to tail. This includes named pipes.
### Polling the file system
If your system is not supported by `fsnotify` then mtail will fall back to polling mode. You can also specify this explicitly with the `--poll_interval` flag, for example
```
mtail --progs /etc/mtail --logs /var/log/syslog --poll_interval 250ms
```
### Disabling `fsnotify`
In some cases, the log watcher can not process update events from the kernel fast enough and you may see
```
fsnotify error: fsnotify queue overflow
```
errors in the `mtail` log. This will also manifest as counters not updating anymore.
If your incoming log rate is high enough to trigger this condition, try forcing mtail to only use polling mode by adding the flag `--disable_fsnotify`. The poll interval defaults to 250ms, but can be changed with the `--poll_interval` flag, for example
```
mtail --progs /etc/mtail --logs /var/log/syslog --disable_fsnotify --poll_interval 50ms
```
### Setting garbage collection intervals
`mtail` accumulates metrics and log files during its operation. By default, *every hour* a garbage collection pass runs, looking for expired metrics and stale log files.
An expired metric is any metric that hasn't been updated in a time specified by a `del after` form in a program.
A stale log file is any log being watched that hasn't been read from in 24 hours.
The interval between garbage collection runs can be changed on the commandline with the `--expired_metrics_gc_interval` and `--stale_log_gc_interval` flags, which accept a time duration string compatible with the Go [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) function.
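For example, a sketch running both garbage collection passes on non-default intervals (the values are illustrative):
```
mtail --progs /etc/mtail --logs /var/log/syslog --expired_metrics_gc_interval 30m --stale_log_gc_interval 2h
```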
### Launching under Docker
`mtail` can be run as a sidecar process if you expose an application container's logs with a volume.
`docker run -d --name myapp -v /var/log/myapp myapp`
for example exports a volume called `/var/log/myapp` (named the same as the hypothetical path where `myapp`'s logs are written).
Then launch the `mtail` docker image and pass in the volume:
```
docker run -dP \
    --name myapp-mtail \
    --volumes-from myapp \
    -v examples:/etc/mtail \
    mtail --logs /var/log/myapp --progs /etc/mtail
```
This example fetches the volumes from the `myapp` container, and mounts them in
the mtail container (which we've called `myapp-mtail`). We also mount the
`examples` directory as `/etc/mtail` in the container. We launch `mtail` with
the `logs` and `progs` flags to point to our two mounted volumes.
The `-P` flag ensures `myapp-mtail`'s port 3903 is exposed for collection;
refer to `docker ps` to find out where it's mapped to on the host.
## Writing the programme
Read the [Programming Guide](Programming-Guide.md) for instructions on how to write an `mtail` program.
## Getting the Metrics Out
### Pull based collection
Point your collection tool at `localhost:3903/json` for JSON format metrics.
Prometheus can be directed to the `/metrics` endpoint for the Prometheus text-based format, as sketched below.
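For example, a minimal Prometheus scrape configuration sketch (the job name is arbitrary, and `/metrics` is Prometheus's default scrape path so it needs no explicit `metrics_path`):
```yaml
scrape_configs:
  - job_name: mtail
    static_configs:
      - targets: ['localhost:3903']
```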
### Push based collection
Use the `collectd_socketpath` or `graphite_host_port` flags to enable pushing to a collectd or graphite instance.
Configure collectd on the same machine to use the unixsock plugin, and set `collectd_socketpath` to that unix socket.
```
mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --collectd_socketpath=/var/run/collectd-unixsock
```
Set `graphite_host_port` to be the host:port of the carbon server.
```
mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --graphite_host_port=localhost:9999
```
Likewise, set `statsd_hostport` to the host:port of the statsd server.
Additionally, the flag `metric_push_interval_seconds` can be used to configure the push frequency. It defaults to 60, i.e. a push every minute.
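For example, a hypothetical invocation pushing to a local statsd every 10 seconds:
```
mtail --progs /etc/mtail --logs /var/log/syslog --statsd_hostport=localhost:8125 --metric_push_interval_seconds=10
```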
## Setting a default timezone
The `--override_timezone` flag sets the timezone that `mtail` uses for timestamp conversion. By default, `mtail` assumes timestamps are in UTC.
To use the machine's local timezone, `--override_timezone=Local` can be used.
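For example, assuming the flag accepts any timezone name loadable by Go's `time.LoadLocation` (as `Local` suggests), timestamps in a log written in US Eastern time could be parsed with:
```
mtail --progs /etc/mtail --logs /var/log/syslog --override_timezone=America/New_York
```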
## Troubleshooting
Lots of state is logged to the log file, by default in `/tmp/mtail.INFO`. See [Troubleshooting](Troubleshooting.md) for more information.
N.B. Oneshot mode (the `one_shot` flag on the commandline) can be used to check
that a program is correctly reading metrics from a log, but with the following
caveats:
* Unlike normal operations, oneshot mode will read the logs from the start of
the file to the end, then close them -- it does not continuously tail the
file
* The metrics will be printed to standard out when the logs are finished being
read from.
* mtail will exit after the metrics are printed out.
This mode is useful for debugging the behaviour of `mtail` programs and
possibly for permissions checking.
mtail-3.0.0~rc24.1/docs/Interoperability.md 0000664 0000000 0000000 00000005332 13435446430 0020460 0 ustar 00root root 0000000 0000000 # Introduction
mtail is only part of a monitoring ecosystem -- it fills the gap between applications that export no metrics of their own in a [common protocol](Metrics.md) and the timeseries database.
# Details
mtail actively exports (i.e. pushes) to the following timeseries databases:
* [collectd](http://collectd.org/)
* [graphite](http://graphite.wikidot.com/start)
* [statsd](https://github.com/etsy/statsd)
mtail is also a passive exporter (i.e. pull- or scrape-based), able to be scraped by:
* [Prometheus](http://prometheus.io)
* Google's Borgmon
# Logs Analysis
While `mtail` does a form of logs analysis, it does _not_ do any copying,
indexing, or searching of log files for data mining applications. It is only
intended for real- or near-time monitoring data for the purposes of performance
measurement and alerting.
Instead, see logs ingestion and analysis systems like
* [Logstash](https://www.elastic.co/products/logstash)
* [Graylog](https://www.graylog.org/)
if that is what you need.
# Prometheus Exporter Metrics
https://prometheus.io/docs/instrumenting/writing_exporters/ describes useful metrics for a Prometheus exporter to export. `mtail` does not follow that guide, for these reasons.
The exporter model described in that document is for active proxies between an application and Prometheus. The expectation is that when Prometheus scrapes the proxy (the exporter) that it then performs its own scrape of the target application, and translates the results back into the Prometheus exposition format. The time taken to query the target application is what is exported as `X_scrape_duration_seconds` and its availability as `X_up`.
`mtail` doesn't work like that. It is reacting to the input log events, not scrapes, and so there is no concept of how long it takes to query the application or if it is available. There are things that, if you squint, look like applications in `mtail`, the virtual machine programs. They could be exporting their time to process a single line, and are `up` as long as they are not crashing on input. This doesn't translate well into the exporter metrics meanings though.
TODO(jaq): Instead, mtail will export a histogram of the runtime per line of each VM program.
`mtail` doesn't export `mtail_up` or `mtail_scrape_duration_seconds` because they are exactly equivalent\* to the synthetic metrics that Prometheus creates automatically: https://prometheus.io/docs/concepts/jobs_instances/
\* A scrape duration measured in mtail versus one measured by Prometheus would differ by the network round trip time, TCP setup time, and send/receive queue time. For practical purposes you can ignore this, as the usefulness of a scrape duration metric is not in its absolute value, but in how it changes over time.
mtail-3.0.0~rc24.1/docs/Language.md 0000664 0000000 0000000 00000041702 13435446430 0016657 0 ustar 00root root 0000000 0000000 # mtail Language
## Description
As `mtail` is designed to tail log files and apply regular expressions to new
log lines to extract data, the language naturally follows this pattern-action
style.
It resembles another, more famous pattern-action language, that of AWK.
This page errs on the side of a language specification and reference. See the
[Programming Guide](Programming-Guide.md) for a gentler introduction to writing
`mtail` programs.
## Program Execution
`mtail` runs all programs on every line received by the log tailing subsystem.
The rough model of this looks like:
```
for line in lines:
for regex in regexes:
if match:
do something
```
Each program operates once on a single line of log data, and then terminates.
## Program Structure
An `mtail` program consists of exported variable definitions, pattern-action
statements, and optional decorator definitions.
```
exported variable
pattern {
action statements
}
def decorator {
pattern and action statements
}
```
## Exported Variables
`mtail`'s purpose is to extract information from logs and deliver them to a
monitoring system. Thus, variables must be named for export.
Variables, which have type `counter` or `gauge`, must be declared before their
use.
```
counter line_count
gauge queue_length
```
They can be exported with a different name, with the `as` keyword, if one wants
to use characters that would cause a parse error. This example causes the metric
to be named `line-count` in the collecting monitoring system.
```
counter line_count as "line-count"
```
Variables can be dimensioned with one or more axes, with the `by` keyword,
creating multidimensional data. Dimensions can be used for creating histograms,
as well.
```
counter bytes by operation, direction
counter latency_ms by bucket
```
Putting the `hidden` keyword at the start of the declaration means it won't be
exported, which can be useful for storing temporary information. This is the
only way to share state between each line being processed.
```
hidden counter login_failures
```
## Pattern/Action form.
`mtail` programs look a lot like `awk` programs. They consist of a conditional
expression followed by a brace-enclosed block of code:
```
COND {
ACTION
}
```
`COND` is a conditional expression. It can be a regular expression, which if
matched enters the action block, or a relational expression as you might
encounter in a C program's `if` statement (but without the `if`, it is
implicit.)
```
/foo/ {
ACTION1
}
variable > 0 {
ACTION2
}
/foo/ && variable > 0 {
ACTION3
}
```
In the above program, ACTION1 is taken on each line input if that line matches
the word `foo`, and ACTION2 is taken on each line if, when that line is read, the
variable `variable` is greater than 0. ACTION3 occurs if both are true.
The action statements must be wrapped in curly braces, i.e. `{}`. `mtail`
programs have no single-line statement conditionals like C.
### Regular Expressions
`mtail` supports RE2-style regular expression syntax, limited to what is
supported by the Go [regexp](https://godoc.org/regexp) implementation.
#### Constant pattern fragments
To re-use parts of regular expressions, you can assign them to a `const` identifier:
```
const PREFIX /^\w+\W+\d+ /
PREFIX {
ACTION1
}
PREFIX + /foo/ {
ACTION2
}
```
In this example, ACTION1 is done for every line that starts with the prefix
regex, and ACTION2 is done for the subset of those lines that also contain
'foo'.
Pattern fragments like this don't need to be prefixes, they can be anywhere in the expression.
```
counter maybe_ipv4
const IPv4 /(?P<ip>\d+\.\d+\.\d+\.\d+)/
/something with an / + IPv4 + / address/ {
maybe_ipv4++
}
```
See [dhcpd.mtail](../examples/dhcpd.mtail) for more examples of this.
See also the section on decorators below for improving readability of
expressions that are only matched once.
### Conditionals
More complex expressions can be built up from relational expressions and other
pattern expressions.
#### Operators
The following relational operators are available in `mtail`:
* `<` less than
* `<=` less than or equal
* `>` greater than
* `>=` greater than or equal
* `==` is equal
* `!=` is not equal
* `=~` pattern match
* `!~` negated pattern match
* `||` logical or
* `&&` logical and
* `!` unary logical negation
The following arithmetic operators are available in `mtail`:
* `|` bitwise or
* `&` bitwise and
* `^` bitwise xor
* `+` addition
* `-` subtraction
* `*` multiplication
* `/` division
* `<<` bitwise shift left
* `>>` bitwise shift right
* `**` exponent
The following assignment operators act on exported variables; a short sketch follows this list.
* `=` assignment
* `++` increment
* `+=` increment by
* `--` decrement
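A minimal sketch using these operators (the metric names and patterns are illustrative):
```
counter errors_total
counter bytes_total
gauge queue_length

/error sending (?P<bytes>\d+) bytes, queue at (?P<depth>\d+)/ {
  errors_total++          # increment
  bytes_total += $bytes   # increment by
  queue_length = $depth   # assignment
}
/dequeue/ {
  queue_length--          # decrement
}
```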
#### `else` Clauses
When a conditional expression does not match, action can be taken as well:
```
/foo/ {
ACTION1
} else {
ACTION2
}
```
Else clauses can be nested. There is no ambiguity with the dangling-else
problem, as `mtail` programs must wrap all block statements in `{}`.
#### `otherwise` clauses
The `otherwise` keyword can be used as a conditional statement. It matches if no
preceding conditional in the current scope has matched. This behaves similarly
to the `default` clause in a C `switch` statement.
```
/foo/ {
/foo1/ {
ACTION1
}
/foo2/ {
ACTION2
}
otherwise {
ACTION3
}
}
```
In this example, ACTION3 will be executed if neither `/foo1/` nor `/foo2/` match
on the input, but `/foo/` does.
### Actions
#### Incrementing a Counter
The simplest `mtail` program merely counts lines read:
```
/$/ {
line_count++
}
```
This program instructs `mtail` to increment the `line_count` counter variable on
every line received (specifically anytime an end-of-line is matched.)
#### Capture Groups
Regular expressions in patterns can contain capture groups -- subexpressions
wrapped in parentheses. These can be referred to in the action block to extract
data from the line being matched.
For example, part of a program that can extract from `rsyncd` logs may want to
break down transfers by operation and module.
```
counter transfers_total by operation, module
/(?P<operation>\S+) (\S+) \[\S+\] (\S+) \(\S*\) \S+ (?P<bytes>\d+)/ {
transfers_total[$operation][$3]++
}
```
Or, the value of the counter can be increased by the value of a capture group:
```
counter bytes_total by operation, module
/(?P<operation>\S+) (\S+) \[\S+\] (\S+) \(\S*\) \S+ (?P<bytes>\d+)/ {
bytes_total[$operation][$3] += $bytes
}
```
Numeric capture groups address subexpressions in the match result as you might
expect from regular expression groups in other languages, like awk and perl --
e.g. the expression `$3` refers to the third capture group in the regular
expression.
Named capture groups can be referred to by their name as indicated in the
regular expression using the `?P<name>` notation, as popularised by the Python
regular expression library -- e.g. `$bytes` refers to `(?P<bytes>\d+)` in the
examples above.
Capture groups can be used in the same expression that defines them, for example
in this expression that matches and produces `$x`, then compares against that
value.
```
/(?P<x>\d+)/ && $x > 1 {
nonzero_positives++
}
```
#### Timestamps
It is also useful to timestamp a metric with the time the application thought an
event occurred. Logs typically prefix the log line with a timestamp string,
which can be extracted and then parsed into a timestamp internally, with the
`strptime` builtin function.
A regular expression that extracts the timestamp in boring old syslog format
looks like:
```
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 02 15:04:05")
...
}
```
Buyer beware! The format string used by `mtail` is the same as the [Go
time.Parse() format string](https://godoc.org/time#Parse), which is completely
unlike that used by C's strptime. The format string must always describe the
reference time: the 2nd of January 2006 at 3:04:05 PM. See the documentation for the **ANSIC** format in
the above link for more details. **NOTE** that *unlike* Go's `time.Parse()` (and
*like* C's) the format string is the *second* argument to this builtin function.
> NOTE: without a `strptime()` call, `mtail` will default to using the current
> system time for the timestamp of the event. This may be satisfactory for
> near-real-time logging.
#### Nested Actions
It is of course possible to nest more pattern-actions within actions. This lets
you factor out common parts of a match expression and deal with per-message
actions separately.
For example, parsing syslog timestamps is something you may only wish to do
once, as it's expensive to match (and difficult to read!)
```
counter foo
counter bar
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 02 15:04:05")
/foo/ {
foo++
}
/bar/ {
bar++
}
}
```
This will result in both foo and bar counters being timestamped with the current
log line's parsed time, once they match a line.
#### Decorated actions
Decorated actions are an inversion of nested actions. They allow the program to
define repetitive functions that perform the same extraction across many
different actions.
For example, most log file formats start with a timestamp prefix. To reduce
duplication of work, decorators can be used to factor out the common work of
extracting the timestamp. For example, to define a decorator, use the `def`
keyword:
```
def syslog {
/(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 2 15:04:05")
next
}
}
```
The decorator definition starts and ends in a curly-braced block, and looks like
a normal pattern/action as above. The new part is the `next` keyword, which
indicates to `mtail` where to jump into the *decorated* block.
To use a decorator:
```
@syslog {
/some event/ {
variable++
}
}
```
The `@` notation, familiar to Python programmers, denotes that this block is
"wrapped" by the `syslog` decorator. The syslog decorator will be called on each
line first, which extracts the timestamp of the log line. Then, `next` causes
the wrapped block to execute, so then `mtail` matches the line against the
pattern `some event`, and if it does match, increments `variable`.
#### Types
`mtail` has a few internal types on two dimensions.
The first dimension has no bearing on the behaviour of `mtail`, but changes how
the variables are exported:
* `counter` assumes that the variable is a monotonically increasing measure,
so that computations on sampled data like rates can be performed without
loss. Use for counting events or summing up bytes transferred.
* `gauge` assumes that the variable can be set to any value at any time,
signalling that rate computations are risky. Use for measures like queue
length at a point in time.
The second dimension is the internal representation of a value, which is used by
`mtail` to attempt to generate efficient bytecode.
* Integer
* Float
* Bool
* String
Some of these types can only be used in certain locations -- for example, you
can't increment a counter by a string, but `mtail` will fall back to an attempt
to do so, logging an error if a runtime type conversion fails.
These types are usually inferred from use, but can be influenced by the
programmer with builtin functions. Read on.
#### Builtin functions
`mtail` contains some builtin functions for help with extracting information and
manipulating state.
There are "pure" builtin functions, in that they have no side effects on the
program state.
* `len(x)`, a function of one string argument, which returns the length of the
string argument `x`.
* `tolower(x)`, a function of one string argument, which returns the input `x`
in all lowercase.
There are type coercion functions, useful for overriding the type inference made
by the compiler if it chooses badly. (If the choice is egregious, please file a
bug!)
* `int(x)`, a function of one argument performs type conversion to integer. If
`x` is a type that can be converted to integer, it does so. If the type of
`x` cannot be converted to an integer, a compile error is triggered. If the
value of `x` cannot be converted to an integer, then a runtime error is
triggered.
* `float(x)`, a function of one argument that performs type conversion to
floating point numbers. The same rules apply as for `int()` above.
* `string(x)`, a function of one argument that performs conversion to string
values.
* `strtol(x, y)`, a function of two arguments, which converts a string `x` to
an integer using base `y`. Useful for translating octal or hexadecimal
values in log messages; see the sketch below.
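For example, a sketch using `strtol` to interpret an octal file mode from a log line (the pattern and metric are illustrative):
```
counter chmods_total by mode

# The mode is logged in octal, e.g. "mode 0644"; strtol interprets it in base 8.
/chmod mode (?P<mode>\d+)/ {
  chmods_total[strtol($mode, 8)]++
}
```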
A few builtin functions exist for manipulating the virtual machine state as side
effects for the metric export.
* `getfilename()`, a function of no arguments, which returns the filename from
which the current log line input came.
* `settime(x)`, a function of one integer argument, which sets the current
timestamp register.
* `strptime(x, y)`, a function of two string arguments, which parses the
timestamp in the string `x` with the parse format string in `y`, and sets
the current timestamp register. The parse format string must follow [Go's
time.Parse() format string](http://golang.org/src/pkg/time/format.go)
* `timestamp()`, a function of no arguments, which returns the current
timestamp. This is undefined if neither `settime` nor `strptime` has been
called previously.
The **current timestamp register** refers to `mtail`'s idea of the time
associated with the current log line. This timestamp is used when the variables
are exported to the upstream collector. The value defaults to the time that the
log line arrives in `mtail`, and can be changed with the `settime()` or
`strptime()` builtins.
User defined functions are not supported, but read on to Decorated Actions for
how to reuse common code.
#### Numerical capture groups and Metric type information
By limiting the pattern of a capturing group to only numeric characters, the
programmer can provide hints to `mtail` about the type of an expression. For
example, in the regular expression
`/(\d+)/`
the first capture group can only match digits, and so the compiler will infer
that this is an integer match.
`/(\d+\.\d+)/`
looks like it matches floating point numbers, and so the compiler will infer
that this is of type float.
> NOTE: In the expression above, the dot is escaped. A regular expression
> operator `.` matches every character and so the inference assumes that the
> type of '.' is a string.
The compiler performs type inference on the expressions that use the capture
groups, and the metrics they are ultimately assigned to, and will assign a type
(either integer or floating point) to the metrics exported.
Thus in a program like:
```
gauge i
gauge f
/(\d+)/ {
i = $1
}
/(\d+\.\d+)/ {
f = $1
}
```
the metric `i` will be of type Int and the metric `f` will be of type Float.
The advantage of limiting pattern matches to specific values is that `mtail` can
generate faster bytecode if it knows at compile-time the types to expect. If
`mtail` can't infer the value types, they default to `String` and `mtail` will
attempt a value conversion at runtime if necessary. Runtime conversion errors
will be emitted to the standard INFO log, and terminate program execution for
that log line.
#### Variable Storage Management
`mtail` performs no implicit garbage collection in the metric storage. The
program can hint to the virtual machine that a specific datum in a dimensioned
metric is no longer going to be used with the `del` keyword.
```
gauge duration by session
hidden gauge session_start by session
/end/ {
duration[$session] = timestamp() - session_start[$session]
del session_start[$session]
}
```
In this example, a hidden metric is used to record some internal state. It will
grow unbounded as the number of sessions increases. If the programmer knows that
the `/end/` pattern is the last time a session will be observed, then the datum
at `$session` will be freed, which keeps `mtail` memory usage under control and
will improve search time for finding dimensioned metrics.
`del` can be modified with the `after` keyword, signalling that the metric
should be deleted after some period of no activity. For example, the
expression
```
del session_start[$session] after 24h
```
would mean that the datum indexed by `$session` will be removed 24 hours after the last update is recorded.
The del-after form takes any time period supported by the Go
[`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration) function.
Expiry is only processed once every hour, so durations shorter than 1h won't take effect until the next hour has passed.
### Stopping the program
The program runs from start to finish once per line, but sometimes you may want to stop the program early. For example, if the log filename does not match a pattern, or some stateful metric indicates work shouldn't be done.
For this purpose, the `stop` keyword terminates the program immediately.
The simplest and most useless mtail program is thus:
```
stop
```
But for more useful situations, perhaps stopping if the log filename doesn't match a pattern:
```
getfilename() !~ /apache.access.log/ {
stop
}
...
```
mtail-3.0.0~rc24.1/docs/Metrics.md 0000664 0000000 0000000 00000005363 13435446430 0016545 0 ustar 00root root 0000000 0000000 # Introduction
A metric is a data type that describes a measurement.
It has a **name**, a **value**, and a **time** that the measurement was taken.
It also has **units**, so that measurements can be compared and calculated with.
It has a **class**, so that tools can automatically perform some aggregation operations on collections of measurements.
It has a **type**, describing the sort of data it contains: floating point or integer values.
Finally, it has some **labels**, so that additional information about the measurement can be added to assist queries later. Labels are key/value pairs, where the value may change for a specific measurement, but the keys remain constant across all measurements in a metric.
## Classes of Metrics
The class of a Metric can be:
* a monotonically increasing counter, that allows the calculation of rates of change
* a variable gauge, that records instantaneous values
Counters are very powerful as they are resistant to errors caused by sampling frequency. Typically used to accumulate events, they can show changes in behaviour through the calculation of rates, and rates of rates. They can be summed across a group and that sum also derived. Counter resets can indicate crashes or restarts.
Gauges are less powerful as their ability to report is dependent on the sampling rate -- spikes in the timeseries can be missed. They record queue lengths, resource usage and quota, and other sized measurements.
(N.B. Gauges can be simulated with two counters.)
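For example, a sketch of that two-counter simulation for a queue-length gauge (the patterns are illustrative); the collection system then derives the gauge as `enqueues_total - dequeues_total`, keeping the counters' resistance to sampling error:
```
counter enqueues_total
counter dequeues_total

/item enqueued/ {
  enqueues_total++
}
/item dequeued/ {
  dequeues_total++
}
```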
## Types of data
`mtail` records either integer or floating point values as the value of a metric. By default, all metrics are integer, unless the compiler can infer a floating point type.
Inference is done through the type checking pass of the compiler. It uses knowledge of the expressions written in the program as well as heuristics on capturing groups in the regular expressions given.
For example, in the program:
```
counter a
/(\S+)/ {
a = $1
}
```
the compiler will assume that `a` is of an integer type. With more information about the matched text:
```
counter a
/(\d+\.\d+)/ {
a = $1
}
```
the compiler can figure out that the capturing group reference `$1` contains digit and decimal point characters, and is likely then a floating point type.
## Labelling
Labels are added as dimensions on a metric:
```
counter a by x, y, z
```
creates a three dimensional metric called `a`, with each dimension key `x`, `y`, `z`.
Setting a measurement by label is done with an indexed expression:
```
a[1, 2, 3]++
```
which has the effect of incrementing the metric `a` when `x = 1`, `y = 2`, and `z = 3`.
Dimensions, aka *labels* in the metric name, can be used to export rich data to
the metrics collector, for potential slicing and aggregation by each dimension.
mtail-3.0.0~rc24.1/docs/Programming-Guide.md 0000664 0000000 0000000 00000031656 13435446430 0020460 0 ustar 00root root 0000000 0000000 # Introduction
`mtail` is very simple and thus limits what is possible with metric
manipulation, but is very good for getting values into the metrics. This page
describes some common patterns for writing useful `mtail` programs.
## Changing the exported variable name
`mtail` only lets you use "C"-style identifier names in the program text, but
you can rename the exported variable as it gets presented to the collection
system if you don't like that.
```
counter connection_time_total as "connection-time_total"
```
## Reusing pattern pieces
If the same pattern gets used over and over, then define a constant and avoid
having to check the spelling of every occurrence.
```
# Define some pattern constants for reuse in the patterns below.
const IP /\d+(\.\d+){3}/
const MATCH_IP /(?P<ip>/ + IP + /)/
...
# Duplicate lease
/uid lease / + MATCH_IP + / for client .* is duplicate on / {
duplicate_lease++
}
```
## Parse the log line timestamp
`mtail` attributes a timestamp to each event.
If no timestamp exists in the log and none is explicitly parsed by the mtail program, then mtail will use the current system time as the time of the event.
Many log files include the timestamp of the event as reported by the logging program. To parse the timestamp, use the `strptime` function with
a [Go time.Parse layout string](https://golang.org/pkg/time/#Parse).
```
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ {
strptime($date, "Jan _2 15:04:05")
```
N.B. If no timestamp parsing is done, then the reported timestamp of the event
may add some latency to the measurement of when the event really occurred.
Between your program logging the event, and mtail reading it, there are many
moving parts: the log writer, some system calls perhaps, some disk IO, some
more system calls, some more disk IO, and then mtail's virtual machine
execution. While normally negligible, it is worth stating in case users notice
offsets in time between what mtail reports and the event really occurring. For
this reason, it's recommended to always use the log file's timestamp if one is
available.
## Common timestamp parsing
The decorator syntax was designed with common timestamp parsing in mind. It
allows the code for getting the timestamp out of the log line to be reused and
make the rest of the program text more readable and thus maintainable.
```
# The `syslog' decorator defines a procedure. When a block of mtail code is
# "decorated", it is called before entering the block. The block is entered
# when the keyword `next' is reached.
def syslog {
/(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
# If the legacy_date regexp matched, try this format.
len($legacy_date) > 0 {
strptime($legacy_date, "Jan _2 15:04:05")
}
# If the RFC3339 style matched, parse it this way.
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05-0700")
}
# Call into the decorated block
next
}
}
```
This can be used around any blocks later in the program.
```
@syslog {
/foo/ {
...
}
/bar/ {
}
} # end @syslog decorator
```
Both the foo and bar pattern actions will have the syslog timestamp parsed from
them before being called.
## Conditional structures
The `/pattern/ { action }` idiom is the normal conditional control flow structure in `mtail` programs.
If the pattern matches, then the actions in the block are executed. If the
pattern does not match, the block is skipped.
The `else` keyword allows the program to perform action if the pattern does not match.
```
/pattern/ {
action
} else {
alternative
}
```
The example above would execute the "alternative" block if the pattern did not
match the current line.
The `otherwise` keyword can be used to create control flow structure
reminiscent of the C `switch` statement. In a containing block, the
`otherwise` keyword indicates that this block should be executed only if no
other pattern in the same scope has matched.
```
{
/pattern1/ { _action1_ }
/pattern2/ { _action2_ }
otherwise { _action3_ }
}
```
In this example, "action3" would execute if both pattern1 and pattern2 did not
match the current line.
## Storing intermediate state
Hidden metrics are metrics that can be used for internal state and are never
exported outside of `mtail`. For example if the time between pairs of log
lines needs to be computed, then a hidden metric can be used to record the
timestamp of the start of the pair.
**Note** that the `timestamp` builtin _requires_ that the program has set a log
line timestamp with `strptime` or `settime` before it is called.
```
hidden gauge connection_time by pid
...
# Connection starts
/connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ {
connections_total++
# Record the start time of the connection, using the log timestamp.
connection_time[$pid] = timestamp()
}
...
# Connection summary when session closed
/sent (?P<sent>\d+) bytes received (?P<received>\d+) bytes total size \d+/ {
# Sum total bytes across all sessions for this process
bytes_total["sent"] += $sent
bytes_total["received"] += $received
# Count total time spent with connections open, according to the log timestamp.
connection_time_total += timestamp() - connection_time[$pid]
# Delete the datum referenced in this dimensional metric. We assume that
# this will never happen again, and hint to the VM that we can garbage
# collect the memory used.
del connection_time[$pid]
}
```
In this example, the connection timestamp is recorded in the hidden variable
`connection_time` keyed by the "pid" of the connection. Later when the
connection end is logged, the delta between the current log timestamp and the
start timestamp is computed and added to the total connection time.
In this example, the average connection time can be computed in a collection
system by taking the ratio of the number of connections (`connections_total`)
over the time spent (`connection_time_total`). For example
in [Prometheus](http://prometheus.io) one might write:
```
connection_time_10s_moving_avg =
rate(connections_total[10s])
/ on job
rate(connection_time_total[10s])
```
Note also that the `del` keyword is used to signal to `mtail` that the
connection_time value is no longer needed. This will cause `mtail` to delete
the datum referenced by that label from this metric, keeping `mtail`'s memory
usage under control and speeding up labelset search time (by reducing the
search space!)
Alternatively, the statement `del connection_time[$pid] after 72h` would do the
same, but only if `connection_time[$pid]` is not changed for 72 hours. This
form is more convenient when the connection close event is lossy or difficult
to determine.
See [state](state.md) for more information.
## Computing moving averages
`mtail` deliberately does not implement complex mathematical functions. It
wants to process a log line as fast as it can. Many other products on the
market already do complex mathematical functions on timeseries data,
like [Prometheus](http://prometheus.io) and [Riemann](http://riemann.io), so
`mtail` defers that responsibility to them. (Do One Thing, and Do It Pretty
Good.)
But say you still want to do a moving average in `mtail`. First note that
`mtail` has no history available, only point in time data. You can update an
average with a weighting to make it an exponential moving average (EMA).
```
gauge average
/some (\d+) match/ {
# Use a smoothing constant 2/(N + 1) to make the average over the last N observations
average = 0.9 * $1 + 0.1 * average
}
```
However this doesn't take into account the likely situation that the matches arrive irregularly (the time interval between them is not constant). Unfortunately the formula for this requires the `exp()` function (`e^N`), as described here: http://stackoverflow.com/questions/1023860/exponential-moving-average-sampled-at-varying-times . I recommend you defer this computation to the collection system.
## Histograms
Histograms are preferred over averages in many monitoring howtos, blogs, talks,
and rants, in order to give the operators better visibility into the behaviour
of a system.
At the moment, `mtail` does not have first class support for a distribution
type, but a histogram can be easily created by making one label on a
dimensioned metric the name of the histogram bucket. In order to keep bucket label
consistency, we have to increment by 0 for non-matching buckets.
```
counter apache_http_request_time_seconds_bucket by le, server_port, handler, request_method, request_status, request_protocol
...
###
# HTTP Requests with histogram buckets.
#
apache_http_request_time_seconds_count[$server_port][$handler][$request_method][$request_status][$request_protocol]++
# These statements "fall through", so the histogram is cumulative. The
# collecting system can compute the percentile bands by taking the ratio of
# each bucket value over the final bucket.
# 5ms bucket.
$time_us <= 5000 {
apache_http_request_time_seconds_bucket["0.005"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
} else {
apache_http_request_time_seconds_bucket["0.005"][$server_port][$handler][$request_method][$request_status][$request_protocol] += 0
}
# 10ms bucket.
$time_us <= 10000 {
apache_http_request_time_seconds_bucket["0.01"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
} else {
apache_http_request_time_seconds_bucket["0.01"][$server_port][$handler][$request_method][$request_status][$request_protocol] += 0
}
# 25ms bucket.
$time_us <= 25000 {
apache_http_request_time_seconds_bucket["0.025"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
} else {
apache_http_request_time_seconds_bucket["0.025"][$server_port][$handler][$request_method][$request_status][$request_protocol] += 0
}
# 50ms bucket.
$time_us <= 50000 {
apache_http_request_time_seconds_bucket["0.05"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
} else {
apache_http_request_time_seconds_bucket["0.05"][$server_port][$handler][$request_method][$request_status][$request_protocol] += 0
}
...
# 10s bucket.
$time_us <= 10000000 {
apache_http_request_time_seconds_bucket["10"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
} else {
apache_http_request_time_seconds_bucket["10"][$server_port][$handler][$request_method][$request_status][$request_protocol] += 0
}
```
This example creates a histogram with a bucket label "le" that contains a count
of all requests that were "less than" the bucket label's value.
In tools like [Prometheus](http://prometheus.io) these can be manipulated in
aggregate for computing percentiles of response latency.
```
apache_http_request_time:rate10s = rate(apache_http_request_time_seconds_bucket[10s])
apache_http_request_time_seconds_count:rate10s = rate(apache_http_request_time_seconds_count[10s])
apache_http_request_time:percentiles =
apache_http_request_time:rate10s
/ on (job, port, handler, request_method, request_status, request_protocol)
apache_http_request_time_seconds_count:rate10s
```
This new timeseries can be plotted to see the percentile bands of each bucket,
for example to visualise the distribution of requests moving between buckets as
the performance of the server changes.
Further, these timeseries can be used
for
[Service Level](https://landing.google.com/sre/book/chapters/service-level-objectives.html)-based
alerting (a technique for declaring what a defensible service level is based on
the relative costs of engineering more reliability versus incident response,
maintenance costs, and other factors), as we can now see what percentage of
responses fall within and without a predefined service level:
```
apache_http_request_time:latency_sli =
apache_http_request_time:rate10s{le="200"}
/ on (job, port, handler, request_method, request_status, request_protocol)
apache_http_request_time_seconds_count:rate10s
ALERT LatencyTooHigh
IF apache_http_request_time:latency_sli < 0.555555555
LABELS { severity="page" }
ANNOTATIONS {
summary = "Latency is missing the service level objective"
description = "Latency service level indicator is {{ $value }}, which is below nine fives SLO."
}
```
In this example, Prometheus computes a service level indicator of the ratio of
requests at or below the target of 200ms against the total count, and then
fires an alert if the indicator drops below nine fives.
# Avoiding unnecessary work
You can stop the program if it's fed data from a log file you know you want to ignore:
```
getfilename() !~ /apache.access.?log/ {
stop
}
```
This will check to see if the input filename looks like
`/var/log/apache/accesslog` and not attempt any further pattern matching on the
log line if it doesn't.
mtail-3.0.0~rc24.1/docs/Testing.md 0000664 0000000 0000000 00000003430 13435446430 0016545 0 ustar 00root root 0000000 0000000 # Introduction
By default any compile errors are logged to the standard log `/tmp/mtail.INFO`
unless otherwise redirected. (You can emit to standard out with the
`--logtostderr` flag.) Program errors are also printed on the HTTP status
handler, by default at port 3903.
If you want more debugging information, `mtail` provides a few flags to assist with testing your program in standalone mode.
# Details
## Compilation errors
The `compile_only` flag will run the `mtail` compiler, print any error messages, and then exit.
You can use this to check your programs are syntactically valid during the development process.
```
mtail --compile_only --progs ./progs
```
This could be added as a pre-commit hook to your source code repository.
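A minimal sketch of such a hook, saved as `.git/hooks/pre-commit` and made executable (this assumes `mtail` exits non-zero on a compile error, and that your programs live in `./progs`):
```
#!/bin/sh
# Refuse the commit if any mtail program fails to compile.
exec mtail --compile_only --progs ./progs
```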
## Testing programs
The `one_shot` flag will compile and run the `mtail` programs, then feed in any
logs specified from the beginning of the file (instead of tailing them), then
print to the log all metrics collected.
You can use this to check that your programs are giving the expected output
against some gold standard log file samples.
```
mtail --one_shot --progs ./progs --logs testdata/foo.log
```
### Continuous Testing
If you wish, send a PR containing your program, some sample input, and a golden
output to be run as a test in
http://github.com/google/mtail/blob/master/ex_test.go to ensure that mtail
never breaks your program (or that your program gets any updates if the
language changes.)
To have a syntax-only compile test, merely send in a PR with the program in the
examples directory.
The `TestExamplePrograms` behaves like the `one_shot` flag, and
`TestCompileExamplePrograms` tests that program syntax is correct.
# Troubleshooting
For more information about debugging mtail programs, see the tips under [Troubleshooting](Troubleshooting.md)
mtail-3.0.0~rc24.1/docs/Troubleshooting.md 0000664 0000000 0000000 00000010656 13435446430 0020327 0 ustar 00root root 0000000 0000000 # Troubleshooting
This page gives an overview of some avenues to debug your `mtail` installation.
## Reporting a problem
When reporting a problem, please include:
* the output of `mtail --version`
* the first lines of the INFO log (`/tmp/mtail.INFO` by default)
* the top of the status page (on HTTP port 3903 by default)
## Compilation problems
Compilation problems will be emitted to the standard INFO log
* which is visible either on stderr if `mtail` is run with the `--logtostderr` flag
* which is stored in the location provided by the `--log_dir` flag (usually, /tmp)
(The behaviour of glog is documented in https://github.com/golang/glog)
Errors for the most recent version of the program will also be displayed on the
standard status page (served over HTTP at port 3903 by default) in the *Program Loader* section.
If a program fails to compile, it will not be loaded. If an existing program
has been loaded, and a new version is written to disk (by you, or a
configuration management system) and that new version does not compile,
`mtail` will log the errors and not interrupt or restart the existing, older program.
The `--compile_only` flag will only attempt to compile the programs and not
execute them. This can be used for pre-commit testing, for example.
### Syntax trees, type information, and virtual machine bytecode
More detailed compiler debugging can be retrieved by using the `--dump_ast`, `--dump_ast_types`, and `--dump_bytecode`, all of which dump their state to the INFO log.
For example, type errors logged such as
`prog.mtail: Runtime error: conversion of "-0.000000912" to int failed: strconv.ParseInt: parsing "-0.000000912": invalid syntax` suggest an invalid type inference of `int` instead of `float` for some program symbol or expression. Use the `--dump_ast_types` flag to see the type annotated syntax tree of the program for more details.
When reporting a problem, please include the AST type dump.
## Memory or performance issues
`mtail` is a virtual machine emulator, and so strange performance issues can occur beyond the imagination of the author.
The standard Go profiling tool can help. Start with a cpu profile:
`go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/profile`
or a memory profile:
`go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/heap`
There are many good guides on using the profiling tool:
* https://software.intel.com/en-us/blogs/2014/05/10/debugging-performance-issues-in-go-programs is one such guide.
The goroutine stack dump can also help explain what is happening at the moment.
http://localhost:3903/debug/pprof/goroutine?debug=2 shows the full goroutine stack dump.
* `(*Watcher).readEvents` reads events from the filesystem
* `(*Tailer).run` processes log change events; `.read` reads the latest log lines
* `(*Loader).processEvents` handles filesystem event changes regarding new program text
* `(*Loader).processLines` handles new lines coming from the log tailer
* `(*MtailServer).WaitForShutdown` waits for the other components to terminate
* `(*Exporter).StartMetricPush` exists if there are any push collectors (e.g. Graphite) to push to
* `(*Exporter).HandlePrometheusMetrics` exists if an existing Prometheus pull collection is going on
There is one `(*VM).Run` stack per program. These are opaque to the goroutine
stack dump as they execute the bytecode. However, the second argument to `Run`
on the stack is the first four letters of the program name, encoded as ASCII.
You can transcode these back to their names by doing a conversion from the
int32 value in hex provided in the stack, e.g.: 0x61706163 -> 'apac' (probably
an apache log program); 0x7273796e -> 'rsyn' (probably an rsyncd log program)
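For a quick decode on a system with `xxd`, pipe the hex value through a reverse hexdump:
```
$ echo 61706163 | xxd -r -p
apac
```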
Obvious problems seen in the goroutine stack dump are long-waiting goroutines, usually on mutexes
(they show their block time in minutes, e.g. `goroutine 38 [semacquire, 1580
minutes]:`) which usually also manifest as a logjam (no pun intended) in the
loader, tailer, and watcher goroutines (in state 'chan send').
## Deployment problems
The INFO log at `/tmp/mtail.INFO` by default contains lots of information about
any errors encountered. Adding the `-v=2` flag raises the verbosity. See the
[glog](https://github.com/golang/glog) manual for more logging flag options.
The `one_shot` and `logtostderr` flags may come in helpful for quickly
launching mtail in non-daemon mode in order to flush out deployment issues like
permissions problems.
mtail-3.0.0~rc24.1/docs/designs/ 0000775 0000000 0000000 00000000000 13435446430 0016242 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/docs/designs/poll.md 0000664 0000000 0000000 00000005147 13435446430 0017541 0 ustar 00root root 0000000 0000000 # Polling filesystem watcher
Original date: 2018-08-13
Status: draft, unimplemented
## Overview
Implement a hybrid polling and notify based filesystem watcher.
## Background
Tracking issue #169
`mtail` has a filesystem watcher which is used to watch the state of programme
files and log files; if they change, then programmes are reloaded and log files
are read. `mtail` uses the [fsnotify](https://github.com/fsnotify/fsnotify)
package to implement the filesystem watcher. fsnotify, which uses the
`inotify(7)` system in Linux, lets `mtail` offload the work of polling the
filesystem for changes to one where it is notified by the kernel instead,
reducing the amount of work done.
Some users want a polling option instead of fsnotify because their platforms
don't support it, e.g. mipsel (a bug in fsnotify) or platforms with no kernel
support (such as AIX).
This design attempts to determine how to support a hybrid watcher.
To the best of our ability, users should not have to configure poll- or fsnotify-based filesystem watching.
From Linux's inotify(7):
Inotify reports only events that a user-space program triggers through the filesystem API. As a result,
it does not catch remote events that occur on network filesystems. (Applications must fall back to
polling the filesystem to catch such events.) Furthermore, various pseudo-filesystems such as /proc,
/sys, and /dev/pts are not monitorable with inotify.
## design ideas
fsnotify watch add error, fallback to poll. How does fsnotify report errors about watches not being supported? E.g on NFS or with AIX?
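A rough sketch of that fallback idea (not a settled design; `pollPath` is a hypothetical polling loop, not part of fsnotify):
```go
package watcher

import (
	"time"

	"github.com/fsnotify/fsnotify"
)

// addWatch prefers an fsnotify watch, falling back to polling when the
// kernel can't watch the path (e.g. network or pseudo-filesystems).
func addWatch(w *fsnotify.Watcher, path string) {
	if err := w.Add(path); err != nil {
		go pollPath(path, 250*time.Millisecond)
	}
}

// pollPath stands in for a stat-based polling loop emitting synthetic events.
func pollPath(path string, interval time.Duration) {
	for range time.Tick(interval) {
		// stat(path) and compare against cached state here.
	}
}
```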
poll implemented similar to fsnotify poll loop? if that, will that be duplicated work? Do we care enough to avoid nested polling loops? should this be pushed upstream?
how to let users override the choice? Argument listing poll-only filesystem path prefixes?
Could poll be on by default for all files, with a timeout if no events have been received from inotify in some timeout? This could be tricky, we don't need to poll files that are inotified. But, again from inotify(7):
Note that the event queue can overflow. In this case, events are lost. Robust applications should handle
the possibility of lost events gracefully. For example, it may be necessary to rebuild part or all of the
application cache. (One simple, but possibly expensive, approach is to close the inotify file descriptor,
empty the cache, create a new inotify file descriptor, and then re-create watches and cache entries for
the objects to be monitored.)
## references
https://github.com/fsnotify/fsnotify
inotify(7)
mtail-3.0.0~rc24.1/docs/faq.md 0000664 0000000 0000000 00000001450 13435446430 0015677 0 ustar 00root root 0000000 0000000 # FAQ
"Frequently" is probably an overstatement, but here's a collection of questions and answers that pop up on the mailing list and issues.
## I don't like a particular label on the metrics. How do I remove it?
All the labels are under your own control, except for the `prog` label which is used for namespace deconfliction -- i.e. multiple programs can be running in `mtail` and they should not be able to affect each other.
It is best if you do some post processing in your collection system and configure it to filter out the `prog` label, so that strange aggregations don't occur.
In Prometheus, this could be achieved like so:
```
metric_relabel_configs:
  - target_label: prog
    replacement: ''
```
```
(See [this comment](https://github.com/google/mtail/issues/59#issuecomment-303531070)).
mtail-3.0.0~rc24.1/docs/index.md 0000664 0000000 0000000 00000002442 13435446430 0016241 0 ustar 00root root 0000000 0000000 mtail - extract whitebox monitoring data from application logs for collection into a timeseries database
========================================================================================================
mtail is a tool for extracting metrics from application logs to be exported into a timeseries database or timeseries calculator for alerting and dashboarding.
It aims to fill a niche between applications that do not export their own internal state, and existing monitoring systems, without patching those applications or rewriting the same framework for custom extraction glue code.
The extraction is controlled by `mtail` programs which define patterns and actions:
```
# simple line counter
counter line_count
/$/ {
  line_count++
}
```
Metrics are exported for scraping by a collector as JSON or Prometheus format
over HTTP, or can be periodically sent to a collectd, statsd, or Graphite
collector socket.
Read more about `mtail` in the [Programming Guide](Programming-Guide.md), the [Language](Language.md) reference, [Building](Building.md) from source, help for [Interoperability](Interoperability.md) with other monitoring system components, and [Deploying](Deploying.md) and [Troubleshooting](Troubleshooting.md)
Mailing list: https://groups.google.com/forum/#!forum/mtail-users
mtail-3.0.0~rc24.1/docs/state.md 0000664 0000000 0000000 00000007174 13435446430 0016261 0 ustar 00root root 0000000 0000000 # Keeping state in mtail programs
The program is run on each log line from start to finish, with no loops. The only state emitted by the program is the content of the exported metrics. Metrics can be read by the program, though, so exported metrics are the place to keep state between lines of input.
It's often the case that a log line is printed by an application at the start of some session-like interaction, and another at the end. Often these sessions have a session identifier, and every intermediate event in the same session is tagged with that identifier. Using map-valued exported metrics is the way to store session information keyed by session identifier.
The example program [`rsyncd.mtail`](../examples/rsyncd.mtail) shows how to use a session tracking metric for measuring the total user session time.
```
counter connection_time_total
hidden gauge connection_time by pid

/connect from \S+/ {
  connection_time[$pid] = timestamp()
  del connection_time[$pid] after 72h
}

/sent .* bytes received .* bytes total size \d+/ {
  connection_time_total += timestamp() - connection_time[$pid]
  del connection_time[$pid]
}
```
`rsyncd` uses a child process for each session, so the `pid` field of the log format contains the session identifier in this example.
## hidden metrics
A hidden metric is only visible to the mtail program, it is hidden from export. Internal state can be kept out of the metric collection system to avoid unnecessary memory and network costs.
Hidden metrics are declared by prepending the word `hidden` to the declaration:
```
hidden gauge connection_time by pid
```
## Removing session information at the end of the session
The maps can grow unbounded with a key for every session identifier created as the logs are read. If you see `mtail` consuming a lot of memory, it is likely that there's one or more of these maps consuming memory.
(You can remove the `hidden` keyword from the declaration and let `mtail` reload the program without restarting, and the contents of the session information metric will appear on the exported metrics page. Be warned that if it's very big, even loading this page may take a long time and cause mtail to crash.)
`mtail` can't know when a map value is ready to be garbage collected, so you need to tell it. One way is to defer deletion of the key and its value if it is not updated for some duration of time. The other way is to immediately delete it when the key and value are no longer needed.
```
del connection_time[$pid] after 72h
```
Upon creation of a connection time entry, the `rsyncd.mtail` program instructs mtail to remove it once it has gone 72 hours without an update. The programmer is expressing the expectation that sessions typically do not last longer than 72 hours; `mtail` tracks only writes to metrics, not all accesses, when deciding what has expired.
```
del connection_time[$pid]
```
The other form indicates that the key and value can be removed as soon as the session is closed. The caveat here is that logs can be lossy: the application may restart, mtail may restart, or the log delivery system (e.g. syslog) may drop messages. Thus it is recommended to use both forms in programs:
1. the `del ... after` form when the metric entry is created, giving it an expiration time longer than the expected lifespan of the session.
1. the plain `del` form when the session ends, explicitly removing the entry before the expiration time is up.
It is not an error to delete a nonexistent key from a map.
Expiry is only processed once every hour, so durations shorter than 1h won't take effect until the next hour has passed.
mtail-3.0.0~rc24.1/docs/style.md 0000664 0000000 0000000 00000001536 13435446430 0016275 0 ustar 00root root 0000000 0000000 # Contribution style guide
## Table tests
Use the `t.Run` subtest form. This assists debugging: each failure is reported
under the name of the table entry, without needing extra parameters to `t.Log`
and `t.Error` later on.
It also means that the `-run` and `-bench` flags can be used to filter a specific
test without excessive comment-and-rebuild cycles.
Prefer to construct the subtest's name from the test parameters with
`fmt.Sprintf`, otherwise use a `name` field.
When comparing results, use `deep.Equal`. The parameter order should always be
`expected`, then `observed`. This makes the diff output read like "the observed
value is not equal to the expected value."
If there is a non-nil diff result, emit it with `t.Error(diff)`. If multiple
diffs are emitted in a single test, prefix the emission with a `t.Log` of the
name of the result variable or function under test.
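Putting these conventions together, here is a minimal sketch of a table test; the `deep` import path (github.com/go-test/deep) is assumed from the `deep.Equal` convention above, and `strings.Fields` stands in for the code under test:
```
package example_test

import (
	"fmt"
	"strings"
	"testing"

	"github.com/go-test/deep"
)

func TestFieldCount(t *testing.T) {
	for _, tc := range []struct {
		input    string
		expected int
	}{
		{"", 0},
		{"two words", 2},
	} {
		tc := tc
		t.Run(fmt.Sprintf("input=%q", tc.input), func(t *testing.T) {
			observed := len(strings.Fields(tc.input))
			// Parameter order: expected first, then observed.
			if diff := deep.Equal(tc.expected, observed); diff != nil {
				t.Error(diff)
			}
		})
	}
}
```
A single table entry can then be selected with `go test -run 'TestFieldCount/input'` without editing the test.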
mtail-3.0.0~rc24.1/examples/ 0000775 0000000 0000000 00000000000 13435446430 0015474 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/examples/add_assign_float.mtail 0000664 0000000 0000000 00000000210 13435446430 0021776 0 ustar 00root root 0000000 0000000 gauge metric
# To make ex_test.go happy
strptime("2017-10-30T08:52:14Z", "2006-01-02T15:04:05Z07:00")
/(\d+\.\d+)/ {
metric += $1
}
mtail-3.0.0~rc24.1/examples/apache_combined.mtail 0000664 0000000 0000000 00000002234 13435446430 0021606 0 ustar 00root root 0000000 0000000 # Copyright 2015 Ben Kochie . All Rights Reserved.
# This file is available under the Apache license.
# Parser for the common apache "NCSA extended/combined" log format
# LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"
counter apache_http_requests_total by request_method, http_version, request_status
counter apache_http_bytes_total by request_method, http_version, request_status
/^/ +
/(?P<remote_host>[0-9A-Za-z\.:-]+) / + # %h
/(?P<remote_logname>[0-9A-Za-z-]+) / + # %l
/(?P<remote_username>[0-9A-Za-z-]+) / + # %u
/\[(?P<timestamp>\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|-)\d{4})\] / + # %t
/"(?P<request_method>[A-Z]+) (?P<request_uri>\S+) (?P<http_version>HTTP\/[0-9\.]+)" / + # \"%r\"
/(?P<request_status>\d{3}) / + # %>s
/((?P<response_size>\d+)|-) / + # %b
/"(?P<referer>\S+)" / + # \"%{Referer}i\"
/"(?P<user_agent>[[:print:]]+)"/ + # \"%{User-agent}i\"
/$/ {
strptime($timestamp, "02/Jan/2006:15:04:05 -0700") # for tests
apache_http_requests_total[$request_method][$http_version][$request_status]++
$response_size > 0 {
apache_http_bytes_total[$request_method][$http_version][$request_status] += $response_size
}
}
mtail-3.0.0~rc24.1/examples/apache_common.mtail 0000664 0000000 0000000 00000002756 13435446430 0021327 0 ustar 00root root 0000000 0000000 # Parser for the common apache log format as follows:
# LogFormat "%h %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-agent}i\"
counter apache_http_requests_total by request_method, http_version, status_code
counter apache_http_bytes_total by request_method, http_version, status_code
gauge apache_http_response_time by remote_host, request_method, request_uri, status_code, user_agent
gauge apache_http_response_size by remote_host, request_method, request_uri, status_code, user_agent
/^/ +
/(?P<remote_host>[0-9A-Za-z\.:-]+) / + # %h
/(?P<remote_logname>[0-9A-Za-z-]+) / + # %l
/(?P<remote_username>[0-9A-Za-z-]+) / + # %u
/\[(?P<timestamp>\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|-)\d{4})\] / + # %t
/"(?P<request_method>[A-Z]+) (?P<request_uri>\S+) (?P<http_version>HTTP\/[0-9\.]+)" / + # \"%r\"
/(?P<status_code>\d{3}) / + # %>s
/((?P<response_size>\d+)|-) / + # %b
/(?P<response_time>\d+) / + # %D
/"(?P<referer>\S+)" / + # \"%{Referer}i\"
/"(?P<user_agent>[[:print:]]+)"/ + # \"%{User-agent}i\"
/$/ {
strptime($timestamp, "02/Jan/2006:15:04:05 -0700") # for tests
apache_http_requests_total[$request_method][$http_version][$status_code]++
$response_size > 0 {
apache_http_bytes_total[$request_method][$http_version][$status_code] += $response_size
apache_http_response_size[$remote_host][$request_method][$request_uri][$status_code][$user_agent] += $response_size
}
apache_http_response_time[$remote_host][$request_method][$request_uri][$status_code][$user_agent] = $response_time
}
mtail-3.0.0~rc24.1/examples/apache_metrics.mtail 0000664 0000000 0000000 00000014070 13435446430 0021475 0 ustar 00root root 0000000 0000000 # Copyright 2015 Ben Kochie . All Rights Reserved.
# This file is available under the Apache license.
# Parser for a metrics-friendly apache log format
# LogFormat "%v:%p %R %m %>s %H conn=%X %D %O %I %k" metrics
counter http_connections_aborted_total by server_port, handler, method, code, protocol, connection_status
counter http_connections_closed_total by server_port, handler, method, code, protocol, connection_status
counter http_request_size_bytes_total by server_port, handler, method, code, protocol
counter http_response_size_bytes_total by server_port, handler, method, code, protocol
counter http_request_duration_seconds_bucket by le, server_port, handler, method, code, protocol
counter http_request_duration_seconds_sum by server_port, handler, method, code, protocol
counter http_request_duration_seconds_count by server_port, handler, method, code, protocol
/^/ +
/(?P<server_port>\S+) / + # %v:%p - The canonical ServerName of the server serving the request. : The canonical port of the server serving the request.
/(?P<handler>\S+) / + # %R - The handler generating the response (if any).
/(?P<method>[A-Z]+) / + # %m - The request method.
/(?P<code>\d{3}) / + # %>s - Status code.
/(?P<protocol>\S+) / + # %H - The request protocol.
/(?P<connection_status>conn=.) / + # %X - Connection status when response is completed
/(?P<time_us>\d+) / + # %D - The time taken to serve the request, in microseconds.
/(?P<sent_bytes>\d+) / + # %O - Bytes sent, including headers.
/(?P<received_bytes>\d+) / + # %I - Bytes received, including request and headers.
/(?P<keepalive>\d+)/ + # %k - Number of keepalive requests handled on this connection.
/$/ {
###
# HTTP Requests with histogram buckets.
#
http_request_duration_seconds_count[$server_port][$handler][$method][$code][$protocol]++
http_request_duration_seconds_sum[$server_port][$handler][$method][$code][$protocol] += $time_us * 0.000001 # microseconds to seconds
# These statements "fall through", so the histogram is cumulative. The
# collecting system can compute the percentile bands by taking the ratio of
# each bucket value over the final bucket.
# 5ms bucket.
$time_us <= 5000 {
http_request_duration_seconds_bucket["0.005"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["0.005"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 10ms bucket.
$time_us <= 10000 {
http_request_duration_seconds_bucket["0.01"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["0.01"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 25ms bucket.
$time_us <= 25000 {
http_request_duration_seconds_bucket["0.025"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["0.025"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 50ms bucket.
$time_us <= 50000 {
http_request_duration_seconds_bucket["0.05"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["0.05"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 100ms bucket.
$time_us <= 100000 {
http_request_duration_seconds_bucket["0.1"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["0.1"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 250ms bucket.
$time_us <= 250000 {
http_request_duration_seconds_bucket["0.25"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["0.25"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 500ms bucket.
$time_us <= 500000 {
http_request_duration_seconds_bucket["0.5"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["0.5"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 1s bucket.
$time_us <= 1000000 {
http_request_duration_seconds_bucket["1"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["1"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 2.5s bucket.
$time_us <= 2500000 {
http_request_duration_seconds_bucket["2.5"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["2.5"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 5s bucket.
$time_us <= 5000000 {
http_request_duration_seconds_bucket["5"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["5"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 10s bucket.
$time_us <= 10000000 {
http_request_duration_seconds_bucket["10"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["10"][$server_port][$handler][$method][$code][$protocol] += 0
}
# 15s bucket.
$time_us <= 15000000 {
http_request_duration_seconds_bucket["15"][$server_port][$handler][$method][$code][$protocol]++
} else {
http_request_duration_seconds_bucket["15"][$server_port][$handler][$method][$code][$protocol] += 0
}
# "inf" bucket, also the total number of requests.
http_request_duration_seconds_bucket["+Inf"][$server_port][$handler][$method][$code][$protocol]++
###
# Sent/Received bytes.
http_response_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $sent_bytes
http_request_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $received_bytes
### Connection status when response is completed:
# X = Connection aborted before the response completed.
# + = Connection may be kept alive after the response is sent.
# - = Connection will be closed after the response is sent.
/ conn=X / {
http_connections_aborted_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++
}
# Will not include all closed connections. :-(
/ conn=- / {
http_connections_closed_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++
}
}
mtail-3.0.0~rc24.1/examples/decorator.mtail 0000664 0000000 0000000 00000000667 13435446430 0020517 0 ustar 00root root 0000000 0000000 counter a
counter b
counter c
# To make ex_test.go happy
strptime("2018-06-10T00:32:42Z", "2006-01-02T15:04:05Z07:00")
def decoratora {
/(...).*/ {
next
}
}
def decoratorb {
/(?P<x>...).*/ {
next
}
}
# This tests that the variables in the decorator are visible to the decorated block.
@decoratora {
$1 == "Dec" {
a++
}
}
@decoratorb {
$x == "Dec" {
b++
}
}
/(...).*/ {
$1 == "Dec" {
c++
}
}
mtail-3.0.0~rc24.1/examples/dhcpd.mtail 0000664 0000000 0000000 00000011035 13435446430 0017606 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# Define the exported metric names. The `by' keyword indicates the metric has
# dimensions. For example, `request_total' counts the frequency of each
# request's "command". The name `command' will be exported as the label name
# for the metric. The command provided in the code below will be exported as
# the label value.
counter request_total by command
counter config_file_errors
counter peer_disconnects
counter dhcpdiscovers by mac
counter bind_xid_mismatch
counter duplicate_lease
counter bad_udp_checksum
counter unknown_subnet
counter dhcpdiscover_nofree by network
counter unknown_lease by ip
counter update_rejected
counter failover_peer_timeout
counter ip_already_in_use
counter ip_abandoned by reason
counter invalid_state_transition
counter negative_poolreq by pool
counter lease_conflicts
# The `syslog' decorator defines a procedure. When a block of mtail code is
# "decorated", it is called before entering the block. The block is entered
# when the keyword `next' is reached.
def syslog {
/^(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
# If the legacy_date regexp matched, try this format.
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
# If the RFC3339 style matched, parse it this way.
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
}
# Call into the decorated block
next
}
}
# Define some pattern constants for reuse in the patterns below.
const IP /\d+(\.\d+){3}/
const MATCH_IP /(?P<ip>/ + IP + /)/
const MATCH_NETWORK /(?P<network>\d+(\.\d+){1,3}\/\d+)/
const MATCH_MAC /(?P<mac>([\da-f]{2}:){5}[\da-f]{2})/
@syslog {
# Request
/(balanced|balancing|BOOTREPLY|BOOTREQUEST|DHCPACK|DHCPDECLINE|DHCPDISCOVER|DHCPINFORM|DHCPNAK|DHCPOFFER|DHCPRELEASE|DHCPREQUEST)/ {
# The lowercased name of the command matched in the regex is used to
# count the frequency of each command. An external collector can use
# this to compute the rate of each command independently.
request_total[tolower($1)]++
# DHCP Discover
/DHCPDISCOVER from / + MATCH_MAC {
# Counts the discovery requests per mac address, which can help
# identify bad clients on the network.
dhcpdiscovers[$mac]++
/network / + MATCH_NETWORK + /: no free leases/ {
# If the range is full, your clients may be having a bad time.
dhcpdiscover_nofree[$network]++
}
}
}
# Config file errors
/Configuration file errors encountered -- exiting/ {
# Counting config parse errors can be useful for detecting bad config
# pushes that made it to production.
config_file_errors++
}
# Peer disconnects
/peer ([^:]+): disconnected/ {
peer_disconnects++
}
# XID mismatches
/bind update on / + IP + / got ack from (?P<peer>\w+): xid mismatch./ {
bind_xid_mismatch++
}
# Duplicate lease
/uid lease / + MATCH_IP + / for client / + MATCH_MAC + / is duplicate on / + MATCH_NETWORK {
duplicate_lease++
}
# Bad UDP Checksum
/(?P<count>\d+) bad udp checksums in \d+ packets/ {
bad_udp_checksum += $count
}
# Unknown subnet
/DHCPDISCOVER from / + MATCH_MAC + / via / + IP + /: unknown network segment/ {
unknown_subnet++
}
# Unknown lease
/DHCPREQUEST for / + IP + /\(/ + IP + /\) from / + MATCH_MAC + / via / + IP + /: unknown lease / + MATCH_IP {
unknown_lease[$ip]++
}
# Update rejected
/bind update on \S+ from \S+ rejected: incoming update is less critical than the outgoing update/ {
update_rejected++
}
/timeout waiting for failover peer \S+/ {
failover_peer_timeout++
}
/ICMP Echo reply while lease / + IP + / valid/ {
ip_already_in_use++
}
/unexpected ICMP Echo reply from / + IP {
ip_already_in_use++
}
/Abandoning IP address / + IP + /: (?P<reason>.*)/ {
ip_abandoned[$reason]++
}
/bind update on \S+ from \S+ rejected: / + IP + /: invalid state transition/ {
invalid_state_transition++
}
/peer (?P<pool>[^:]+): Got POOLREQ, answering negatively!/ {
negative_poolreq[$pool]++
}
/Lease conflict at/ {
lease_conflicts++
}
}
mtail-3.0.0~rc24.1/examples/else.mtail 0000664 0000000 0000000 00000000307 13435446430 0017454 0 ustar 00root root 0000000 0000000 counter yes
counter maybe
counter no
# To make ex_test.go happy
strptime("2016-04-25T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/1/ {
/^1$/ {
yes++
} else {
maybe++
}
} else {
no++
}
mtail-3.0.0~rc24.1/examples/filename.mtail 0000664 0000000 0000000 00000000252 13435446430 0020303 0 ustar 00root root 0000000 0000000 counter filename_lines by filename
# To make ex_test.go happy
strptime("2017-07-20T22:50:42Z", "2006-01-02T15:04:05Z07:00")
// {
filename_lines[getfilename()] ++
}
mtail-3.0.0~rc24.1/examples/ip-addr.mtail 0000664 0000000 0000000 00000000235 13435446430 0020044 0 ustar 00root root 0000000 0000000 text ipaddr
# To make ex_test.go happy
strptime("2018-11-06T07:26:02Z", "2006-01-02T15:04:05Z07:00")
/ip address (\d+\.\d+\.\d+\.\d+)/ {
ipaddr = $1
}
mtail-3.0.0~rc24.1/examples/lighttpd.mtail 0000664 0000000 0000000 00000002246 13435446430 0020347 0 ustar 00root root 0000000 0000000 # Copyright 2010 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# mtail module for a lighttpd server
counter request by status
counter time_taken by status
counter bytes_out by subtotal, status
counter bytes_in by status
counter requests by proxy_cache
const ACCESSLOG_RE // +
/(?P<request_ip>\S+) (?P<virtual_host>\S+) (?P<remote_user>\S+)/ +
/ \[(?P<access_time>[^\]]+)\] "(?P<request_method>\S+) (?P<url>.+?) / +
/(?P<protocol>\S+)" (?P<status>\d+) (?P<bytes_out>\d+) (?P<bytes_body>\d+)/ +
/ (?P<bytes_in>\d+) (?P<time_taken>\d+) "(?P<proxied_for>[^"]+)" / +
/"(?P<user_agent>[^"]+)"/
# /var/log/lighttpd/access.log
getfilename() =~ /lighttpd.access.log/ {
// + ACCESSLOG_RE {
# Parse an accesslog entry.
$url == "/healthz" {
# nothing
}
otherwise {
strptime($access_time, "02/Jan/2006:15:04:05 -0700")
request[$status]++
time_taken[$status] += $time_taken
bytes_out["resp_body", $status] += $bytes_body
bytes_out["resp_header", $status] += $bytes_out - $bytes_body
bytes_in[$status] += $bytes_in
$proxied_for != "-" {
requests[$request_ip]++
}
}
}
}
mtail-3.0.0~rc24.1/examples/linecount.mtail 0000664 0000000 0000000 00000000221 13435446430 0020517 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter line_count
/$/ {
line_count++
}
mtail-3.0.0~rc24.1/examples/logical.mtail 0000664 0000000 0000000 00000000340 13435446430 0020133 0 ustar 00root root 0000000 0000000 counter foo
counter bar
# To make ex_test.go happy
strptime("2017-10-03T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/(?P<var>.*)/ {
$var == "foo" || $var == "bar" {
foo++
}
$var == "bar" && 1 == 1 {
bar++
}
}
mtail-3.0.0~rc24.1/examples/match-expression.mtail 0000664 0000000 0000000 00000000330 13435446430 0022011 0 ustar 00root root 0000000 0000000 counter someas
counter notas
counter total
# To make ex_test.go happy
strptime("2017-12-07T16:07:14Z", "2006-01-02T15:04:05Z07:00")
/(.*)/ {
$1 =~ /a/ {
someas++
}
$1 !~ /a/ {
notas++
}
total++
}
mtail-3.0.0~rc24.1/examples/metric-as-rvalue.mtail 0000664 0000000 0000000 00000000454 13435446430 0021707 0 ustar 00root root 0000000 0000000 gauge response_time
counter hit
counter miss
# To make ex_test.go happy
strptime("2016-04-25T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/seconds = (?P<response_seconds>\d+)/ {
response_time = $response_seconds * 1000
response_time < 100000 {
hit++
} else {
miss++
}
}
mtail-3.0.0~rc24.1/examples/mysql_slowqueries.mtail 0000664 0000000 0000000 00000005036 13435446430 0022337 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# mysql-slowqueries -- mtail module tracking slow mysql queries
hidden text user
hidden text host
hidden text query_type
hidden text service
hidden gauge tmp_query_time
hidden gauge tmp_lock_time
hidden gauge partial
hidden gauge time
counter query_time by type, server, service, user
counter lock_time by type, server, service, user
counter query_time_overall_sum
counter query_time_total_count
counter lock_time_overall_sum
counter lock_time_total_count
# Example lines from the file and regex to match them:
# # User@Host: dbuser[dbuser] @ host [192.0.2.87]
const USER_HOST /^# User@Host: ([a-zA-Z]+)\[[a-zA-Z]+\] @ ([^\. ]+)/
# # Query_time: 30 Lock_time: 0 Rows_sent: 0 Rows_examined: 0
const QUERY_TIME /^# Query_time: (\d+)\s*Lock_time: (\d+)/
# UPDATE ... # outbox;
const FULL_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .* # (.*);$/
# Not all queries have helpful comments at the end
const UNINSTRUMENTED_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .*;$/
# If the query gets split up, the service may end up on another line
const PARTIAL_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .*[^;]$/
# This one has the potential to catch too many things, so it can only be a last
# resort match.
const END_QUERY_LINE /.*;$/
/^# Time: (\d{6} .\d:\d\d:\d\d)/ {
strptime($1, "060102 3:04:05")
time = timestamp()
}
/^SET timestamp=(\d+);/ {
time = $1
}
settime(time)
// + USER_HOST {
user = $1
host = $2
}
# break if no user set yet
user == "" {
stop
}
// + QUERY_TIME {
tmp_query_time = $1
tmp_lock_time = $2
query_time_overall_sum += tmp_query_time
query_time_total_count++
lock_time_overall_sum += tmp_lock_time
lock_time_total_count++
}
// + FULL_QUERY_LINE {
# We should have everything we need now.
query_type = tolower($1)
service = $2
query_time[query_type, host, service, user] += tmp_query_time
lock_time[query_type, host, service, user] += tmp_lock_time
}
// + UNINSTRUMENTED_QUERY_LINE {
# We should have everything we need now.
query_type = tolower($1)
service = "n/a"
query_time[query_type, host, service, user] += tmp_query_time
lock_time[query_type, host, service, user] += tmp_lock_time
}
// + PARTIAL_QUERY_LINE {
query_type = tolower($1)
partial = 1
}
// + END_QUERY_LINE && partial == 1 {
partial = 0
/.*# (.*)$/ {
service = $1
}
otherwise {
service = "n/a"
}
query_time[query_type, host, service, user] += tmp_query_time
lock_time[query_type, host, service, user] += tmp_lock_time
}
mtail-3.0.0~rc24.1/examples/nocode.mtail 0000664 0000000 0000000 00000000361 13435446430 0017773 0 ustar 00root root 0000000 0000000 # This is an example mtail programme for exporting no code instrumentation
#
# An application with no instrumentation of its own requires an external
# program to sift through and export metrics from other sources; in this
# case, mtail extracts them from the application's log files.
mtail-3.0.0~rc24.1/examples/ntpd.mtail 0000664 0000000 0000000 00000003171 13435446430 0017473 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# Syslog decorator
def syslog {
/^(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
}
next
}
}
@syslog {
counter int_syscalls
/select\(.*\) error: Interrupted system call/ {
int_syscalls++
}
counter recvbuf_overflows
gauge last_recvbuf
/too many recvbufs allocated \((\d+)\)/ {
recvbuf_overflows++
last_recvbuf = $1
}
counter exits
/ntpd exiting on signal 15/ {
exits++
}
counter starts
/x?ntpd .* \w+\s+\w+\s+\d+\s+\d+:\d+:\d+\s+\w+\s+\d+\s+\(\d\)/ {
starts++
}
gauge sync_status
/kernel time sync (status (change)?|enabled|disabled) (?P<status>\d+)/ {
sync_status = $status
}
# PLL status change.
#
# Described here: http://obswww.unige.ch/~bartho/xntp_faq/faq3Care.htm#araee
counter pll_changes
gauge pll_status
/kernel pll status change (?P<status>\d+)/ {
pll_changes++
pll_status = $status
}
counter peer_syncs
/synchronized to (\d+\.\d+\.\d+\.\d+|LOCAL\(\d\)), stratum(=| )(\d+)/ {
peer_syncs++
}
counter driftfile_errors
/can't open .*drift.*: No such file or directory/ {
driftfile_errors++
}
counter sync_lost_total
/synchronisation lost/ {
sync_lost_total++
}
} # end syslog
mtail-3.0.0~rc24.1/examples/ntpd_peerstats.mtail 0000664 0000000 0000000 00000002015 13435446430 0021561 0 ustar 00root root 0000000 0000000 # Peerstats log handling
gauge peer_status by peer
gauge peer_select by peer
gauge peer_count by peer
gauge peer_code by peer
gauge peer_offset by peer
gauge peer_delay by peer
gauge peer_dispersion by peer
counter num_peerstats by peer
# TODO(jaq) seconds is int, not float
/^(?P<days>\d+) (?P<seconds>\d+)\.\d+ (?P<peer>\d+\.\d+\.\d+\.\d+) (?P<status>[0-9a-f]+) (?P<offset>-?\d+\.\d+) (?P<delay>\d+\.\d+) (?P<dispersion>\d+\.\d+)/ {
# Unix epoch in MJD is 40587.
settime(($days - 40587) * 86400 + $seconds)
peer_offset[$peer] = $offset
peer_delay[$peer] = $delay
peer_dispersion[$peer] = $dispersion
# http://www.cis.udel.edu/~mills/ntp/html/decode.html#peer
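# The status word is a 16-bit hex value; each field below is extracted by
# shifting its high bits down to zero and masking off the field's width,
# e.g. for status "f014" the 5-bit status field is (0xf014 >> 11) & 0x1f == 0x1e.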
# bits 0-4
peer_status[$peer] = (strtol($status, 16) >> (16 - 5)) & ((2 ** 5) - 1)
# bits 5-7
peer_select[$peer] = (strtol($status, 16) >> (16 - 8)) & ((2 ** 3) - 1)
# bits 8-11
peer_count[$peer] = (strtol($status, 16) >> (16 - 12)) & ((2 ** 4) - 1)
# bits 12-15
peer_code[$peer] = strtol($status, 16) & ((2 ** 4) - 1)
num_peerstats[$peer]++
}
mtail-3.0.0~rc24.1/examples/otherwise.mtail 0000664 0000000 0000000 00000000323 13435446430 0020533 0 ustar 00root root 0000000 0000000 counter yes
counter maybe
counter no
# To make ex_test.go happy
strptime("2016-04-25T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/1/ {
/^1$/ {
yes++
}
otherwise {
maybe++
}
}
otherwise {
no++
}
mtail-3.0.0~rc24.1/examples/postfix.mtail 0000664 0000000 0000000 00000047215 13435446430 0020231 0 ustar 00root root 0000000 0000000 # vim:ts=2:sw=2:et:ai:sts=2:cinoptions=(0
# Copyright 2017 Martín Ferrari . All Rights Reserved.
# This file is available under the Apache license.
# Syslog parser for Postfix, based on the parsing rules from:
# https://github.com/kumina/postfix_exporter
# Copyright 2017 Kumina, https://kumina.nl/
# Available under the Apache license.
const DELIVERY_DELAY_LINE /.*, relay=(?P<relay>\S+), .*,/ +
/ delays=(?P<bqm>[0-9\.]+)\/(?P<qm>[0-9\.]+)\/(?P<cs>[0-9\.]+)\/(?P<tx>[0-9\.]+),\s/
const SMTP_TLS_LINE /(\S+) TLS connection established to \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/
const SMTPD_TLS_LINE /(\S+) TLS connection established from \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/
const QMGR_INSERT_LINE /:.*, size=(?P<size>\d+), nrcpt=(?P<nrcpt>\d+)/
const QMGR_REMOVE_LINE /: removed$/
/^(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+postfix\/(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
}
# Total number of messages processed by cleanup.
counter postfix_cleanup_messages_processed_total
# Total number of messages rejected by cleanup.
counter postfix_cleanup_messages_rejected_total
$application == "cleanup" {
/: message-id=</ {
postfix_cleanup_messages_processed_total++
}
/: reject: / {
postfix_cleanup_messages_rejected_total++
}
}
# LMTP message processing time in seconds.
counter postfix_lmtp_delivery_delay_seconds_bucket by le, stage
counter postfix_lmtp_delivery_delay_seconds_total by stage
counter postfix_lmtp_delivery_delay_seconds_sum by stage
# buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
$application == "lmtp" {
// + DELIVERY_DELAY_LINE {
# 1st field: before_queue_manager
$bqm < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["before_queue_manager"]++
}
$bqm < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["before_queue_manager"]++
}
$bqm < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["before_queue_manager"]++
}
$bqm < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["before_queue_manager"]++
}
$bqm < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["before_queue_manager"]++
}
$bqm < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["before_queue_manager"]++
}
$bqm < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["before_queue_manager"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["before_queue_manager"]++
postfix_lmtp_delivery_delay_seconds_total["before_queue_manager"]++
postfix_lmtp_delivery_delay_seconds_sum["before_queue_manager"] += $bqm
# 2nd field: queue_manager
$qm < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["queue_manager"]++
}
$qm < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["queue_manager"]++
}
$qm < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["queue_manager"]++
}
$qm < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["queue_manager"]++
}
$qm < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["queue_manager"]++
}
$qm < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["queue_manager"]++
}
$qm < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["queue_manager"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["queue_manager"]++
postfix_lmtp_delivery_delay_seconds_total["queue_manager"]++
postfix_lmtp_delivery_delay_seconds_sum["queue_manager"] += $qm
# 3rd field: connection_setup
$cs < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["connection_setup"]++
}
$cs < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["connection_setup"]++
}
$cs < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["connection_setup"]++
}
$cs < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["connection_setup"]++
}
$cs < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["connection_setup"]++
}
$cs < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["connection_setup"]++
}
$cs < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["connection_setup"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["connection_setup"]++
postfix_lmtp_delivery_delay_seconds_total["connection_setup"]++
postfix_lmtp_delivery_delay_seconds_sum["connection_setup"] += $cs
# 4th field: transmission
$tx < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["transmission"]++
}
$tx < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["transmission"]++
}
$tx < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["transmission"]++
}
$tx < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["transmission"]++
}
$tx < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["transmission"]++
}
$tx < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["transmission"]++
}
$tx < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["transmission"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["transmission"]++
postfix_lmtp_delivery_delay_seconds_total["transmission"]++
postfix_lmtp_delivery_delay_seconds_sum["transmission"] += $tx
}
}
# Pipe message processing time in seconds.
counter postfix_pipe_delivery_delay_seconds_bucket by le, relay, stage
counter postfix_pipe_delivery_delay_seconds_total by relay, stage
counter postfix_pipe_delivery_delay_seconds_sum by relay, stage
# buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
$application == "pipe" {
// + DELIVERY_DELAY_LINE {
# 1st field: before_queue_manager
$bqm < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["before_queue_manager"]++
}
$bqm < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["before_queue_manager"]++
}
$bqm < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["before_queue_manager"]++
}
$bqm < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["before_queue_manager"]++
}
$bqm < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["before_queue_manager"]++
}
$bqm < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["before_queue_manager"]++
}
$bqm < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["before_queue_manager"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["before_queue_manager"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["before_queue_manager"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["before_queue_manager"] += $bqm
# 2nd field: queue_manager
$qm < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["queue_manager"]++
}
$qm < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["queue_manager"]++
}
$qm < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["queue_manager"]++
}
$qm < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["queue_manager"]++
}
$qm < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["queue_manager"]++
}
$qm < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["queue_manager"]++
}
$qm < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["queue_manager"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["queue_manager"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["queue_manager"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["queue_manager"] += $qm
# 3rd field: connection_setup
$cs < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["connection_setup"]++
}
$cs < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["connection_setup"]++
}
$cs < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["connection_setup"]++
}
$cs < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["connection_setup"]++
}
$cs < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["connection_setup"]++
}
$cs < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["connection_setup"]++
}
$cs < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["connection_setup"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["connection_setup"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["connection_setup"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["connection_setup"] += $cs
# 4th field: transmission
$tx < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["transmission"]++
}
$tx < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["transmission"]++
}
$tx < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["transmission"]++
}
$tx < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["transmission"]++
}
$tx < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["transmission"]++
}
$tx < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["transmission"]++
}
$tx < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["transmission"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["transmission"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["transmission"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["transmission"] += $tx
}
}
# Number of recipients per message inserted into the mail queues.
counter postfix_qmgr_messages_inserted_recipients_bucket by le
counter postfix_qmgr_messages_inserted_recipients_total
counter postfix_qmgr_messages_inserted_recipients_sum
# buckets: 1, 2, 4, 8, 16, 32, 64, 128
# Size of messages inserted into the mail queues in bytes.
counter postfix_qmgr_messages_inserted_size_bytes_bucket by le
counter postfix_qmgr_messages_inserted_size_bytes_total
counter postfix_qmgr_messages_inserted_size_bytes_sum
# buckets: 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9
# Total number of messages removed from mail queues.
counter postfix_qmgr_messages_removed_total
$application == "qmgr" {
// + QMGR_INSERT_LINE {
$nrcpt < 1 {
postfix_qmgr_messages_inserted_recipients_bucket["1"]++
}
$nrcpt < 2 {
postfix_qmgr_messages_inserted_recipients_bucket["2"]++
}
$nrcpt < 4 {
postfix_qmgr_messages_inserted_recipients_bucket["4"]++
}
$nrcpt < 8 {
postfix_qmgr_messages_inserted_recipients_bucket["8"]++
}
$nrcpt < 16 {
postfix_qmgr_messages_inserted_recipients_bucket["16"]++
}
$nrcpt < 32 {
postfix_qmgr_messages_inserted_recipients_bucket["32"]++
}
$nrcpt < 64 {
postfix_qmgr_messages_inserted_recipients_bucket["64"]++
}
$nrcpt < 128 {
postfix_qmgr_messages_inserted_recipients_bucket["128"]++
}
postfix_qmgr_messages_inserted_recipients_bucket["+Inf"]++
postfix_qmgr_messages_inserted_recipients_total++
postfix_qmgr_messages_inserted_recipients_sum += $nrcpt
$size < 1000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["1000"]++
}
$size < 10000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["10000"]++
}
$size < 100000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["100000"]++
}
$size < 1000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["1000000"]++
}
$size < 10000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["10000000"]++
}
$size < 100000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["100000000"]++
}
$size < 1000000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["1000000000"]++
}
postfix_qmgr_messages_inserted_size_bytes_bucket["+Inf"]++
postfix_qmgr_messages_inserted_size_bytes_total++
postfix_qmgr_messages_inserted_size_bytes_sum += $size
}
// + QMGR_REMOVE_LINE {
postfix_qmgr_messages_removed_total++
}
}
# SMTP message processing time in seconds.
counter postfix_smtp_delivery_delay_seconds_bucket by le, stage
counter postfix_smtp_delivery_delay_seconds_total by stage
counter postfix_smtp_delivery_delay_seconds_sum by stage
# buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
# Total number of outgoing TLS connections.
counter postfix_smtp_tls_connections_total by trust, protocol, cipher, secret_bits, algorithm_bits
$application == "smtp" {
// + DELIVERY_DELAY_LINE {
# 1st field: before_queue_manager
$bqm < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["before_queue_manager"]++
}
$bqm < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["before_queue_manager"]++
}
$bqm < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["before_queue_manager"]++
}
$bqm < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["before_queue_manager"]++
}
$bqm < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["before_queue_manager"]++
}
$bqm < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["before_queue_manager"]++
}
$bqm < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["before_queue_manager"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["before_queue_manager"]++
postfix_smtp_delivery_delay_seconds_total["before_queue_manager"]++
postfix_smtp_delivery_delay_seconds_sum["before_queue_manager"] += $bqm
# 2nd field: queue_manager
$qm < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["queue_manager"]++
}
$qm < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["queue_manager"]++
}
$qm < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["queue_manager"]++
}
$qm < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["queue_manager"]++
}
$qm < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["queue_manager"]++
}
$qm < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["queue_manager"]++
}
$qm < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["queue_manager"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["queue_manager"]++
postfix_smtp_delivery_delay_seconds_total["queue_manager"]++
postfix_smtp_delivery_delay_seconds_sum["queue_manager"] += $qm
# 3rd field: connection_setup
$cs < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["connection_setup"]++
}
$cs < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["connection_setup"]++
}
$cs < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["connection_setup"]++
}
$cs < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["connection_setup"]++
}
$cs < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["connection_setup"]++
}
$cs < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["connection_setup"]++
}
$cs < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["connection_setup"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["connection_setup"]++
postfix_smtp_delivery_delay_seconds_total["connection_setup"]++
postfix_smtp_delivery_delay_seconds_sum["connection_setup"] += $cs
# 4th field: transmission
$tx < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["transmission"]++
}
$tx < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["transmission"]++
}
$tx < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["transmission"]++
}
$tx < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["transmission"]++
}
$tx < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["transmission"]++
}
$tx < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["transmission"]++
}
$tx < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["transmission"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["transmission"]++
postfix_smtp_delivery_delay_seconds_total["transmission"]++
postfix_smtp_delivery_delay_seconds_sum["transmission"] += $tx
}
// + SMTP_TLS_LINE {
postfix_smtp_tls_connections_total[$1][$2][$3][$4][$5]++
}
}
# Total number of incoming connections.
counter postfix_smtpd_connects_total
# Total number of incoming disconnections.
counter postfix_smtpd_disconnects_total
# Total number of connections for which forward-confirmed DNS cannot be resolved.
counter postfix_smtpd_forward_confirmed_reverse_dns_errors_total
# Total number of connections lost.
counter postfix_smtpd_connections_lost_total by after_stage
# Total number of messages processed.
counter postfix_smtpd_messages_processed_total by sasl_username
# Total number of NOQUEUE rejects.
counter postfix_smtpd_messages_rejected_total by code
# Total number of SASL authentication failures.
counter postfix_smtpd_sasl_authentication_failures_total
# Total number of incoming TLS connections.
counter postfix_smtpd_tls_connections_total by trust, protocol, cipher, secret_bits, algorithm_bits
$application == "smtpd" {
/ connect from / {
postfix_smtpd_connects_total++
}
/ disconnect from / {
postfix_smtpd_disconnects_total++
}
/ warning: hostname \S+ does not resolve to address / {
postfix_smtpd_forward_confirmed_reverse_dns_errors_total++
}
/ lost connection after (\w+) from / {
postfix_smtpd_connections_lost_total[$1]++
}
/: client=/ {
/, sasl_username=(\S+)/ {
postfix_smtpd_messages_processed_total[$1]++
} else {
postfix_smtpd_messages_processed_total[""]++
}
}
/NOQUEUE: reject: RCPT from \S+: (\d+) / {
postfix_smtpd_messages_rejected_total[$1]++
}
/warning: \S+: SASL \S+ authentication failed: / {
postfix_smtpd_sasl_authentication_failures_total++
}
// + SMTPD_TLS_LINE {
postfix_smtpd_tls_connections_total[$1][$2][$3][$4][$5]++
}
}
}
mtail-3.0.0~rc24.1/examples/rails.mtail 0000664 0000000 0000000 00000005052 13435446430 0017640 0 ustar 00root root 0000000 0000000 # Copyright 2017 Pablo Carranza . All Rights Reserved.
# This file is available under the Apache license.
#
# Rails production log parsing
counter rails_requests_started_total
counter rails_requests_started by verb
counter rails_requests_completed_total
counter rails_requests_completed by status
counter rails_requests_completed_milliseconds_sum by status
counter rails_requests_completed_milliseconds_count by status
counter rails_requests_completed_milliseconds_bucket by le, status
/^Started (?P<verb>[A-Z]+) .*/ {
###
# Started HTTP requests by verb (GET, POST, etc.)
#
rails_requests_started_total++
rails_requests_started[$verb]++
}
/^Completed (?P<status>\d{3}) .+ in (?P<request_milliseconds>\d+)ms .*$/ {
###
# Total number of completed requests by status
#
rails_requests_completed_total++
rails_requests_completed[$status]++
###
# Completed requests by status with histogram buckets
#
# These statements "fall through", so the histogram is cumulative. The
# collecting system can compute the percentile bands by taking the ratio of
# each bucket value over the final bucket.
rails_requests_completed_milliseconds_sum[$status] += $request_milliseconds
rails_requests_completed_milliseconds_count[$status]++
# 5ms bucket
$request_milliseconds <= 5 {
rails_requests_completed_milliseconds_bucket["5"][$status]++
}
# 10ms bucket
$request_milliseconds <= 10 {
rails_requests_completed_milliseconds_bucket["10"][$status]++
}
# 50ms bucket
$request_milliseconds <= 50 {
rails_requests_completed_milliseconds_bucket["50"][$status]++
}
# 100ms bucket
$request_milliseconds <= 100 {
rails_requests_completed_milliseconds_bucket["100"][$status]++
}
# 250ms bucket
$request_milliseconds <= 250 {
rails_requests_completed_milliseconds_bucket["250"][$status]++
}
# 500ms bucket
$request_milliseconds <= 500 {
rails_requests_completed_milliseconds_bucket["500"][$status]++
}
# 1s bucket
$request_milliseconds <= 1000 {
rails_requests_completed_milliseconds_bucket["1000"][$status]++
}
# 2.5s bucket
$request_milliseconds <= 2500 {
rails_requests_completed_milliseconds_bucket["2500"][$status]++
}
# 5s bucket
$request_milliseconds <= 5000 {
rails_requests_completed_milliseconds_bucket["5000"][$status]++
}
# 15s bucket
$request_milliseconds <= 15000 {
rails_requests_completed_milliseconds_bucket["15000"][$status]++
}
# "inf" bucket, also the total number of requests
rails_requests_completed_milliseconds_bucket["+Inf"][$status]++
}
mtail-3.0.0~rc24.1/examples/rsyncd.mtail 0000664 0000000 0000000 00000003221 13435446430 0020024 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter bytes_total by operation
# total connections, and total connection time can be used to compute the
# average connection time.
counter connections_total
counter connection_time_total as "connection-time_total"
# See which modules are popular.
counter transfers_total by operation, module
# Use this gauge to measure duration between start and end time per connection.
# It is never used externally, so mark as `hidden'.
hidden gauge connection_time by pid
/^(?P<date>\d+\/\d+\/\d+ \d+:\d+:\d+) \[(?P<pid>\d+)\] / {
strptime($date, "2006/01/02 15:04:05")
# Transfer log
# %o %h [%a] %m (%u) %f %l
/(?P<operation>\S+) (\S+) \[\S+\] (?P<module>\S+) \(\S*\) \S+ (?P<length>\d+)/ {
transfers_total[$operation, $module]++
}
# Connection starts
/connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ {
connections_total++
# Record the start time of the connection, using the log timestamp.
connection_time[$pid] = timestamp()
}
# Connection summary when session closed
/sent (?P<sent>\d+) bytes received (?P<received>\d+) bytes total size \d+/ {
# Sum total bytes across all sessions for this process
bytes_total["sent"] += $sent
bytes_total["received"] += $received
# Count total time spent with connections open, according to the log timestamp.
connection_time_total += timestamp() - connection_time[$pid]
# Delete the datum referenced in this dimensional metric. We assume that
# this will never happen again, and hint to the VM that we can garbage
# collect the memory used.
del connection_time[$pid]
}
}
mtail-3.0.0~rc24.1/examples/sftp.mtail 0000664 0000000 0000000 00000002246 13435446430 0017504 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter login_count by username
counter logout_count by username
counter bytes_read
counter files_read
counter bytes_written
counter files_written
counter user_bytes_read by username
counter user_files_read by username
counter user_bytes_written by username
counter user_files_written by username
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ {
strptime($date, "Jan _2 15:04:05")
/session opened for local user (?P<username>\w+)/ {
login_count[$username]++
}
/session closed for local user (?P<username>\w+)/ {
logout_count[$username]++
}
/close "[^"]+" bytes read (?P\d+) written (?P\d+)/ {
$read != 0 {
bytes_read += $read
files_read++
}
$written != 0 {
bytes_written += $written
files_written++
}
/close "\/home\/(?P[^\/]+)\/[^"]+"/ {
$read != 0 {
user_bytes_read[$username] += $read
user_files_read[$username]++
}
$written != 0 {
user_bytes_written[$username] += $written
user_files_written[$username]++
}
}
}
}
mtail-3.0.0~rc24.1/examples/strcat.mtail 0000664 0000000 0000000 00000000212 13435446430 0020017 0 ustar 00root root 0000000 0000000 counter f by s
# To make ex_test.go happy
strptime("2017-10-03T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/(.*), (.*)/ {
f[$1 + $2]++
}
mtail-3.0.0~rc24.1/examples/stringy.mtail 0000664 0000000 0000000 00000000237 13435446430 0020225 0 ustar 00root root 0000000 0000000 text str
counter b by foo
# To make ex_test.go happy
strptime("2018-06-16T03:37:54Z", "2006-01-02T15:04:05Z07:00")
/(.*)/ {
str = $1
}
/b/ {
b[str]++
}
mtail-3.0.0~rc24.1/examples/timer.mtail 0000664 0000000 0000000 00000000166 13435446430 0017647 0 ustar 00root root 0000000 0000000 timer request_time_ms by vhost
/(?P<vhost>\S+) (?P<latency_s>\d+)/ {
request_time_ms[$vhost] = $latency_s * 1000 # seconds to milliseconds
}
mtail-3.0.0~rc24.1/examples/timestamp.mtail 0000664 0000000 0000000 00000000312 13435446430 0020523 0 ustar 00root root 0000000 0000000 counter mtail_lines_read_count by filename
gauge mtail_file_lastread_timestamp by filename
/.*/ {
mtail_lines_read_count[getfilename()]++
mtail_file_lastread_timestamp[getfilename()] = timestamp()
}
mtail-3.0.0~rc24.1/examples/typed-comparison.mtail 0000664 0000000 0000000 00000000406 13435446430 0022021 0 ustar 00root root 0000000 0000000 counter t by le
counter t_sum
# To make ex_test.go happy
strptime("2017-11-02T16:07:14Z", "2006-01-02T15:04:05Z07:00")
/^(?P<v>\d+(\.\d+)?)/ {
$v < 0.5 {
t["0.5"]++
}
$v < 1 {
t["1"]++
}
t["inf"]++
t_sum += $v
}
mtail-3.0.0~rc24.1/examples/types.mtail 0000664 0000000 0000000 00000000734 13435446430 0017674 0 ustar 00root root 0000000 0000000 gauge should_be_int
gauge should_be_float
counter neg
gauge should_be_float_map by label
gauge should_be_int_map by label
counter i
# To make ex_test.go happy
strptime("2017-07-15T18:03:14Z", "2006-01-02T15:04:05Z07:00")
/^(\d+)$/ {
should_be_int = $1
should_be_int_map[$1] = $1
}
/^(\d+\.\d+)$/ {
should_be_float = $1
should_be_float_map[$1] = $1
}
/(?P<bar>[+-]?[\d.]+)/ {
$bar < -1 {
neg++
}
}
/^(\d+)$/ {
# Sneaky float promotion
i += 1.0 * $1
}
mtail-3.0.0~rc24.1/examples/vsftpd.mtail 0000664 0000000 0000000 00000005410 13435446430 0020032 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# A mtail module for monitoring vsftpd logs
#
# Configure your vsftpd to write the xferlog as well as vsftpd.log
hidden text direction
counter bytes_transferred by direction
counter transfer_time by direction
counter transfers by direction
counter connects
counter logins
counter uploads
counter commands by command
counter responses by response
hidden gauge sessions by client
counter session_time
def vsftpd_timestamp {
# Mon Feb 21 15:21:32 2011
/^\w+\s(?P<date>\w+\s+\d+\s\d+:\d+:\d+\s\d+)/ {
strptime($date, "Jan _2 15:04:05 2006")
next
}
}
const XFERLOG_RE // +
# e.g. 1 172.18.115.36 528
# time spent transferring
/\s(?P<transfertime>\d+)/ +
# remote host
/\s\d+\.\d+\.\d+\.\d+/ +
# bytes transferred
/\s(?P<bytestransferred>\d+)/ +
# filename
/\s(?P<filename>\S+)/ +
# e.g. b _ i a anonymous@ ftp 0 * c
# transfertype
/\s\S/ +
# special action flag
/\s\S/ +
# direction
/\s(?P<direction>\S)/ +
# access mode
/\s\S/ +
# username
/\s\S+/ +
# service name
/\s\S+/ +
# authentication method
/\s\d/ +
# authenticated id
/\s\S+/ +
# completion status
/\s(?P<completionstatus>\S)/
const VSFTPD_LOG_RE // +
/ \[pid \d+\]/ +
/( \[\w+\])?/ +
/ (?P<command>CONNECT|OK LOGIN|OK UPLOAD|FTP (command|response)):/ +
/ Client "(?P<client>\d+\.\d+\.\d+\.\d+)"/ +
/(, (?P<payload>.*))?/
const PAYLOAD_RESPONSE_RE /^"(\d{3})[" -]/
const PAYLOAD_COMMAND_RE /^"(\w{4})[" -]/
@vsftpd_timestamp {
getfilename() =~ /xferlog/ {
// + XFERLOG_RE {
# Handles log entries from the wuftpd format xferlog.
$direction == "i" {
direction = "incoming"
}
$direction == "o" {
direction = "outgoing"
}
$completionstatus == "c" {
transfers[direction]++
}
transfer_time[direction] += $transfertime
bytes_transferred[direction] += $bytestransferred
}
}
getfilename() =~ /vsftpd.log/ {
// + VSFTPD_LOG_RE {
# Handle entries in the vsftpd.log log file.
$command == "CONNECT" {
sessions[$client] = timestamp()
del sessions[$client] after 168h
connects++
}
$command == "OK LOGIN" {
logins++
}
$command == "OK UPLOAD" {
uploads++
}
$command == "FTP command" {
$payload =~ // + PAYLOAD_COMMAND_RE {
commands[$1]++
$1 == "QUIT" {
session_time += timestamp() - sessions[$client]
del sessions[$client]
}
}
}
$command == "FTP response" {
$payload =~ // + PAYLOAD_RESPONSE_RE {
responses[$1]++
}
}
}
}
}
mtail-3.0.0~rc24.1/fuzz/ 0000775 0000000 0000000 00000000000 13435446430 0014654 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/fuzz/fuzz.sh 0000775 0000000 0000000 00000000514 13435446430 0016211 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -x
d=$(dirname $0)
EMTAIL=$d/../mtail
EMGEN=$d/../emgen/emgen
OUT=$d/../fuzzout
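# run generates a random mtail program from seed $1 using emgen, then checks
# that mtail can compile it cleanly.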
run() {
rm -rf $OUT/*
$EMGEN --rand_seed $1 > $OUT/fuzz$1.mtail
$EMTAIL --compile_only --dump_bytecode --logs foo.log --progs $OUT
echo $?
}
if [[ -n "$1" ]]; then
run $1
else
for i in $(seq 0 99); do
run $i
done
fi
mtail-3.0.0~rc24.1/hooks/ 0000775 0000000 0000000 00000000000 13435446430 0015001 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/hooks/build 0000775 0000000 0000000 00000000660 13435446430 0016030 0 ustar 00root root 0000000 0000000 #!/bin/bash
# $IMAGE_NAME var is injected into the build so the tag is correct.
echo "Build hook running"
docker build \
--build-arg version=$(git describe --tags --always) \
--build-arg commit_hash=$(git rev-parse HEAD) \
--build-arg vcs_url=$(git config --get remote.origin.url) \
--build-arg vcs_branch=$(git rev-parse --abbrev-ref HEAD) \
--build-arg build_date=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
-t $IMAGE_NAME .
mtail-3.0.0~rc24.1/hooks/post_checkout 0000775 0000000 0000000 00000000152 13435446430 0017577 0 ustar 00root root 0000000 0000000 #!/bin/bash
echo "Unshallowing to get correct tags to work."
git fetch --tags --unshallow --quiet origin
mtail-3.0.0~rc24.1/hooks/post_push 0000775 0000000 0000000 00000002105 13435446430 0016751 0 ustar 00root root 0000000 0000000 #!/bin/bash
# hooks/post_push
# https://docs.docker.com/docker-cloud/builds/advanced/
# https://semver.org/
function add_tag() {
echo "Adding tag ${1}"
docker tag $IMAGE_NAME $DOCKER_REPO:$1
docker push $DOCKER_REPO:$1
}
TAG=`git describe --tag --match "v*"`
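# TAG is the output of git describe, e.g. "v3.0.0-rc24" or "v3.0.0-rc24-1-gabcdef".
# Split the dotted version out of the first dash-separated field, and treat the
# second field, if present, as the pre-release string.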
MAJOR=`echo ${TAG} | awk -F'-' '{print $1}' | awk -F'.' '{print $1}' | sed 's/v//'`
MINOR=`echo ${TAG} | awk -F'-' '{print $1}' | awk -F'.' '{print $2}' | sed 's/v//'`
PATCH=`echo ${TAG} | awk -F'-' '{print $1}' | awk -F'.' '{print $3}' | sed 's/v//'`
PRLS=`echo ${TAG} | awk -F'-' '{print $2}'`
num='^[0-9]+$'
pre='^[0-9A-Za-z\.]+$'
echo "Current Build: ${TAG}"
if [ ! -z $MAJOR ] && [[ $MAJOR =~ $num ]]; then
add_tag ${MAJOR}
if [ ! -z $MINOR ] && [[ $MINOR =~ $num ]]; then
add_tag ${MAJOR}.${MINOR}
if [ ! -z $PATCH ] && [[ $PATCH =~ $num ]]; then
add_tag ${MAJOR}.${MINOR}.${PATCH}
if [ ! -z $PRLS ] && [[ ! $PRLS =~ $num ]] && [[ $PRLS =~ $pre ]]; then
add_tag ${MAJOR}.${MINOR}.${PATCH}-${PRLS}
fi
fi
fi
fi
exit $?
mtail-3.0.0~rc24.1/internal/ 0000775 0000000 0000000 00000000000 13435446430 0015472 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/internal/exporter/ 0000775 0000000 0000000 00000000000 13435446430 0017342 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/internal/exporter/collectd.go 0000664 0000000 0000000 00000002332 13435446430 0021462 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"strings"
"github.com/google/mtail/internal/metrics"
)
const (
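// collectdFormat renders one PUTVAL command of the collectd text protocol;
// with an empty prefix it expands to something like (values illustrative):
//   PUTVAL "myhost/mtail-myprog/counter-lines_total" interval=60 1435617431:42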
collectdFormat = "PUTVAL \"%s/%smtail-%s/%s-%s\" interval=%d %s:%s\n"
)
var (
collectdSocketPath = flag.String("collectd_socketpath", "",
"Path to collectd unixsock to write metrics to.")
collectdPrefix = flag.String("collectd_prefix", "",
"Prefix to use for collectd metrics.")
collectdExportTotal = expvar.NewInt("collectd_export_total")
collectdExportSuccess = expvar.NewInt("collectd_export_success")
)
// metricToCollectd encodes the metric data in the collectd text protocol format. The
// metric lock is held before entering this function.
func metricToCollectd(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {
return fmt.Sprintf(collectdFormat,
hostname,
*collectdPrefix,
m.Program,
kindToCollectdType(m.Kind),
formatLabels(m.Name, l.Labels, "-", "-", "_"),
*pushInterval,
l.Datum.TimeString(),
l.Datum.ValueString())
}
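// kindToCollectdType maps a metric kind onto a collectd data source type.
// collectd has no timer type, so timers are exported as gauges.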
func kindToCollectdType(kind metrics.Kind) string {
if kind != metrics.Timer {
return strings.ToLower(kind.String())
}
return "gauge"
}
mtail-3.0.0~rc24.1/internal/exporter/export.go 0000664 0000000 0000000 00000013004 13435446430 0021210 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Package exporter provides the interface for getting metrics out of mtail,
// into your monitoring system of choice.
package exporter
import (
"expvar"
"flag"
"fmt"
"io"
"net"
"os"
"strings"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/pkg/errors"
)
// Commandline Flags.
var (
pushInterval = flag.Int("metric_push_interval_seconds", 60,
"Interval between metric pushes, in seconds.")
writeDeadline = flag.Duration("metric_push_write_deadline", 10*time.Second, "Time to wait for a push to succeed before exiting with an error.")
)
// Exporter manages the export of metrics to passive and active collectors.
type Exporter struct {
store *metrics.Store
hostname string
omitProgLabel bool
pushTargets []pushOptions
}
// Hostname is an option that specifies the mtail hostname to use in exported metrics.
func Hostname(hostname string) func(*Exporter) error {
return func(e *Exporter) error {
e.hostname = hostname
return nil
}
}
// OmitProgLabel sets the Exporter to not put program names in metric labels.
func OmitProgLabel(e *Exporter) error {
e.omitProgLabel = true
return nil
}
// New creates a new Exporter.
func New(store *metrics.Store, options ...func(*Exporter) error) (*Exporter, error) {
if store == nil {
return nil, errors.New("exporter needs a Store")
}
e := &Exporter{store: store}
if err := e.SetOption(options...); err != nil {
return nil, err
}
// defaults after options have been set
if e.hostname == "" {
var err error
e.hostname, err = os.Hostname()
if err != nil {
return nil, errors.Wrap(err, "getting hostname")
}
}
if *collectdSocketPath != "" {
o := pushOptions{"unix", *collectdSocketPath, metricToCollectd, collectdExportTotal, collectdExportSuccess}
e.RegisterPushExport(o)
}
if *graphiteHostPort != "" {
o := pushOptions{"tcp", *graphiteHostPort, metricToGraphite, graphiteExportTotal, graphiteExportSuccess}
e.RegisterPushExport(o)
}
if *statsdHostPort != "" {
o := pushOptions{"udp", *statsdHostPort, metricToStatsd, statsdExportTotal, statsdExportSuccess}
e.RegisterPushExport(o)
}
return e, nil
}
// SetOption takes one or more option functions and applies them in order to Exporter.
func (e *Exporter) SetOption(options ...func(*Exporter) error) error {
for _, option := range options {
if err := option(e); err != nil {
return err
}
}
return nil
}
// formatLabels converts a metric name and key-value map of labels into a
// single string in the correct output format for each export target.
// ksep separates a label key from its value, and sep separates one
// key/value pair from the next.
// If not empty, rep replaces any occurrences of ksep and sep found in the
// original key and value strings.
func formatLabels(name string, m map[string]string, ksep, sep, rep string) string {
r := name
if len(m) > 0 {
var s []string
for k, v := range m {
k1 := strings.Replace(strings.Replace(k, ksep, rep, -1), sep, rep, -1)
v1 := strings.Replace(strings.Replace(v, ksep, rep, -1), sep, rep, -1)
s = append(s, fmt.Sprintf("%s%s%s", k1, ksep, v1))
}
return r + sep + strings.Join(s, sep)
}
return r
}
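// As a worked example, formatLabels("requests", map[string]string{"code": "200"}, ".", ".", "_")
// returns "requests.code.200" (the graphite form), while ksep="-" and
// sep="-" produce "requests-code-200" (the collectd form).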
// Format a LabelSet into a string to be written to one of the timeseries
// sockets.
type formatter func(string, *metrics.Metric, *metrics.LabelSet) string
func (e *Exporter) writeSocketMetrics(c io.Writer, f formatter, exportTotal *expvar.Int, exportSuccess *expvar.Int) error {
e.store.RLock()
defer e.store.RUnlock()
for _, ml := range e.store.Metrics {
for _, m := range ml {
m.RLock()
// Don't try to send text metrics to any push service.
if m.Kind == metrics.Text {
m.RUnlock()
continue
}
exportTotal.Add(1)
lc := make(chan *metrics.LabelSet)
go m.EmitLabelSets(lc)
for l := range lc {
line := f(e.hostname, m, l)
n, err := fmt.Fprint(c, line)
glog.V(2).Infof("Sent %d bytes\n", n)
if err == nil {
exportSuccess.Add(1)
} else {
return errors.Errorf("write error: %s\n", err)
}
}
m.RUnlock()
}
}
return nil
}
// PushMetrics sends metrics to each of the configured services.
func (e *Exporter) PushMetrics() {
for _, target := range e.pushTargets {
glog.V(2).Infof("pushing to %s", target.addr)
conn, err := net.DialTimeout(target.net, target.addr, *writeDeadline)
if err != nil {
glog.Infof("pusher dial error: %s", err)
continue
}
err = conn.SetDeadline(time.Now().Add(*writeDeadline))
if err != nil {
glog.Infof("Couldn't set deadline on connection: %s", err)
}
err = e.writeSocketMetrics(conn, target.f, target.total, target.success)
if err != nil {
glog.Infof("pusher write error: %s", err)
}
err = conn.Close()
if err != nil {
glog.Infof("connection close failed: %s", err)
}
}
}
// StartMetricPush pushes metrics to the configured services each interval.
func (e *Exporter) StartMetricPush() {
if len(e.pushTargets) > 0 {
glog.Info("Started metric push.")
ticker := time.NewTicker(time.Duration(*pushInterval) * time.Second)
go func() {
for range ticker.C {
e.PushMetrics()
}
}()
}
}
type pushOptions struct {
net, addr string
f formatter
total, success *expvar.Int
}
// RegisterPushExport adds a push export connection to the Exporter. Items in
// the list must describe a Dial()able connection and will have all the
// metrics pushed to them every pushInterval.
func (e *Exporter) RegisterPushExport(p pushOptions) {
e.pushTargets = append(e.pushTargets, p)
}
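// The sketch below (not part of the original source) shows how code in this
// package could wire up an Exporter with an explicit graphite push target;
// "carbon.example.com:2003" is a placeholder address.
//
//	store := metrics.NewStore()
//	e, err := New(store, Hostname("web1"))
//	if err != nil {
//		// handle error
//	}
//	e.RegisterPushExport(pushOptions{"tcp", "carbon.example.com:2003",
//		metricToGraphite, graphiteExportTotal, graphiteExportSuccess})
//	e.StartMetricPush()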
mtail-3.0.0~rc24.1/internal/exporter/export_test.go 0000664 0000000 0000000 00000013554 13435446430 0022261 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"errors"
"reflect"
"sort"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
func TestCreateExporter(t *testing.T) {
_, err := New(nil)
if err == nil {
t.Error("expecting error, got nil")
}
store := metrics.NewStore()
_, err = New(store)
if err != nil {
t.Errorf("unexpected error:%s", err)
}
failopt := func(*Exporter) error {
return errors.New("busted")
}
_, err = New(store, failopt)
if err == nil {
t.Errorf("unexpected success")
}
}
func FakeSocketWrite(f formatter, m *metrics.Metric) []string {
// TODO(jaq): urgh looking inside m to find preallocation size
ret := make([]string, 0, len(m.LabelValues))
lc := make(chan *metrics.LabelSet)
go m.EmitLabelSets(lc)
for l := range lc {
ret = append(ret, f("gunstar", m, l))
}
sort.Strings(ret)
return ret
}
func TestMetricToCollectd(t *testing.T) {
ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
if terr != nil {
t.Errorf("time parse error: %s", terr)
}
ms := metrics.NewStore()
scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
d, _ := scalarMetric.GetDatum()
datum.SetInt(d, 37, ts)
testutil.FatalIfErr(t, ms.Add(scalarMetric))
r := FakeSocketWrite(metricToCollectd, scalarMetric)
expected := []string{"PUTVAL \"gunstar/mtail-prog/counter-foo\" interval=60 1343124840:37\n"}
diff := testutil.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "label")
d, _ = dimensionedMetric.GetDatum("quux")
datum.SetInt(d, 37, ts)
d, _ = dimensionedMetric.GetDatum("snuh")
datum.SetInt(d, 37, ts)
ms.ClearMetrics()
testutil.FatalIfErr(t, ms.Add(dimensionedMetric))
r = FakeSocketWrite(metricToCollectd, dimensionedMetric)
expected = []string{
"PUTVAL \"gunstar/mtail-prog/gauge-bar-label-quux\" interval=60 1343124840:37\n",
"PUTVAL \"gunstar/mtail-prog/gauge-bar-label-snuh\" interval=60 1343124840:37\n"}
diff = testutil.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int)
d, _ = timingMetric.GetDatum()
datum.SetInt(d, 123, ts)
testutil.FatalIfErr(t, ms.Add(timingMetric))
r = FakeSocketWrite(metricToCollectd, timingMetric)
expected = []string{"PUTVAL \"gunstar/mtail-prog/gauge-foo\" interval=60 1343124840:123\n"}
diff = testutil.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
*collectdPrefix = "prefix"
r = FakeSocketWrite(metricToCollectd, timingMetric)
expected = []string{"PUTVAL \"gunstar/prefixmtail-prog/gauge-foo\" interval=60 1343124840:123\n"}
diff = testutil.Diff(expected, r)
if diff != "" {
t.Errorf("prefixed string didn't match:\n%s", diff)
}
}
func TestMetricToGraphite(t *testing.T) {
ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
if terr != nil {
t.Errorf("time parse error: %s", terr)
}
scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
d, _ := scalarMetric.GetDatum()
datum.SetInt(d, 37, ts)
r := FakeSocketWrite(metricToGraphite, scalarMetric)
expected := []string{"prog.foo 37 1343124840\n"}
diff := testutil.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "host")
d, _ = dimensionedMetric.GetDatum("quux.com")
datum.SetInt(d, 37, ts)
d, _ = dimensionedMetric.GetDatum("snuh.teevee")
datum.SetInt(d, 37, ts)
r = FakeSocketWrite(metricToGraphite, dimensionedMetric)
expected = []string{
"prog.bar.host.quux_com 37 1343124840\n",
"prog.bar.host.snuh_teevee 37 1343124840\n"}
diff = testutil.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
*graphitePrefix = "prefix"
r = FakeSocketWrite(metricToGraphite, dimensionedMetric)
expected = []string{
"prefixprog.bar.host.quux_com 37 1343124840\n",
"prefixprog.bar.host.snuh_teevee 37 1343124840\n"}
diff = testutil.Diff(expected, r)
if diff != "" {
t.Errorf("prefixed string didn't match:\n%s", diff)
}
}
func TestMetricToStatsd(t *testing.T) {
ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
if terr != nil {
t.Errorf("time parse error: %s", terr)
}
scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
d, _ := scalarMetric.GetDatum()
datum.SetInt(d, 37, ts)
r := FakeSocketWrite(metricToStatsd, scalarMetric)
expected := []string{"prog.foo:37|c"}
if !reflect.DeepEqual(expected, r) {
t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
}
dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "l")
d, _ = dimensionedMetric.GetDatum("quux")
datum.SetInt(d, 37, ts)
d, _ = dimensionedMetric.GetDatum("snuh")
datum.SetInt(d, 42, ts)
r = FakeSocketWrite(metricToStatsd, dimensionedMetric)
expected = []string{
"prog.bar.l.quux:37|g",
"prog.bar.l.snuh:42|g"}
if !reflect.DeepEqual(expected, r) {
t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
}
timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int)
d, _ = timingMetric.GetDatum()
datum.SetInt(d, 37, ts)
r = FakeSocketWrite(metricToStatsd, timingMetric)
expected = []string{"prog.foo:37|ms"}
if !reflect.DeepEqual(expected, r) {
t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
}
*statsdPrefix = "prefix"
r = FakeSocketWrite(metricToStatsd, timingMetric)
expected = []string{"prefixprog.foo:37|ms"}
if !reflect.DeepEqual(expected, r) {
t.Errorf("prefixed string didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
}
}
mtail-3.0.0~rc24.1/internal/exporter/graphite.go 0000664 0000000 0000000 00000001667 13435446430 0021506 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"github.com/google/mtail/internal/metrics"
)
var (
graphiteHostPort = flag.String("graphite_host_port", "",
"Host:port to graphite carbon server to write metrics to.")
graphitePrefix = flag.String("graphite_prefix", "",
"Prefix to use for graphite metrics.")
graphiteExportTotal = expvar.NewInt("graphite_export_total")
graphiteExportSuccess = expvar.NewInt("graphite_export_success")
)
// metricToGraphite encodes a metric in the graphite text protocol format. The
// metric lock is held before entering this function.
func metricToGraphite(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {
return fmt.Sprintf("%s%s.%s %v %v\n",
*graphitePrefix,
m.Program,
formatLabels(m.Name, l.Labels, ".", ".", "_"),
l.Datum.ValueString(),
l.Datum.TimeString())
}
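// For illustration (mirroring the package tests), a gauge "bar" from
// program "prog" with label host="quux.com" renders as:
//
//	prog.bar.host.quux_com 37 1343124840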
mtail-3.0.0~rc24.1/internal/exporter/json.go 0000664 0000000 0000000 00000001443 13435446430 0020644 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"encoding/json"
"expvar"
"net/http"
"github.com/golang/glog"
)
var (
exportJSONErrors = expvar.NewInt("exporter_json_errors")
)
// HandleJSON exports the metrics in JSON format via HTTP.
func (e *Exporter) HandleJSON(w http.ResponseWriter, r *http.Request) {
b, err := json.MarshalIndent(e.store, "", " ")
if err != nil {
exportJSONErrors.Add(1)
glog.Info("error marshalling metrics into json:", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("content-type", "application/json")
if _, err := w.Write(b); err != nil {
glog.Error(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
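// A minimal sketch (not part of the original source) of mounting this
// handler on the default HTTP mux, given an Exporter e:
//
//	http.HandleFunc("/json", e.HandleJSON)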
mtail-3.0.0~rc24.1/internal/exporter/json_test.go 0000664 0000000 0000000 00000004434 13435446430 0021706 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
var handleJSONTests = []struct {
name string
metrics []*metrics.Metric
expected string
}{
{"empty",
[]*metrics.Metric{},
"[]",
},
{"single",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`[
{
"Name": "foo",
"Program": "test",
"Kind": 1,
"Type": 0,
"LabelValues": [
{
"Value": {
"Value": 1,
"Time": 0
}
}
]
}
]`,
},
{"dimensioned",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
Keys: []string{"a", "b"},
LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`[
{
"Name": "foo",
"Program": "test",
"Kind": 1,
"Type": 0,
"Keys": [
"a",
"b"
],
"LabelValues": [
{
"Labels": [
"1",
"2"
],
"Value": {
"Value": 1,
"Time": 0
}
}
]
}
]`,
},
}
func TestHandleJSON(t *testing.T) {
for _, tc := range handleJSONTests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
ms := metrics.NewStore()
for _, metric := range tc.metrics {
testutil.FatalIfErr(t, ms.Add(metric))
}
e, err := New(ms, Hostname("gunstar"))
if err != nil {
t.Fatalf("couldn't make exporter: %s", err)
}
response := httptest.NewRecorder()
e.HandleJSON(response, &http.Request{})
if response.Code != 200 {
t.Errorf("response code not 200: %d", response.Code)
}
b, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Errorf("failed to read response: %s", err)
}
diff := testutil.Diff(tc.expected, string(b), testutil.IgnoreUnexported(sync.RWMutex{}))
if diff != "" {
t.Error(diff)
}
})
}
}
mtail-3.0.0~rc24.1/internal/exporter/prometheus.go 0000664 0000000 0000000 00000004347 13435446430 0022074 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"fmt"
"strings"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/prometheus/client_golang/prometheus"
)
var (
metricExportTotal = expvar.NewInt("metric_export_total")
)
func noHyphens(s string) string {
return strings.Replace(s, "-", "_", -1)
}
// Describe implements the prometheus.Collector interface.
func (e *Exporter) Describe(c chan<- *prometheus.Desc) {
prometheus.DescribeByCollect(e, c)
}
// Collect implements the prometheus.Collector interface.
func (e *Exporter) Collect(c chan<- prometheus.Metric) {
e.store.RLock()
defer e.store.RUnlock()
for _, ml := range e.store.Metrics {
lastSource := ""
for _, m := range ml {
m.RLock()
// We don't have a way of converting text metrics to prometheus format.
if m.Kind == metrics.Text {
m.RUnlock()
continue
}
metricExportTotal.Add(1)
lsc := make(chan *metrics.LabelSet)
go m.EmitLabelSets(lsc)
for ls := range lsc {
if lastSource == "" {
lastSource = m.Source
}
var keys []string
var vals []string
if !e.omitProgLabel {
keys = append(keys, "prog")
vals = append(vals, m.Program)
}
for k, v := range ls.Labels {
keys = append(keys, k)
vals = append(vals, v)
}
pM, err := prometheus.NewConstMetric(
prometheus.NewDesc(noHyphens(m.Name),
fmt.Sprintf("defined at %s", lastSource), keys, nil),
promTypeForKind(m.Kind),
promValueForDatum(ls.Datum),
vals...)
if err != nil {
glog.Warning(err)
continue
}
c <- prometheus.NewMetricWithTimestamp(ls.Datum.TimeUTC(), pM)
}
m.RUnlock()
}
}
}
func promTypeForKind(k metrics.Kind) prometheus.ValueType {
switch k {
case metrics.Counter:
return prometheus.CounterValue
case metrics.Gauge:
return prometheus.GaugeValue
case metrics.Timer:
return prometheus.GaugeValue
}
return prometheus.UntypedValue
}
func promValueForDatum(d datum.Datum) float64 {
switch n := d.(type) {
case *datum.IntDatum:
return float64(n.Get())
case *datum.FloatDatum:
return n.Get()
}
return 0.
}
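// Because Exporter implements prometheus.Collector, it can be registered
// directly with a registry. A minimal sketch (not part of the original
// source), given an Exporter e:
//
//	registry := prometheus.NewRegistry()
//	if err := registry.Register(e); err != nil {
//		// handle error
//	}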
mtail-3.0.0~rc24.1/internal/exporter/statsd.go 0000664 0000000 0000000 00000002060 13435446430 0021171 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"github.com/google/mtail/internal/metrics"
)
var (
statsdHostPort = flag.String("statsd_hostport", "",
"Host:port to statsd server to write metrics to.")
statsdPrefix = flag.String("statsd_prefix", "",
"Prefix to use for statsd metrics.")
statsdExportTotal = expvar.NewInt("statsd_export_total")
statsdExportSuccess = expvar.NewInt("statsd_export_success")
)
// metricToStatsd encodes a metric in the statsd text protocol format. The
// metric lock is held before entering this function.
func metricToStatsd(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {
var t string
switch m.Kind {
case metrics.Counter:
t = "c" // StatsD Counter
case metrics.Gauge:
t = "g" // StatsD Gauge
case metrics.Timer:
t = "ms" // StatsD Timer
}
return fmt.Sprintf("%s%s.%s:%s|%s",
*statsdPrefix,
m.Program,
formatLabels(m.Name, l.Labels, ".", ".", "_"),
l.Datum.ValueString(), t)
}
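// For illustration (mirroring the package tests), a counter "foo" from
// program "prog" renders as "prog.foo:37|c", and a timer as "prog.foo:37|ms".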
mtail-3.0.0~rc24.1/internal/exporter/varz.go 0000664 0000000 0000000 00000002437 13435446430 0020661 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"fmt"
"net/http"
"sort"
"strings"
"github.com/google/mtail/internal/metrics"
)
var (
exportVarzTotal = expvar.NewInt("exporter_varz_total")
)
const varzFormat = "%s{%s} %s\n"
// HandleVarz exports the metrics in Varz format via HTTP.
func (e *Exporter) HandleVarz(w http.ResponseWriter, r *http.Request) {
e.store.RLock()
defer e.store.RUnlock()
w.Header().Add("Content-type", "text/plain")
for _, ml := range e.store.Metrics {
for _, m := range ml {
m.RLock()
exportVarzTotal.Add(1)
lc := make(chan *metrics.LabelSet)
go m.EmitLabelSets(lc)
for l := range lc {
line := metricToVarz(m, l, e.omitProgLabel, e.hostname)
fmt.Fprint(w, line)
}
m.RUnlock()
}
}
}
func metricToVarz(m *metrics.Metric, l *metrics.LabelSet, omitProgLabel bool, hostname string) string {
s := make([]string, 0, len(l.Labels)+2)
for k, v := range l.Labels {
s = append(s, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(s)
if !omitProgLabel {
s = append(s, fmt.Sprintf("prog=%s", m.Program))
}
s = append(s, fmt.Sprintf("instance=%s", hostname))
return fmt.Sprintf(varzFormat,
m.Name,
strings.Join(s, ","),
l.Datum.ValueString())
}
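// For illustration (mirroring the package tests), a dimensioned counter
// renders as:
//
//	foo{a=1,b=2,prog=test,instance=gunstar} 1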
mtail-3.0.0~rc24.1/internal/exporter/varz_test.go 0000664 0000000 0000000 00000004170 13435446430 0021714 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
var handleVarzTests = []struct {
name string
metrics []*metrics.Metric
expected string
}{
{"empty",
[]*metrics.Metric{},
"",
},
{"single",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}},
},
},
`foo{prog=test,instance=gunstar} 1
`,
},
{"dimensioned",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
Keys: []string{"a", "b"},
LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}},
},
},
`foo{a=1,b=2,prog=test,instance=gunstar} 1
`,
},
{"text",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Text,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeString("hi", time.Unix(1397586900, 0))}},
},
},
`foo{prog=test,instance=gunstar} hi
`,
},
}
func TestHandleVarz(t *testing.T) {
for _, tc := range handleVarzTests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
ms := metrics.NewStore()
for _, metric := range tc.metrics {
testutil.FatalIfErr(t, ms.Add(metric))
}
e, err := New(ms, Hostname("gunstar"))
if err != nil {
t.Fatalf("couldn't make exporter: %s", err)
}
response := httptest.NewRecorder()
e.HandleVarz(response, &http.Request{})
if response.Code != 200 {
t.Errorf("response code not 200: %d", response.Code)
}
b, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Errorf("failed to read response: %s", err)
}
diff := testutil.Diff(tc.expected, string(b))
if diff != "" {
t.Error(diff)
}
})
}
}
mtail-3.0.0~rc24.1/internal/logline/ 0000775 0000000 0000000 00000000000 13435446430 0017123 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/internal/logline/logline.go 0000664 0000000 0000000 00000000756 13435446430 0021113 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package logline
// LogLine contains all the information about a line just read from a log.
type LogLine struct {
Filename string // The log filename that this line was read from
Line string // The text of the log line itself up to the newline.
}
// NewLogLine creates a new LogLine object.
func NewLogLine(filename string, line string) *LogLine {
return &LogLine{filename, line}
}
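// A trivial usage sketch (not part of the original source):
//
//	ll := NewLogLine("/var/log/syslog", "Jan 1 00:00:00 host prog: message")
//	// ll.Filename == "/var/log/syslog"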
mtail-3.0.0~rc24.1/internal/metrics/ 0000775 0000000 0000000 00000000000 13435446430 0017140 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/internal/metrics/datum/ 0000775 0000000 0000000 00000000000 13435446430 0020252 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/internal/metrics/datum/datum.go 0000664 0000000 0000000 00000010736 13435446430 0021722 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"fmt"
"sync/atomic"
"time"
)
// Type describes the type of value stored in a Datum.
type Type int
const (
// Int describes an integer datum
Int Type = iota
// Float describes a floating point datum
Float
// String describes printable strings of text
String
)
func (t Type) String() string {
switch t {
case Int:
return "Int"
case Float:
return "Float"
case String:
return "String"
}
return "?"
}
// Datum is an interface for metric datums, with a type, value and timestamp to be exported.
type Datum interface {
// Type returns the Datum type.
Type() Type
// ValueString returns the value of a Datum as a string.
ValueString() string
// TimeString returns the timestamp of a Datum as a string.
TimeString() string
// TimeUTC returns the timestamp of the Datum as a time.Time in UTC.
TimeUTC() time.Time
}
// BaseDatum is a struct used to record timestamps across all Datum implementations.
type BaseDatum struct {
Time int64 // nanoseconds since unix epoch
}
var zeroTime time.Time
func (d *BaseDatum) stamp(timestamp time.Time) {
if timestamp.IsZero() {
atomic.StoreInt64(&d.Time, time.Now().UTC().UnixNano())
} else {
atomic.StoreInt64(&d.Time, timestamp.UnixNano())
}
}
// TimeString returns the timestamp of this Datum as a string.
func (d *BaseDatum) TimeString() string {
return fmt.Sprintf("%d", atomic.LoadInt64(&d.Time)/1e9)
}
// TimeUTC returns the timestamp of this Datum as a time.Time in UTC.
func (d *BaseDatum) TimeUTC() time.Time {
tNsec := atomic.LoadInt64(&d.Time)
return time.Unix(tNsec/1e9, tNsec%1e9).UTC()
}
// NewInt creates a new zero integer datum.
func NewInt() Datum {
return MakeInt(0, zeroTime)
}
// NewFloat creates a new zero floating-point datum.
func NewFloat() Datum {
return MakeFloat(0., zeroTime)
}
// NewString creates a new zero string datum.
func NewString() Datum {
return MakeString("", zeroTime)
}
// MakeInt creates a new integer datum with the provided value and timestamp.
func MakeInt(v int64, ts time.Time) Datum {
d := &IntDatum{}
d.Set(v, ts)
return d
}
// MakeFloat creates a new floating-point datum with the provided value and timestamp.
func MakeFloat(v float64, ts time.Time) Datum {
d := &FloatDatum{}
d.Set(v, ts)
return d
}
// MakeString creates a new string datum with the provided value and timestamp
func MakeString(v string, ts time.Time) Datum {
d := &StringDatum{}
d.Set(v, ts)
return d
}
// GetInt returns the integer value of a datum, or panics if the Datum is not an IntDatum.
func GetInt(d Datum) int64 {
switch d := d.(type) {
case *IntDatum:
return d.Get()
default:
panic(fmt.Sprintf("datum %v is not an Int", d))
}
}
// GetFloat returns the floating-point value of a datum, or panics if the Datum is not a FloatDatum.
func GetFloat(d Datum) float64 {
switch d := d.(type) {
case *FloatDatum:
return d.Get()
default:
panic(fmt.Sprintf("datum %v is not a Float", d))
}
}
// GetString returns the string value of a datum, or panics if the Datum is not a StringDatum.
func GetString(d Datum) string {
switch d := d.(type) {
case *StringDatum:
return d.Get()
default:
panic(fmt.Sprintf("datum %v is not a String", d))
}
}
// SetInt sets an integer datum to the provided value and timestamp, or panics if the Datum is not an IntDatum.
func SetInt(d Datum, v int64, ts time.Time) {
switch d := d.(type) {
case *IntDatum:
d.Set(v, ts)
default:
panic(fmt.Sprintf("datum %v is not an Int", d))
}
}
// SetFloat sets a floating-point Datum to the provided value and timestamp, or panics if the Datum is not a FloatDatum.
func SetFloat(d Datum, v float64, ts time.Time) {
switch d := d.(type) {
case *FloatDatum:
d.Set(v, ts)
default:
panic(fmt.Sprintf("datum %v is not a Float", d))
}
}
// SetString sets a string Datum to the provided value and timestamp, or panics if the Datum is not a StringDatum.
func SetString(d Datum, v string, ts time.Time) {
switch d := d.(type) {
case *StringDatum:
d.Set(v, ts)
default:
panic(fmt.Sprintf("datum %v is not a String", d))
}
}
// IncIntBy increments an integer Datum by the provided value, at time ts, or panics if the Datum is not an IntDatum.
func IncIntBy(d Datum, v int64, ts time.Time) {
switch d := d.(type) {
case *IntDatum:
d.IncBy(v, ts)
default:
panic(fmt.Sprintf("datum %v is not an Int", d))
}
}
// DecIntBy decrements an integer Datum by the provided value, at time ts, or panics if the Datum is not an IntDatum.
func DecIntBy(d Datum, v int64, ts time.Time) {
switch d := d.(type) {
case *IntDatum:
d.DecBy(v, ts)
default:
panic(fmt.Sprintf("datum %v is not an Int", d))
}
}
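// An illustrative sketch of the typical datum lifecycle (mirroring the
// package tests):
//
//	d := MakeInt(0, time.Unix(0, 0))
//	IncIntBy(d, 1, time.Now().UTC())
//	fmt.Println(d.ValueString()) // "1"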
mtail-3.0.0~rc24.1/internal/metrics/datum/datum_test.go 0000664 0000000 0000000 00000002661 13435446430 0022757 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"testing"
"time"
"github.com/google/mtail/internal/testutil"
)
func TestDatumSetAndValue(t *testing.T) {
d := MakeInt(12, time.Unix(37, 42))
if r := GetInt(d); r != 12 {
t.Errorf("d ditn't return 12, got %v", r)
}
if r := d.ValueString(); r != "12" {
t.Errorf("d value is not 12, got %v", r)
}
if r := d.TimeString(); r != "37" {
t.Errorf("d Time not correct, got %v", r)
}
d = MakeFloat(1.2, time.Unix(37, 42))
if r := GetFloat(d); r != 1.2 {
t.Errorf("d ditn't return 12, got %v", r)
}
if r := d.ValueString(); r != "1.2" {
t.Errorf("d value is not 12, got %v", r)
}
if r := d.TimeString(); r != "37" {
t.Errorf("d Time not correct, got %v", r)
}
}
var datumJSONTests = []struct {
datum Datum
expected string
}{
{
MakeInt(37, time.Unix(42, 12)),
`{"Value":37,"Time":42000000012}`,
},
{
MakeFloat(37.1, time.Unix(42, 12)),
`{"Value":37.1,"Time":42000000012}`,
},
}
func TestMarshalJSON(t *testing.T) {
// This is not a round trip test because only the LabelValue knows how to unmarshal a Datum.
for i, tc := range datumJSONTests {
b, err := json.Marshal(tc.datum)
if err != nil {
t.Errorf("%d: Marshal failed: %v", i, err)
}
if diff := testutil.Diff(tc.expected, string(b)); diff != "" {
t.Errorf("%d: JSON didn't match:\n%s", i, diff)
}
}
}
mtail-3.0.0~rc24.1/internal/metrics/datum/float.go 0000664 0000000 0000000 00000002353 13435446430 0021711 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"fmt"
"math"
"sync/atomic"
"time"
)
// FloatDatum describes a floating point value at a given timestamp.
type FloatDatum struct {
BaseDatum
Valuebits uint64
}
// Type returns the Type of this Datum.
func (*FloatDatum) Type() Type { return Float }
// ValueString returns the value of the FloatDatum as a string.
func (d *FloatDatum) ValueString() string {
return fmt.Sprintf("%g", d.Get())
}
// Set sets the value of the FloatDatum at the timestamp ts.
func (d *FloatDatum) Set(v float64, ts time.Time) {
atomic.StoreUint64(&d.Valuebits, math.Float64bits(v))
d.stamp(ts)
}
// Get returns the floating-point value.
func (d *FloatDatum) Get() float64 {
return math.Float64frombits(atomic.LoadUint64(&d.Valuebits))
}
// String returns a string representation of the FloatDatum.
func (d *FloatDatum) String() string {
return fmt.Sprintf("%g@%d", d.Get(), atomic.LoadInt64(&d.Time))
}
// MarshalJSON returns a JSON encoding of the FloatDatum.
func (d *FloatDatum) MarshalJSON() ([]byte, error) {
j := struct {
Value float64
Time int64
}{d.Get(), atomic.LoadInt64(&d.Time)}
return json.Marshal(j)
}
mtail-3.0.0~rc24.1/internal/metrics/datum/int.go 0000664 0000000 0000000 00000003133 13435446430 0021373 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"fmt"
"sync/atomic"
"time"
)
// IntDatum describes an integer value at a given timestamp.
type IntDatum struct {
BaseDatum
Value int64
}
// Type returns the Type of an IntDatum, Int.
func (*IntDatum) Type() Type { return Int }
// Set sets the value of the IntDatum to the value at timestamp.
func (d *IntDatum) Set(value int64, timestamp time.Time) {
atomic.StoreInt64(&d.Value, value)
d.stamp(timestamp)
}
// IncBy increments the IntDatum's value by the value provided, at timestamp.
func (d *IntDatum) IncBy(delta int64, timestamp time.Time) {
atomic.AddInt64(&d.Value, delta)
d.stamp(timestamp)
}
// DecBy decrements the IntDatum's value by the value provided, at timestamp.
func (d *IntDatum) DecBy(delta int64, timestamp time.Time) {
atomic.AddInt64(&d.Value, -delta)
d.stamp(timestamp)
}
// Get returns the value of the IntDatum
func (d *IntDatum) Get() int64 {
return atomic.LoadInt64(&d.Value)
}
// String returns a string representation of the IntDatum.
func (d *IntDatum) String() string {
return fmt.Sprintf("%d@%d", atomic.LoadInt64(&d.Value), atomic.LoadInt64(&d.Time))
}
// ValueString returns the value of the IntDatum as a string.
func (d *IntDatum) ValueString() string {
return fmt.Sprintf("%d", atomic.LoadInt64(&d.Value))
}
// MarshalJSON returns a JSON encoding of the IntDatum.
func (d *IntDatum) MarshalJSON() ([]byte, error) {
j := struct {
Value int64
Time int64
}{d.Get(), atomic.LoadInt64(&d.Time)}
return json.Marshal(j)
}
mtail-3.0.0~rc24.1/internal/metrics/datum/int_test.go 0000664 0000000 0000000 00000001255 13435446430 0022435 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"testing"
"time"
)
func BenchmarkIncrementScalarInt(b *testing.B) {
d := &IntDatum{}
ts := time.Now().UTC()
for i := 0; i < b.N; i++ {
d.IncBy(1, ts)
}
}
func BenchmarkDecrementScalarInt(b *testing.B) {
d := &IntDatum{}
ts := time.Now().UTC()
for i := 0; i < b.N; i++ {
d.DecBy(1, ts)
}
}
func TestDecrementScalarInt(t *testing.T) {
d := &IntDatum{}
ts := time.Now().UTC()
d.IncBy(1, ts)
r := d.Get()
if r != 1 {
t.Errorf("expected 1, got %d", r)
}
d.DecBy(1, ts)
r = d.Get()
if r != 0 {
t.Errorf("expected 0, got %d", r)
}
}
mtail-3.0.0~rc24.1/internal/metrics/datum/string.go 0000664 0000000 0000000 00000002401 13435446430 0022104 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"fmt"
"sync"
"sync/atomic"
"time"
)
// StringDatum describes a string value at a given timestamp.
type StringDatum struct {
BaseDatum
mu sync.RWMutex
Value string
}
// Type returns the Type of a StringDatum, String.
func (*StringDatum) Type() Type { return String }
// Set sets the value of the StringDatum to the value at timestamp.
func (d *StringDatum) Set(value string, timestamp time.Time) {
d.mu.Lock()
d.Value = value
d.stamp(timestamp)
d.mu.Unlock()
}
// Get returns the value of the StringDatum
func (d *StringDatum) Get() string {
d.mu.RLock()
defer d.mu.RUnlock()
return d.Value
}
// String returns a string representation of the StringDatum.
func (d *StringDatum) String() string {
return fmt.Sprintf("%q@%d", d.Get(), atomic.LoadInt64(&d.Time))
}
// ValueString returns the value of the StringDatum as a string.
func (d *StringDatum) ValueString() string {
return d.Get()
}
// MarshalJSON returns a JSON encoding of the StringDatum.
func (d *StringDatum) MarshalJSON() ([]byte, error) {
j := struct {
Value string
Time int64
}{d.Get(), atomic.LoadInt64(&d.Time)}
return json.Marshal(j)
}
mtail-3.0.0~rc24.1/internal/metrics/metric.go 0000664 0000000 0000000 00000015165 13435446430 0020762 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Package metrics provides storage for metrics being recorded by mtail
// programs.
package metrics
import (
"encoding/json"
"fmt"
"sync"
"time"
"github.com/google/mtail/internal/metrics/datum"
"github.com/pkg/errors"
)
// Kind enumerates the types of metrics supported.
type Kind int
const (
_ Kind = iota
// Counter is a monotonically nondecreasing metric.
Counter
// Gauge is a Kind that can take on any value, and may be set
// discontinuously from its previous value.
Gauge
// Timer is a specialisation of Gauge that can be used to store time
// intervals, such as latency and durations. It enables certain behaviour
// in exporters that handle time intervals such as StatsD.
Timer
// Text is a special metric type for free text, usually operating as a 'hidden' metric, as such values often cannot be exported.
Text
)
const (
// Int indicates this metric is an integer metric type.
Int = datum.Int
// Float indicates this metric is a floating-point metric type.
Float = datum.Float
// String indicates this metric contains string values
String = datum.String
)
func (m Kind) String() string {
switch m {
case Counter:
return "Counter"
case Gauge:
return "Gauge"
case Timer:
return "Timer"
case Text:
return "Text"
}
return "Unknown"
}
// LabelValue is an object that names a Datum value with a list of label
// strings.
type LabelValue struct {
Labels []string `json:",omitempty"`
Value datum.Datum
// After this time of inactivity, the LabelValue is removed from the metric.
Expiry time.Duration `json:",omitempty"`
}
func (lv *LabelValue) String() string {
return fmt.Sprintf("LabelValue: %s %s", lv.Labels, lv.Value)
}
// Metric is an object that describes a metric, with its name, the creator and
// owner program name, its Kind, a sequence of Keys that may be used to
// add dimension to the metric, and a list of LabelValues that contain data for
// labels in each dimension of the Keys.
type Metric struct {
sync.RWMutex
Name string // Name
Program string // Instantiating program
Kind Kind
Type datum.Type
Hidden bool `json:",omitempty"`
Keys []string `json:",omitempty"`
LabelValues []*LabelValue `json:",omitempty"`
Source string `json:"-"`
}
// NewMetric returns a new empty metric of dimension len(keys).
func NewMetric(name string, prog string, kind Kind, typ datum.Type, keys ...string) *Metric {
m := newMetric(len(keys))
m.Name = name
m.Program = prog
m.Kind = kind
m.Type = typ
copy(m.Keys, keys)
return m
}
// newMetric returns a new empty Metric
func newMetric(len int) *Metric {
return &Metric{Keys: make([]string, len),
LabelValues: make([]*LabelValue, 0)}
}
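// FindLabelValueOrNil returns the LabelValue whose labels match the given label values, or nil if no match is found.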
func (m *Metric) FindLabelValueOrNil(labelvalues []string) *LabelValue {
Loop:
for i, lv := range m.LabelValues {
for j := 0; j < len(lv.Labels); j++ {
if lv.Labels[j] != labelvalues[j] {
continue Loop
}
}
return m.LabelValues[i]
}
return nil
}
// GetDatum returns the datum named by a sequence of string label values from a
// Metric. If the sequence of label values does not yet exist, it is created.
func (m *Metric) GetDatum(labelvalues ...string) (d datum.Datum, err error) {
if len(labelvalues) != len(m.Keys) {
return nil, errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m)
}
m.Lock()
defer m.Unlock()
if lv := m.FindLabelValueOrNil(labelvalues); lv != nil {
d = lv.Value
} else {
switch m.Type {
case datum.Int:
d = datum.NewInt()
case datum.Float:
d = datum.NewFloat()
case datum.String:
d = datum.NewString()
}
m.LabelValues = append(m.LabelValues, &LabelValue{Labels: labelvalues, Value: d})
}
return d, nil
}
// RemoveDatum removes the Datum described by labelvalues from the Metric m.
func (m *Metric) RemoveDatum(labelvalues ...string) error {
if len(labelvalues) != len(m.Keys) {
return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m)
}
m.Lock()
defer m.Unlock()
Loop:
for i, lv := range m.LabelValues {
for j := 0; j < len(lv.Labels); j++ {
if lv.Labels[j] != labelvalues[j] {
continue Loop
}
}
// remove from the slice
m.LabelValues = append(m.LabelValues[:i], m.LabelValues[i+1:]...)
}
return nil
}
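// ExpireDatum marks the Datum described by labelvalues with an expiry duration, after which inactivity makes it eligible for garbage collection.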
func (m *Metric) ExpireDatum(expiry time.Duration, labelvalues ...string) error {
if len(labelvalues) != len(m.Keys) {
return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m)
}
m.Lock()
defer m.Unlock()
if lv := m.FindLabelValueOrNil(labelvalues); lv != nil {
lv.Expiry = expiry
return nil
}
return errors.Errorf("No datum for given labelvalues %q", labelvalues)
}
// LabelSet is an object that maps the keys of a Metric to the labels naming a
// Datum, for use when enumerating Datums from a Metric.
type LabelSet struct {
Labels map[string]string
Datum datum.Datum
}
func zip(keys []string, values []string) map[string]string {
r := make(map[string]string)
for i, v := range values {
r[keys[i]] = v
}
return r
}
// EmitLabelSets enumerates the LabelSets corresponding to the LabelValues of a
// Metric. It emits them onto the provided channel, then closes the channel to
// signal completion.
func (m *Metric) EmitLabelSets(c chan *LabelSet) {
for _, lv := range m.LabelValues {
ls := &LabelSet{zip(m.Keys, lv.Labels), lv.Value}
c <- ls
}
close(c)
}
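// A typical consumption sketch, as used by the exporters in this repository:
//
//	c := make(chan *LabelSet)
//	go m.EmitLabelSets(c)
//	for ls := range c {
//		// use ls.Labels and ls.Datum
//	}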
// UnmarshalJSON converts a JSON byte string into a LabelValue
func (lv *LabelValue) UnmarshalJSON(b []byte) error {
var obj map[string]*json.RawMessage
err := json.Unmarshal(b, &obj)
if err != nil {
return err
}
labels := make([]string, 0)
if _, ok := obj["Labels"]; ok {
err = json.Unmarshal(*obj["Labels"], &labels)
if err != nil {
return err
}
}
lv.Labels = labels
var valObj map[string]*json.RawMessage
err = json.Unmarshal(*obj["Value"], &valObj)
if err != nil {
return err
}
var t int64
err = json.Unmarshal(*valObj["Time"], &t)
if err != nil {
return err
}
var i int64
err = json.Unmarshal(*valObj["Value"], &i)
if err != nil {
return err
}
lv.Value = datum.MakeInt(i, time.Unix(t/1e9, t%1e9))
return nil
}
func (m *Metric) String() string {
m.RLock()
defer m.RUnlock()
return fmt.Sprintf("Metric: name=%s program=%s kind=%v type=%s hidden=%v keys=%v labelvalues=%v source=%s", m.Name, m.Program, m.Kind, m.Type, m.Hidden, m.Keys, m.LabelValues, m.Source)
}
// SetSource sets the source of a metric, describing where in user programmes it was defined.
func (m *Metric) SetSource(source string) {
m.Lock()
defer m.Unlock()
m.Source = source
}
mtail-3.0.0~rc24.1/internal/metrics/metric_test.go 0000664 0000000 0000000 00000013652 13435446430 0022020 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"encoding/json"
"fmt"
"math/rand"
"reflect"
"sync"
"testing"
"testing/quick"
"time"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
func TestKindType(t *testing.T) {
v := Kind(0)
if s := v.String(); s != "Unknown" {
t.Errorf("Kind.String() returned %q not Unknown", s)
}
v = Counter
if s := v.String(); s != "Counter" {
t.Errorf("Kind.String() returned %q not Counter", s)
}
v = Gauge
if s := v.String(); s != "Gauge" {
t.Errorf("Kind.String() returned %q not Gauge", s)
}
v = Timer
if s := v.String(); s != "Timer" {
t.Errorf("Kind.String() returned %q not Timer", s)
}
}
func TestScalarMetric(t *testing.T) {
v := NewMetric("test", "prog", Counter, Int)
d, err := v.GetDatum()
if err != nil {
t.Errorf("no datum: %s", err)
}
datum.IncIntBy(d, 1, time.Now().UTC())
lv := v.FindLabelValueOrNil([]string{})
if lv == nil {
t.Errorf("couldn't find labelvalue")
}
newD := lv.Value
if newD == nil {
t.Errorf("new_d is nil")
}
if newD.ValueString() != "1" {
t.Errorf("value not 1")
}
d2, err := v.GetDatum("a", "b")
if err == nil {
t.Errorf("datum with keys sohuld have returned no value, got %v", d2)
}
}
func TestDimensionedMetric(t *testing.T) {
v := NewMetric("test", "prog", Counter, Int, "foo")
d, _ := v.GetDatum("a")
datum.IncIntBy(d, 1, time.Now().UTC())
if v.FindLabelValueOrNil([]string{"a"}).Value.ValueString() != "1" {
t.Errorf("fail")
}
v = NewMetric("test", "prog", Counter, Int, "foo", "bar")
d, _ = v.GetDatum("a", "b")
datum.IncIntBy(d, 1, time.Now().UTC())
if v.FindLabelValueOrNil([]string{"a", "b"}).Value.ValueString() != "1" {
t.Errorf("fail")
}
v = NewMetric("test", "prog", Counter, Int, "foo", "bar", "quux")
d, _ = v.GetDatum("a", "b", "c")
datum.IncIntBy(d, 1, time.Now().UTC())
if v.FindLabelValueOrNil([]string{"a", "b", "c"}).Value.ValueString() != "1" {
t.Errorf("fail")
}
}
var labelSetTests = []struct {
values []string
expectedLabels map[string]string
}{
{
[]string{"a", "b", "c"},
map[string]string{"foo": "a", "bar": "b", "quux": "c"},
},
{
[]string{"a", "b", "d"},
map[string]string{"foo": "a", "bar": "b", "quux": "d"},
},
}
func TestEmitLabelSet(t *testing.T) {
ts := time.Now().UTC()
for _, tc := range labelSetTests {
tc := tc
t.Run(fmt.Sprintf("%v", tc.values), func(t *testing.T) {
t.Parallel()
m := NewMetric("test", "prog", Gauge, Int, "foo", "bar", "quux")
d, _ := m.GetDatum(tc.values...)
datum.SetInt(d, 37, ts)
c := make(chan *LabelSet)
go m.EmitLabelSets(c)
ls := <-c
diff := testutil.Diff(tc.expectedLabels, ls.Labels)
if diff != "" {
t.Error(diff)
}
})
}
}
func TestFindLabelValueOrNil(t *testing.T) {
m0 := NewMetric("foo", "prog", Counter, Int)
if r0 := m0.FindLabelValueOrNil([]string{}); r0 != nil {
t.Errorf("m0 should be nil: %v", r0)
}
d, err := m0.GetDatum()
if err != nil {
t.Errorf("Bad datum %v: %v\n", d, err)
}
if r1 := m0.FindLabelValueOrNil([]string{}); r1 == nil {
t.Errorf("m0 should not be nil: %v", r1)
}
m1 := NewMetric("bar", "prog", Counter, Int, "a")
d1, err1 := m1.GetDatum("1")
if err1 != nil {
t.Errorf("err1 %v: %v\n", d1, err1)
}
if r2 := m1.FindLabelValueOrNil([]string{"0"}); r2 != nil {
t.Errorf("r2 should be nil")
}
if r3 := m1.FindLabelValueOrNil([]string{"1"}); r3 == nil {
t.Errorf("r3 should be non nil")
}
}
func timeGenerator(rand *rand.Rand) time.Time {
months := []time.Month{
time.January, time.February, time.March,
time.April, time.May, time.June,
time.July, time.August, time.September,
time.October, time.November, time.December,
}
return time.Date(
rand.Intn(9999),
months[rand.Intn(len(months))],
rand.Intn(31),
rand.Intn(24),
rand.Intn(60),
rand.Intn(60),
int(rand.Int31()),
time.UTC,
)
}
func TestMetricJSONRoundTrip(t *testing.T) {
rand := rand.New(rand.NewSource(0))
f := func(name, prog string, kind Kind, keys []string, val, ti, tns int64) bool {
m := NewMetric(name, prog, kind, Int, keys...)
labels := make([]string, 0)
for range keys {
if l, ok := quick.Value(reflect.TypeOf(name), rand); ok {
labels = append(labels, l.String())
} else {
t.Errorf("failed to create value for labels")
break
}
}
d, _ := m.GetDatum(labels...)
datum.SetInt(d, val, timeGenerator(rand))
j, e := json.Marshal(m)
if e != nil {
t.Errorf("json.Marshal failed: %s\n", e)
return false
}
r := newMetric(0)
e = json.Unmarshal(j, &r)
if e != nil {
t.Errorf("json.Unmarshal failed: %s\n", e)
return false
}
if diff := testutil.Diff(m, r, testutil.IgnoreUnexported(sync.RWMutex{})); diff != "" {
t.Errorf("Round trip wasn't stable:\n%s", diff)
return false
}
return true
}
if err := quick.Check(f, nil); err != nil {
t.Error(err)
}
}
func TestTimer(t *testing.T) {
m := NewMetric("test", "prog", Timer, Int)
n := NewMetric("test", "prog", Timer, Int)
diff := testutil.Diff(m, n, testutil.IgnoreUnexported(sync.RWMutex{}))
if diff != "" {
t.Errorf("Identical metrics not the same:\n%s", diff)
}
d, _ := m.GetDatum()
datum.IncIntBy(d, 1, time.Now().UTC())
lv := m.FindLabelValueOrNil([]string{})
if lv == nil {
t.Errorf("couldn't find labelvalue")
}
newD := lv.Value
if newD == nil {
t.Errorf("new_d is nil")
}
if newD.ValueString() != "1" {
t.Errorf("value not 1")
}
}
func TestRemoveMetricLabelValue(t *testing.T) {
m := NewMetric("test", "prog", Counter, Int, "a", "b", "c")
_, e := m.GetDatum("a", "a", "a")
if e != nil {
t.Errorf("Getdatum failed: %s", e)
}
lv := m.FindLabelValueOrNil([]string{"a", "a", "a"})
if lv == nil {
t.Errorf("coidln't find labelvalue")
}
e = m.RemoveDatum("a", "a", "a")
if e != nil {
t.Errorf("couldn't remove datum: %s", e)
}
lv = m.FindLabelValueOrNil([]string{"a", "a", "a"})
if lv != nil {
t.Errorf("label value still exists")
}
}
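// Example_getDatum is an illustrative sketch (not part of the original
// tests) of the basic Metric workflow: create a dimensioned counter, fetch
// the datum for one label combination, and increment it.
func Example_getDatum() {
m := NewMetric("requests", "prog", Counter, Int, "code")
d, err := m.GetDatum("200")
if err != nil {
fmt.Println(err)
return
}
datum.IncIntBy(d, 1, time.Now().UTC())
fmt.Println(d.ValueString())
// Output: 1
}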
mtail-3.0.0~rc24.1/internal/metrics/store.go 0000664 0000000 0000000 00000006565 13435446430 0020637 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"encoding/json"
"reflect"
"sync"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
)
// Store contains Metrics.
type Store struct {
sync.RWMutex
Metrics map[string][]*Metric
}
// NewStore returns a new metric Store.
func NewStore() (s *Store) {
s = &Store{}
s.ClearMetrics()
return
}
// Add is used to add one metric to the Store.
func (s *Store) Add(m *Metric) error {
s.Lock()
defer s.Unlock()
glog.V(1).Infof("Adding a new metric %v", m)
dupeIndex := -1
if len(s.Metrics[m.Name]) > 0 {
t := s.Metrics[m.Name][0].Kind
if m.Kind != t {
return errors.Errorf("Metric %s has different kind %v to existing %v.", m.Name, m.Kind, t)
}
// To avoid duplicate metrics:
// - copy old LabelValues into new metric;
// - discard old metric.
for i, v := range s.Metrics[m.Name] {
if v.Program != m.Program {
continue
}
if v.Type != m.Type {
continue
}
if v.Source != m.Source {
continue
}
dupeIndex = i
glog.V(2).Infof("v keys: %v m.keys: %v", v.Keys, m.Keys)
// If a set of label keys has changed, discard
// old metric completely, w/o even copying old
// data, as they are now incompatible.
if len(v.Keys) != len(m.Keys) || !reflect.DeepEqual(v.Keys, m.Keys) {
break
}
// Otherwise, copy everything into the new metric
glog.V(2).Infof("Found duped metric: %d", dupeIndex)
for j, oldLabel := range v.LabelValues {
glog.V(2).Infof("Labels: %d %s", j, oldLabel.Labels)
d, err := v.GetDatum(oldLabel.Labels...)
if err == nil {
if err = m.RemoveDatum(oldLabel.Labels...); err == nil {
m.LabelValues = append(m.LabelValues, &LabelValue{Labels: oldLabel.Labels, Value: d})
}
}
}
}
}
s.Metrics[m.Name] = append(s.Metrics[m.Name], m)
if dupeIndex >= 0 {
s.Metrics[m.Name] = append(s.Metrics[m.Name][0:dupeIndex], s.Metrics[m.Name][dupeIndex+1:]...)
}
return nil
}
// ClearMetrics empties the store of all metrics.
func (s *Store) ClearMetrics() {
s.Lock()
defer s.Unlock()
s.Metrics = make(map[string][]*Metric)
}
// MarshalJSON returns a JSON byte string representing the Store.
func (s *Store) MarshalJSON() (b []byte, err error) {
s.Lock()
defer s.Unlock()
ms := make([]*Metric, 0)
for _, ml := range s.Metrics {
ms = append(ms, ml...)
}
return json.Marshal(ms)
}
// Gc iterates through the Store looking for metrics that have been marked
// for expiry, and removes them if their expiration time has passed.
func (s *Store) Gc() error {
glog.Info("Running Store.Gc()")
s.Lock()
defer s.Unlock()
now := time.Now()
for _, ml := range s.Metrics {
for _, m := range ml {
for _, lv := range m.LabelValues {
if lv.Expiry <= 0 {
continue
}
if now.Sub(lv.Value.TimeUTC()) > lv.Expiry {
err := m.RemoveDatum(lv.Labels...)
if err != nil {
return err
}
}
}
}
}
return nil
}
// StartGcLoop runs a permanent goroutine that expires metrics at the given interval.
func (s *Store) StartGcLoop(duration time.Duration) {
if duration <= 0 {
glog.Infof("Metric store expiration disabled")
return
}
go func() {
glog.Infof("Starting metric store expiry loop every %s", duration.String())
ticker := time.NewTicker(duration)
for range ticker.C {
if err := s.Gc(); err != nil {
glog.Info(err)
}
}
}()
}
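// An illustrative sketch (not part of the original source) of wiring up
// expiry: mark a datum on a three-key metric m with an expiry, then start
// the GC loop on the store.
//
//	if err := m.ExpireDatum(time.Minute, "some", "label", "values"); err != nil {
//		// handle error
//	}
//	store.StartGcLoop(time.Hour)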
mtail-3.0.0~rc24.1/internal/metrics/store_test.go 0000664 0000000 0000000 00000007260 13435446430 0021667 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"testing"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
func TestMatchingKind(t *testing.T) {
s := NewStore()
m1 := NewMetric("foo", "prog", Counter, Int)
err := s.Add(m1)
if err != nil {
t.Fatalf("should be nil: %s", err)
}
m2 := NewMetric("foo", "prog1", Gauge, Int)
err = s.Add(m2)
if err == nil {
t.Fatal("should be err")
}
}
func TestDuplicateMetric(t *testing.T) {
expectedMetrics := 0
s := NewStore()
_ = s.Add(NewMetric("foo", "prog", Counter, Int, "user", "host"))
_ = s.Add(NewMetric("foo", "prog", Counter, Int))
expectedMetrics++
if len(s.Metrics["foo"]) != expectedMetrics {
t.Fatalf("should not add duplicate metric. Store: %v", s)
}
_ = s.Add(NewMetric("foo", "prog", Counter, Float))
glog.Infof("Store: %v", s)
expectedMetrics++
if len(s.Metrics["foo"]) != expectedMetrics {
t.Fatalf("should add metric of a different type: %v", s)
}
_ = s.Add(NewMetric("foo", "prog", Counter, Int, "user", "host", "zone", "domain"))
glog.Infof("Store: %v", s)
if len(s.Metrics["foo"]) != expectedMetrics {
t.Fatalf("should not add duplicate metric, but replace the old one. Store: %v", s)
}
_ = s.Add(NewMetric("foo", "prog1", Counter, Int))
glog.Infof("Store: %v", s)
expectedMetrics++
if len(s.Metrics["foo"]) != expectedMetrics {
t.Fatalf("should add metric with a different prog: %v", s)
}
_ = s.Add(NewMetric("foo", "prog1", Counter, Float))
glog.Infof("Store: %v", s)
expectedMetrics++
if len(s.Metrics["foo"]) != expectedMetrics {
t.Fatalf("should add metric of a different type: %v", s)
}
}
/* A program can add a metric with the same name and
of different type.
Prometheus behavior in this case is undefined.
@see https://github.com/google/mtail/issues/130
*/
func TestAddMetricDifferentType(t *testing.T) {
expected := 2
s := NewStore()
err := s.Add(NewMetric("foo", "prog", Counter, Int))
if err != nil {
t.Fatalf("should be nil: %s", err)
}
// Duplicate metric of different type from *the same program
err = s.Add(NewMetric("foo", "prog", Counter, Float))
if err != nil {
t.Fatalf("should add a new metric to the store: %s. Store: %v", err, s.Metrics)
}
if len(s.Metrics["foo"]) != expected {
t.Fatalf("should have %d metrics of different Type: %v", expected, s.Metrics)
}
// Duplicate metric of different type from a different program
err = s.Add(NewMetric("foo", "prog1", Counter, Float))
expected++
if err != nil {
t.Fatalf("should add a new metric to the store: %s. Store: %v", err, s.Metrics)
}
if len(s.Metrics["foo"]) != expected {
t.Fatalf("should have %d metrics of different Type: %v", expected, s.Metrics)
}
}
func TestExpireMetric(t *testing.T) {
s := NewStore()
m := NewMetric("foo", "prog", Counter, Int, "a", "b", "c")
testutil.FatalIfErr(t, s.Add(m))
d, err := m.GetDatum("1", "2", "3")
if err != nil {
t.Error(err)
}
datum.SetInt(d, 1, time.Now().Add(-time.Hour))
lv := m.FindLabelValueOrNil([]string{"1", "2", "3"})
if lv == nil {
t.Errorf("couldn't find lv")
}
lv.Expiry = time.Minute
d, err = m.GetDatum("4", "5", "6")
if err != nil {
t.Error(err)
}
datum.SetInt(d, 1, time.Now().Add(-time.Hour))
lv = m.FindLabelValueOrNil([]string{"4", "5", "6"})
if lv == nil {
t.Errorf("couldn't find lv")
}
testutil.FatalIfErr(t, s.Gc())
lv = m.FindLabelValueOrNil([]string{"1", "2", "3"})
if lv != nil {
t.Errorf("lv not expired: %#v", lv)
t.Logf("Store: %#v", s)
}
lv = m.FindLabelValueOrNil([]string{"4", "5", "6"})
if lv == nil {
t.Errorf("lv expired")
t.Logf("Store: %#v", s)
}
}
mtail-3.0.0~rc24.1/internal/mtail/ 0000775 0000000 0000000 00000000000 13435446430 0016600 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/internal/mtail/basic_tail_integration_test.go 0000664 0000000 0000000 00000002572 13435446430 0024671 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build integration
package mtail_test
import (
"fmt"
"path"
"testing"
"time"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
func TestBasicTail(t *testing.T) {
tests := []struct {
d time.Duration
b bool
}{
{0, false},
{10 * time.Millisecond, true},
}
if testing.Verbose() {
testutil.TestSetFlag(t, "vmodule", "tail=2,log_watcher=2")
}
for _, test := range tests {
t.Run(fmt.Sprintf("%s %v", test.d, test.b), func(t *testing.T) {
logDir, rmLogDir := testutil.TestTempDir(t)
defer rmLogDir()
m, stopM := mtail.TestStartServer(t, test.d, test.b, mtail.LogPathPatterns(logDir+"/*"), mtail.ProgramPath("../../examples/linecount.mtail"))
defer stopM()
startLineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
time.Sleep(1 * time.Second)
logFile := path.Join(logDir, "log")
f := testutil.TestOpenFile(t, logFile)
for i := 1; i <= 3; i++ {
testutil.WriteString(t, f, fmt.Sprintf("%d\n", i))
time.Sleep(1 * time.Second)
}
endLineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
lineCount := endLineCount.(float64) - startLineCount.(float64)
if lineCount != 3. {
t.Errorf("output didn't have expected line count increase: want 3 got %#v", lineCount)
}
})
}
}
mtail-3.0.0~rc24.1/internal/mtail/benchmarks_test.go 0000664 0000000 0000000 00000003157 13435446430 0022311 0 ustar 00root root 0000000 0000000 // Copyright 2016 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Only build with go1.7 or above because b.Run did not exist before.
// +build integration
package mtail_test
import (
"fmt"
"io"
"os"
"path"
"testing"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
"github.com/google/mtail/internal/watcher"
)
func BenchmarkProgram(b *testing.B) {
// exampleProgramTests live in ex_test.go
for _, bm := range exampleProgramTests {
bm := bm
b.Run(fmt.Sprintf("%s on %s", bm.programfile, bm.logfile), func(b *testing.B) {
b.ReportAllocs()
logDir, rmLogDir := testutil.TestTempDir(b)
defer rmLogDir()
logFile := path.Join(logDir, "test.log")
log := testutil.TestOpenFile(b, logFile)
w := watcher.NewFakeWatcher()
store := metrics.NewStore()
programFile := path.Join("../..", bm.programfile)
mtail, err := mtail.New(store, w, mtail.ProgramPath(programFile), mtail.LogPathPatterns(log.Name()))
if err != nil {
b.Fatalf("Failed to create mtail: %s", err)
}
err = mtail.StartTailing()
if err != nil {
b.Fatalf("starttailing failed: %s", err)
}
var total int64
b.ResetTimer()
for i := 0; i < b.N; i++ {
l, err := os.Open(bm.logfile)
if err != nil {
b.Fatalf("Couldn't open logfile: %s", err)
}
count, err := io.Copy(log, l)
if err != nil {
b.Fatalf("Write of test data failed to test file: %s", err)
}
total += count
w.InjectUpdate(log.Name())
}
mtail.Close()
b.StopTimer()
b.SetBytes(total)
})
}
}
mtail-3.0.0~rc24.1/internal/mtail/compile_only_integration_test.go 0000664 0000000 0000000 00000001677 13435446430 0025275 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build integration
package mtail_test
import (
"io/ioutil"
"path"
"strings"
"testing"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
func TestBadProgramFailsCompilation(t *testing.T) {
progDir, rmProgDir := testutil.TestTempDir(t)
defer rmProgDir()
logDir, rmLogDir := testutil.TestTempDir(t)
defer rmLogDir()
err := ioutil.WriteFile(path.Join(progDir, "bad.mtail"), []byte("asdfasdf\n"), 0666)
if err != nil {
t.Fatal(err)
}
// Compile-only fails program compilation at server start, not after it's running.
_, err = mtail.TestMakeServer(t, 0, false, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir), mtail.CompileOnly)
if err == nil {
t.Error("expected error from mtail")
}
if !strings.Contains(err.Error(), "compile failed") {
t.Error("compile failed not reported")
}
}
mtail-3.0.0~rc24.1/internal/mtail/examples_integration_test.go 0000664 0000000 0000000 00000011662 13435446430 0024415 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build integration

package mtail_test
import (
"fmt"
"os"
"path"
"path/filepath"
"sync"
"testing"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/mtail/golden"
"github.com/google/mtail/internal/testutil"
"github.com/google/mtail/internal/watcher"
)
var exampleProgramTests = []struct {
programfile string // Example program file.
logfile string // Sample log input.
goldenfile string // Expected metrics after processing.
}{
{
"examples/rsyncd.mtail",
"testdata/rsyncd.log",
"testdata/rsyncd.golden",
},
{
"examples/sftp.mtail",
"testdata/sftp_chroot.log",
"testdata/sftp_chroot.golden",
},
{
"examples/dhcpd.mtail",
"testdata/anonymised_dhcpd_log",
"testdata/anonymised_dhcpd_log.golden",
},
{
"examples/ntpd.mtail",
"testdata/ntp4",
"testdata/ntp4.golden",
},
{
"examples/ntpd_peerstats.mtail",
"testdata/xntp3_peerstats",
"testdata/xntp3_peerstats.golden",
},
{
"examples/otherwise.mtail",
"testdata/otherwise.log",
"testdata/otherwise.golden",
},
{
"examples/else.mtail",
"testdata/else.log",
"testdata/else.golden",
},
{
"examples/types.mtail",
"testdata/types.log",
"testdata/types.golden",
},
{
"examples/filename.mtail",
"testdata/else.log",
"testdata/filename.golden",
},
{
"examples/logical.mtail",
"testdata/logical.log",
"testdata/logical.golden",
},
{
"examples/strcat.mtail",
"testdata/strcat.log",
"testdata/strcat.golden",
},
{
"examples/add_assign_float.mtail",
"testdata/add_assign_float.log",
"testdata/add_assign_float.golden",
},
{
"examples/typed-comparison.mtail",
"testdata/typed-comparison.log",
"testdata/typed-comparison.golden",
},
{
"examples/match-expression.mtail",
"testdata/match-expression.log",
"testdata/match-expression.golden",
},
{
"examples/apache_combined.mtail",
"testdata/apache-combined.log",
"testdata/apache-combined.golden",
},
{
"examples/apache_common.mtail",
"testdata/apache-common.log",
"testdata/apache-common.golden",
},
{
"examples/metric-as-rvalue.mtail",
"testdata/metric-as-rvalue.log",
"testdata/metric-as-rvalue.golden",
},
{
"examples/decorator.mtail",
"testdata/decorator.log",
"testdata/decorator.golden",
},
{
"examples/stringy.mtail",
"testdata/stringy.log",
"testdata/stringy.golden",
},
{
"examples/ip-addr.mtail",
"testdata/ip-addr.log",
"testdata/ip-addr.golden",
},
{
"examples/vsftpd.mtail",
"testdata/vsftpd_log",
"testdata/vsftpd_log.golden",
},
{
"examples/vsftpd.mtail",
"testdata/vsftpd_xferlog",
"testdata/vsftpd_xferlog.golden",
},
{
"examples/lighttpd.mtail",
"testdata/lighttpd_access.log",
"testdata/lighttpd_accesslog.golden",
},
{
"examples/mysql_slowqueries.mtail",
"testdata/mysql_slowqueries.log",
"testdata/mysql_slowqueries.golden",
},
}
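// TestExamplePrograms runs each example program over its sample log in
// one-shot mode and diffs the resulting metric store against the golden file.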
func TestExamplePrograms(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
for _, tc := range exampleProgramTests {
t.Run(fmt.Sprintf("%s on %s", tc.programfile, tc.logfile), func(t *testing.T) {
w := watcher.NewFakeWatcher()
store := metrics.NewStore()
programFile := path.Join("../..", tc.programfile)
mtail, err := mtail.New(store, w, mtail.ProgramPath(programFile), mtail.LogPathPatterns(tc.logfile), mtail.OneShot, mtail.OmitMetricSource, mtail.DumpAstTypes, mtail.DumpBytecode)
if err != nil {
t.Fatalf("create mtail failed: %s", err)
}
err = mtail.StartTailing()
if err != nil {
t.Fatalf("Start tailling failed: %s", err)
}
g, err := os.Open(tc.goldenfile)
if err != nil {
t.Fatalf("could not open golden file: %s", err)
}
defer g.Close()
goldenStore := metrics.NewStore()
golden.ReadTestData(g, tc.programfile, goldenStore)
err = mtail.Close()
if err != nil {
t.Error(err)
}
diff := testutil.Diff(goldenStore, store, testutil.IgnoreUnexported(sync.RWMutex{}, datum.StringDatum{}))
if diff != "" {
t.Error(diff)
t.Logf(" Golden metrics: %s", goldenStore.Metrics)
t.Logf("Program metrics: %s", store.Metrics)
t.Logf("yar\n%+v", store.Metrics)
}
})
}
}
// This test only compiles the examples, but it covers every example
// provided, ensuring we ship at least syntactically correct examples.
func TestCompileExamplePrograms(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
matches, err := filepath.Glob("examples/*.mtail")
if err != nil {
t.Fatal(err)
}
for _, tc := range matches {
t.Run(tc, func(t *testing.T) {
w := watcher.NewFakeWatcher()
s := metrics.NewStore()
mtail, err := mtail.New(s, w, mtail.ProgramPath(tc), mtail.CompileOnly, mtail.OmitMetricSource, mtail.DumpAstTypes, mtail.DumpBytecode)
if err != nil {
t.Fatal(err)
}
mtail.Close()
})
}
}
mtail-3.0.0~rc24.1/internal/mtail/glob_relative_after_start_integration_test.go 0000664 0000000 0000000 00000005632 13435446430 0030013 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build integration

package mtail_test
import (
"os"
"path"
"testing"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
func TestGlobRelativeAfterStart(t *testing.T) {
tmpDir, rmTmpDir := testutil.TestTempDir(t)
defer rmTmpDir()
logDir := path.Join(tmpDir, "logs")
progDir := path.Join(tmpDir, "progs")
err := os.Mkdir(logDir, 0700)
if err != nil {
t.Fatal(err)
}
err = os.Mkdir(progDir, 0700)
if err != nil {
t.Fatal(err)
}
defer testutil.TestChdir(t, logDir)()
m, stopM := mtail.TestStartServer(t, 0, false, mtail.ProgramPath(progDir), mtail.LogPathPatterns("log.*"))
defer stopM()
startLogCount := mtail.TestGetMetric(t, m.Addr(), "log_count")
startLineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
logFile := path.Join(logDir, "log.1.txt")
f := testutil.TestOpenFile(t, logFile)
n, err := f.WriteString("line 1\n")
if err != nil {
t.Fatal(err)
}
glog.Infof("Wrote %d bytes", n)
time.Sleep(time.Second)
{
logCount := mtail.TestGetMetric(t, m.Addr(), "log_count")
lineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
if logCount.(float64)-startLogCount.(float64) != 1. {
t.Errorf("Unexpected log count: got %g, want 1", logCount.(float64)-startLogCount.(float64))
}
if lineCount.(float64)-startLineCount.(float64) != 1. {
t.Errorf("Unexpected line count: got %g, want 1", lineCount.(float64)-startLineCount.(float64))
}
time.Sleep(time.Second)
}
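// A second file matching the relative glob, created after startup, should
// also be discovered and tailed.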
{
logFile := path.Join(logDir, "log.2.txt")
f := testutil.TestOpenFile(t, logFile)
n, err := f.WriteString("line 1\n")
if err != nil {
t.Fatal(err)
}
glog.Infof("Wrote %d bytes", n)
time.Sleep(time.Second)
logCount := mtail.TestGetMetric(t, m.Addr(), "log_count")
lineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
if logCount.(float64)-startLogCount.(float64) != 2. {
t.Errorf("Unexpected log count: got %g, want 2", logCount.(float64)-startLogCount.(float64))
}
if lineCount.(float64)-startLineCount.(float64) != 2. {
t.Errorf("Unexpected line count: got %g, want 2", lineCount.(float64)-startLineCount.(float64))
}
time.Sleep(time.Second)
}
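// Appending to the already-tracked second file must bump only the line
// count, not the log count.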
{
logFile := path.Join(logDir, "log.2.txt")
f := testutil.TestOpenFile(t, logFile)
n, err := f.WriteString("line 1\n")
if err != nil {
t.Fatal(err)
}
glog.Infof("Wrote %d bytes", n)
time.Sleep(time.Second)
logCount := mtail.TestGetMetric(t, m.Addr(), "log_count")
lineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
if logCount.(float64)-startLogCount.(float64) != 2 {
t.Errorf("Unexpected log count: got %g, want 2", logCount.(float64)-startLogCount.(float64))
}
if lineCount.(float64)-startLineCount.(float64) != 3 {
t.Errorf("Unexpected line count: got %g, want 3", lineCount.(float64)-startLineCount.(float64))
}
}
}
mtail-3.0.0~rc24.1/internal/mtail/golden/ 0000775 0000000 0000000 00000000000 13435446430 0020050 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc24.1/internal/mtail/golden/reader.go 0000664 0000000 0000000 00000010002 13435446430 0021632 0 ustar 00root root 0000000 0000000 // Copyright 2016 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package golden
import (
"bufio"
"io"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
)
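// varRe matches one golden-file record, e.g. (from reader_test.golden):
//   counter bytes_total {operation=sent} 62793673 2011-02-23T05:54:10Z
//   gauge floaty 37.1 2017-06-15T18:09:37Z
// The groups capture kind, name, optional labels, value, and timestamp.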
var varRe = regexp.MustCompile(`^(counter|gauge|timer|text) ([^ ]+)(?: {([^}]+)})?(?: (\S+))?(?: (.+))?`)
// FindMetricOrNil returns a metric in a store, or returns nil if not found.
func FindMetricOrNil(store *metrics.Store, name string) *metrics.Metric {
store.RLock()
defer store.RUnlock()
for n, ml := range store.Metrics {
if n == name {
return ml[0]
}
}
return nil
}
// ReadTestData loads a "golden" test data file, for a programfile, into the provided store.
func ReadTestData(file io.Reader, programfile string, store *metrics.Store) {
prog := filepath.Base(programfile)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
glog.V(2).Infof("'%s'\n", scanner.Text())
match := varRe.FindStringSubmatch(scanner.Text())
glog.V(2).Infof("len match: %d\n", len(match))
if len(match) == 0 {
continue
}
keys := make([]string, 0)
vals := make([]string, 0)
if match[3] != "" {
for _, pair := range strings.Split(match[3], ",") {
glog.V(2).Infof("pair: %s\n", pair)
kv := strings.Split(pair, "=")
keys = append(keys, kv[0])
if kv[1] != "" {
if kv[1] == `""` {
vals = append(vals, "")
} else {
vals = append(vals, kv[1])
}
}
}
}
var kind metrics.Kind
switch match[1] {
case "counter":
kind = metrics.Counter
case "gauge":
kind = metrics.Gauge
case "timer":
kind = metrics.Timer
case "text":
kind = metrics.Text
}
glog.V(2).Infof("match[4]: %q", match[4])
typ := datum.Int
var (
ival int64
fval float64
sval string
err error
)
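// Parse the value in decreasing order of strictness: int first, then float;
// a parse failure (or a zero float) falls back to treating it as a string.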
if match[4] != "" {
ival, err = strconv.ParseInt(match[4], 10, 64)
if err != nil {
fval, err = strconv.ParseFloat(match[4], 64)
typ = datum.Float
if err != nil || fval == 0.0 {
sval = match[4]
err = nil
typ = datum.String
}
}
}
var timestamp time.Time
glog.V(2).Infof("match 5: %q\n", match[5])
if match[5] != "" {
timestamp, err = time.Parse(time.RFC3339, match[5])
if err != nil {
j, err := strconv.ParseInt(match[5], 10, 64)
if err == nil {
timestamp = time.Unix(j/1000000000, j%1000000000)
} else {
glog.V(2).Info(err)
}
}
}
glog.V(2).Infof("timestamp is %s which is %v in unix", timestamp.Format(time.RFC3339), timestamp.Unix())
// Now we have enough information to get or create a metric.
m := FindMetricOrNil(store, match[2])
if m != nil {
if m.Type != typ {
glog.V(2).Infof("The type of the fetched metric is not %s: %s", typ, m)
continue
}
} else {
m = metrics.NewMetric(match[2], prog, kind, typ, keys...)
if kind == metrics.Counter && len(keys) == 0 {
d, err := m.GetDatum()
if err != nil {
glog.Fatal(err)
}
// Initialize to zero at the zero time.
switch typ {
case metrics.Int:
datum.SetInt(d, 0, time.Unix(0, 0))
case metrics.Float:
datum.SetFloat(d, 0, time.Unix(0, 0))
}
}
glog.V(2).Infof("making a new %v\n", m)
if err := store.Add(m); err != nil {
glog.Infof("Failed to add metric %v to store: %s", m, err)
}
}
if match[4] != "" {
d, err := m.GetDatum(vals...)
if err != nil {
glog.V(2).Infof("Failed to get datum: %s", err)
continue
}
glog.V(2).Infof("got datum %v", d)
switch typ {
case metrics.Int:
glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, ival, timestamp)
datum.SetInt(d, ival, timestamp)
case metrics.Float:
glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, fval, timestamp)
datum.SetFloat(d, fval, timestamp)
case metrics.String:
glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, sval, timestamp)
datum.SetString(d, sval, timestamp)
}
}
glog.V(2).Infof("Metric is now %s", m)
}
}
mtail-3.0.0~rc24.1/internal/mtail/golden/reader_test.go 0000664 0000000 0000000 00000006314 13435446430 0022704 0 ustar 00root root 0000000 0000000 package golden
import (
"os"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
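// expectedMetrics is the store expected after parsing reader_test.golden,
// keyed by metric name.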
var expectedMetrics = map[string][]*metrics.Metric{
"bytes_total": {
&metrics.Metric{
Name: "bytes_total",
Program: "reader_test",
Kind: metrics.Counter,
Keys: []string{"operation"},
LabelValues: []*metrics.LabelValue{
{
Labels: []string{"sent"},
Value: datum.MakeInt(62793673, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC))},
{
Labels: []string{"received"},
Value: datum.MakeInt(975017, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC))}}},
},
"connections_total": {
&metrics.Metric{
Name: "connections_total",
Program: "reader_test",
Kind: metrics.Counter,
Keys: []string{},
LabelValues: []*metrics.LabelValue{
{
Value: datum.MakeInt(52, time.Date(2011, 2, 22, 21, 54, 13, 0, time.UTC))}}},
},
"connection-time_total": {
&metrics.Metric{
Name: "connection-time_total",
Program: "reader_test",
Kind: metrics.Counter,
Keys: []string{},
LabelValues: []*metrics.LabelValue{
{
Value: datum.MakeInt(1181011, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC))}}},
},
"transfers_total": {
&metrics.Metric{
Name: "transfers_total",
Program: "reader_test",
Kind: metrics.Counter,
Keys: []string{"operation", "module"},
LabelValues: []*metrics.LabelValue{
{
Labels: []string{"send", "module"},
Value: datum.MakeInt(2, time.Date(2011, 2, 23, 5, 50, 32, 0, time.UTC))},
{
Labels: []string{"send", "repo"},
Value: datum.MakeInt(25, time.Date(2011, 2, 23, 5, 51, 14, 0, time.UTC))}}},
},
"foo": {
&metrics.Metric{
Name: "foo",
Program: "reader_test",
Kind: metrics.Gauge,
Keys: []string{"label"},
LabelValues: []*metrics.LabelValue{},
},
},
"bar": {
&metrics.Metric{
Name: "bar",
Program: "reader_test",
Kind: metrics.Counter,
Keys: []string{},
LabelValues: []*metrics.LabelValue{
{
Value: datum.MakeInt(0, time.Unix(0, 0)),
},
},
},
},
"floaty": {
&metrics.Metric{
Name: "floaty",
Program: "reader_test",
Kind: metrics.Gauge,
Type: datum.Float,
Keys: []string{},
LabelValues: []*metrics.LabelValue{
{
Labels: []string{},
Value: datum.MakeFloat(37.1, time.Date(2017, 6, 15, 18, 9, 37, 0, time.UTC)),
},
},
},
},
"stringy": {
&metrics.Metric{
Name: "stringy",
Program: "reader_test",
Kind: metrics.Text,
Type: datum.String,
Keys: []string{},
LabelValues: []*metrics.LabelValue{
{
Labels: []string{},
Value: datum.MakeString("hi", time.Date(2018, 6, 16, 18, 04, 0, 0, time.UTC)),
},
},
},
},
}
func TestReadTestData(t *testing.T) {
f, err := os.Open("reader_test.golden")
if err != nil {
t.Fatal(err)
}
defer f.Close()
store := metrics.NewStore()
ReadTestData(f, "reader_test", store)
diff := testutil.Diff(expectedMetrics, store.Metrics, testutil.IgnoreUnexported(sync.RWMutex{}, datum.StringDatum{}))
if diff != "" {
t.Error(diff)
t.Logf("store contains %s", store.Metrics)
}
}
mtail-3.0.0~rc24.1/internal/mtail/golden/reader_test.golden 0000664 0000000 0000000 00000000773 13435446430 0023552 0 ustar 00root root 0000000 0000000 counter bytes_total {operation=sent} 62793673 2011-02-23T05:54:10Z
counter bytes_total {operation=received} 975017 2011-02-23T05:54:10Z
counter connections_total 52 2011-02-22T21:54:13Z
counter connection-time_total 1181011 2011-02-23T05:54:10Z
counter transfers_total {operation=send,module=module} 2 2011-02-23T05:50:32Z
counter transfers_total {operation=send,module=repo} 25 2011-02-23T05:51:14Z
gauge foo {label=}
counter bar
gauge floaty 37.1 2017-06-15T18:09:37Z
text stringy hi 2018-06-16T18:04:00Z
mtail-3.0.0~rc24.1/internal/mtail/log_glob_integration_test.go 0000664 0000000 0000000 00000005050 13435446430 0024355 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build integration

package mtail_test
import (
"fmt"
"os"
"path"
"testing"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
func TestLogGlobMatchesAfterStartupWithPollInterval(t *testing.T) {
for _, pollInterval := range []time.Duration{0, 250 * time.Millisecond} {
t.Run(fmt.Sprintf("%s", pollInterval), func(t *testing.T) {
tmpDir, rmTmpDir := testutil.TestTempDir(t)
defer rmTmpDir()
logDir := path.Join(tmpDir, "logs")
progDir := path.Join(tmpDir, "progs")
err := os.Mkdir(logDir, 0700)
if err != nil {
t.Fatal(err)
}
err = os.Mkdir(progDir, 0700)
if err != nil {
t.Fatal(err)
}
defer testutil.TestChdir(t, logDir)()
m, stopM := mtail.TestStartServer(t, pollInterval, false, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+"/log*"))
defer stopM()
startLogCount := mtail.TestGetMetric(t, m.Addr(), "log_count")
startLineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
{
logFile := path.Join(logDir, "log")
f := testutil.TestOpenFile(t, logFile)
n, err := f.WriteString("line 1\n")
if err != nil {
t.Fatal(err)
}
glog.Infof("Wrote %d bytes", n)
time.Sleep(time.Second)
logCount := mtail.TestGetMetric(t, m.Addr(), "log_count")
lineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
if logCount.(float64)-startLogCount.(float64) != 1. {
t.Errorf("Unexpected log count: got %g, want 1", logCount.(float64)-startLogCount.(float64))
}
if lineCount.(float64)-startLineCount.(float64) != 1. {
t.Errorf("Unexpected line count: got %g, want 1", lineCount.(float64)-startLineCount.(float64))
}
time.Sleep(time.Second)
}
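// A second file matching the log* pattern, created after startup, should be
// found by the glob and tailed.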
{
logFile := path.Join(logDir, "log1")
f := testutil.TestOpenFile(t, logFile)
n, err := f.WriteString("line 1\n")
if err != nil {
t.Fatal(err)
}
glog.Infof("Wrote %d bytes", n)
time.Sleep(time.Second)
logCount := mtail.TestGetMetric(t, m.Addr(), "log_count")
lineCount := mtail.TestGetMetric(t, m.Addr(), "line_count")
if logCount.(float64)-startLogCount.(float64) != 2. {
t.Errorf("Unexpected log count: got %g, want 2", logCount.(float64)-startLogCount.(float64))
}
if lineCount.(float64)-startLineCount.(float64) != 2. {
t.Errorf("Unexpected line count: got %g, want 2", lineCount.(float64)-startLineCount.(float64))
}
time.Sleep(time.Second)
}
})
}
}
mtail-3.0.0~rc24.1/internal/mtail/log_rotation_integration_test.go 0000664 0000000 0000000 00000003073 13435446430 0025274 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build integration

package mtail_test
import (
"os"
"path"
"testing"
"time"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
func TestLogRotation(t *testing.T) {
tmpDir, rmTmpDir := testutil.TestTempDir(t)
defer rmTmpDir()
logDir := path.Join(tmpDir, "logs")
progDir := path.Join(tmpDir, "progs")
err := os.Mkdir(logDir, 0700)
if err != nil {
t.Fatal(err)
}
err = os.Mkdir(progDir, 0700)
if err != nil {
t.Fatal(err)
}
logFile := path.Join(logDir, "log")
f := testutil.TestOpenFile(t, logFile)
m, stopM := mtail.TestStartServer(t, 0, false, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+"/log"))
defer stopM()
{
testutil.WriteString(t, f, "line 1\n")
time.Sleep(time.Second)
}
startLogLinesTotal := mtail.TestGetMetric(t, m.Addr(), "log_lines_total").(map[string]interface{})[logFile]
{
testutil.WriteString(t, f, "line 2\n")
time.Sleep(time.Second)
logLinesTotal := mtail.TestGetMetric(t, m.Addr(), "log_lines_total").(map[string]interface{})[logFile]
mtail.ExpectMetricDelta(t, logLinesTotal, startLogLinesTotal, 1)
}
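// Simulate rotation: move the log aside and recreate it at the original
// path; mtail should follow the new file.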
err = os.Rename(logFile, logFile+".1")
if err != nil {
t.Fatal(err)
}
f = testutil.TestOpenFile(t, logFile)
{
testutil.WriteString(t, f, "line 1\n")
time.Sleep(time.Second)
logLinesTotal := mtail.TestGetMetric(t, m.Addr(), "log_lines_total").(map[string]interface{})[logFile]
mtail.ExpectMetricDelta(t, logLinesTotal, startLogLinesTotal, 2)
}
}
mtail-3.0.0~rc24.1/internal/mtail/logo.ico 0000664 0000000 0000000 00000076446 13435446430 0020245 0 ustar 00root root 0000000 0000000 [binary icon data omitted]