pax_global_header 0000666 0000000 0000000 00000000064 13216376353 0014523 g ustar 00root root 0000000 0000000 52 comment=04017b5a1241b4e78b0a2dab84e5a332228b54d2
mtail-3.0.0~rc5/ 0000775 0000000 0000000 00000000000 13216376353 0013441 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc5/.gitignore 0000664 0000000 0000000 00000000423 13216376353 0015430 0 ustar 00root root 0000000 0000000 6.out
_go_.6
_gotest_.6
_test
_testmain.6
_testmain.go
parser.go
y.output
mtail.test
fuzz/fuzz
fuzzout
emgen/emgen
foo.log
/watcher/watcher.test
/vm/vm.test
/coverage.html
*.coverprofile
/exporter/exporter.test
/.dep-stamp
/.build-dep-stamp
/.gen-dep-stamp
/cpu.out
/mem.out
mtail-3.0.0~rc5/.gitlab-ci.yml 0000664 0000000 0000000 00000000772 13216376353 0016103 0 ustar 00root root 0000000 0000000 image: golang:1.8
stages:
- test
- build
before_script:
- mkdir ${CI_PROJECT_DIR}/build
- mkdir -p ${GOPATH}/src/github.com/google/
- ln -s $(pwd) ${GOPATH}/src/github.com/google/mtail
- cd ${GOPATH}/src/github.com/google/mtail
test:
stage: test
allow_failure: true
script:
- make test
build:
stage: build
script:
- make install
- cp -v ${GOPATH}/bin/mtail ${CI_PROJECT_DIR}/build/mtail
artifacts:
expire_in: 1 week
when: on_success
paths:
- build
mtail-3.0.0~rc5/.travis.yml 0000664 0000000 0000000 00000002110 13216376353 0015544 0 ustar 00root root 0000000 0000000 language: go
# Must generate the parser before installing deps or go get will error out on
# undefined lexer tokens.
before_install:
- make vm/parser.go
# Default dependency installation command, which is disabled when Makefile
# detected. Also install three tools for measuring coverage and sending to
# coveralls.io.
install:
- travis_retry make install_coverage_deps
# Run all tests, under race detector.
script: time make testrace
# If the full suite passes, run again to collect coverage, merge all of the
# packages reports, and send to coveralls.io.
# Run the benchmarks, if applicable.
after_success:
- make coverage
- goveralls -coverprofile=gover.coverprofile -service=travis-ci
- make bench
# Run tests at these Go versions.
go:
- tip
- 1.9
- 1.8
- 1.7
# GOMAXPROCS=1 forces scheduling race bugs.
env:
- GOMAXPROCS=1
- GOMAXPROCS=2
- GOMAXPROCS=4
# Allow tip to fail, and only wait for mature versions to succeed before
# reporting build status.
matrix:
fast_finish: true
allow_failures:
- go: tip
# Not using sudo, can use containers.
sudo: false
mtail-3.0.0~rc5/CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000006213 13216376353 0016242 0 ustar 00root root 0000000 0000000 # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at jaq@google.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
mtail-3.0.0~rc5/CONTRIBUTING.md 0000664 0000000 0000000 00000002715 13216376353 0015677 0 ustar 00root root 0000000 0000000 Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose.
Please read the [style guide](docs/style.md) for tips on the project coding
guidelines.
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the Software Grant and Corporate Contributor License Agreement.
mtail-3.0.0~rc5/Dockerfile 0000664 0000000 0000000 00000000427 13216376353 0015436 0 ustar 00root root 0000000 0000000 FROM golang:1.9-alpine
WORKDIR /go/src/github.com/google/mtail
COPY . /go/src/github.com/google/mtail
RUN apk add --update --no-cache --virtual build-dependencies git make \
&& export GOPATH=/go \
&& make \
&& apk del build-dependencies
ENTRYPOINT ["mtail"]
mtail-3.0.0~rc5/LICENSE 0000664 0000000 0000000 00000026136 13216376353 0014456 0 ustar 00root root 0000000 0000000
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
mtail-3.0.0~rc5/Makefile 0000664 0000000 0000000 00000007067 13216376353 0015113 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
ifeq ($(TRAVIS),true)
timeout := 5m
else
timeout := 60s
endif
GOFILES=\
exporter/collectd.go\
exporter/export.go\
exporter/graphite.go\
exporter/json.go\
exporter/prometheus.go\
exporter/statsd.go\
exporter/varz.go\
main.go\
metrics/datum/datum.go\
metrics/datum/int.go\
metrics/metric.go\
metrics/store.go\
mtail/mtail.go\
tailer/tail.go\
vm/ast.go\
vm/checker.go\
vm/compiler.go\
vm/driver.go\
vm/lexer.go\
vm/loader.go\
vm/parser.go\
vm/symtab.go\
vm/unparser.go\
vm/vm.go\
watcher/fake_watcher.go\
watcher/log_watcher.go\
watcher/watcher.go\
GOTESTFILES=\
ex_test.go\
bench_test.go\
exporter/export_test.go\
exporter/json_test.go\
exporter/prometheus_test.go\
exporter/varz_test.go\
metrics/datum/int_test.go\
metrics/metric_test.go\
metrics/store_test.go\
mtail/mtail_test.go\
tailer/tail_test.go\
testdata/reader.go\
testdata/reader_test.go\
vm/checker_test.go\
vm/codegen_test.go\
vm/lexer_test.go\
vm/parser_test.go\
vm/symtab_test.go\
vm/types_test.go\
vm/vm_test.go\
watcher/fake_watcher_test.go\
watcher/log_watcher_test.go\
CLEANFILES+=\
vm/parser.go\
vm/y.output\
all: mtail
.PHONY: clean
clean:
rm -f $(CLEANFILES) .*dep-stamp
version := $(shell git describe --tags)
revision := $(shell git rev-parse HEAD)
install mtail: $(GOFILES) .dep-stamp
go install -ldflags "-X main.Version=${version} -X main.Revision=${revision}"
vm/parser.go: vm/parser.y .gen-dep-stamp
go generate -x ./vm
emgen/emgen: emgen/emgen.go
cd emgen && go build
.PHONY: test check
check test: $(GOFILES) $(GOTESTFILES) .dep-stamp
go test -v -timeout 10s ./... ./testdata
.PHONY: testrace
testrace: $(GOFILES) $(GOTESTFILES) .dep-stamp
go test -v -timeout ${timeout} -race ./... ./testdata
.PHONY: smoke
smoke: $(GOFILES) $(GOTESTFILES) .dep-stamp
go test -v -timeout 1s -test.short ./... ./testdata
.PHONY: ex_test
ex_test: ex_test.go testdata/* examples/*
go test -run TestExamplePrograms -v --logtostderr
.PHONY: bench
bench: $(GOFILES) $(GOTESTFILES) .dep-stamp
go test -bench=. -timeout=60s -run=XXX ./... ./testdata
.PHONY: bench_cpu
bench_cpu:
go test -bench=. -run=XXX -timeout=60s -cpuprofile=cpu.out
.PHONY: bench_mem
bench_mem:
go test -bench=. -run=XXX -timeout=60s -memprofile=mem.out
.PHONY: recbench
recbench: $(GOFILES) $(GOTESTFILES) .dep-stamp
go test -bench=. -run=XXX --record_benchmark ./... ./testdata
.PHONY: coverage
coverage: gover.coverprofile
gover.coverprofile: $(GOFILES) $(GOTESTFILES) .dep-stamp
for package in exporter metrics mtail tailer vm watcher; do\
go test -covermode=count -coverprofile=$$package.coverprofile ./$$package;\
done
gover
.PHONY: covrep
covrep: coverage.html
xdg-open $<
coverage.html: gover.coverprofile
go tool cover -html=$< -o $@
.PHONY: testall
testall: testrace bench
.PHONY: install_deps
install_deps: .dep-stamp
IMPORTS := $(shell go list -f '{{join .Imports "\n"}}' ./... ./testdata | sort | uniq | grep -v mtail)
TESTIMPORTS := $(shell go list -f '{{join .TestImports "\n"}}' ./... ./testdata | sort | uniq | grep -v mtail)
.dep-stamp:
# Install all dependencies, ensuring they're updated
go get -u -v $(IMPORTS)
go get -u -v $(TESTIMPORTS)
touch $@
.PHONY: install_gen_deps
install_gen_deps: .gen-dep-stamp
.gen-dep-stamp:
go get golang.org/x/tools/cmd/goyacc
touch $@
.PHONY: install_coverage_deps
install_coverage_deps: .cov-dep-stamp
.cov-dep-stamp: install_deps
go get golang.org/x/tools/cmd/cover
go get github.com/modocache/gover
go get github.com/mattn/goveralls
touch $@
mtail-3.0.0~rc5/README.md 0000664 0000000 0000000 00000003431 13216376353 0014721 0 ustar 00root root 0000000 0000000
# mtail - extract whitebox monitoring data from application logs for collection into a timeseries database
[](http://godoc.org/github.com/google/mtail)
[](https://travis-ci.org/google/mtail)
[](https://coveralls.io/github/google/mtail?branch=master)
`mtail` is a tool for extracting metrics from application logs to be exported
into a timeseries database or timeseries calculator for alerting and
dashboarding.
It aims to fill a niche between applications that do not export their own
internal state, and existing monitoring systems, without patching those
applications or rewriting the same framework for custom extraction glue code.
The extraction is controlled by [mtail programs](docs/Programming-Guide.md)
which define patterns and actions:
# simple line counter
counter line_count
/$/ {
line_count++
}
Metrics are exported for scraping by a collector as JSON or Prometheus format
over HTTP, or can be periodically sent to a collectd, StatsD, or Graphite
collector socket.
Read the [programming guide](docs/Programming-Guide.md) if you want to learn how
to write mtail programs.
Mailing list: https://groups.google.com/forum/#!forum/mtail-users
### Installation
`mtail` uses a Makefile. To build `mtail`, type `make` at the commandline. See
the [Build instructions](docs/Building.md) for more details.
### Deployment
`mtail` works best when paired with a timeseries-based calculator and alerting
tool, like [Prometheus](http://prometheus.io).
mtail-3.0.0~rc5/TODO 0000664 0000000 0000000 00000003367 13216376353 0014142 0 ustar 00root root 0000000 0000000
copy flag tests from mtail_test.sh
standard library, search path
refactor fs and notify into single interface
no trailing newline in parser test, requires changes to expr stmt
parse tree/ast testing, s-expressions to render expected?
mapping between progs and logs to reduce wasted processing
~ and !~
define a capref in the same expression it's used: e.g.
/(?.*)/ && $x > 0
conversion in vm.compare() crashes on $var == "string" when $var is
float (logical.mtail, input line "12.8") --
codegen for conditional and jmp is weird for 2-deep trees
Using a const pattern fragment as the first or only in a pattern expr is a
parse error.
{"concat expr 1", `
/bar/ + X {
}`},
{"concat expr 2", `
X {
}`},
// {"match expression 3", `
// $foo =~ X {
// }
// `},
Match against a variable requires simplifying binaryExpr grammar to elim the
concat_expr branch, fixing the id_expr problem above
{"match expr 2", `
/(?P.{6}) (?P.*)/ {
$foo =~ $bar {
}
}`},
bytecode like
[{push 1} {push 0} {cmp 1} {jm 6} {push 0} {jmp 7} {push 1} {jnm 13}
{setmatched false} {mload 0} {dload 0} {inc } {setmatched true}]
can be expressed as
[{push 1} {push 0} {cmp 1} {jm 9} {setmatched false} {mload 0} {dload 0} {inc
} {setmatched true}]
but jnm 13 is from the condExpr and the previous is from a comparison binary
expr; an optimizer is needed to collapse the bytecode to undersand that
cmp, jm, push, jump, push, jnm in sequence like so is the same as a cmp, jm
and we need to worry about the jump table too
count stack size and preallocate stack
Use capture group references to feed back to declaring regular expression,
noting unused caprefs,
possibly flipping back to noncapturing (and renumbering the caprefs?)
mtail-3.0.0~rc5/bench_test.go 0000664 0000000 0000000 00000003220 13216376353 0016103 0 ustar 00root root 0000000 0000000 // Copyright 2016 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Only build with go1.7 or above because b.Run did not exist before.
// +build go1.7
package main
import (
"flag"
"fmt"
"io"
"os"
"testing"
"github.com/google/mtail/mtail"
"github.com/google/mtail/watcher"
"github.com/spf13/afero"
)
var (
recordBenchmark = flag.Bool("record_benchmark", false, "Record the benchmark results to 'benchmark_results.csv'.")
)
func BenchmarkProgram(b *testing.B) {
// exampleProgramTests live in ex_test.go
for _, bm := range exampleProgramTests {
bm := bm
b.Run(fmt.Sprintf("%s on %s", bm.programfile, bm.logfile), func(b *testing.B) {
b.ReportAllocs()
w := watcher.NewFakeWatcher()
fs := afero.NewOsFs()
fs = afero.NewCopyOnWriteFs(fs, afero.NewMemMapFs())
log, err := fs.Create("/tmp/test.log")
if err != nil {
b.Fatalf("failed to create test file descriptor")
}
logs = []string{log.Name()}
o := mtail.Options{Progs: bm.programfile, LogPathPatterns: logs, W: w, FS: fs}
mtail, err := mtail.New(o)
if err != nil {
b.Fatalf("Failed to create mtail: %s", err)
}
err = mtail.StartTailing()
if err != nil {
b.Fatalf("starttailing failed: %s", err)
}
var total int64
b.ResetTimer()
for i := 0; i < b.N; i++ {
l, err := os.Open(bm.logfile)
if err != nil {
b.Fatalf("Couldn't open logfile: %s", err)
}
count, err := io.Copy(log, l)
if err != nil {
b.Fatalf("Write of test data failed to test file: %s", err)
}
total += count
w.InjectUpdate(log.Name())
}
mtail.Close()
b.StopTimer()
b.SetBytes(total)
})
}
}
mtail-3.0.0~rc5/benchmark_results.csv 0000664 0000000 0000000 00000002102 13216376353 0017664 0 ustar 00root root 0000000 0000000 1350190388,1,4,examples/sftp.em,500,118000,3.165639s,6.331278,236,37.27525469581339,26.82744915254237
1350190385,1,4,examples/rsyncd.em,100,23500,1.79889s,17.9889,235,13.063611449282613,76.54851063829787
1350190383,1,4,examples/linecount.em,50000,50000,2.356123s,0.047122,1,21.221302962536335,47.12246
1359593792,1,4,examples/dhcpd.em,1,50000,8.55385s,8553.85,50000,5.845321112715327,171.077
1359593784,1,4,examples/sftp.em,200,47200,1.516004s,7.58002,236,31.13448249476914,32.11872881355932
1359593782,1,4,examples/rsyncd.em,100,23500,2.167435s,21.67435,235,10.842308996578904,92.23127659574467
1359593779,1,4,examples/linecount.em,50000,50000,2.695952s,0.053919,1,18.546324266900893,53.91904
1378745369,1,4,examples/dhcpd.em,1,50000,8.342115s,8342.115,50000,5.993683855952598,166.8423
1378745360,1,4,examples/sftp.em,500,118000,3.574926s,7.149852,236,33.00767624280894,30.295983050847457
1378745356,1,4,examples/rsyncd.em,100,23500,1.769277s,17.69277,235,13.28226162438103,75.2883829787234
1378745354,1,4,examples/linecount.em,50000,50000,2.569769s,0.051395,1,19.457001777202542,51.39538
mtail-3.0.0~rc5/debug.go 0000664 0000000 0000000 00000000433 13216376353 0015056 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build !1.7 !1.8
package main
import "github.com/golang/glog"
func SetMutexProfileFraction(rate int) int {
glog.Info("SetMutexProfileFraction not supported")
return rate
}
mtail-3.0.0~rc5/debug_go17.go 0000664 0000000 0000000 00000000367 13216376353 0015721 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// +build 1.7 1.8
package main
import "runtime"
func SetMutexProfileFraction(rate int) int {
return runtime.SetMutexProfileFraction(rate)
}
mtail-3.0.0~rc5/docs/ 0000775 0000000 0000000 00000000000 13216376353 0014371 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc5/docs/Building.md 0000664 0000000 0000000 00000003654 13216376353 0016460 0 ustar 00root root 0000000 0000000 # Introduction
mtail is implemented in [Go](http://golang.org).
You will need to install Go 1.7 or higher.
# Details
[Clone](http://github.com/google/mtail) the source from GitHub into your `$GOPATH`. If you don't have a `$GOPATH`, see the next section.
```
cd $GOPATH/src
go get https://github.com/google/mtail
make
```
## For Go First-Timers
An excellent starting guide for people new to Go entirely is here: https://github.com/alco/gostart
If you want to skip the guide, these two references are short but to the point
on setting up the `$GOPATH` workspace:
* https://github.com/golang/go/wiki/SettingGOPATH
* https://github.com/golang/go/wiki/GOPATH#repository-integration-and-creating-go-gettable-projects
Finally, https://golang.org/doc/code.html is the original Go project
documentation for the philosophy on Go workspaces.
### No Really What's the TLDR
Put `export GOPATH=$HOME/go` in your `~/.profile`.
```
export GOPATH=$HOME/go
mkdir -p $GOPATH/src
```
then back up to the Details above.
## Building
Unlike the recommendation for Go projects, `mtail` uses a `Makefile` to build the source.
Having fetched the source, use `make` from the top of the source tree. This will install all the dependencies, and then build `mtail`. This assumes that your Go environment is already set up -- see above for hints on setting it up.
The resulting binary will be in `$GOPATH/bin`.
The unit tests can be run with `make test`, which invokes `go test`. The slower race-detector tests can be run with `make testrace`.
## Contributing
Please use `gofmt` to format your code before committing. Emacs' go-mode has a lovely [gofmt-before-save](http://golang.org/misc/emacs/go-mode.el) function.
## Troubleshooting
If `make` gives you the following error:
```
../github.com/google/mtail/vm/lexer.go:28: too many errors
```
Then run `make` in that dependency and run `make` again like such:
```
cd ../github.com/google/mtail
make
cd -
make
```
mtail-3.0.0~rc5/docs/Deploying.md 0000664 0000000 0000000 00000004107 13216376353 0016647 0 ustar 00root root 0000000 0000000 # Introduction
mtail is intended to run one per machine, and serve as monitoring glue for multiple applications running on that machine. It runs one or more programs in a 1:1 mapping to those client applications.
## Configuration Overview
mtail is configured through commandline flags.
The `--help` flag will print a list of flags for configuring `mtail`.
(Flags may be prefixed with either `-` or `--`)
Basic flags necessary to start `mtail`:
* `--logs` is a comma separated list of filenames to extract from, but can also be used multiple times, and each filename can be a [glob pattern](http://godoc.org/path/filepath#Match).
* `--progs` is a directory path containing [mtail programs](Language). Programs must have the `.mtail` suffix.
mtail runs an HTTP server on port 3903, which can be changed with the `--port` flag.
# Details
## Launching mtail
```
mtail --progs /etc/mtail --logs /var/log/syslog --logs /var/log/ntp/peerstats
```
## Getting the Metrics Out
### Pull based collection
Point your collection tool at `localhost:3903/json` for JSON format metrics.
Prometheus can be directed to the /metrics endpoint for Prometheus text-based format.
### Push based collection
Use the `collectd_socketpath` or `graphite_host_port` flags to enable pushing to a collectd or graphite instance.
Configure collectd on the same machine to use the unixsock plugin, and set `collectd_socketpath` to that unix socket.
```
mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --collectd_socketpath=/var/run/collectd-unixsock
```
Set `graphite_host_port` to be the host:port of the carbon server.
```
mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --graphite_host_port=localhost:9999
```
Likewise, set `statsd_hostport` to the host:port of the statsd server.
Additionally, the flag `metric_push_interval_seconds` can be used to configure the push frequency. It defaults to 60, i.e. a push every minute.
## Troubleshooting
Lots of state is logged to the log file, by default in `/tmp/mtail.INFO`. See [Troubleshooting](Troubleshooting) for more information.
mtail-3.0.0~rc5/docs/Interoperability.md 0000664 0000000 0000000 00000005335 13216376353 0020246 0 ustar 00root root 0000000 0000000 # Introduction
mtail is only part of a monitoring ecosystem -- it fills the gap between applications that export no metrics of their own in a [common protocol](Metrics) and the timeseries database.
# Details
mtail actively exports (i.e. pushes) to the following timeseries databases:
* [collectd](http://collectd.org/)
* [graphite](http://graphite.wikidot.com/start)
* [statsd](https://github.com/etsy/statsd)
mtail also is a passive exporter (i.e. pull, or scrape based) by:
* [Prometheus](http://prometheus.io)
* Google's Borgmon
# Logs Analysis
While `mtail` does a form of logs analysis, it does _not_ do any copying,
indexing, or searching of log files for data mining applications. It is only
intended for real- or near-time monitoring data for the purposes of performance
measurement and alerting.
Instead, see logs ingestion and analysis systems like
* [Logstash](https://www.elastic.co/products/logstash)
* [Graylog](https://www.graylog.org/)
if that is what you need.
# Prometheus Exporter Metrics
https://prometheus.io/docs/instrumenting/writing_exporters/ describes useful metrics for a Prometheus exporter to export. `mtail` does not follow that guide, for these reasons.
The exporter model described in that document is for active proxies between an application and Prometheus. The expectation is that when Prometheus scrapes the proxy (the exporter) it then performs its own scrape of the target application, and translates the results back into the Prometheus exposition format. The time taken to query the target application is what is exported as `X_scrape_duration_seconds` and its availability as `X_up`.
`mtail` doesn't work like that. It is reacting to the input log events, not scrapes, and so there is no concept of how long it takes to query the application or if it is available. There are things that, if you squint, look like applications in `mtail`, the virtual machine programs. They could be exporting their time to process a single line, and are `up` as long as they are not crashing on input. This doesn't translate well into the exporter metrics meanings though.
TODO(jaq): Instead, mtail will export a histogram of the runtime per line of each VM program.
`mtail` doesn't export `mtail_up` or `mtail_scrape_duration_seconds` because they are exactly equivalent* to the synthetic metrics that Prometheus creates automatically: https://prometheus.io/docs/concepts/jobs_instances/
\* A scrape duration measured in mtail versus in Prometheus would differ by the network round trip time, TCP setup time, and send/receive queue time. For practical purposes you can ignore them, as the usefulness of a scrape duration metric is not in its absolute value, but in how it changes over time.
mtail-3.0.0~rc5/docs/Language.md 0000664 0000000 0000000 00000026467 13216376353 0016455 0 ustar 00root root 0000000 0000000 # Introduction
As `mtail` is designed to tail log files and apply regular expressions to new log lines to extract data, the language naturally follows this pattern-action style.
It resembles another, more famous pattern-action language, that of AWK.
This page errs on the side of a language specification and reference. See the [Programming Guide](Programming-Guide) for a gentler introduction to writing `mtail` programs.
# Program Execution
`mtail` runs all programs on every line received by the log tailing subsystem. The rough model of this looks like:
```
for line in lines:
for regex in regexes:
if match:
do something
```
Thus it is useful to keep in mind that each program is acting on a single line of log data, then terminates.
An `mtail` program consists of exported variable definitions, pattern-action statements, and optional decorator definitions.
exported variable
pattern {
action statements
}
def decorator {
pattern and action statements
}
## Exported Variables
Variables, which have type `counter` or `gauge`, must be declared before their use.
```
counter line_count
gauge queue_length
```
They can be exported with a different name, with the `as` keyword, if one wants to use characters that would cause a parse error.
```
counter line_count as "line-count"
```
Variables can be dimensioned with one or more axes, with the `by` keyword.
```
counter bytes by operation, direction
```
Putting the `hidden` keyword at the start of the declaration means it won't be exported, which can be useful for storing temporary information.
```
hidden counter login_failures
```
## Pattern/Actions
mtail programs look a lot like awk programs. They consist of a list of conditional expressions followed by a brace-delimited block of code:
```
COND {
ACTION
}
```
COND is a conditional expression. It can be a regular expression, which if matched enters the action block, or a relational expression as you might encounter in a C program's `if` statement:
```
/foo/ {
ACTION1
}
variable > 0 {
ACTION2
}
```
In the above program, ACTION1 is taken on each line input if that line matches the word `foo`, and ACTION2 is taken on each line if when that line is read, the variable `variable` is greater than 0.
The action statements must be wrapped in curly braces, i.e. `{}`. `mtail` programs have no single-line statement conditionals like C.
## Regular Expressions
`mtail` supports RE2-style regular expression syntax.
## Single definition of constants
To re-use regular expressions, you can assign them to a `const` identifier:
```
const PREFIX /^\w+\W+\d+ /
// + PREFIX {
ACTION1
}
// + PREFIX + /foo/ {
ACTION2
}
```
In this example, ACTION1 is done for every line that starts with the prefix regex, and ACTION2 is done for the subset of those lines that also contain 'foo'.
## Else Clauses
When a conditional expression does not match, action can be taken as well:
```
/foo/ {
ACTION1
} else {
ACTION2
}
```
Else clauses can be nested. There is no ambiguity with the dangling-else problem, as `mtail` programs must wrap all block statements in `{}`.
## Incrementing a Counter
The simplest `mtail` program merely counts lines read:
```
/$/ {
line_count++
}
```
This program instructs `mtail` to increment the `line_count` counter variable on every line received (specifically anytime an end-of-line is matched.)
## Advanced conditionals
The `otherwise` keyword can be used as a conditional statement. It matches if no preceding conditional in the current scope has matched. This functions like the "default" clause in a switch statement in a C-like language.
```
/foo/ {
/foo1/ {
ACTION1
}
/foo2/ {
ACTION2
}
otherwise {
ACTION3
}
}
```
In this example, ACTION3 will be executed if neither `/foo1/` or `/foo2/` match on the input, but `/foo/` does.
## Capture Groups
Regular expressions can contain capture groups, subexpressions wrapped in parentheses. These can be referred to in the action block to extract data from the line being matched.
For example, part of a program that can extract from rsyncd logs may want to break down transfers by operation and module.
```
counter transfers_total by operation, module
/(?P&lt;operation&gt;\S+) (\S+) \[\S+\] (\S+) \(\S*\) \S+ (?P&lt;bytes&gt;\d+)/ {
transfers_total[$operation][$3]++
}
```
Numeric capture groups address subexpressions in the match result as you might expect from grouping in awk and perl.
Named capture groups can be referred to by their name as indicated in the regular expression using the `?P<name>` notation, as popularised by the Python regular expression library.
## Numerical capture groups and Metric type information
By limiting the pattern of a capturing group to only numeric characters, the programmer can hint to mtail about the type of an expression. For example, in the regular expression
`/(\d+)/`
the first capture group can only match digits, and so the compiler will infer that this is an integer match.
`/(\d+\.\d+)/`
looks like it matches floating point numbers, and so the compiler will infer that this is of type float.
The compiler performs type inference on the expressions that use the capture groups, and the metrics they are ultimately assigned to, and will assign a type (either integer or floating point) to the metrics exported.
Thus in a program like:
```
gauge i
gauge f
/(\d+)/ {
i = $1
}
/(\d+\.\d+)/ {
f = $1
}
```
the metric `i` will be of type Int and the metric `f` will be of type Float.
## Timestamps
It is also useful to timestamp a metric with the time the application thought an event occurred. Logs typically prefix the log line with a timestamp string, which can be extracted and then parsed into a timestamp internally, with the `strptime` builtin function.
A regular expression that extracts the timestamp in boring old syslog format looks like:
```
/^(?P&lt;date&gt;\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 02 15:04:05")
...
}
```
Buyer beware! The format string used by `mtail` is the same as the [Go time.Parse() format string](http://godoc.org/time#Parse), which is completely unlike that used by C's strptime. The format string must always be the 2nd of January 2006 at 3:04:05 PM. See the documentation for the **ANSIC** format in the above link for more details. **NOTE** that *unlike* Go's `time.Parse()` (and *like* C's) the format string is the *second* argument to this builtin function.
## Nested Actions
It is of course possible to nest pattern-actions within actions. This lets you factor out common prefixes (or suffixes!) and deal with per-message actions separately.
For example, parsing syslog timestamps is something you only want to do once, as it's expensive to match (and difficult to read!)
```
counter foo
counter bar
/^(?P&lt;date&gt;\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 02 15:04:05")
/foo/ {
foo++
}
/bar/ {
bar++
}
}
```
This will result in both foo and bar counters being timestamped with the current log line's parsed time, once they match a line.
# Builtin functions
There are "pure" builtin functions, in that they have no side effects on the
program state.
1. `len(x)`, a function of one string argument, which returns the length of
the string argument `x`.
1. `tolower(x)`, a function of one string argument, which returns the input `x`
in all lowercase.
There are type coercion functions, useful for overriding the type inference
made by the compiler if it chooses badly. (If the choice is egregious, please
file a bug!)
1. `int(x)`, a function of one argument performs type conversion to integer.
If `x` is a type that can be converted to integer, it does so. If the type
of `x` cannot be converted to an integer, a compile error is triggered. If
the value of `x` cannot be converted to an integer, then a runtime error is
triggered.
1. `float(x)`, a function of one argument that performs type conversion to
floating point numbers. The same rules apply as for `int()` above.
1. `strtol(x, y)`, a function of two arguments, which converts a string `x` to
an integer using base `y`. Useful for translating octal or hexadecimal
values in log messages.
A few builtin functions exist for manipulating the virtual machine state as
side effects for the metric export.
1. `getfilename()`, a function of no arguments, which returns the filename from
which the current log line input came.
1. `settime(x)`, a function of one integer argument, which sets the current
timestamp register.
1. `strptime(x, y)`, a function of two string arguments, which parses the
timestamp in the string `x` with the parse format string in `y`, and sets
the current timestamp register. The parse format string must follow [Go's
time.Parse() format string](http://golang.org/src/pkg/time/format.go)
1. `timestamp()`, a function of no arguments, which returns the current
timestamp. This is undefined if neither `settime` or `strptime` have been
called previously.
As described in Nested Actions, the **current timestamp register** refers to
`mtail`'s idea of the time associated with the current log line. This
timestamp is used when the variables are exported to the upstream collector.
The value defaults to the time that the log line arrives in `mtail`, and can be
changed with the `settime()` or `strptime()` builtins.
User defined functions are not supported, but read on to Decorated Actions for
how to reuse common code.
# Decorated actions
Decorated actions are an inversion of nested actions. They allow the program
to define repetitive functions that perform the same extraction across many
different actions.
For example, most log file formats start with a timestamp prefix. To reduce
duplication of work, decorators can be used to factor out the common work of
extracting the timestamp. For example, to define a decorator, use the `def`
keyword:
```
def syslog {
/(?P&lt;date&gt;\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 2 15:04:05")
next
}
}
```
The decorator definition starts and ends in a curly-braced block, and looks
like a normal pattern/action as above. The new part is the `next` keyword,
which indicates to `mtail` where to jump into the *decorated* block.
To use a decorator:
```
@syslog {
/some event/ {
variable++
}
}
```
The `@` notation, familiar to Python programmers, denotes that this block is
"wrapped" by the `syslog` decorator. The syslog decorator will be called on
each line first, which extracts the timestamp of the log line. Then, `next`
causes the wrapped block to execute, so then `mtail` matches the line against the
pattern `some event`, and if it does match, increments `variable`.
# Metric Storage Management
mtail performs no implicit garbage collection in the metric storage. The
program can hint to the virtual machine that a specific datum in a dimensioned
metric is no longer going to be used with the `del` keyword.
```
gauge duration by session
hidden session_start by session
/end/ {
duration[$session] = timestamp() - session_start[$session]
del session_start[$session]
}
```
In this example, a hidden metric is used to record some internal state. It
will grow unbounded as the number of sessions increases. If the programmer
knows that the `/end/` pattern is the last time a session will be observed,
then the datum at `$session` will be freed, which keeps `mtail` memory usage
under control and will improve search time for finding dimensioned metrics.
mtail-3.0.0~rc5/docs/Metrics.md 0000664 0000000 0000000 00000004135 13216376353 0016324 0 ustar 00root root 0000000 0000000 # Introduction
A metric is a data type that describes a measurement.
It has a **name**, and a **value**, and a **time** that the measurement was taken.
It also has **units**, so that measurements can be compared and calculated with.
It has a **class**, so that tools can automatically perform some aggregation operations on collections of measurements.
It has a **type**, describing the sort of data it contains: floating point or integer values.
Finally, it has some **tags**, so that additional information about the measurement can be added to assist queries later.
## Classes of Metrics
The class of a Metric can be:
* a monotonically increasing counter, that allows the calculation of rates of change
* a variable gauge, that records instantaneous values
Counters are very powerful as they are resistant to errors caused by sampling frequency. Typically used to accumulate events, they can show changes in behaviour through the calculation of rates, and rates of rates. They can be summed across a group and that sum also derived. Counter resets can indicate crashes or restarts.
Gauges are less powerful as their ability to report is dependent on the sampling rate -- spikes in the timeseries can be missed. They record queue lengths, resource usage and quota, and other sized measurements.
(N.B. Gauges can be simulated with two counters.)
## Types of data
`mtail` records either integer or floating point values as the value of a metric. By default, all metrics are integer, unless the compiler can infer a floating point type.
Inference is done through the type checking pass of the compiler. It uses knowledge of the expressions written in the program as well as heuristics on capturing groups in the regular expressions given.
For example, in the program:
```
counter a
/(\S+)/ {
a = $1
}
```
the compiler will assume that `a` is of an integer type. With more information about the matched text:
```
counter a
/(\d+\.\d+)/ {
a = $1
}
```
the compiler can figure out that the capturing group reference `$1` contains digit and decimal point characters, and is likely then a floating point type.
mtail-3.0.0~rc5/docs/Programming-Guide.md 0000664 0000000 0000000 00000027001 13216376353 0020230 0 ustar 00root root 0000000 0000000 # Introduction
`mtail` is very simple and thus limits what is possible with metric
manipulation, but is very good for getting values into the metrics. This page
describes some common patterns for writing useful `mtail` programs.
## Changing the exported variable name
`mtail` only lets you use "C"-style identifier names in the program text, but
you can rename the exported variable as it gets presented to the collection
system if you don't like that.
```
counter connection_time_total as "connection-time_total"
```
## Reusing pattern pieces
If the same pattern gets used over and over, then define a constant and avoid
having to check the spelling of every occurrence.
```
# Define some pattern constants for reuse in the patterns below.
const IP /\d+(\.\d+){3}/
const MATCH_IP /(?P&lt;ip&gt;/ + IP + /)/
...
# Duplicate lease
/uid lease / + MATCH_IP + / for client .* is duplicate on / {
duplicate_lease++
}
```
## Parse the log line timestamp
`mtail` attributes a timestamp to each event.
If no timestamp exists in the log and none explicitly parsed by the mtail program, then mtail will use the current system time as the time of the event.
Many log files include the timestamp of the event as reported by the logging program. To parse the timestamp, use the `strptime` function with
a [Go time.Parse layout string](https://golang.org/pkg/time/#Parse).
```
/^(?P&lt;date&gt;\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ {
strptime($date, "Jan _2 15:04:05")
```
N.B. If no timestamp parsing is done, then the reported timestamp of the event
may add some latency to the measurement of when the event really occurred.
Between your program logging the event, and mtail reading it, there are many
moving parts: the log writer, some system calls perhaps, some disk IO, some
more system calls, some more disk IO, and then mtail's virtual machine
execution. While normally negligible, it is worth stating in case users notice
offsets in time between what mtail reports and the event really occurring. For
this reason, it's recommended to always use the log file's timestamp if one is
available.
## Common timestamp parsing
The decorator syntax was designed with common timestamp parsing in mind. It
allows the code for getting the timestamp out of the log line to be reused and
make the rest of the program text more readable and thus maintainable.
```
# The `syslog' decorator defines a procedure. When a block of mtail code is
# "decorated", it is called before entering the block. The block is entered
# when the keyword `next' is reached.
def syslog {
  /(?P&lt;date&gt;(?P&lt;legacy_date&gt;\w+\s+\d+\s+\d+:\d+:\d+)|(?P&lt;rfc3339_date&gt;\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
  /\s+(?:\w+@)?(?P&lt;hostname&gt;[\w\.-]+)\s+(?P&lt;application&gt;[\w\.-]+)(?:\[(?P&lt;pid&gt;\d+)\])?:\s+(?P&lt;message&gt;.*)/ {
# If the legacy_date regexp matched, try this format.
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
# If the RFC3339 style matched, parse it this way.
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T03:04:05-0700")
}
# Call into the decorated block
next
}
}
```
This can be used around any blocks later in the program.
```
@syslog {
/foo/ {
...
}
/bar/ {
}
} # end @syslog decorator
```
Both the foo and bar pattern actions will have the syslog timestamp parsed from
them before being called.
## Conditional structures
The `/pattern/ { action }` idiom is the normal conditional control flow structure in `mtail` programs.
If the pattern matches, then the actions in the block are executed. If the
pattern does not match, the block is skipped.
The `else` keyword allows the program to perform action if the pattern does not match.
```
/pattern/ {
action
} else {
alternative
}
```
The example above would execute the "alternative" block if the pattern did not
match the current line.
The `otherwise` keyword can be used to create control flow structure
reminiscent of the C `switch` statement. In a containing block, the
`otherwise` keyword indicates that this block should be executed only if no
other pattern in the same scope has matched.
```
{
/pattern1/ { _action1_ }
/pattern2/ { _action2_ }
otherwise { _action3_ }
}
```
In this example, "action3" would execute if both pattern1 and pattern2 did not
match the current line.
## Storing intermediate state
Hidden metrics are metrics that can be used for internal state and are never
exported outside of `mtail`. For example if the time between pairs of log
lines needs to be computed, then a hidden metric can be used to record the
timestamp of the start of the pair.
**Note** that the `timestamp` builtin _requires_ that the program has set a log
line timestamp with `strptime` or `settime` before it is called.
```
hidden gauge connection_time by pid
...
# Connection starts
/connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ {
connections_total++
# Record the start time of the connection, using the log timestamp.
connection_time[$pid] = timestamp()
}
...
# Connection summary when session closed
/sent (?P&lt;sent&gt;\d+) bytes  received (?P&lt;received&gt;\d+) bytes  total size \d+/ {
# Sum total bytes across all sessions for this process
bytes_total["sent"] += $sent
bytes_total["received"] += $received
# Count total time spent with connections open, according to the log timestamp.
connection_time_total += timestamp() - connection_time[$pid]
# Delete the datum referenced in this dimensional metric. We assume that
# this will never happen again, and hint to the VM that we can garbage
# collect the memory used.
del connection_time[$pid]
}
```
In this example, the connection timestamp is recorded in the hidden variable
`connection_time` keyed by the "pid" of the connection. Later when the
connection end is logged, the delta between the current log timestamp and the
start timestamp is computed and added to the total connection time.
In this example, the average connection time can be computed in a collection
system by taking the ratio of the number of connections (`connections_total`)
over the time spent (`connection_time_total`). For example
in [Prometheus](http://prometheus.io) one might write:
```
connection_time_10s_moving_avg =
rate(connections_total[10s])
/ on job
rate(connection_time_total[10s])
```
Note also that the `del` keyword is used to signal to `mtail` that the
connection_time value is no longer needed. This will cause `mtail` to delete
the datum referenced by that label from this metric, keeping `mtail`'s memory
usage under control and speeding up labelset search time (by reducing the
search space!)
## Computing moving averages
`mtail` deliberately does not implement complex mathematical functions. It
wants to process a log line as fast as it can. Many other products on the
market already do complex mathematical functions on timeseries data,
like [Prometheus](http://prometheus.io) and [Riemann](http://riemann.io), so
`mtail` defers that responsibility to them. (Do One Thing, and Do It Pretty
Good.)
But say you still want to do a moving average in `mtail`. First note that
`mtail` has no history available, only point in time data. You can update an
average with a weighting to make it an exponential moving average (EMA).
```
gauge average
/some (\d+) match/ {
# Use a smoothing constant 2/(N + 1) to make the average over the last N observations
average = 0.9 * $1 + 0.1 * average
}
```
However this doesn't take into account the likely situation that the matches arrive irregularly (the time interval between them is not constant.) Unfortunately the formula for this requires the exp() function (`e^N`) as described here: http://stackoverflow.com/questions/1023860/exponential-moving-average-sampled-at-varying-times . I recommend you defer this computation to the collection system
## Histograms
Histograms are preferred over averages in many monitoring howtos, blogs, talks,
and rants, in order to give the operators better visibility into the behaviour
of a system.
At the moment, `mtail` does not have first class support for a distribution
type, but a histogram can be easily created by making one label on a
dimensioned metric the name of the histogram bucket.
```
counter apache_http_request_time_microseconds by le, server_port, handler, request_method, request_status, request_protocol
...
###
# HTTP Requests with histogram buckets.
#
apache_http_request_time_microseconds_count[$server_port][$handler][$request_method][$request_status][$request_protocol]++
# These statements "fall through", so the histogram is cumulative. The
# collecting system can compute the percentile bands by taking the ratio of
# each bucket value over the final bucket.
# 5ms bucket.
$time_us < 5000 {
apache_http_request_time_microseconds["5000"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
}
# 10ms bucket.
$time_us < 10000 {
apache_http_request_time_microseconds["10000"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
}
# 25ms bucket.
$time_us < 25000 {
apache_http_request_time_microseconds["25000"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
}
# 50ms bucket.
$time_us < 50000 {
apache_http_request_time_microseconds["50000"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
}
...
# 10s bucket.
$time_us < 10000000 {
apache_http_request_time_microseconds["10000000"][$server_port][$handler][$request_method][$request_status][$request_protocol]++
}
```
This example creates a histogram with a bucket label "le" that contains a count
of all requests that were "less than" the bucket label's value.
In tools like [Prometheus](http://prometheus.io) these can be manipulated in
aggregate for computing percentiles of response latency.
```
apache_http_request_time:rate10s = rate(apache_http_request_time_microseconds[10s])
apache_http_request_time_count:rate10s = rate(apache_http_request_time_microseconds_count[10s])
apache_http_request_time:percentiles =
apache_http_request_time:rate10s
/ on (job, port, handler, request_method, request_status, request_protocol)
apache_http_request_time_microseconds_count:rate10s
```
This new timeseries can be plotted to see the percentile bands of each bucket,
for example to visualise the distribution of requests moving between buckets as
the performance of the server changes.
Further, these timeseries can be used
for
[Service Level](https://landing.google.com/sre/book/chapters/service-level-objectives.html)-based
alerting (a technique for declaring what a defensible service level is based on
the relative costs of engineering more reliability versus incident response,
maintenance costs, and other factors), as we can now see what percentage of
responses fall within and without a predefined service level:
```
apache_http_request_time:latency_sli =
apache_http_request_time:rate10s{le="200"}
/ on (job, port, handler, request_method, request_status, request_protocol)
apache_http_request_time_microseconds_count:rate10s
ALERT LatencyTooHigh
IF apache_http_request_time:latency_sli < 0.555555555
LABELS { severity="page" }
ANNOTATIONS {
summary = "Latency is missing the service level objective"
description = "Latency service level indicator is {{ $value }}, which is below nine fives SLO."
}
```
In this example, prometheus computes a service level indicator of the ratio of
requests at or below the target of 200ms against the total count, and then
fires an alert if the indicator drops below nine fives.
mtail-3.0.0~rc5/docs/Testing.md 0000664 0000000 0000000 00000002377 13216376353 0016341 0 ustar 00root root 0000000 0000000 # Introduction
By default any compile errors are logged to the standard log `/tmp/mtail.INFO`
unless otherwise redirected. (You can emit to standard out with
`--logtostderr` flag.) Program errors are also printed on the HTTP status
handler, by default at port 3903.
If you want more debugging information, `mtail` provides a few flags to assist with testing your program in standalone mode.
# Details
## Compilation errors
The `compile_only` flag will run the `mtail` compiler, print any error messages, and then exit.
You can use this to check your programs are syntactically valid during the development process.
```
mtail --compile_only --progs ./progs
```
This could be added as a pre-commit hook to your source code repository.
## Testing programs
The `one_shot` flag will compile and run the `mtail` programs, then feed in any logs specified from the beginning of the file (instead of tailing them), then print to the log all metrics collected.
You can use this to check that your programs are giving the expected output against some gold standard log file samples.
```
mtail --one_shot --progs ./progs --logs testdata/foo.log
```
# Troubleshooting
For more information about debugging mtail programs, see the tips under [Troubleshooting](Troubleshooting)
mtail-3.0.0~rc5/docs/Troubleshooting.md 0000664 0000000 0000000 00000007773 13216376353 0020120 0 ustar 00root root 0000000 0000000 # Troubleshooting
This page gives an overview of some avenues to debug your `mtail` installation.
## Reporting a problem
When reporting a problem, please include:
* the output of `mtail --version`
* the first lines of the INFO log (`/tmp/mtail.INFO` by default)
* the top of the status page (on HTTP port 3903 by default)
## Compilation problems
Compilation problems will be emitted to the standard INFO log
* which is visible either on stderr if `mtail` is run with the `--logtostderr` flag
* which is stored in the location provided by the `--log_dir` flag (usually, /tmp)
(The behaviour of glog is documented in https://github.com/golang/glog)
Errors for the most recent version of the program will also be displayed on the
standard status page (served over HTTP at port 3903 by default) in the *Program Loader* section.
If a program fails to compile, it will not be loaded. If an existing program
has been loaded, and a new version is written to disk (by you, or a
configuration management system) and that new version does not compile,
`mtail` will log the errors and not interrupt or restart the existing, older program.
The `--compile_only` flag will only attempt to compile the programs and not
execute them. This can be used for pre-commit testing, for example.
### Syntax trees, type information, and virtual machine bytecode
More detailed compiler debugging can be retrieved by using the `--dump_ast`, `--dump_ast_types`, and `--dump_bytecode`, all of which dump their state to the INFO log.
For example, type errors logged such as
`prog.mtail: Runtime error: conversion of "-0.000000912" to int failed: strconv.ParseInt: parsing "-0.000000912": invalid syntax` suggest an invalid type inference of `int` instead of `float` for some program symbol or expression. Use the `--dump_ast_types` flag to see the type annotated syntax tree of the program for more details.
When reporting a problem, please include the AST type dump.
## Memory or performance issues
`mtail` is a virtual machine emulator, and so strange performance issues can occur beyond the imagination of the author.
The standard Go profiling tool can help. Start with a cpu profile:
`go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/profile`
or a memory profile:
`go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/heap`
There are many good guides on using the profiling tool:
* https://software.intel.com/en-us/blogs/2014/05/10/debugging-performance-issues-in-go-programs is one such guide.
The goroutine stack dump can also help explain what is happening at the moment.
http://localhost:3903/debug/pprof/goroutine?debug=2 shows the full goroutine stack dump.
* `(*Watcher).readEvents` reads events from the filesystem
* `(*Tailer).run` processes log change events; `.read` reads the latest log lines
* `(*Loader).processEvents` handles filesystem event changes regarding new program text
* `(*Loader).processLines` handles new lines coming from the log tailer
* `(*MtailServer).WaitForShutdown` waits for the other components to terminate
* `(*Exporter).StartMetricPush` exists if there are any push collectors (e.g. Graphite) to push to
* `(*Exporter).HandlePrometheusMetrics` exists if an existing Prometheus pull collection is going on
There is one `(*VM).Run` stack per program. These are opaque to the goroutine
stack dump as they execute the bytecode. However, the second argument to `Run`
on the stack is the first four letters of the program name, encoded as ASCII.
You can transcode these back to their names by doing a conversion from the
int32 value in hex provided in the stack, e.g.: 0x61706163 -> 'apac' (probably
an apache log program); 0x7273796e -> 'rsyn' (probably an rsyncd log program)
Obvious problems seen in the goroutine stack dump are long-waiting goroutines, usually on mutexes.
(they show their block time in minutes, e.g. `goroutine 38 [semacquire, 1580
minutes]:`) which usually also manifest as a logjam (no pun intended) in the
loader, tailer, and watcher goroutines (in state 'chan send').
mtail-3.0.0~rc5/docs/faq.md 0000664 0000000 0000000 00000001450 13216376353 0015462 0 ustar 00root root 0000000 0000000 # FAQ
"Frequently" is probably an overstatement, but here's a collection of questions and answers that pop up on the mailing list and issues.
## I don't like a particular label on the metrics. How do I remove it?
All the labels are under your own control, except for the `prog` label which is used for namespace deconfliction -- i.e. multiple programs can be running in `mtail` and they should not be able to affect each other.
It is best if you do some post processing in your collection system and configure it to filter out the `prog` label, so that strange aggregations don't occur.
In Prometheus, this could be achieved like so:
```
metric_relabel_configs:
- target_label: prog
replacement: ''
```
(See [this comment](https://github.com/google/mtail/issues/59#issuecomment-303531070)).
mtail-3.0.0~rc5/docs/index.md 0000664 0000000 0000000 00000002427 13216376353 0016027 0 ustar 00root root 0000000 0000000 mtail - extract whitebox monitoring data from application logs for collection into a timeseries database
========================================================================================================
mtail is a tool for extracting metrics from application logs to be exported into a timeseries database or timeseries calculator for alerting and dashboarding.
It aims to fill a niche between applications that do not export their own internal state, and existing monitoring systems, without patching those applications or rewriting the same framework for custom extraction glue code.
The extraction is controlled by `mtail` programs which define patterns and actions:
# simple line counter
counter line_count
/$/ {
line_count++
}
Metrics are exported for scraping by a collector as JSON or Prometheus format
over HTTP, or can be periodically sent to a collectd, statsd, or Graphite
collector socket.
Read more about the [Programming Guide](Programming-Guide.md), [Language](Language.md), [Building from source](Building.md), help for [Interoperability](Interoperability.md) with other monitoring system components, and [Deploying](Deploying.md) and [Troubleshooting](Troubleshooting.md)
Mailing list: https://groups.google.com/forum/#!forum/mtail-users
mtail-3.0.0~rc5/docs/style.md 0000664 0000000 0000000 00000001536 13216376353 0016060 0 ustar 00root root 0000000 0000000 # Contribution style guide
## Table tests
Use the `t.Run` subtest form. This assists debugging by printing the name of
the table entry without additional parameters to t.Log and t.Error later on.
It also means that the `-run` and `-bench` flags can be used to filter a specific
test without excessive comment-and-rebuild cycles.
Prefer to construct the subtest's name from the test parameters with
`fmt.Sprintf`, otherwise use a `name` field.
When comparing results, use `deep.Equal`. The parameter order should always be
`expected`, then `observed`. This makes the diff output read like "the observed
value is not equal to the expected value."
If there is a non-nil diff result, emit it with `t.Error(diff)`. If multiple
diffs are emitted in a single test, prefix the emission with a `t.Log` of the
name of the result variable or function under test.
mtail-3.0.0~rc5/emgen/ 0000775 0000000 0000000 00000000000 13216376353 0014534 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc5/emgen/emgen.go 0000664 0000000 0000000 00000010602 13216376353 0016155 0 ustar 00root root 0000000 0000000 package main
import (
crand "crypto/rand"
"flag"
"fmt"
"math/big"
mrand "math/rand"
)
// Command-line flags controlling the random program generator.
var (
	useCryptoRand = flag.Bool("use_crypto_rand", false, "Use crypto/rand instead of math/rand")
	randSeed      = flag.Int64("rand_seed", 1, "Seed to use for math.rand.")
	minIterations = flag.Int64("min_iterations", 5000, "Minimum number of iterations before stopping program generation.")
)
// node is one production in the grammar table: either a set of expansion
// alternatives (alts), or — when alts is empty — a terminal string to emit.
// NOTE: the table below initialises node with positional composite literals,
// so the field order here is significant.
type node struct {
	alts [][]string // each alternative is a sequence of grammar symbols
	term string     // terminal text emitted when there are no alternatives
}
// table is the generator's grammar, keyed by symbol name. A symbol with a
// non-empty alts list is a nonterminal: one alternative is picked at random
// during generation. A symbol with an empty alts list emits its term text.
// Symbols that do not appear in the table at all are emitted verbatim as
// their own name (keywords and operators below rely on this).
var table = map[string]node{
	"start":     {[][]string{{"stmt_list"}}, ""},
	"stmt_list": {[][]string{{""}, {"stmt_list", "stmt"}}, ""},
	"stmt": {[][]string{
		{"cond", "{", "stmt_list", "}"},
		{"expr"},
		{"decl"},
		{"def_spec"},
		{"deco_spec"},
		{"next"},
		{"const", "ID", "pattern_expr"}}, ""},
	"expr":          {[][]string{{"assign_expr"}}, ""},
	"assign_expr":   {[][]string{{"rel_expr"}, {"unary_expr", "=", "rel_expr"}, {"unary_expr", "+=", "rel_expr"}}, ""},
	"rel_expr":      {[][]string{{"additive_expr"}, {"additive_expr", "relop", "additive_expr"}}, ""},
	"relop":         {[][]string{{"<"}, {">"}, {"<="}, {">="}, {"=="}, {"!="}}, ""},
	"additive_expr": {[][]string{{"unary_expr"}, {"additive_expr", "+", "unary_expr"}, {"additive_expr", "-", "unary_expr"}}, ""},
	"unary_expr":    {[][]string{{"postfix_expr"}, {"BUILTIN", "(", "arg_expr_list", ")"}}, ""},
	"arg_expr_list": {[][]string{{""}, {"assign_expr"}, {"arg_expr_list", ",", "assign_expr"}}, ""},
	"postfix_expr":  {[][]string{{"primary_expr"}, {"postfix_expr", "++"}, {"postfix_expr", "[", "expr", "]"}}, ""},
	"primary_expr":  {[][]string{{"ID"}, {"CAPREF"}, {"STRING"}, {"(", "expr", ")"}, {"NUMERIC"}}, ""},
	"cond":          {[][]string{{"pattern_expr"}, {"rel_expr"}}, ""},
	"pattern_expr":  {[][]string{{"REGEX"}, {"pattern_expr", "+", "REGEX"}, {"pattern_expr", "+", "ID"}}, ""},
	"decl":          {[][]string{{"hide_spec", "type_spec", "declarator"}}, ""},
	"hide_spec":     {[][]string{{""}, {"hidden"}}, ""},
	"declarator":    {[][]string{{"declarator", "by_spec"}, {"declarator", "as_spec"}, {"ID"}, {"STRING"}}, ""},
	"type_spec":     {[][]string{{"counter"}, {"gauge"}}, ""},
	"by_spec":       {[][]string{{"by", "by_expr_list"}}, ""},
	"by_expr_list":  {[][]string{{"ID"}, {"STRING"}, {"by_expr_list", ",", "ID"}, {"by_expr_list", ",", "STRING"}}, ""},
	"as_spec":       {[][]string{{"as", "STRING"}}, ""},
	"def_spec":      {[][]string{{"def", "ID", "{", "stmt_list", "}"}}, ""},
	"deco_spec":     {[][]string{{"deco", "{", "stmt_list", "}"}}, ""},
	// Terminal symbols: fixed sample spellings for each token class.
	"BUILTIN": {[][]string{{"strptime"}, {"timestamp"}, {"len"}, {"tolower"}}, ""},
	"CAPREF":  {[][]string{}, "$1"},
	"REGEX":   {[][]string{}, "/foo/"},
	"STRING":  {[][]string{}, "\"bar\""},
	"ID":      {[][]string{}, "quux"},
	"NUMERIC": {[][]string{}, "37"},
}
// emitter reads words from c and prints them to stdout, greedily wrapping
// lines at 80 columns with a single space between words. A bare "\n" word
// forces a line break. Intended to run as a goroutine; it returns only if
// the channel is closed.
//
// Bug fixed: previously a "\n" word printed its newline and then fell
// through to the word-wrapping logic, printing the newline a second time
// (possibly preceded by a space) and leaving the line-length counter stale.
func emitter(c chan string) {
	var l int // length of the current output line
	for w := range c {
		if w == "\n" {
			// Explicit line break: flush and start a fresh line.
			fmt.Println()
			l = 0
			continue
		}
		if w == "" {
			continue
		}
		if l+len(w)+1 >= 80 {
			// Word plus separating space would overflow; wrap first.
			fmt.Println()
			fmt.Print(w)
			l = len(w)
		} else {
			if l != 0 {
				w = " " + w
			}
			l += len(w)
			fmt.Print(w)
		}
	}
}
// rand returns a pseudorandom integer in [0, n), drawing from crypto/rand
// or math/rand depending on the --use_crypto_rand flag. Errors from the
// crypto source are deliberately ignored, matching best-effort generation.
func rand(n int) (r int) {
	if !*useCryptoRand {
		return mrand.Intn(n)
	}
	v, _ := crand.Int(crand.Reader, big.NewInt(int64(n)))
	return int(v.Int64())
}
// main expands the grammar from the "start" symbol using a random walk over
// a pushdown stack of pending symbols, streaming terminals to the emitter
// goroutine, and stops after --min_iterations expansion steps.
func main() {
	flag.Parse()
	mrand.Seed(*randSeed)

	words := make(chan string, 1)
	go emitter(words)

	// Symbols awaiting expansion; the top of the stack is the last element.
	stack := []string{"start"}
	remaining := *minIterations
	for len(stack) > 0 && remaining > 0 {
		// Pop the next symbol.
		top := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if n, ok := table[top]; ok {
			if len(n.alts) > 0 {
				// Nonterminal: pick one alternative at random and push its
				// symbols in reverse so the first symbol is expanded next.
				alt := n.alts[rand(len(n.alts))]
				for i := len(alt) - 1; i >= 0; i-- {
					stack = append(stack, alt[i])
				}
			} else {
				// Terminal entry: emit its text.
				words <- n.term
			}
		} else {
			// Unknown symbol: treat as a literal terminal and emit it.
			words <- top
		}
		remaining--
	}
	words <- "\n"
}
mtail-3.0.0~rc5/ex_test.go 0000664 0000000 0000000 00000007631 13216376353 0015452 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package main
import (
"fmt"
"os"
"path/filepath"
"sync"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/mtail/metrics"
"github.com/google/mtail/mtail"
"github.com/google/mtail/testdata"
"github.com/google/mtail/watcher"
"github.com/spf13/afero"
)
// exampleProgramTests pairs each shipped example program with a sample log
// to feed it and a golden file holding the metrics expected afterwards.
var exampleProgramTests = []struct {
	programfile string // Example program file.
	logfile     string // Sample log input.
	goldenfile  string // Expected metrics after processing.
}{
	{
		"examples/rsyncd.mtail",
		"testdata/rsyncd.log",
		"testdata/rsyncd.golden",
	},
	{
		"examples/sftp.mtail",
		"testdata/sftp_chroot.log",
		"testdata/sftp_chroot.golden",
	},
	{
		"examples/dhcpd.mtail",
		"testdata/anonymised_dhcpd_log",
		"testdata/anonymised_dhcpd_log.golden",
	},
	{
		"examples/ntpd.mtail",
		"testdata/ntp4",
		"testdata/ntp4.golden",
	},
	{
		"examples/ntpd_peerstats.mtail",
		"testdata/xntp3_peerstats",
		"testdata/xntp3_peerstats.golden",
	},
	{
		"examples/otherwise.mtail",
		"testdata/otherwise.log",
		"testdata/otherwise.golden",
	},
	{
		"examples/else.mtail",
		"testdata/else.log",
		"testdata/else.golden",
	},
	{
		"examples/types.mtail",
		"testdata/types.log",
		"testdata/types.golden",
	},
	{
		"examples/filename.mtail",
		"testdata/else.log",
		"testdata/filename.golden",
	},
	{
		"examples/logical.mtail",
		"testdata/logical.log",
		"testdata/logical.golden",
	},
	{
		"examples/strcat.mtail",
		"testdata/strcat.log",
		"testdata/strcat.golden",
	},
	{
		"examples/add_assign_float.mtail",
		"testdata/add_assign_float.log",
		"testdata/add_assign_float.golden",
	},
	{
		"examples/typed-comparison.mtail",
		"testdata/typed-comparison.log",
		"testdata/typed-comparison.golden",
	},
	{
		"examples/match-expression.mtail",
		"testdata/match-expression.log",
		"testdata/match-expression.golden",
	},
}
// TestExamplePrograms runs each example program in one-shot mode over its
// sample log, then diffs the resulting metric store against the golden file.
// Fixes: error message typo ("tailling") and non-idiomatic snake_case local.
func TestExamplePrograms(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}
	for _, tc := range exampleProgramTests {
		t.Run(fmt.Sprintf("%s on %s", tc.programfile, tc.logfile), func(t *testing.T) {
			w := watcher.NewFakeWatcher()
			store := metrics.NewStore()
			fs := &afero.OsFs{}
			logs := []string{tc.logfile}
			o := mtail.Options{Progs: tc.programfile, LogPathPatterns: logs, W: w, FS: fs, Store: store}
			o.OneShot = true
			o.OmitMetricSource = true
			o.DumpAstTypes = true
			o.DumpBytecode = true
			mtail, err := mtail.New(o)
			if err != nil {
				t.Fatalf("create mtail failed: %s", err)
			}
			if err := mtail.StartTailing(); err != nil {
				t.Fatalf("start tailing failed: %s", err)
			}
			g, err := os.Open(tc.goldenfile)
			if err != nil {
				t.Fatalf("could not open golden file: %s", err)
			}
			defer g.Close()
			goldenStore := metrics.NewStore()
			testdata.ReadTestData(g, tc.programfile, goldenStore)
			if err := mtail.Close(); err != nil {
				t.Error(err)
			}
			// Ignore the store's internal lock when diffing.
			diff := cmp.Diff(goldenStore, store, cmpopts.IgnoreUnexported(sync.RWMutex{}))
			if diff != "" {
				t.Error(diff)
				t.Logf(" Golden metrics: %s", goldenStore.Metrics)
				t.Logf("Program metrics: %s", store.Metrics)
			}
		})
	}
}
// This test only compiles examples, but has coverage over all examples
// provided. This ensures we ship at least syntactically correct examples.
// TestCompileExamplePrograms compiles (but does not run) every example
// program, ensuring the shipped examples are at least syntactically valid.
// Fix: the error returned by mtail.Close() was silently dropped, unlike in
// TestExamplePrograms; surface it so cleanup failures are reported.
func TestCompileExamplePrograms(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode")
	}
	matches, err := filepath.Glob("examples/*.mtail")
	if err != nil {
		t.Fatal(err)
	}
	for _, tc := range matches {
		t.Run(tc, func(t *testing.T) {
			w := watcher.NewFakeWatcher()
			s := metrics.NewStore()
			fs := &afero.OsFs{}
			o := mtail.Options{Progs: tc, W: w, FS: fs, Store: s}
			o.CompileOnly = true
			o.OmitMetricSource = true
			o.DumpAstTypes = true
			o.DumpBytecode = true
			mtail, err := mtail.New(o)
			if err != nil {
				t.Fatal(err)
			}
			if err := mtail.Close(); err != nil {
				t.Error(err)
			}
		})
	}
}
mtail-3.0.0~rc5/examples/ 0000775 0000000 0000000 00000000000 13216376353 0015257 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc5/examples/add_assign_float.mtail 0000664 0000000 0000000 00000000210 13216376353 0021561 0 ustar 00root root 0000000 0000000 gauge metric
# To make ex_test.go happy
strptime("2017-10-30T08:52:14Z", "2006-01-02T15:04:05Z07:00")
# Accumulate every decimal number matched on a line into the gauge,
# exercising the += operator with a float operand.
/(\d+\.\d+)/ {
metric += $1
}
mtail-3.0.0~rc5/examples/apache_combined.mtail 0000664 0000000 0000000 00000002066 13216376353 0021374 0 ustar 00root root 0000000 0000000 # Copyright 2015 Ben Kochie . All Rights Reserved.
# This file is available under the Apache license.
# Parser for the common apache "NCSA extended/combined" log format
# LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"
counter apache_http_requests_total by request_method, http_version, request_status
counter apache_http_bytes_total by request_method, http_version, request_status
/^/ +
/(?P[0-9A-Za-z\.-]+) / + # %h
/(?P[0-9A-Za-z-]+) / + # %l
/(?P[0-9A-Za-z-]+) / + # %u
/(?P\[\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|-)\d{4}\]) / + # %t
/"(?P[A-Z]+) (?P\S+) (?PHTTP\/[0-9\.]+)" / + # \"%r\"
/(?P\d{3}) / + # %>s
/(?P\d+) / + # %b
/"(?P\S+)" / + # \"%{Referer}i\"
/"(?P[[:print:]]+)"/ + # \"%{User-agent}i\"
/$/ {
apache_http_requests_total[$request_method][$http_version][$request_status]++
apache_http_bytes_total[$request_method][$http_version][$request_status] += $response_size
}
mtail-3.0.0~rc5/examples/apache_metrics.mtail 0000664 0000000 0000000 00000011323 13216376353 0021256 0 ustar 00root root 0000000 0000000 # Copyright 2015 Ben Kochie . All Rights Reserved.
# This file is available under the Apache license.
# Parser for a metrics-friendly apache log format
# LogFormat "%v:%p %R %m %>s %H conn=%X %D %O %I %k" metrics
counter http_connections_aborted_total by server_port, handler, method, code, protocol, connection_status
counter http_connections_closed_total by server_port, handler, method, code, protocol, connection_status
counter http_request_size_bytes_total by server_port, handler, method, code, protocol
counter http_response_size_bytes_total by server_port, handler, method, code, protocol
counter http_request_duration_seconds_bucket by le, server_port, handler, method, code, protocol
counter http_request_duration_seconds_sum by server_port, handler, method, code, protocol
counter http_request_duration_seconds_count by server_port, handler, method, code, protocol
/^/ +
/(?P\S+) / + # %v:%p - The canonical ServerName of the server serving the request. : The canonical port of the server serving the request.
/(?P\S+) / + # %R - The handler generating the response (if any).
/(?P[A-Z]+) / + # %m - The request method.
/(?P\d{3}) / + # %>s - Status code.
/(?P\S+) / + # %H - The request protocol.
/(?Pconn=.) / + # %X - Connection status when response is completed
/(?P\d+) / + # %D - The time taken to serve the request, in microseconds.
/(?P\d+) / + # %O - Bytes sent, including headers.
/(?P\d+) / + # %I - Bytes received, including request and headers.
/(?P\d+)/ + # %k - Number of keepalive requests handled on this connection.
/$/ {
###
# HTTP Requests with histogram buckets.
#
http_request_duration_seconds_count[$server_port][$handler][$method][$code][$protocol]++
# %D is in microseconds; one microsecond is 1e-6 seconds. The previous
# factor 0.0000001 (1e-7) understated the _seconds_sum metric by 10x.
http_request_duration_seconds_sum[$server_port][$handler][$method][$code][$protocol] += $time_us * 0.000001
# These statements "fall through", so the histogram is cumulative. The
# collecting system can compute the percentile bands by taking the ratio of
# each bucket value over the final bucket.
# 5ms bucket.
$time_us <= 5000 {
http_request_duration_seconds_bucket["0.005"][$server_port][$handler][$method][$code][$protocol]++
}
# 10ms bucket.
$time_us <= 10000 {
http_request_duration_seconds_bucket["0.01"][$server_port][$handler][$method][$code][$protocol]++
}
# 25ms bucket.
$time_us <= 25000 {
http_request_duration_seconds_bucket["0.025"][$server_port][$handler][$method][$code][$protocol]++
}
# 50ms bucket.
$time_us <= 50000 {
http_request_duration_seconds_bucket["0.05"][$server_port][$handler][$method][$code][$protocol]++
}
# 100ms bucket.
$time_us <= 100000 {
http_request_duration_seconds_bucket["0.1"][$server_port][$handler][$method][$code][$protocol]++
}
# 250ms bucket.
$time_us <= 250000 {
http_request_duration_seconds_bucket["0.25"][$server_port][$handler][$method][$code][$protocol]++
}
# 500ms bucket.
$time_us <= 500000 {
http_request_duration_seconds_bucket["0.5"][$server_port][$handler][$method][$code][$protocol]++
}
# 1s bucket.
$time_us <= 1000000 {
http_request_duration_seconds_bucket["1"][$server_port][$handler][$method][$code][$protocol]++
}
# 2.5s bucket.
$time_us <= 2500000 {
http_request_duration_seconds_bucket["2.5"][$server_port][$handler][$method][$code][$protocol]++
}
# 5s bucket.
$time_us <= 5000000 {
http_request_duration_seconds_bucket["5"][$server_port][$handler][$method][$code][$protocol]++
}
# 10s bucket.
$time_us <= 10000000 {
http_request_duration_seconds_bucket["10"][$server_port][$handler][$method][$code][$protocol]++
}
# 15s bucket.
$time_us <= 15000000 {
http_request_duration_seconds_bucket["15"][$server_port][$handler][$method][$code][$protocol]++
}
# "inf" bucket, also the total number of requests.
http_request_duration_seconds_bucket["+Inf"][$server_port][$handler][$method][$code][$protocol]++
###
# Sent/Received bytes.
http_response_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $sent_bytes
http_request_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $received_bytes
### Connection status when response is completed:
# X = Connection aborted before the response completed.
# + = Connection may be kept alive after the response is sent.
# - = Connection will be closed after the response is sent.
/ conn=X / {
http_connections_aborted_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++
}
# Will not include all closed connections. :-(
/ conn=- / {
http_connections_closed_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++
}
}
mtail-3.0.0~rc5/examples/dhcpd.mtail 0000664 0000000 0000000 00000011061 13216376353 0017370 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# Define the exported metric names. The `by' keyword indicates the metric has
# dimensions. For example, `request_total' counts the frequency of each
# request's "command". The name `command' will be exported as the label name
# for the metric. The command provided in the code below will be exported as
# the label value.
counter request_total by command
counter config_file_errors
counter peer_disconnects
counter dhcpdiscovers by mac
counter bind_xid_mismatch
counter duplicate_lease
counter bad_udp_checksum
counter unknown_subnet
counter dhcpdiscover_nofree by network
counter unknown_lease by ip
counter update_rejected
counter failover_peer_timeout
counter ip_already_in_use
counter ip_abandoned by reason
counter invalid_state_transition
counter negative_poolreq by pool
counter lease_conflicts
# The `syslog' decorator defines a procedure. When a block of mtail code is
# "decorated", it is called before entering the block. The block is entered
# when the keyword `next' is reached.
def syslog {
/^(?P(?P\w+\s+\d+\s+\d+:\d+:\d+)|(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P[\w\.-]+)\s+(?P[\w\.-]+)(?:\[(?P\d+)\])?:\s+(?P.*)/ {
# If the legacy_date regexp matched, try this format.
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
# If the RFC3339 style matched, parse it this way. The layout must use the
# 24-hour reference hour "15" (the old "03" rejects hours after noon) and a
# colon in the zone offset to match the "[+-]\d{2}:\d{2}" captured above.
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
}
# Call into the decorated block
next
}
}
# Define some pattern constants for reuse in the patterns below.
const IP /\d+(\.\d+){3}/
const MATCH_IP /(?P/ + IP + /)/
const MATCH_NETWORK /(?P\d+(\.\d+){1,3}\/\d+)/
const MATCH_MAC /(?P([\da-f]{2}:){5}[\da-f]{2})/
@syslog {
# Request
/(balanced|balancing|BOOTREPLY|BOOTREQUEST|DHCPACK|DHCPDECLINE|DHCPDISCOVER|DHCPINFORM|DHCPNAK|DHCPOFFER|DHCPRELEASE|DHCPREQUEST)/ {
# The lowercased name of the command matched in the regex is used to
# count the frequency of each command. An external collector can use
# this to compute the rate of each command independently.
request_total[tolower($1)]++
# DHCP Discover
/DHCPDISCOVER from / + MATCH_MAC {
# Counts the discovery requests per mac address, which can help
# identify bad clients on the network.
dhcpdiscovers[$mac]++
/network / + MATCH_NETWORK + /: no free leases/ {
# If the range is full, your clients may be having a bad time.
dhcpdiscover_nofree[$network]++
}
}
}
# Config file errors
/Configuration file errors encountered -- exiting/ {
# Counting config parse errors can he useful for detecting bad config
# pushes that made it to production.
config_file_errors++
}
# Peer disconnects
/peer ([^:]+): disconnected/ {
peer_disconnects++
}
# XID mismatches
/bind update on / + IP + / got ack from (?P\w+): xid mismatch./ {
bind_xid_mismatch++
}
# Duplicate lease
/uid lease / + MATCH_IP + / for client / + MATCH_MAC + / is duplicate on / + MATCH_NETWORK {
duplicate_lease++
}
# Bad UDP Checksum
/(?P\d+) bad udp checksums in \d+ packets/ {
bad_udp_checksum += $count
}
# Unknown subnet
/DHCPDISCOVER from / + MATCH_MAC + / via / + IP + /: unknown network segment/ {
unknown_subnet++
}
# Unknown lease
/DHCPREQUEST for / + IP + /\(/ + IP + /\) from / + MATCH_MAC + / via / + IP + /: unknown lease / + MATCH_IP {
unknown_lease[$ip]++
}
# Update rejected
/bind update on \S+ from \S+ rejected: incoming update is less critical than the outgoing update/ {
update_rejected++
}
/timeout waiting for failover peer \S+/ {
failover_peer_timeout++
}
/ICMP Echo reply while lease / + IP + /valid/ {
ip_already_in_use++
}
/unexpected ICMP Echo reply from / + IP {
ip_already_in_use++
}
/Abandoning IP address / + IP + /: (?P.*)/ {
ip_abandoned[$reason]++
}
/bind update on \S+ from \S+ rejected: / + IP + /: invalid state transition/ {
invalid_state_transition++
}
/peer (?P[^:]+): Got POOLREQ, answering negatively!/ {
negative_poolreq[$pool]++
}
/Lease conflict at/ {
lease_conflicts++
}
}
mtail-3.0.0~rc5/examples/else.mtail 0000664 0000000 0000000 00000000307 13216376353 0017237 0 ustar 00root root 0000000 0000000 counter yes
counter maybe
counter no
# To make ex_test.go happy
strptime("2016-04-25T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/1/ {
/^1$/ {
yes++
} else {
maybe++
}
} else {
no++
}
mtail-3.0.0~rc5/examples/filename.mtail 0000664 0000000 0000000 00000000252 13216376353 0020066 0 ustar 00root root 0000000 0000000 counter filename_lines by filename
# To make ex_test.go happy
strptime("2017-07-20T22:50:42Z", "2006-01-02T15:04:05Z07:00")
// {
filename_lines[getfilename()] ++
}
mtail-3.0.0~rc5/examples/linecount.mtail 0000664 0000000 0000000 00000000221 13216376353 0020302 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter line_count
/$/ {
line_count++
}
mtail-3.0.0~rc5/examples/logical.mtail 0000664 0000000 0000000 00000000340 13216376353 0017716 0 ustar 00root root 0000000 0000000 counter foo
counter bar
# To make ex_test.go happy
strptime("2017-10-03T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/(?P.*)/ {
$var == "foo" || $var == "bar" {
foo++
}
$var == "bar" && 1 == 1 {
bar++
}
}
mtail-3.0.0~rc5/examples/match-expression.mtail 0000664 0000000 0000000 00000000330 13216376353 0021574 0 ustar 00root root 0000000 0000000 counter someas
counter notas
counter total
# To make ex_test.go happy
strptime("2017-12-07T16:07:14Z", "2006-01-02T15:04:05Z07:00")
/(.*)/ {
$1 =~ /a/ {
someas++
}
$1 !~ /a/ {
notas++
}
total++
}
mtail-3.0.0~rc5/examples/ntpd.mtail 0000664 0000000 0000000 00000003023 13216376353 0017252 0 ustar 00root root 0000000 0000000 # Syslog decorator
def syslog {
/^(?P(?P\w+\s+\d+\s+\d+:\d+:\d+)|(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P[\w\.-]+)\s+(?P[\w\.-]+)(?:\[(?P\d+)\])?:\s+(?P.*)/ {
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
# Layout uses 24-hour "15" and a colon zone offset to match the RFC3339
# timestamp captured above (the old "03...-0700" failed on PM hours and
# on the colon in the offset).
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
}
next
}
}
@syslog {
counter int_syscalls
/select\(.*\) error: Interrupted system call/ {
int_syscalls++
}
counter recvbuf_overflows
gauge last_recvbuf
/too many recvbufs allocated \((\d+)\)/ {
recvbuf_overflows++
last_recvbuf = $1
}
counter exits
/ntpd exiting on signal 15/ {
exits++
}
counter starts
/x?ntpd .* \w+\s+\w+\s+\d+\s+\d+:\d+:\d+\s+\w+\s+\d+\s+\(\d\)/ {
starts++
}
gauge sync_status
/kernel time sync (status (change)?|enabled|disabled) (?P\d+)/ {
sync_status = $status
}
# PLL status change.
#
# Described here: http://obswww.unige.ch/~bartho/xntp_faq/faq3Care.htm#araee
counter pll_changes
gauge pll_status
/kernel pll status change (?P\d+)/ {
pll_changes++
pll_status = $status
}
counter peer_syncs
/synchronized to (\d+\.\d+\.\d+\.\d+|LOCAL\(\d\)), stratum(=| )(\d+)/ {
peer_syncs++
}
counter driftfile_errors
/can't open .*drift.*: No such file or directory/ {
driftfile_errors++
}
counter sync_lost_total
/synchronisation lost/ {
sync_lost_total++
}
} # end syslog
mtail-3.0.0~rc5/examples/ntpd_peerstats.mtail 0000664 0000000 0000000 00000002015 13216376353 0021344 0 ustar 00root root 0000000 0000000 # Peerstats log handling
gauge peer_status by peer
gauge peer_select by peer
gauge peer_count by peer
gauge peer_code by peer
gauge peer_offset by peer
gauge peer_delay by peer
gauge peer_dispersion by peer
counter num_peerstats by peer
# TODO(jaq) seconds is int, not float
/^(?P\d+) (?P\d+)\.\d+ (?P\d+\.\d+\.\d+\.\d+) (?P[0-9a-f]+) (?P-?\d+\.\d+) (?P\d+\.\d+) (?P\d+\.\d+)/ {
# Unix epoch in MJD is 40587.
settime(($days - 40587) * 86400 + $seconds)
peer_offset[$peer] = $offset
peer_delay[$peer] = $delay
peer_dispersion[$peer] = $dispersion
# http://www.cis.udel.edu/~mills/ntp/html/decode.html#peer
# bits 0-4
peer_status[$peer] = (strtol($status, 16) >> (16 - 5)) & ((2 ** 5) - 1)
# bits 5-7
peer_select[$peer] = (strtol($status, 16) >> (16 - 8)) & ((2 ** 3) - 1)
# bits 8-11
peer_count[$peer] = (strtol($status, 16) >> (16 - 12)) & ((2 ** 4) - 1)
# bits 12-15
peer_code[$peer] = strtol($status, 16) & ((2 ** 4) - 1)
num_peerstats[$peer]++
}
mtail-3.0.0~rc5/examples/otherwise.mtail 0000664 0000000 0000000 00000000323 13216376353 0020316 0 ustar 00root root 0000000 0000000 counter yes
counter maybe
counter no
# To make ex_test.go happy
strptime("2016-04-25T20:14:42Z", "2006-01-02T15:04:05Z07:00")
/1/ {
/^1$/ {
yes++
}
otherwise {
maybe++
}
}
otherwise {
no++
}
mtail-3.0.0~rc5/examples/postfix.mtail 0000664 0000000 0000000 00000047215 13216376353 0020014 0 ustar 00root root 0000000 0000000 # vim:ts=2:sw=2:et:ai:sts=2:cinoptions=(0
# Copyright 2017 MartĂn Ferrari . All Rights Reserved.
# This file is available under the Apache license.
# Syslog parser for Postfix, based on the parsing rules from:
# https://github.com/kumina/postfix_exporter
# Copyright 2017 Kumina, https://kumina.nl/
# Available under the Apache license.
const DELIVERY_DELAY_LINE /.*, relay=(?P\S+), .*,/ +
/ delays=(?P[0-9\.]+)\/(?P[0-9\.]+)\/(?P[0-9\.]+)\/(?P[0-9\.]+),\s/
const SMTP_TLS_LINE /(\S+) TLS connection established to \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/
const SMTPD_TLS_LINE /(\S+) TLS connection established from \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/
const QMGR_INSERT_LINE /:.*, size=(?P\d+), nrcpt=(?P\d+)/
const QMGR_REMOVE_LINE /: removed$/
/^(?P(?P\w+\s+\d+\s+\d+:\d+:\d+)|(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P[\w\.-]+)\s+postfix\/(?P[\w\.-]+)(?:\[(?P\d+)\])?:\s+(?P.*)/ {
len($legacy_date) > 0 {
strptime($2, "Jan _2 15:04:05")
}
len($rfc3339_date) > 0 {
# Layout uses 24-hour "15" and a colon zone offset to match the RFC3339
# timestamp captured above (the old "03...-0700" failed on PM hours and
# on the colon in the offset).
strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
}
# Total number of messages processed by cleanup.
counter postfix_cleanup_messages_processed_total
# Total number of messages rejected by cleanup.
counter postfix_cleanup_messages_rejected_total
$application == "cleanup" {
/: message-id= {
postfix_cleanup_messages_processed_total++
}
/: reject: / {
postfix_cleanup_messages_rejected_total++
}
}
# LMTP message processing time in seconds.
counter postfix_lmtp_delivery_delay_seconds_bucket by le, stage
counter postfix_lmtp_delivery_delay_seconds_total by stage
counter postfix_lmtp_delivery_delay_seconds_sum by stage
# buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
$application == "lmtp" {
// + DELIVERY_DELAY_LINE {
# 1st field: before_queue_manager
$bqm < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["before_queue_manager"]++
}
$bqm < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["before_queue_manager"]++
}
$bqm < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["before_queue_manager"]++
}
$bqm < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["before_queue_manager"]++
}
$bqm < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["before_queue_manager"]++
}
$bqm < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["before_queue_manager"]++
}
$bqm < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["before_queue_manager"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["before_queue_manager"]++
postfix_lmtp_delivery_delay_seconds_total["before_queue_manager"]++
postfix_lmtp_delivery_delay_seconds_sum["before_queue_manager"] += $bqm
# 2nd field: queue_manager
$qm < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["queue_manager"]++
}
$qm < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["queue_manager"]++
}
$qm < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["queue_manager"]++
}
$qm < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["queue_manager"]++
}
$qm < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["queue_manager"]++
}
$qm < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["queue_manager"]++
}
$qm < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["queue_manager"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["queue_manager"]++
postfix_lmtp_delivery_delay_seconds_total["queue_manager"]++
postfix_lmtp_delivery_delay_seconds_sum["queue_manager"] += $qm
# 3rd field: connection_setup
$cs < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["connection_setup"]++
}
$cs < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["connection_setup"]++
}
$cs < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["connection_setup"]++
}
$cs < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["connection_setup"]++
}
$cs < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["connection_setup"]++
}
$cs < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["connection_setup"]++
}
$cs < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["connection_setup"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["connection_setup"]++
postfix_lmtp_delivery_delay_seconds_total["connection_setup"]++
postfix_lmtp_delivery_delay_seconds_sum["connection_setup"] += $cs
# 4th field: transmission
$tx < 0.001 {
postfix_lmtp_delivery_delay_seconds_bucket["0.001"]["transmission"]++
}
$tx < 0.01 {
postfix_lmtp_delivery_delay_seconds_bucket["0.01"]["transmission"]++
}
$tx < 0.1 {
postfix_lmtp_delivery_delay_seconds_bucket["0.1"]["transmission"]++
}
$tx < 1 {
postfix_lmtp_delivery_delay_seconds_bucket["1"]["transmission"]++
}
$tx < 10 {
postfix_lmtp_delivery_delay_seconds_bucket["10"]["transmission"]++
}
$tx < 100 {
postfix_lmtp_delivery_delay_seconds_bucket["100"]["transmission"]++
}
$tx < 1000 {
postfix_lmtp_delivery_delay_seconds_bucket["1000"]["transmission"]++
}
postfix_lmtp_delivery_delay_seconds_bucket["+Inf"]["transmission"]++
postfix_lmtp_delivery_delay_seconds_total["transmission"]++
postfix_lmtp_delivery_delay_seconds_sum["transmission"] += $tx
}
}
# Pipe message processing time in seconds.
counter postfix_pipe_delivery_delay_seconds_bucket by le, relay, stage
counter postfix_pipe_delivery_delay_seconds_total by relay, stage
counter postfix_pipe_delivery_delay_seconds_sum by relay, stage
# buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
$application == "pipe" {
// + DELIVERY_DELAY_LINE {
# 1st field: before_queue_manager
$bqm < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["before_queue_manager"]++
}
$bqm < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["before_queue_manager"]++
}
$bqm < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["before_queue_manager"]++
}
$bqm < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["before_queue_manager"]++
}
$bqm < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["before_queue_manager"]++
}
$bqm < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["before_queue_manager"]++
}
$bqm < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["before_queue_manager"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["before_queue_manager"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["before_queue_manager"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["before_queue_manager"] += $bqm
# 2nd field: queue_manager
$qm < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["queue_manager"]++
}
$qm < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["queue_manager"]++
}
$qm < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["queue_manager"]++
}
$qm < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["queue_manager"]++
}
$qm < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["queue_manager"]++
}
$qm < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["queue_manager"]++
}
$qm < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["queue_manager"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["queue_manager"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["queue_manager"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["queue_manager"] += $qm
# 3rd field: connection_setup
$cs < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["connection_setup"]++
}
$cs < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["connection_setup"]++
}
$cs < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["connection_setup"]++
}
$cs < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["connection_setup"]++
}
$cs < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["connection_setup"]++
}
$cs < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["connection_setup"]++
}
$cs < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["connection_setup"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["connection_setup"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["connection_setup"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["connection_setup"] += $cs
# 4th field: transmission
$tx < 0.001 {
postfix_pipe_delivery_delay_seconds_bucket["0.001"][$relay]["transmission"]++
}
$tx < 0.01 {
postfix_pipe_delivery_delay_seconds_bucket["0.01"][$relay]["transmission"]++
}
$tx < 0.1 {
postfix_pipe_delivery_delay_seconds_bucket["0.1"][$relay]["transmission"]++
}
$tx < 1 {
postfix_pipe_delivery_delay_seconds_bucket["1"][$relay]["transmission"]++
}
$tx < 10 {
postfix_pipe_delivery_delay_seconds_bucket["10"][$relay]["transmission"]++
}
$tx < 100 {
postfix_pipe_delivery_delay_seconds_bucket["100"][$relay]["transmission"]++
}
$tx < 1000 {
postfix_pipe_delivery_delay_seconds_bucket["1000"][$relay]["transmission"]++
}
postfix_pipe_delivery_delay_seconds_bucket["+Inf"][$relay]["transmission"]++
postfix_pipe_delivery_delay_seconds_total[$relay]["transmission"]++
postfix_pipe_delivery_delay_seconds_sum[$relay]["transmission"] += $tx
}
}
# Number of recipients per message inserted into the mail queues.
counter postfix_qmgr_messages_inserted_recipients_bucket by le
counter postfix_qmgr_messages_inserted_recipients_total
counter postfix_qmgr_messages_inserted_recipients_sum
# buckets: 1, 2, 4, 8, 16, 32, 64, 128
# Size of messages inserted into the mail queues in bytes.
counter postfix_qmgr_messages_inserted_size_bytes_bucket by le
counter postfix_qmgr_messages_inserted_size_bytes_total
counter postfix_qmgr_messages_inserted_size_bytes_sum
# buckets: 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9
# Total number of messages removed from mail queues.
counter postfix_qmgr_messages_removed_total
$application == "qmgr" {
// + QMGR_INSERT_LINE {
$nrcpt < 1 {
postfix_qmgr_messages_inserted_recipients_bucket["1"]++
}
$nrcpt < 2 {
postfix_qmgr_messages_inserted_recipients_bucket["2"]++
}
$nrcpt < 4 {
postfix_qmgr_messages_inserted_recipients_bucket["4"]++
}
$nrcpt < 8 {
postfix_qmgr_messages_inserted_recipients_bucket["8"]++
}
$nrcpt < 16 {
postfix_qmgr_messages_inserted_recipients_bucket["16"]++
}
$nrcpt < 32 {
postfix_qmgr_messages_inserted_recipients_bucket["32"]++
}
$nrcpt < 64 {
postfix_qmgr_messages_inserted_recipients_bucket["64"]++
}
$nrcpt < 128 {
postfix_qmgr_messages_inserted_recipients_bucket["128"]++
}
postfix_qmgr_messages_inserted_recipients_bucket["+Inf"]++
postfix_qmgr_messages_inserted_recipients_total++
postfix_qmgr_messages_inserted_recipients_sum += $nrcpt
$size < 1000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["1000"]++
}
$size < 10000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["10000"]++
}
$size < 100000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["100000"]++
}
$size < 1000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["1000000"]++
}
$size < 10000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["10000000"]++
}
$size < 100000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["100000000"]++
}
$size < 1000000000 {
postfix_qmgr_messages_inserted_size_bytes_bucket["1000000000"]++
}
postfix_qmgr_messages_inserted_size_bytes_bucket["+Inf"]++
postfix_qmgr_messages_inserted_size_bytes_total++
postfix_qmgr_messages_inserted_size_bytes_sum += $size
}
// + QMGR_REMOVE_LINE {
postfix_qmgr_messages_removed_total++
}
}
# SMTP message processing time in seconds.
counter postfix_smtp_delivery_delay_seconds_bucket by le, stage
counter postfix_smtp_delivery_delay_seconds_total by stage
counter postfix_smtp_delivery_delay_seconds_sum by stage
# buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
# Total number of outgoing TLS connections.
counter postfix_smtp_tls_connections_total by trust, protocol, cipher, secret_bits, algorithm_bits
$application == "smtp" {
// + DELIVERY_DELAY_LINE {
# 1st field: before_queue_manager
$bqm < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["before_queue_manager"]++
}
$bqm < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["before_queue_manager"]++
}
$bqm < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["before_queue_manager"]++
}
$bqm < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["before_queue_manager"]++
}
$bqm < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["before_queue_manager"]++
}
$bqm < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["before_queue_manager"]++
}
$bqm < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["before_queue_manager"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["before_queue_manager"]++
postfix_smtp_delivery_delay_seconds_total["before_queue_manager"]++
postfix_smtp_delivery_delay_seconds_sum["before_queue_manager"] += $bqm
# 2nd field: queue_manager
$qm < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["queue_manager"]++
}
$qm < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["queue_manager"]++
}
$qm < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["queue_manager"]++
}
$qm < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["queue_manager"]++
}
$qm < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["queue_manager"]++
}
$qm < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["queue_manager"]++
}
$qm < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["queue_manager"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["queue_manager"]++
postfix_smtp_delivery_delay_seconds_total["queue_manager"]++
postfix_smtp_delivery_delay_seconds_sum["queue_manager"] += $qm
# 3rd field: connection_setup
$cs < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["connection_setup"]++
}
$cs < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["connection_setup"]++
}
$cs < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["connection_setup"]++
}
$cs < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["connection_setup"]++
}
$cs < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["connection_setup"]++
}
$cs < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["connection_setup"]++
}
$cs < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["connection_setup"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["connection_setup"]++
postfix_smtp_delivery_delay_seconds_total["connection_setup"]++
postfix_smtp_delivery_delay_seconds_sum["connection_setup"] += $cs
# 4th field: transmission
$tx < 0.001 {
postfix_smtp_delivery_delay_seconds_bucket["0.001"]["transmission"]++
}
$tx < 0.01 {
postfix_smtp_delivery_delay_seconds_bucket["0.01"]["transmission"]++
}
$tx < 0.1 {
postfix_smtp_delivery_delay_seconds_bucket["0.1"]["transmission"]++
}
$tx < 1 {
postfix_smtp_delivery_delay_seconds_bucket["1"]["transmission"]++
}
$tx < 10 {
postfix_smtp_delivery_delay_seconds_bucket["10"]["transmission"]++
}
$tx < 100 {
postfix_smtp_delivery_delay_seconds_bucket["100"]["transmission"]++
}
$tx < 1000 {
postfix_smtp_delivery_delay_seconds_bucket["1000"]["transmission"]++
}
postfix_smtp_delivery_delay_seconds_bucket["+Inf"]["transmission"]++
postfix_smtp_delivery_delay_seconds_total["transmission"]++
postfix_smtp_delivery_delay_seconds_sum["transmission"] += $tx
}
// + SMTP_TLS_LINE {
postfix_smtp_tls_connections_total[$1][$2][$3][$4][$5]++
}
}
# Total number of incoming connections.
counter postfix_smtpd_connects_total
# Total number of incoming disconnections.
counter postfix_smtpd_disconnects_total
# Total number of connections for which forward-confirmed DNS cannot be resolved.
counter postfix_smtpd_forward_confirmed_reverse_dns_errors_total
# Total number of connections lost.
counter postfix_smtpd_connections_lost_total by after_stage
# Total number of messages processed.
counter postfix_smtpd_messages_processed_total by sasl_username
# Total number of NOQUEUE rejects.
counter postfix_smtpd_messages_rejected_total by code
# Total number of SASL authentication failures.
counter postfix_smtpd_sasl_authentication_failures_total
# Total number of incoming TLS connections.
counter postfix_smtpd_tls_connections_total by trust, protocol, cipher, secret_bits, algorithm_bits
$application == "smtpd" {
/ connect from / {
postfix_smtpd_connects_total++
}
/ disconnect from / {
postfix_smtpd_disconnects_total++
}
/ warning: hostname \S+ does not resolve to address / {
postfix_smtpd_forward_confirmed_reverse_dns_errors_total++
}
/ lost connection after (\w+) from / {
postfix_smtpd_connections_lost_total[$1]++
}
/: client=/ {
/, sasl_username=(\S+)/ {
postfix_smtpd_messages_processed_total[$1]++
} else {
postfix_smtpd_messages_processed_total[""]++
}
}
/NOQUEUE: reject: RCPT from \S+: (\d+) / {
postfix_smtpd_messages_rejected_total[$1]++
}
/warning: \S+: SASL \S+ authentication failed: / {
postfix_smtpd_sasl_authentication_failures_total++
}
// + SMTPD_TLS_LINE {
postfix_smtpd_tls_connections_total[$1][$2][$3][$4][$5]++
}
}
}
mtail-3.0.0~rc5/examples/rails.mtail 0000664 0000000 0000000 00000005053 13216376353 0017424 0 ustar 00root root 0000000 0000000 # Copyright 2017 Pablo Carranza . All Rights Reserved.
# This file is available under the Apache license.
#
# Rails production log parsing
counter rails_requests_started_total
counter rails_requests_started by verb
counter rails_requests_completed_total
counter rails_requests_completed by status
counter rails_requests_completed_milliseconds_sum by status
counter rails_requests_completed_milliseconds_count by status
counter rails_requests_completed_milliseconds_bucket by le, status
# Restored named capture group (stripped to bare `(?P` in this copy); the
# name is recovered from the $verb reference below.
/^Started (?P<verb>[A-Z]+) .*/ {
  ###
  # Started HTTP requests by verb (GET, POST, etc.)
  #
  rails_requests_started_total++
  rails_requests_started[$verb]++
}
# Restored named capture groups (stripped to bare `(?P` in this copy); names
# recovered from the $status / $request_milliseconds references below.
/^Completed (?P<status>\d{3}) .+ in (?P<request_milliseconds>\d+)ms .*$/ {
  ###
  # Total number of completed requests by status
  #
  rails_requests_completed_total++
  rails_requests_completed[$status]++
  ###
  # Completed requests by status with histogram buckets
  #
  # These statements "fall through", so the histogram is cumulative. The
  # collecting system can compute the percentile bands by taking the ratio of
  # each bucket value over the final bucket.
  rails_requests_completed_milliseconds_sum[$status] += $request_milliseconds
  rails_requests_completed_milliseconds_count[$status]++
  # 5ms bucket
  $request_milliseconds <= 5 {
    rails_requests_completed_milliseconds_bucket["5"][$status]++
  }
  # 10ms bucket
  $request_milliseconds <= 10 {
    rails_requests_completed_milliseconds_bucket["10"][$status]++
  }
  # 50ms bucket
  $request_milliseconds <= 50 {
    rails_requests_completed_milliseconds_bucket["50"][$status]++
  }
  # 100ms bucket
  $request_milliseconds <= 100 {
    rails_requests_completed_milliseconds_bucket["100"][$status]++
  }
  # 250ms bucket
  $request_milliseconds <= 250 {
    rails_requests_completed_milliseconds_bucket["250"][$status]++
  }
  # 500ms bucket
  $request_milliseconds <= 500 {
    rails_requests_completed_milliseconds_bucket["500"][$status]++
  }
  # 1s bucket
  $request_milliseconds <= 1000 {
    rails_requests_completed_milliseconds_bucket["1000"][$status]++
  }
  # 2.5s bucket
  $request_milliseconds <= 2500 {
    rails_requests_completed_milliseconds_bucket["2500"][$status]++
  }
  # 5s bucket
  $request_milliseconds <= 5000 {
    rails_requests_completed_milliseconds_bucket["5000"][$status]++
  }
  # 15s bucket
  $request_milliseconds <= 15000 {
    rails_requests_completed_milliseconds_bucket["15000"][$status]++
  }
  # "inf" bucket, also the total number of requests
  rails_requests_completed_milliseconds_bucket["+Inf"][$status]++
}
mtail-3.0.0~rc5/examples/rsyncd.mtail 0000664 0000000 0000000 00000003225 13216376353 0017613 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter bytes_total by operation
# total connections, and total connection time can be used to compute the
# average connection time.
counter connections_total
counter connection_time_total as "connection-time_total"
# See which modules are popular.
counter transfers_total by operation, module
# Use this gauge to measure duration between start and end time per connection.
# It is never used externally, so mark as `hidden'.
hidden gauge connection_time by pid
# Restored named capture groups (stripped to bare `(?P` in this copy); names
# recovered from the $date/$pid/$operation/$module/$sent/$received references.
# The final transfer-log group name is unreferenced — `length` matches the
# documented log format field %l (file length); confirm against upstream.
/^(?P<date>\d+\/\d+\/\d+ \d+:\d+:\d+) \[(?P<pid>\d+)\] / {
  strptime($date, "2006/01/02 15:04:05")
  # Transfer log
  # %o %h [%a] %m (%u) %f %l
  /(?P<operation>\S+) (\S+) \[\S+\] (?P<module>\S+) \(\S*\) \S+ (?P<length>\d+)/ {
    transfers_total[$operation, $module]++
  }
  # Connection starts
  /connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ {
    connections_total++
    # Record the start time of the connection, using the log timestamp.
    connection_time[$pid] = timestamp()
  }
  # Connection summary when session closed
  /sent (?P<sent>\d+) bytes received (?P<received>\d+) bytes total size \d+/ {
    # Sum total bytes across all sessions for this process
    bytes_total["sent"] += $sent
    bytes_total["received"] += $received
    # Count total time spent with connections open, according to the log timestamp.
    connection_time_total += timestamp() - connection_time[$pid]
    # Delete the datum referenced in this dimensional metric. We assume that
    # this will never happen again, and hint to the VM that we can garbage
    # collect the memory used.
    del connection_time[$pid]
  }
}
mtail-3.0.0~rc5/examples/sftp.mtail 0000664 0000000 0000000 00000002267 13216376353 0017272 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter login_count by username
counter logout_count by username
counter bytes_read
counter files_read
counter bytes_written
counter files_written
counter user_bytes_read by username
counter user_files_read by username
counter user_bytes_written by username
counter user_files_written by username
# Restored named capture groups (stripped to bare `(?P` in this copy); names
# recovered from the $date/$username/$read/$written references below.
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ {
  strptime($date, "Jan _2 15:04:05")
  /session opened for local user (?P<username>\w+)/ {
    login_count[$username]++
  }
  /session closed for local user (?P<username>\w+)/ {
    logout_count[$username]++
  }
  /close "[^"]+" bytes read (?P<read>\d+) written (?P<written>\d+)/ {
    $read != 0 {
      bytes_read += $read
      files_read++
    }
    $written != 0 {
      bytes_written += $written
      files_written++
    }
    # Per-user accounting only for files under /home/<user>/.
    /close "\/home\/(?P<username>[^\/]+)\/[^"]+"/ {
      $read != 0 {
        user_bytes_read[$username] += $read
        user_files_read[$username]++
      }
      $written != 0 {
        user_bytes_written[$username] += $written
        user_files_written[$username]++
      }
    }
  }
}
mtail-3.0.0~rc5/examples/strcat.mtail 0000664 0000000 0000000 00000000212 13216376353 0017602 0 ustar 00root root 0000000 0000000 counter f by s
# To make ex_test.go happy
strptime("2017-10-03T20:14:42Z", "2006-01-02T15:04:05Z07:00")
# Demonstrates string concatenation: the two captures are joined with `+`
# and used as a single label value for f.
/(.*), (.*)/ {
  f[$1 + $2]++
}
mtail-3.0.0~rc5/examples/timer.mtail 0000664 0000000 0000000 00000000166 13216376353 0017432 0 ustar 00root root 0000000 0000000 timer request_time_ms by vhost
# Restored named capture groups (stripped to bare `(?P` in this copy); names
# recovered from the $vhost/$latency_s references below.
# NOTE(review): dividing by 1000 converts the captured value to milliseconds
# only if the input is in microseconds, despite the `_s` name — confirm the
# input unit against the log source.
/(?P<vhost>\S+) (?P<latency_s>\d+)/ {
  request_time_ms[$vhost] = $latency_s / 1000
}
mtail-3.0.0~rc5/examples/typed-comparison.mtail 0000664 0000000 0000000 00000000406 13216376353 0021604 0 ustar 00root root 0000000 0000000 counter t by le
counter t_sum
# To make ex_test.go happy
strptime("2017-11-02T16:07:14Z", "2006-01-02T15:04:05Z07:00")
# Restored named capture group (stripped to bare `(?P` in this copy); the
# name is recovered from the $v references below.  Cumulative buckets: both
# conditions fall through, and every match hits the "inf" bucket.
/^(?P<v>\d+(\.\d+)?)/ {
  $v < 0.5 {
    t["0.5"]++
  }
  $v < 1 {
    t["1"]++
  }
  t["inf"]++
  t_sum += $v
}
mtail-3.0.0~rc5/examples/types.mtail 0000664 0000000 0000000 00000000734 13216376353 0017457 0 ustar 00root root 0000000 0000000 gauge should_be_int
gauge should_be_float
counter neg
gauge should_be_float_map by label
gauge should_be_int_map by label
counter i
# To make ex_test.go happy
strptime("2017-07-15T18:03:14Z", "2006-01-02T15:04:05Z07:00")
/^(\d+)$/ {
should_be_int = $1
should_be_int_map[$1] = $1
}
/^(\d+\.\d+)$/ {
should_be_float = $1
should_be_float_map[$1] = $1
}
# Restored named capture group (stripped to bare `(?P` in this copy); the
# name is recovered from the $bar reference below.
/(?P<bar>[+-]?[\d.]+)/ {
  $bar < -1 {
    neg++
  }
}
/^(\d+)$/ {
# Sneaky float promotion
i += 1.0 * $1
}
mtail-3.0.0~rc5/exporter/ 0000775 0000000 0000000 00000000000 13216376353 0015311 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc5/exporter/collectd.go 0000664 0000000 0000000 00000002321 13216376353 0017427 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"strings"
"github.com/google/mtail/metrics"
)
const (
collectdFormat = "PUTVAL \"%s/%smtail-%s/%s-%s\" interval=%d %s:%s\n"
)
var (
collectdSocketPath = flag.String("collectd_socketpath", "",
"Path to collectd unixsock to write metrics to.")
collectdPrefix = flag.String("collectd_prefix", "",
"Prefix to use for collectd metrics.")
collectdExportTotal = expvar.NewInt("collectd_export_total")
collectdExportSuccess = expvar.NewInt("collectd_export_success")
)
// metricToCollectd encodes one labelled datum of a metric in the collectd
// plain-text protocol PUTVAL format (see collectdFormat above).  The caller
// must already hold the metric's lock before entering this function.
// Arguments fill the format string in order: hostname, optional
// -collectd_prefix flag value, source program name, collectd type, the
// name+labels identifier (with "-" as key and pair separator, "_" replacing
// collisions), the push interval, and the datum's time and value.
func metricToCollectd(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {
	return fmt.Sprintf(collectdFormat,
		hostname,
		*collectdPrefix,
		m.Program,
		kindToCollectdType(m.Kind),
		formatLabels(m.Name, l.Labels, "-", "-", "_"),
		*pushInterval,
		l.Datum.TimeString(),
		l.Datum.ValueString())
}
// kindToCollectdType maps an mtail metric kind to the collectd data-source
// type name.  Timers have no collectd equivalent and are exported as gauges;
// every other kind exports under its own lowercased kind name.
func kindToCollectdType(kind metrics.Kind) string {
	switch kind {
	case metrics.Timer:
		return "gauge"
	default:
		return strings.ToLower(kind.String())
	}
}
mtail-3.0.0~rc5/exporter/export.go 0000664 0000000 0000000 00000011635 13216376353 0017167 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Package exporter provides the interface for getting metrics out of mtail,
// into your monitoring system of choice.
package exporter
import (
"expvar"
"flag"
"fmt"
"io"
"net"
"os"
"strings"
"time"
"github.com/golang/glog"
"github.com/google/mtail/metrics"
"github.com/pkg/errors"
)
// Commandline Flags.
var (
pushInterval = flag.Int("metric_push_interval_seconds", 60,
"Interval between metric pushes, in seconds.")
writeDeadline = flag.Duration("metric_push_write_deadline", 10*time.Second, "Time to wait for a push to succeed before exiting with an error.")
)
// Exporter manages the export of metrics to passive and active collectors.
type Exporter struct {
store *metrics.Store
o Options
pushTargets []pushOptions
}
// Options contains the required and optional parameters for constructing an
// Exporter.
type Options struct {
Store *metrics.Store
Hostname string // Not required, uses os.Hostname if zero.
OmitProgLabel bool // If true, don't emit the prog label that identifies the source program in variable exports.
}
// New creates a new Exporter.
// New creates an Exporter from Options.  A Store is required; if Hostname is
// empty it is filled from os.Hostname.  Each push backend whose flag was set
// (collectd unix socket, graphite tcp, statsd udp) is registered as a push
// target.  Note the inner `o := pushOptions{...}` deliberately shadows the
// Options parameter within each if-block.
func New(o Options) (*Exporter, error) {
	if o.Store == nil {
		return nil, errors.New("exporter needs a Store")
	}
	if o.Hostname == "" {
		var err error
		o.Hostname, err = os.Hostname()
		if err != nil {
			return nil, errors.Wrap(err, "getting hostname")
		}
	}
	e := &Exporter{store: o.Store, o: o}
	if *collectdSocketPath != "" {
		o := pushOptions{"unix", *collectdSocketPath, metricToCollectd, collectdExportTotal, collectdExportSuccess}
		e.RegisterPushExport(o)
	}
	if *graphiteHostPort != "" {
		o := pushOptions{"tcp", *graphiteHostPort, metricToGraphite, graphiteExportTotal, graphiteExportSuccess}
		e.RegisterPushExport(o)
	}
	if *statsdHostPort != "" {
		o := pushOptions{"udp", *statsdHostPort, metricToStatsd, statsdExportTotal, statsdExportSuccess}
		e.RegisterPushExport(o)
	}
	return e, nil
}
// formatLabels converts a metric name and key-value map of labels to a single
// string for exporting to the correct output format for each export target.
// ksep and sep mark what to use for key/val separator, and between label separators respoectively.
// If not empty, rep is used to replace cases of ksep and sep in the original strings.
func formatLabels(name string, m map[string]string, ksep, sep, rep string) string {
r := name
if len(m) > 0 {
var s []string
for k, v := range m {
k1 := strings.Replace(strings.Replace(k, ksep, rep, -1), sep, rep, -1)
v1 := strings.Replace(strings.Replace(v, ksep, rep, -1), sep, rep, -1)
s = append(s, fmt.Sprintf("%s%s%s", k1, ksep, v1))
}
return r + sep + strings.Join(s, sep)
}
return r
}
// Format a LabelSet into a string to be written to one of the timeseries
// sockets.
type formatter func(string, *metrics.Metric, *metrics.LabelSet) string
// writeSocketMetrics formats every labelled datum of every metric in the
// store with f and writes it to c, bumping exportTotal per metric and
// exportSuccess per line written.  Returns the first write error.
// Fix: the original early-return on a write error left m's read lock held
// forever; the lock is now released on that path as well.
// NOTE(review): returning mid-stream still abandons the EmitLabelSets
// goroutine blocked on lc — confirm whether EmitLabelSets tolerates an
// undrained channel.
func (e *Exporter) writeSocketMetrics(c io.Writer, f formatter, exportTotal *expvar.Int, exportSuccess *expvar.Int) error {
	e.store.RLock()
	defer e.store.RUnlock()

	for _, ml := range e.store.Metrics {
		for _, m := range ml {
			m.RLock()
			exportTotal.Add(1)
			lc := make(chan *metrics.LabelSet)
			go m.EmitLabelSets(lc)
			for l := range lc {
				line := f(e.o.Hostname, m, l)
				n, err := fmt.Fprint(c, line)
				glog.V(2).Infof("Sent %d bytes\n", n)
				if err == nil {
					exportSuccess.Add(1)
				} else {
					// Release the metric lock before bailing out.
					m.RUnlock()
					return errors.Errorf("write error: %s\n", err)
				}
			}
			m.RUnlock()
		}
	}
	return nil
}
// PushMetrics sends all metrics to each of the configured push targets, one
// connection per target per call.  Dial, deadline-set, write, and close
// errors are logged and the target is skipped; the method itself never
// fails, so a broken target does not block the others.
func (e *Exporter) PushMetrics() {
	for _, target := range e.pushTargets {
		glog.V(2).Infof("pushing to %s", target.addr)
		conn, err := net.DialTimeout(target.net, target.addr, *writeDeadline)
		if err != nil {
			glog.Infof("pusher dial error: %s", err)
			continue
		}
		// Bound the whole write with the same deadline used for dialing.
		err = conn.SetDeadline(time.Now().Add(*writeDeadline))
		if err != nil {
			glog.Infof("Couldn't set deadline on connection: %s", err)
		}
		err = e.writeSocketMetrics(conn, target.f, target.total, target.success)
		if err != nil {
			glog.Infof("pusher write error: %s", err)
		}
		err = conn.Close()
		if err != nil {
			glog.Infof("connection close failed: %s", err)
		}
	}
}
// StartMetricPush starts a background goroutine that pushes metrics to the
// configured targets every -metric_push_interval_seconds.  It is a no-op
// when no push targets are registered.  The ticker is never stopped, so the
// goroutine runs for the life of the process.
func (e *Exporter) StartMetricPush() {
	if len(e.pushTargets) > 0 {
		glog.Info("Started metric push.")
		ticker := time.NewTicker(time.Duration(*pushInterval) * time.Second)
		go func() {
			for range ticker.C {
				e.PushMetrics()
			}
		}()
	}
}
type pushOptions struct {
net, addr string
f formatter
total, success *expvar.Int
}
// RegisterPushExport adds a push export connection to the Exporter.  Items in
// the list must describe a Dial()able connection and will have all the
// metrics pushed to each pushInterval.  Not safe for concurrent use with an
// already-running push loop.
func (e *Exporter) RegisterPushExport(p pushOptions) {
	e.pushTargets = append(e.pushTargets, p)
}
mtail-3.0.0~rc5/exporter/export_test.go 0000664 0000000 0000000 00000012376 13216376353 0020231 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"reflect"
"sort"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/mtail/metrics"
"github.com/google/mtail/metrics/datum"
)
// FakeSocketWrite formats every labelled datum of m with f under the fixed
// hostname "gunstar", collecting the formatted lines instead of writing them
// to a socket.  The result is sorted so tests are independent of map
// iteration order in EmitLabelSets.
func FakeSocketWrite(f formatter, m *metrics.Metric) []string {
	var ret []string
	lc := make(chan *metrics.LabelSet)
	go m.EmitLabelSets(lc)
	for l := range lc {
		ret = append(ret, f("gunstar", m, l))
	}
	sort.Strings(ret)
	return ret
}
func TestMetricToCollectd(t *testing.T) {
ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
if terr != nil {
t.Errorf("time parse error: %s", terr)
}
ms := metrics.NewStore()
scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
d, _ := scalarMetric.GetDatum()
datum.SetInt(d, 37, ts)
ms.Add(scalarMetric)
r := FakeSocketWrite(metricToCollectd, scalarMetric)
expected := []string{"PUTVAL \"gunstar/mtail-prog/counter-foo\" interval=60 1343124840:37\n"}
diff := cmp.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "label")
d, _ = dimensionedMetric.GetDatum("quux")
datum.SetInt(d, 37, ts)
d, _ = dimensionedMetric.GetDatum("snuh")
datum.SetInt(d, 37, ts)
ms.ClearMetrics()
ms.Add(dimensionedMetric)
r = FakeSocketWrite(metricToCollectd, dimensionedMetric)
expected = []string{
"PUTVAL \"gunstar/mtail-prog/gauge-bar-label-quux\" interval=60 1343124840:37\n",
"PUTVAL \"gunstar/mtail-prog/gauge-bar-label-snuh\" interval=60 1343124840:37\n"}
diff = cmp.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int)
d, _ = timingMetric.GetDatum()
datum.SetInt(d, 123, ts)
ms.Add(timingMetric)
r = FakeSocketWrite(metricToCollectd, timingMetric)
expected = []string{"PUTVAL \"gunstar/mtail-prog/gauge-foo\" interval=60 1343124840:123\n"}
diff = cmp.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
*collectdPrefix = "prefix"
r = FakeSocketWrite(metricToCollectd, timingMetric)
expected = []string{"PUTVAL \"gunstar/prefixmtail-prog/gauge-foo\" interval=60 1343124840:123\n"}
diff = cmp.Diff(expected, r)
if diff != "" {
t.Errorf("prefixed string didn't match:\n%s", diff)
}
}
func TestMetricToGraphite(t *testing.T) {
ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
if terr != nil {
t.Errorf("time parse error: %s", terr)
}
scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
d, _ := scalarMetric.GetDatum()
datum.SetInt(d, 37, ts)
r := FakeSocketWrite(metricToGraphite, scalarMetric)
expected := []string{"prog.foo 37 1343124840\n"}
diff := cmp.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "host")
d, _ = dimensionedMetric.GetDatum("quux.com")
datum.SetInt(d, 37, ts)
d, _ = dimensionedMetric.GetDatum("snuh.teevee")
datum.SetInt(d, 37, ts)
r = FakeSocketWrite(metricToGraphite, dimensionedMetric)
expected = []string{
"prog.bar.host.quux_com 37 1343124840\n",
"prog.bar.host.snuh_teevee 37 1343124840\n"}
diff = cmp.Diff(expected, r)
if diff != "" {
t.Errorf("String didn't match:\n%s", diff)
}
*graphitePrefix = "prefix"
r = FakeSocketWrite(metricToGraphite, dimensionedMetric)
expected = []string{
"prefixprog.bar.host.quux_com 37 1343124840\n",
"prefixprog.bar.host.snuh_teevee 37 1343124840\n"}
diff = cmp.Diff(expected, r)
if diff != "" {
t.Errorf("prefixed string didn't match:\n%s", diff)
}
}
// TestMetricToStatsd checks the statsd text-protocol encoding of counter,
// gauge and timer metrics, with and without a configured prefix.
func TestMetricToStatsd(t *testing.T) {
	ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
	if terr != nil {
		t.Errorf("time parse error: %s", terr)
	}
	// Counters are emitted with the "c" statsd type.
	scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
	d, _ := scalarMetric.GetDatum()
	datum.SetInt(d, 37, ts)
	got := FakeSocketWrite(metricToStatsd, scalarMetric)
	want := []string{"prog.foo:37|c"}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", want, got)
	}
	// Gauges ("g") carry their label values in the metric name.
	dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "l")
	d, _ = dimensionedMetric.GetDatum("quux")
	datum.SetInt(d, 37, ts)
	d, _ = dimensionedMetric.GetDatum("snuh")
	datum.SetInt(d, 42, ts)
	got = FakeSocketWrite(metricToStatsd, dimensionedMetric)
	want = []string{
		"prog.bar.l.quux:37|g",
		"prog.bar.l.snuh:42|g"}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", want, got)
	}
	// Timers are emitted with the "ms" statsd type.
	timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int)
	d, _ = timingMetric.GetDatum()
	datum.SetInt(d, 37, ts)
	got = FakeSocketWrite(metricToStatsd, timingMetric)
	want = []string{"prog.foo:37|ms"}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", want, got)
	}
	// Setting the flag prepends the prefix to every emitted line.
	*statsdPrefix = "prefix"
	got = FakeSocketWrite(metricToStatsd, timingMetric)
	want = []string{"prefixprog.foo:37|ms"}
	if !reflect.DeepEqual(want, got) {
		t.Errorf("prefixed string didn't match:\n\texpected: %v\n\treceived: %v", want, got)
	}
}
mtail-3.0.0~rc5/exporter/graphite.go 0000664 0000000 0000000 00000001656 13216376353 0017453 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"github.com/google/mtail/metrics"
)
var (
graphiteHostPort = flag.String("graphite_host_port", "",
"Host:port to graphite carbon server to write metrics to.")
graphitePrefix = flag.String("graphite_prefix", "",
"Prefix to use for graphite metrics.")
graphiteExportTotal = expvar.NewInt("graphite_export_total")
graphiteExportSuccess = expvar.NewInt("graphite_export_success")
)
// metricToGraphite renders one metric datum as a graphite plaintext-protocol
// line ("<path> <value> <timestamp>\n"), with the -graphite_prefix flag value
// prepended.  The metric lock is held before entering this function.
func metricToGraphite(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {
	// Flatten the name and its label values into a dotted graphite path.
	path := formatLabels(m.Name, l.Labels, ".", ".", "_")
	return fmt.Sprintf("%s%s.%s %v %v\n",
		*graphitePrefix, m.Program, path,
		l.Datum.ValueString(), l.Datum.TimeString())
}
mtail-3.0.0~rc5/exporter/json.go 0000664 0000000 0000000 00000001266 13216376353 0016616 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"encoding/json"
"expvar"
"net/http"
"github.com/golang/glog"
)
var (
exportJSONErrors = expvar.NewInt("exporter_json_errors")
)
// HandleJSON exports the metrics in JSON format via HTTP.
//
// On a marshalling failure it counts the error, logs it, and replies with
// HTTP 500.
func (e *Exporter) HandleJSON(w http.ResponseWriter, r *http.Request) {
	b, err := json.MarshalIndent(e.store, "", " ")
	if err != nil {
		exportJSONErrors.Add(1)
		glog.Info("error marshalling metrics into json:", err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("content-type", "application/json")
	// Fix: the Write error was previously discarded; log it so truncated
	// responses are diagnosable.  The status line has already been sent, so
	// http.Error is not an option at this point.
	if _, err := w.Write(b); err != nil {
		glog.Info("error writing json response:", err.Error())
	}
}
mtail-3.0.0~rc5/exporter/json_test.go 0000664 0000000 0000000 00000004427 13216376353 0017657 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/mtail/metrics"
"github.com/google/mtail/metrics/datum"
)
// handleJSONTests drives TestHandleJSON: each case pairs a set of metrics
// loaded into the store with the exact JSON body the handler must emit.
// NOTE(review): the expected strings assume json.MarshalIndent with a
// one-space indent, matching the call in HandleJSON.
var handleJSONTests = []struct {
	name     string            // subtest name
	metrics  []*metrics.Metric // metrics preloaded into the store
	expected string            // exact HTTP response body
}{
	{"empty",
		[]*metrics.Metric{},
		"[]",
	},
	{"single",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
			},
		},
		`[
 {
  "Name": "foo",
  "Program": "test",
  "Kind": 1,
  "Type": 0,
  "LabelValues": [
   {
    "Value": {
     "Value": 1,
     "Time": 0
    }
   }
  ]
 }
]`,
	},
	{"dimensioned",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				Keys:        []string{"a", "b"},
				LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
			},
		},
		`[
 {
  "Name": "foo",
  "Program": "test",
  "Kind": 1,
  "Type": 0,
  "Keys": [
   "a",
   "b"
  ],
  "LabelValues": [
   {
    "Labels": [
     "1",
     "2"
    ],
    "Value": {
     "Value": 1,
     "Time": 0
    }
   }
  ]
 }
]`,
	},
}
// TestHandleJSON runs each handleJSONTests case against a fresh store and
// exporter, and compares the HTTP response body with the expected JSON.
func TestHandleJSON(t *testing.T) {
	for _, tc := range handleJSONTests {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			ms := metrics.NewStore()
			for _, m := range tc.metrics {
				ms.Add(m)
			}
			e, err := New(Options{ms, "gunstar", false})
			if err != nil {
				t.Fatalf("couldn't make exporter: %s", err)
			}
			w := httptest.NewRecorder()
			e.HandleJSON(w, &http.Request{})
			if w.Code != 200 {
				t.Errorf("response code not 200: %d", w.Code)
			}
			body, err := ioutil.ReadAll(w.Body)
			if err != nil {
				t.Errorf("failed to read response: %s", err)
			}
			if diff := cmp.Diff(tc.expected, string(body), cmpopts.IgnoreUnexported(sync.RWMutex{})); diff != "" {
				t.Error(diff)
			}
		})
	}
}
mtail-3.0.0~rc5/exporter/prometheus.go 0000664 0000000 0000000 00000003470 13216376353 0020037 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"fmt"
"net/http"
"sort"
"strings"
"github.com/google/mtail/metrics"
)
var (
metricExportTotal = expvar.NewInt("metric_export_total")
)
const (
prometheusFormat = "%s{%s} %s\n"
)
// noHyphens rewrites every hyphen in s to an underscore, since Prometheus
// metric names may not contain hyphens.
func noHyphens(s string) string {
	return strings.Map(func(r rune) rune {
		if r == '-' {
			return '_'
		}
		return r
	}, s)
}
// HandlePrometheusMetrics exports the metrics in a format readable by
// Prometheus via HTTP.
//
// The store read-lock is held for the whole response, and each metric's
// read-lock while that metric is rendered.
func (e *Exporter) HandlePrometheusMetrics(w http.ResponseWriter, r *http.Request) {
	e.store.RLock()
	defer e.store.RUnlock()
	w.Header().Add("Content-type", "text/plain; version=0.0.4")
	for _, ml := range e.store.Metrics {
		// Emit one "# TYPE" line per metric name, before the first sample
		// of that name, even when several programs export the same name.
		emittype := true
		for _, m := range ml {
			m.RLock()
			metricExportTotal.Add(1)
			if emittype {
				fmt.Fprintf(w,
					"# TYPE %s %s\n",
					noHyphens(m.Name),
					kindToPrometheusType(m.Kind))
				emittype = false
			}
			// EmitLabelSets streams every label/value combination of this
			// metric over the channel; draining it fully lets the goroutine
			// finish.
			lc := make(chan *metrics.LabelSet)
			go m.EmitLabelSets(lc)
			for l := range lc {
				// Comment each sample with its source location, when known.
				if m.Source != "" {
					fmt.Fprintf(w, "# %s defined at %s\n", noHyphens(m.Name), m.Source)
				}
				line := metricToPrometheus(e.o, m, l)
				fmt.Fprint(w, line)
			}
			m.RUnlock()
		}
	}
}
// metricToPrometheus renders one sample of a metric in the Prometheus text
// exposition format: name{label="value",...} value\n.  Labels are sorted for
// deterministic output, and a prog label is appended unless suppressed by
// options.OmitProgLabel.
func metricToPrometheus(options Options, m *metrics.Metric, l *metrics.LabelSet) string {
	var s []string
	for k, v := range l.Labels {
		// Prometheus quotes the value of each label=value pair.
		s = append(s, fmt.Sprintf("%s=%q", k, v))
	}
	sort.Strings(s)
	if !options.OmitProgLabel {
		// Fix: use %q here as well, so a program name containing quotes or
		// backslashes is escaped the same way as every other label value.
		s = append(s, fmt.Sprintf("prog=%q", m.Program))
	}
	return fmt.Sprintf(prometheusFormat,
		noHyphens(m.Name),
		strings.Join(s, ","),
		l.Datum.ValueString())
}
// kindToPrometheusType maps an mtail metric kind onto the name of a
// Prometheus metric type.  Timers have no direct Prometheus equivalent and
// are exported as gauges.
func kindToPrometheusType(kind metrics.Kind) string {
	switch kind {
	case metrics.Timer:
		return "gauge"
	default:
		return strings.ToLower(kind.String())
	}
}
mtail-3.0.0~rc5/exporter/prometheus_test.go 0000664 0000000 0000000 00000007124 13216376353 0021076 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/mtail/metrics"
"github.com/google/mtail/metrics/datum"
)
// handlePrometheusTests drives TestHandlePrometheus: each case pairs a set
// of metrics loaded into the store with the exact Prometheus text-exposition
// body the handler must emit.
var handlePrometheusTests = []struct {
	name     string            // subtest name
	metrics  []*metrics.Metric // metrics preloaded into the store
	expected string            // exact HTTP response body
}{
	{"empty",
		[]*metrics.Metric{},
		"",
	},
	{"single",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}},
		},
		`# TYPE foo counter
foo{} 1
`,
	},
	{"dimensioned",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				Keys:        []string{"a", "b"},
				LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
			},
		},
		`# TYPE foo counter
foo{a="1",b="2"} 1
`,
	},
	{"gauge",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Gauge,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}},
		},
		`# TYPE foo gauge
foo{} 1
`,
	},
	// Timers have no Prometheus equivalent and are exported as gauges.
	{"timer",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Timer,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}},
		},
		`# TYPE foo gauge
foo{} 1
`,
	},
	// Double quotes inside label values must be backslash-escaped.
	{"quotes",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				Keys:        []string{"a"},
				LabelValues: []*metrics.LabelValue{{Labels: []string{"str\"bang\"blah"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
			},
		},
		`# TYPE foo counter
foo{a="str\"bang\"blah"} 1
`,
	},
	// A metric with a Source gets a "defined at" comment line.
	{"help",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
				Source:      "location.mtail:37",
			},
		},
		`# TYPE foo counter
# foo defined at location.mtail:37
foo{} 1
`,
	},
	// Two metrics with the same name share one TYPE line but each carry
	// their own "defined at" comment.
	{"2 help",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
				Source:      "location.mtail:37",
			},
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
				Source:      "different.mtail:37",
			},
		},
		`# TYPE foo counter
# foo defined at location.mtail:37
foo{} 1
# foo defined at different.mtail:37
foo{} 1
`,
	},
}
// TestHandlePrometheus runs each handlePrometheusTests case against a fresh
// store and exporter, and compares the HTTP response body with the expected
// Prometheus exposition text.
func TestHandlePrometheus(t *testing.T) {
	for _, tc := range handlePrometheusTests {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			ms := metrics.NewStore()
			for _, metric := range tc.metrics {
				ms.Add(metric)
			}
			// OmitProgLabel is true: no prog label in the expected output.
			o := Options{ms, "gunstar", true}
			e, err := New(o)
			if err != nil {
				t.Fatalf("couldn't make exporter: %s", err)
			}
			response := httptest.NewRecorder()
			e.HandlePrometheusMetrics(response, &http.Request{})
			if response.Code != 200 {
				t.Errorf("response code not 200: %d", response.Code)
			}
			b, err := ioutil.ReadAll(response.Body)
			if err != nil {
				// Fix: drop the stray leading space in the error message, for
				// consistency with the sibling JSON and varz tests.
				t.Errorf("failed to read response: %s", err)
			}
			diff := cmp.Diff(tc.expected, string(b))
			if diff != "" {
				t.Error(diff)
			}
		})
	}
}
mtail-3.0.0~rc5/exporter/statsd.go 0000664 0000000 0000000 00000002047 13216376353 0017145 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"github.com/google/mtail/metrics"
)
var (
statsdHostPort = flag.String("statsd_hostport", "",
"Host:port to statsd server to write metrics to.")
statsdPrefix = flag.String("statsd_prefix", "",
"Prefix to use for statsd metrics.")
statsdExportTotal = expvar.NewInt("statsd_export_total")
statsdExportSuccess = expvar.NewInt("statsd_export_success")
)
// metricToStatsd renders one metric datum as a statsd text-protocol line
// ("<name>:<value>|<type>"), with the -statsd_prefix flag value prepended.
// The metric lock is held before entering this function.
func metricToStatsd(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {
	// Choose the statsd type tag for this metric kind.
	var suffix string
	switch m.Kind {
	case metrics.Counter:
		suffix = "c" // StatsD Counter
	case metrics.Gauge:
		suffix = "g" // StatsD Gauge
	case metrics.Timer:
		suffix = "ms" // StatsD Timer
	}
	name := formatLabels(m.Name, l.Labels, ".", ".", "_")
	return fmt.Sprintf("%s%s.%s:%s|%s",
		*statsdPrefix, m.Program, name,
		l.Datum.ValueString(), suffix)
}
mtail-3.0.0~rc5/exporter/varz.go 0000664 0000000 0000000 00000002317 13216376353 0016625 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"fmt"
"net/http"
"sort"
"strings"
"github.com/google/mtail/metrics"
)
var (
exportVarzTotal = expvar.NewInt("exporter_varz_total")
)
const varzFormat = "%s{%s} %s\n"
// HandleVarz exports the metrics in Varz format via HTTP.
//
// The store read-lock is held for the whole response, and each metric's
// read-lock while that metric is rendered.
func (e *Exporter) HandleVarz(w http.ResponseWriter, r *http.Request) {
	e.store.RLock()
	defer e.store.RUnlock()
	w.Header().Add("Content-type", "text/plain")
	for _, ml := range e.store.Metrics {
		for _, m := range ml {
			m.RLock()
			exportVarzTotal.Add(1)
			// EmitLabelSets streams every label/value combination of this
			// metric over the channel; draining it fully lets the goroutine
			// finish.
			lc := make(chan *metrics.LabelSet)
			go m.EmitLabelSets(lc)
			for l := range lc {
				line := metricToVarz(e.o, m, l)
				fmt.Fprint(w, line)
			}
			m.RUnlock()
		}
	}
}
// metricToVarz renders one sample of a metric in varz format:
// name{label=value,...} value\n.  Labels are sorted for deterministic
// output; prog (unless omitted) and instance labels are appended last.
func metricToVarz(o Options, m *metrics.Metric, l *metrics.LabelSet) string {
	labels := make([]string, 0, len(l.Labels)+2)
	for k, v := range l.Labels {
		labels = append(labels, fmt.Sprintf("%s=%s", k, v))
	}
	sort.Strings(labels)
	if !o.OmitProgLabel {
		labels = append(labels, fmt.Sprintf("prog=%s", m.Program))
	}
	labels = append(labels, fmt.Sprintf("instance=%s", o.Hostname))
	return fmt.Sprintf(varzFormat,
		m.Name,
		strings.Join(labels, ","),
		l.Datum.ValueString())
}
mtail-3.0.0~rc5/exporter/varz_test.go 0000664 0000000 0000000 00000003451 13216376353 0017664 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/mtail/metrics"
"github.com/google/mtail/metrics/datum"
)
// handleVarzTests drives TestHandleVarz: each case pairs a set of metrics
// loaded into the store with the exact varz body the handler must emit.
// The expected output includes the prog and instance labels appended by
// metricToVarz.
var handleVarzTests = []struct {
	name     string            // subtest name
	metrics  []*metrics.Metric // metrics preloaded into the store
	expected string            // exact HTTP response body
}{
	{"empty",
		[]*metrics.Metric{},
		"",
	},
	{"single",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}},
			},
		},
		`foo{prog=test,instance=gunstar} 1
`,
	},
	{"dimensioned",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				Keys:        []string{"a", "b"},
				LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}},
			},
		},
		`foo{a=1,b=2,prog=test,instance=gunstar} 1
`,
	},
}
// TestHandleVarz runs each handleVarzTests case against a fresh store and
// exporter, and compares the HTTP response body with the expected varz text.
func TestHandleVarz(t *testing.T) {
	for _, tc := range handleVarzTests {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			ms := metrics.NewStore()
			for _, m := range tc.metrics {
				ms.Add(m)
			}
			e, err := New(Options{ms, "gunstar", false})
			if err != nil {
				t.Fatalf("couldn't make exporter: %s", err)
			}
			w := httptest.NewRecorder()
			e.HandleVarz(w, &http.Request{})
			if w.Code != 200 {
				t.Errorf("response code not 200: %d", w.Code)
			}
			body, err := ioutil.ReadAll(w.Body)
			if err != nil {
				t.Errorf("failed to read response: %s", err)
			}
			if diff := cmp.Diff(tc.expected, string(body)); diff != "" {
				t.Error(diff)
			}
		})
	}
}
mtail-3.0.0~rc5/fuzz/ 0000775 0000000 0000000 00000000000 13216376353 0014437 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc5/fuzz/fuzz.sh 0000775 0000000 0000000 00000000514 13216376353 0015774 0 ustar 00root root 0000000 0000000 #!/bin/sh
set -x

# Locate the mtail binary, the fuzz-program generator and the output
# directory relative to this script.
d=$(dirname "$0")
EMTAIL=$d/../mtail
EMGEN=$d/../emgen/emgen
OUT=$d/../fuzzout

# run SEED: generate a random mtail program from SEED and check that mtail
# can compile it; echo mtail's exit status.
run() {
  rm -rf "$OUT"/*
  "$EMGEN" --rand_seed "$1" > "$OUT/fuzz$1.mtail"
  "$EMTAIL" --compile_only --dump_bytecode --logs foo.log --progs "$OUT"
  echo $?
}

# With an argument, fuzz that single seed; otherwise sweep seeds 0..99.
# Fix: "[[ ... ]]" is a bashism and fails under a POSIX /bin/sh (e.g. dash);
# use the portable "[ ... ]" test.  Variable expansions are quoted to be
# safe against word splitting.
if [ -n "$1" ]; then
  run "$1"
else
  for i in $(seq 0 99); do
    run "$i"
  done
fi
mtail-3.0.0~rc5/log_rewriter.awk 0000664 0000000 0000000 00000000675 13216376353 0016661 0 ustar 00root root 0000000 0000000 BEGIN{
now = systime()
format = "%Y/%m/%d %H:%M:%S"
}
# Main rule: rewrite each log line's timestamp, shifting it forward by the
# offset between the first timestamp seen and the script's start time ("now"
# and "format" are set in the BEGIN block).  Uses gawk time extensions
# (mktime/strftime).
{
  # $1 is the date ("YYYY/MM/DD"), $2 the time ("HH:MM:SS").
  split($1, DATE, "/")
  split($2, TIME, ":")
  t = mktime(DATE[1] " " DATE[2] " " DATE[3] " " TIME[1] " " TIME[2] " " TIME[3])
  # Compute the shift once, from the first record.
  if (delta == "") {
    delta = now - t
  }
  out = strftime(format, t + delta)
  # Re-append the remaining fields after the rewritten timestamp.
  for (i = 3; i <= NF; i++) {
    out = out OFS $i
  }
  print out
}
mtail-3.0.0~rc5/logo.png 0000664 0000000 0000000 00000007335 13216376353 0015117 0 ustar 00root root 0000000 0000000 ‰PNG
IHDR Ś Ś ®ŔA> bKGD ˙ ˙ ˙ ˝§“ pHYs šś tIMEá :1Ź}Z jIDATxÚíťmp[ĺ•ÇçĘvBxÉ Y [šĐD’ »Ěn;[¶,t˛”ĄËĚ’™B qd;ťaşí‡–ťv?ěěKËtXl(HÓ´ef§í»›]vˇ±‡đfÉ!Y§I“Ľ9‰“ŘÖ=űAW©lI¶¤D–Ď/Łql]Ý{¤ç˙ś˙9Ďs=Ă0Ă0Ă0Ă0Ă0Ă0Ă0Ă0ŚÉ˲ČÎÂŹ}f'Ëž)üřP4Yt|ˇh˛dç)úłęH°¬#QőšplZăA
9čş'öN»Ŕw˘Y`©Ŕ…a`ŹŔű§/žŰľjŃPv¦éZ]?jíçˇhr.°¸,ôľşgşß{ö Ň»%Ü0PčuJ‹ §Aű@ /ĺhbk‹ďó´§3VçÚĆ©'呝
îRwŤŔJ`ž¦źú?—Őá×çtm}ŕšcŮеşžPÄűđZ˙á…:z.Tu§ŁĚä&ŕ ¸¸¸`”Pú€°čbŠĆEä$Žt®i8•90ŘŢ@lmĂŚöóńǢ{@€ĂŔŢPt›¨ěá¸Ç¶´4ć‹e˛R“ď ź7ß n.ÍRŮUŔ=âę
ź¦ľ<_Č—ţhĎ4únAą
¸É›Ń3<{ôĺi.ĐĽßě^GůđćxßüŇ
{¦qlĽ±Č@ĹŔRQľ Či…Ŕż+l âSÖ’BŃä-Ŕ&`ÖŘGékĎOŻ}±ëŢúS·|őż8rÝeŇą¶Qnü~O]jšî¤¸ÖKůĹ0ôď)úż*ÎËę«}oëW®ĚXCĆBíÉ3±:v^‚‡DőÖư+ú&âlv}µďŹKµćŔĎóźJ;Uśż®ykŰý‹Of~zŰ®#ô˝Ň7%|IŃU‚L/őQô Č‹"ňonéli<ڦ;—X؟λĽ#yC =)“M0ă¸ůÉÁeşúôa„oO`v÷›@žë7Ľqf¦Ž˛çô‰Ů|V;ş0ˇ˛čżvü˘_n-¤Ż„X¶©čłńµţÇreŻI/ě)IŢ<а(t섢[Ô‘Gâ-Ťgö[
ŮaÎ>q| =q·(ߤi5Íy‹EQ€»«¶¶ř‡«Ú’2¸č.EEz›żPvJD\>*©ęUŢ•§€ă“!I˙[äÂâ¶;U+ěśv«ČŻ5ťÖ«”í˘˛9n<’™ˇă™Ń™Ůܵş~ÄÝ}±pc/°č™D±Ě÷©ÜĽ°˙‚ąUµp7ŰÂţăěxŹa÷€×Rć˘X¦¨E’tŽcpF¬łjŤ](żA¸śôm
•Ë,QůcßxkVŐmI ΰ{\”]ŔŃ@ŢvŃXąŢ‹»čëšľąj2Är¨Ö‹ę…UťaFz.’ľKmfžňzW
íÍŢ&Jö9\Ńđ!ű(ő+!–D/B'žĺ'…`FxĽ"=‹öMyißÉişż\od¨–”Ŕn߇
L%ÄRĚFT·>eIęHJé%˝_“§{`oö{©yď&÷ÝšIa±ě©€X|
MÁ˘˘#ĎJ«wÖéĚ"U©ç¬óĄ
xÉp…Ä"L˛_ő)Ę?UUS"ä[|:”¤r¬hžuN§Đ‰R!±č”ĚPŤ« µ)9ťcÉxPĐýłßcT‡%˝ł¦)őÎoŻ®ą
äBű¸§°`Îňę|Ű3É?°QÍ‚©vŻ6Ę`I† Ć0L0† Ć0Á&Ăc`Ăc`Śa‚1L0†a‚1L0† Ć0Á&Ăc&Ăc`Śa‚1L0†a‚1L0† Ć0Á&Ăca‚1L0† Ć0Á&Ă0Á&Ăc`Śa‚1Śa‚1L0† Ć0Á&Ă0Á&ęCB‘$ˇHrÜ/F9ż7ÁT2™‚±ń*gęĐ@ż çFCčÉäĚÎÖĆŁąF,ě'M\Ü
lŠ…ý}™,—ţďr[†)/ÇŹ7Ç1łĽG1\ܬD»gÇÂţŚ,ĂL*ŻQN"9˙˘®ćyľ O“K€fEëq€deź’fË0嵤ăŠöĺĘ0
—»ŠĽ”ë çÓ‚ÜŚ&D»g”ŁF˛S^N óXŇĺ¨.,…`