pax_global_header 0000666 0000000 0000000 00000000064 14600635717 0014523 g ustar 00root root 0000000 0000000 52 comment=0ff5a18c11af1970b0ef16a31423c66623ac24e9
mtail-3.0.0~rc54+git0ff5/ 0000775 0000000 0000000 00000000000 14600635717 0014765 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/.github/ 0000775 0000000 0000000 00000000000 14600635717 0016325 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/.github/CODEOWNERS 0000664 0000000 0000000 00000000012 14600635717 0017711 0 ustar 00root root 0000000 0000000 * @jaqx0r
mtail-3.0.0~rc54+git0ff5/.github/ISSUE_TEMPLATE/ 0000775 0000000 0000000 00000000000 14600635717 0020510 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/.github/ISSUE_TEMPLATE/bug_report.md 0000664 0000000 0000000 00000001354 14600635717 0023205 0 ustar 00root root 0000000 0000000 ---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
Thanks for discovering a problem in `mtail`!
When reporting bugs in mtail behaviour, please be as detailed as possible; describe the problem, what you wanted to have happen, what you observed instead.
If your problem is with the way an `mtail` program is behaving, please attach or include inline any mtail programs that demonstrate the bug, any log files that mtail was processing, and the observed output.
If your problem is with `mtail`, please include the commandline you started it with, and the INFO log.
See also [Reporting a problem](https://github.com/google/mtail/blob/main/docs/Troubleshooting.md#reporting-a-problem).
Thanks!
mtail-3.0.0~rc54+git0ff5/.github/dependabot.yml 0000664 0000000 0000000 00000000315 14600635717 0021154 0 ustar 00root root 0000000 0000000 version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
mtail-3.0.0~rc54+git0ff5/.github/workflows/ 0000775 0000000 0000000 00000000000 14600635717 0020362 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/.github/workflows/auto-review.yml 0000664 0000000 0000000 00000002624 14600635717 0023360 0 ustar 00root root 0000000 0000000 # This is a single-maintainer project but I want to require reviews before
# merge, which means that I need a bot to review my own work.
name: Automatic pull request approvals
on:
merge_group:
pull_request_target:
types:
- opened
- reopened
- synchronize
- ready_for_review
check_suite:
types:
- completed
jobs:
auto-approve:
runs-on: ubuntu-latest
if: >
github.event.pull_request.head.repo.full_name == github.repository &&
github.event.pull_request.draft == false && (
github.event.action == 'opened' ||
github.event.action == 'reopened' ||
github.event.action == 'synchronize'
) && (
github.actor == 'jaqx0r'
)
permissions:
# wait on check
checks: read
# create review
pull-requests: write
steps:
- uses: lewagon/wait-on-check-action@v1.3.3
with:
ref: ${{ github.event.pull_request.head.sha }}
repo-token: ${{ github.token }}
check-regexp: "test.*"
wait-interval: 60
- uses: "actions/github-script@v7"
with:
github-token: ${{ github.token }}
script: |
await github.rest.pulls.createReview({
event: "APPROVE",
owner: context.repo.owner,
pull_number: context.payload.pull_request.number,
repo: context.repo.repo,
})
mtail-3.0.0~rc54+git0ff5/.github/workflows/automerge.yml 0000664 0000000 0000000 00000004246 14600635717 0023103 0 ustar 00root root 0000000 0000000 # We "trust" dependabot updates once they pass tests.
# (this still requires all other checks to pass!)
# This doesn't work on forked repos per the discussion in
# https://github.com/pascalgn/automerge-action/issues/46 so don't attempt to
# add people other than dependabot to the if field below.
name: dependabot-auto-merge
on:
pull_request_target:
types:
# Dependabot will label the PR
- labeled
# Dependabot has rebased the PR
- synchronize
jobs:
enable-automerge:
if: github.event.pull_request.user.login == 'dependabot[bot]' && contains(github.event.pull_request.labels.*.name, 'dependencies')
runs-on: ubuntu-latest
permissions:
# enable-automerge is a graphql query, not REST, so isn't documented,
# except in a mention in
# https://github.blog/changelog/2021-02-04-pull-request-auto-merge-is-now-generally-available/
# which says "can only be enabled by users with permissino to merge"; the
# REST documentation says you need contents: write to perform a merge.
# https://github.community/t/what-permission-does-a-github-action-need-to-call-graphql-enablepullrequestautomerge/197708
# says this is it
contents: write
steps:
# Enable auto-merge *before* issuing an approval.
- uses: alexwilson/enable-github-automerge-action@main
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
wait-on-checks:
needs: enable-automerge
runs-on: ubuntu-latest
permissions:
# wait-on-check requires only checks read
checks: read
steps:
- uses: lewagon/wait-on-check-action@v1.3.3
with:
ref: ${{ github.event.pull_request.head.sha }}
check-regexp: "test.*"
repo-token: ${{ secrets.GITHUB_TOKEN }}
wait-interval: 60
approve:
needs: wait-on-checks
runs-on: ubuntu-latest
permissions:
# https://github.com/hmarr/auto-approve-action/issues/183 says
# auto-approve-action requires write on pull-requests
pull-requests: write
steps:
- uses: hmarr/auto-approve-action@f0939ea97e9205ef24d872e76833fa908a770363
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
mtail-3.0.0~rc54+git0ff5/.github/workflows/ci-done.yml 0000664 0000000 0000000 00000003367 14600635717 0022434 0 ustar 00root root 0000000 0000000 name: Comment CI test results on PR
on:
workflow_run:
workflows: ["CI"]
types:
- completed
jobs:
comment:
strategy:
matrix:
# Sync with matrix in ci.yml
runs-on: [ubuntu-latest]
runs-on: ${{ matrix.runs-on }}
permissions:
# list and download
actions: read
# post results as comment
pull-requests: write
# publish creates a check run
checks: write
steps:
- uses: actions/github-script@v7
with:
script: |
var artifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: ${{github.event.workflow_run.id }},
});
var matchArtifact = artifacts.data.artifacts.filter((artifact) => {
return artifact.name == "test-results-${{ matrix.runs-on }}"
})[0];
var download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
var fs = require('fs');
fs.writeFileSync('${{github.workspace}}/test-results.zip', Buffer.from(download.data));
- id: unpack
run: |
mkdir -p test-results
unzip -d test-results test-results.zip
echo "sha=$(cat test-results/sha-number)" >> $GITHUB_OUTPUT
- uses: docker://ghcr.io/enricomi/publish-unit-test-result-action:v1.6
with:
commit: ${{ steps.unpack.outputs.sha }}
check_name: Unit Test Results
github_token: ${{ secrets.GITHUB_TOKEN }}
files: "**/test-results/**/*.xml"
mtail-3.0.0~rc54+git0ff5/.github/workflows/ci.yml 0000664 0000000 0000000 00000006542 14600635717 0021507 0 ustar 00root root 0000000 0000000 name: CI
on:
push:
tags:
- v*
branches:
- main
pull_request:
merge_group:
env:
GOPROXY: "https://proxy.golang.org"
permissions:
# none-all, which doesn't exist, but
# https://docs.github.com/en/actions/reference/authentication-in-a-workflow#using-the-github_token-in-a-workflow
# implies that the token still gets created. Elsewhere we learn that any
# permission not mentioned here gets turned to `none`.
actions: none
jobs:
test:
strategy:
matrix:
# macos-latest is slow and has weird test failures with unixgram message sizes, so it's been disabled.
# Sync with matrix in ci-done.yml
runs-on: [ubuntu-latest, windows-latest]
runs-on: ${{ matrix.runs-on }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: true
- name: install deps
run: go mod download
- name: build
run: make --debug all
- name: test
run: |
mkdir -p test-results
# Don't use GITHUB_SHA as we need the head of the branch, not the
# secret merge commit of the PR itself. https://help.github.com/en/actions/automating-your-workflow-with-github-actions/events-that-trigger-workflows#pull-request-event-pull_request
if [[ ${{ github.event_name }} == 'pull_request' ]]; then
echo ${{ github.event.pull_request.head.sha }} > test-results/sha-number
else
echo ${{ github.sha }} > test-results/sha-number
fi
make --debug junit-regtest TESTCOVERPROFILE=coverprofile
shell: bash
- uses: codecov/codecov-action@v4
if: always()
with:
file: coverprofile
- uses: actions/upload-artifact@v4
if: always()
with:
name: test-results-${{ matrix.runs-on }}
path: test-results/
container:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # so that we get tags
- run: make --debug container
gosec:
runs-on: ubuntu-latest
# gosec is regularly broken and reporting false positives, don't let it interfere
continue-on-error: true
permissions:
security-events: write
steps:
- uses: actions/checkout@v4
- uses: securego/gosec@master
with:
# we let the report trigger content trigger a failure using the GitHub Security features.
args: '-no-fail -fmt sarif -out results.sarif -tags fuzz ./...'
- uses: github/codeql-action/upload-sarif@v3
with:
# Path to SARIF file relative to the root of the repository
sarif_file: results.sarif
fuzz:
runs-on: ubuntu-latest
container:
image: gcr.io/oss-fuzz-base/base-builder
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: '^1.x'
- uses: actions/cache@v4
id: cache
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: install deps
if: steps.cache.output.cache-hit != 'true'
run: make --debug install_deps
- name: local fuzz regtest
run: make --debug CXX=clang LIB_FUZZING_ENGINE=-fsanitize=fuzzer fuzz-regtest
mtail-3.0.0~rc54+git0ff5/.github/workflows/codeql-analysis.yml 0000664 0000000 0000000 00000004616 14600635717 0024204 0 ustar 00root root 0000000 0000000 # For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '34 6 * * 3'
permissions:
# https://github.com/github/codeql-action/issues/464
security-events: write
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
# ℹ️ Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
mtail-3.0.0~rc54+git0ff5/.github/workflows/golangci-lint.yml 0000664 0000000 0000000 00000001264 14600635717 0023637 0 ustar 00root root 0000000 0000000 name: golangci-lint
on:
push:
tags:
- v*
branches:
- main
pull_request:
permissions:
# golangci-lint does annotations, not comments
# No-one knows what an annotation is, but I suspect it's printing file:line: msg to stdout.
# https://github.community/t/what-are-annotations/16173/2
checks: none
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: golangci/golangci-lint-action@v4
with:
# Required: the version of golangci-lint is required and must be
# specified without patch version: we always use the latest patch
# version.
version: v1.57
mtail-3.0.0~rc54+git0ff5/.github/workflows/oss-fuzz.yml 0000664 0000000 0000000 00000001517 14600635717 0022711 0 ustar 00root root 0000000 0000000 name: OSS-Fuzz
on:
pull_request:
paths:
- '**.go'
- 'internal/runtime/compiler/parser/parser.y'
- 'Makefile'
- 'Dockerfile'
permissions:
# Secret code for "the github token should have no tokens at all"
actions: none
jobs:
oss-fuzz:
runs-on: ubuntu-latest
steps:
- name: Build Fuzzers
id: build
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
with:
oss-fuzz-project-name: 'mtail'
dry-run: false
- name: Run Fuzzers
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
with:
oss-fuzz-project-name: 'mtail'
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@v4
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
path: ./out/artifacts
mtail-3.0.0~rc54+git0ff5/.github/workflows/release.yml 0000664 0000000 0000000 00000004156 14600635717 0022533 0 ustar 00root root 0000000 0000000 name: release
on:
# Test that it works on pull_request or merge group;
# goreleaser goes into snapshot mode if not a tag;
# docker image will be built but not pushed for pull requests or merge group events.
pull_request:
merge_group:
push:
tags:
- v*
env:
# Use docker.io for Docker Hub if empty
REGISTRY: ghcr.io
# github.repository as /
IMAGE_NAME: ${{ github.repository }}
jobs:
goreleaser:
runs-on: ubuntu-latest
permissions:
# goreleaser writes to the releases api
contents: write
env:
flags: ""
steps:
- if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
run: echo "flags=--snapshot" >> $GITHUB_ENV
- uses: actions/checkout@v4
with:
fetch-depth: 0
- run: git fetch --force --tags
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: true
- uses: goreleaser/goreleaser-action@v5
with:
version: latest
args: release --rm-dist ${{ env.flags }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
docker-release:
runs-on: ubuntu-latest
permissions:
# docker writes packages to container registry
packages: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- run: git fetch --force --tags
- name: Log in to the Container registry
uses: docker/login-action@v3.1.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5.5.1
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
- name: Build and push Docker image (image is not pushed on pull request)
uses: docker/build-push-action@v5.3.0
with:
context: .
push: ${{ github.event_name != 'pull_request' && github.event_name != 'merge_group' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
mtail-3.0.0~rc54+git0ff5/.github/workflows/stale.yml 0000664 0000000 0000000 00000001121 14600635717 0022210 0 ustar 00root root 0000000 0000000 name: "Close stale issues"
on:
schedule:
- cron: "30 1 * * *"
permissions:
pull-requests: write
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'This issue has been waiting for more information for more than 60 days and will be closed in 7 if no update is provided.'
stale-pr-message: 'This PR has been waiting for an update for more than 60 days and wlil be closed in 7 if no update is provided.'
only-labels: 'more-info-needed'
mtail-3.0.0~rc54+git0ff5/.gitlab-ci.yml 0000664 0000000 0000000 00000000766 14600635717 0017432 0 ustar 00root root 0000000 0000000 image: golang:latest
stages:
- test
- build
before_script:
- mkdir ${CI_PROJECT_DIR}/build
- mkdir -p ${GOPATH}/src/github.com/google/
- ln -s $(pwd) ${GOPATH}/src/github.com/google/mtail
- cd ${GOPATH}/src/github.com/google/mtail
test:
stage: test
allow_failure: true
script:
- make install_deps
- make test
build:
stage: build
script:
- PREFIX=${CI_PROJECT_DIR}/build make install
artifacts:
expire_in: 1 week
when: on_success
paths:
- build
mtail-3.0.0~rc54+git0ff5/.golangci.yml 0000664 0000000 0000000 00000005347 14600635717 0017362 0 ustar 00root root 0000000 0000000 run:
tests: true
build-tags:
- integration
- fuzz
# fail if go.mod needs changing
modules-download-mode: readonly
linters-settings:
govet:
enable-all: true
disable:
- composites # same as exhaustruct below
- fieldalignment
asasalint:
exclude:
- glog\.Infof
linters:
presets:
- bugs
- error
- format
- import
- module
- performance
- test
- unused
- metalinter
enable:
- exportloopref
# A general rule is if the lint author can't be bothered supplying automated
# fixes for obvious lint warnings, I'm not bothered using their tool.
disable:
- cyclop # boo cyclomatic complexity
- dupl # exclude test code
- depguard
- errcheck # handled by gosec, lots of false posi
- exhaustive # this false-positives for switches with a default
- exhaustivestruct # too noisy, labelling fields is not my jam
- exhaustruct # above, renamed
- forbidigo # exclude non prod tools
- forcetypeassert # too many at the moment
- funlen # My tests will be as long as they need to be thanks
- gci
- gochecknoglobals # Flags are fine, as are test tables.
- gochecknoinits # How dare you tell me not to use inits.
- gocognit # boo cyclomatic complexity
- gocyclo # boo cyclomatic complexity
- godox # TODOs are fine
- golint # deprecated
- gomnd # magic numbers in test tables are fine actually
- gosec # run independently
- ifshort # buggy, false positives
- interfacer # deprecated
- lll # go says long lines are ok, and this is trivially automatable
- maligned # deprecated
- musttag # don't agree with the premise
- nakedret # weird thing to report on
- nestif # cognitive complexity
- nlreturn # Not a fan of this one, looks messy
- nolintlint # broken on gocritic
- paralleltest # i had a good reason for this
- perfsprint
- testpackage # need to test internal methods
- unparam # too noisy
- whitespace # broken by goyacc
- wrapcheck # not sure this is necessary
- wsl # wsl doesn't explain any of its recommendations
issues:
# Show everything.
max-issues-per-linter: 0
max-same-issues: 0
exclude-use-default: true
exclude:
# `gofix` should fix this if it really cared
- 'composite literal uses unkeyed fields'
# I like common patterns of shadowing: ctx and err
- 'declaration of "ctx" shadows declaration'
- 'declaration of "err" shadows declaration'
# goyacc generated error in three locations
- 'this value of `mtailDollar.* is never used'
# Incorrectly reports undeclared in same package
- "undeclared name:"
# Disagree with capitalisation of identifier names
- "ST1003:"
mtail-3.0.0~rc54+git0ff5/.goreleaser.yml 0000664 0000000 0000000 00000001114 14600635717 0017713 0 ustar 00root root 0000000 0000000 before:
hooks:
- go mod download
builds:
- id: mtail
main: ./cmd/mtail/main.go
binary: mtail
env:
- CGO_ENABLED=0
goos:
- linux
- windows
- darwin
ldflags:
- -X main.Branch={{.Branch}}
- -X main.Version={{.Version}}
- -X main.Revision={{.Commit}}
gcflags:
# I love errors.
- -e
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
filters:
exclude:
- '^docs:'
- '^test:'
- '^Merge'
release:
github:
name_template: v{{.Version}}
mtail-3.0.0~rc54+git0ff5/CODE_OF_CONDUCT.md 0000664 0000000 0000000 00000006620 14600635717 0017570 0 ustar 00root root 0000000 0000000 # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
Paraphrasing [@pinksoulstudios](https://www.pinksoulstudios.com/shop/we-believe-reclaimed-wood-sign):
>In this project we believe
>Black Lives Matter
>Women's Rights are Human Rights
>No human is illegal
>Science is real
>Love is love
>KINDNESS IS EVERYTHING
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at jaq@google.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
mtail-3.0.0~rc54+git0ff5/CONTRIBUTING.md 0000664 0000000 0000000 00000003403 14600635717 0017216 0 ustar 00root root 0000000 0000000 Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose.
Please read the [style guide](docs/style.md) for tips on the project coding
guidelines.
### Response Time
This repository is maintained as a best effort service.
Response times to issues and PRs may vary with the availability of the
maintainers. We appreciate your patience.
PRs with unit tests will be merged promptly. All other requests (issues and
PRs) may take longer to be responded to.
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the Software Grant and Corporate Contributor License Agreement.
mtail-3.0.0~rc54+git0ff5/Dockerfile 0000664 0000000 0000000 00000002316 14600635717 0016761 0 ustar 00root root 0000000 0000000 FROM golang:alpine AS builder
RUN apk add --update git make
WORKDIR /go/src/github.com/google/mtail
COPY . /go/src/github.com/google/mtail
RUN make depclean && make install_deps && PREFIX=/go make STATIC=y -B install
FROM scratch
COPY --from=builder /go/bin/mtail /usr/bin/mtail
ENTRYPOINT ["/usr/bin/mtail"]
EXPOSE 3903
WORKDIR /tmp
ARG version=0.0.0-local
ARG build_date=unknown
ARG commit_hash=unknown
ARG vcs_url=unknown
ARG vcs_branch=unknown
LABEL org.opencontainers.image.ref.name="google/mtail" \
org.opencontainers.image.vendor="Google" \
org.opencontainers.image.title="mtail" \
org.opencontainers.image.description="extract internal monitoring data from application logs for collection in a timeseries database" \
org.opencontainers.image.authors="Jamie Wilkinson (@jaqx0r)" \
org.opencontainers.image.licenses="Apache-2.0" \
org.opencontainers.image.version=$version \
org.opencontainers.image.revision=$commit_hash \
org.opencontainers.image.source=$vcs_url \
org.opencontainers.image.documentation="https://google.github.io/mtail/" \
org.opencontainers.image.created=$build_date \
org.opencontainers.image.url="https://github.com/google/mtail"
mtail-3.0.0~rc54+git0ff5/ISSUE_TEMPLATE.md 0000664 0000000 0000000 00000001205 14600635717 0017470 0 ustar 00root root 0000000 0000000 Thanks for discovering a problem in `mtail`!
When reporting bugs in mtail behaviour, please be as detailed as possible; describe the problem, what you wanted to have happen, what you observed instead.
If your problem is with the way an `mtail` program is behaving, please attach or include inline any mtail programs that demonstrate the bug, any log files that mtail was processing, and the observed output.
If your problem is with `mtail`, please include the commandline you started it with, and the INFO log.
See also [Reporting a problem](https://github.com/google/mtail/blob/main/docs/Troubleshooting.md#reporting-a-problem).
Thanks!
mtail-3.0.0~rc54+git0ff5/LICENSE 0000664 0000000 0000000 00000026136 14600635717 0016002 0 ustar 00root root 0000000 0000000
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
mtail-3.0.0~rc54+git0ff5/Makefile 0000664 0000000 0000000 00000023221 14600635717 0016425 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
export GO111MODULE ?= on
# Build these.
TARGETS = mtail mgen mdot mfmt
GO_TEST_FLAGS ?=
BENCH_COUNT ?= 1
BASE_REF ?= main
HEAD_REF ?= $(shell git symbolic-ref HEAD -q --short)
# Branch names may contain slashes; flatten them so they can be used in
# benchmark result filenames below.
BASE_REF := $(subst /,-,$(BASE_REF))
HEAD_REF := $(subst /,-,$(HEAD_REF))
all: $(TARGETS)
# Install them here
# NOTE(review): PREFIX is relative ("usr/local", no leading slash) —
# presumably intentional for DESTDIR-style staged installs; confirm.
PREFIX ?= usr/local
# Place to store dependencies.
DEPDIR = .d
# Can't use a dependency rule here.
$(shell install -d $(DEPDIR))
# This rule finds all non-standard-library dependencies of each target and emits them to a makefile include.
# Thanks mrtazz: https://unwiredcouch.com/2016/05/31/go-make.html
MAKEDEPEND = echo "$@: $$(go list -f '{{if not .Standard}}{{.Dir}}{{end}}' $$(go list -f '{{ join .Deps "\n" }}' $<) | sed -e 's@$$@/*.go@' | tr "\n" " " )" > $(DEPDIR)/$@.d
# This rule allows the dependencies to not exist yet, for the first run.
$(DEPDIR)/%.d: ;
.PRECIOUS: $(DEPDIR)/%.d
# This instruction loads any dependency includes for our targets.
-include $(patsubst %,$(DEPDIR)/%.d,$(TARGETS))
# Set the timeout for tests.  CI machines are slower, so give them longer.
test_timeout := 20s
testrace_timeout := 4m
ifeq ($(CI),true)
test_timeout := 100s
testrace_timeout := 20m
endif
# Let the benchmarks run for a long time. The timeout is for the total time of
# all benchmarks, not per bench.
benchtimeout := 120m
GOFILES=$(shell find . -name '*.go' -a ! -name '*_test.go')
GOTESTFILES=$(shell find . -name '*_test.go')
# Generated sources: the goyacc parser and the favicon embedded as Go.
GOGENFILES=internal/runtime/compiler/parser/parser.go\
	internal/mtail/logo.ico.go
CLEANFILES+=\
	internal/runtime/compiler/parser/parser.go\
	internal/runtime/compiler/parser/y.output\
	internal/mtail/logo.ico.go\
	internal/mtail/logo.ico\
# A place to install tool dependencies.
GOBIN ?= $(firstword $(subst :, ,$(shell go env GOPATH)))/bin
export PATH := $(GOBIN):$(PATH)
# Development tool dependencies, each installed into GOBIN on demand.
TOGO = $(GOBIN)/togo
$(TOGO):
	go install github.com/flazz/togo@latest
GOYACC = $(GOBIN)/goyacc
$(GOYACC):
	go install golang.org/x/tools/cmd/goyacc@latest
GOFUZZBUILD = $(GOBIN)/go114-fuzz-build
$(GOFUZZBUILD):
	go install github.com/mdempsky/go114-fuzz-build@latest
GOFUZZ = $(GOBIN)/go-fuzz
$(GOFUZZ):
	go install github.com/dvyukov/go-fuzz/go-fuzz@latest
GOTESTSUM = $(GOBIN)/gotestsum
$(GOTESTSUM):
	go install gotest.tools/gotestsum@latest
BENCHSTAT = $(GOBIN)/benchstat
$(BENCHSTAT):
	go install golang.org/x/perf/cmd/benchstat@latest
GOSEC = $(GOBIN)/gosec
$(GOSEC):
	go install github.com/securego/gosec/v2/cmd/gosec@latest
.PHONY: clean covclean crossclean depclean veryclean
clean: covclean crossclean
	rm -f $(CLEANFILES)
covclean:
	rm -f *.coverprofile coverage.html $(COVERPROFILES)
crossclean:
	rm -rf build
depclean:
	rm -f .d/* .*dep-stamp
veryclean: clean depclean
# This version should match the one in .github/workflows/golangci-lint.yml
GOLANGCILINT_VERSION=$(shell grep 'version: v' .github/workflows/golangci-lint.yml | cut -f2 -d: | tr -d ' ')
# lint
# Runs golangci-lint in a container so local and CI lint versions agree;
# the lint cache is shared with the host to speed up repeat runs.
.PHONY: lint
lint: $(GOFILES) $(GOGENFILES) $(GOTESTFILES)
	mkdir -p $(HOME)/.cache/golangci-lint/$(GOLANGCILINT_VERSION)
	podman run --rm -v $(shell pwd):/app -v $(HOME)/.cache/golangci-lint/$(GOLANGCILINT_VERSION):/root/.cache -w /app docker.io/golangci/golangci-lint:$(GOLANGCILINT_VERSION) golangci-lint run -v
# Version metadata baked into the binaries via -ldflags -X.
branch := $(shell git rev-parse --abbrev-ref HEAD)
version := $(shell git describe --tags --always --dirty)
revision := $(shell git rev-parse HEAD)
release := $(shell git describe --tags --always --dirty | cut -d"-" -f 1,2)
GO_LDFLAGS := -X main.Branch=${branch} -X main.Version=${version} -X main.Revision=${revision}
ifeq ($(STATIC),y)
# -s Omit symbol table and debug info
# -w Omit DWARF symbol table
# -extldflags -static and CGO_ENABLED=0 to make pure static
GO_LDFLAGS += -w -s -extldflags "-static"
export CGO_ENABLED=0
endif
# Show all errors, not just limit to 10.
GO_GCFLAGS = -e
# Very specific static pattern rule to only do this for commandline targets.
# Each commandline must be in a 'main.go' in their respective directory. The
# MAKEDEPEND rule generates a list of dependencies for the next make run -- the
# first time the rule executes because the target doesn't exist, subsequent
# runs can read the dependencies and update iff they change.
$(TARGETS): %: cmd/%/main.go $(DEPDIR)/%.d | print-version .dep-stamp
	$(MAKEDEPEND)
	go build -gcflags "$(GO_GCFLAGS)" -ldflags "$(GO_LDFLAGS)" -o $@ $<
internal/runtime/compiler/parser/parser.go: internal/runtime/compiler/parser/parser.y | $(GOYACC)
	go generate -x ./$(@D)
# "|| touch $@" keeps the build going on hosts without ImageMagick installed.
internal/mtail/logo.ico: logo.png
	/usr/bin/convert $< -define icon:auto-resize=64,48,32,16 $@ || touch $@
internal/mtail/logo.ico.go: | internal/mtail/logo.ico $(TOGO)
	togo -pkg mtail -name logoFavicon -input internal/mtail/logo.ico
###
## Emit the current toolchain version at the start of every goal, if that goal depends on this.
#
.PHONY: print-version
print-version:
	which go
	go version
	go env
###
## Install rules
#
# Would subst all $(TARGETS) except other binaries are just for development.
INSTALLED_TARGETS = $(PREFIX)/bin/mtail
.PHONY: install
install: $(INSTALLED_TARGETS)
$(PREFIX)/bin/%: %
	install -d $(@D)
	install -m 755 $< $@
# NOTE(review): $(LOGO_GO) in the order-only prerequisites below is never
# defined anywhere in this Makefile, so it expands to nothing — presumably a
# leftover; confirm and remove.
.PHONY: test check
check test: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp
	go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -timeout ${test_timeout} ./...
.PHONY: testrace
testrace: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp
	go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -timeout ${testrace_timeout} -race -v ./...
.PHONY: smoke
smoke: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp
	go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -timeout 1s -test.short ./...
.PHONY: regtest
regtest: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp
	go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -v -timeout=${testrace_timeout} ./...
TESTRESULTS ?= test-results
TESTCOVERPROFILE ?= out.coverprofile
# Produce JUnit XML and a coverprofile for CI consumption.
.PHONY: junit-regtest
junit-regtest: $(TESTRESULTS)/test-output.xml $(TESTCOVERPROFILE)
$(TESTRESULTS)/test-output.xml $(TESTCOVERPROFILE): $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version .dep-stamp $(GOTESTSUM)
	mkdir -p $(TESTRESULTS)
	gotestsum --debug --junitfile $(TESTRESULTS)/test-output.xml -- $(GO_TEST_FLAGS) -p=1 -cpu=1,2,4 -race -count=1 -parallel=1 -coverprofile=$(TESTCOVERPROFILE) --covermode=atomic -v -timeout=30m -gcflags "$(GO_GCFLAGS)" ./...
# Benchmark the current branch; results are filed per-branch so benchstat can
# compare HEAD against BASE.
.PHONY: bench
bench: $(TESTRESULTS)/benchmark-results-$(HEAD_REF).txt
$(TESTRESULTS)/benchmark-results-$(HEAD_REF).txt: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version .dep-stamp
	mkdir -p $(TESTRESULTS)
	go test -cpu 1,2,4 -bench=. -count=$(BENCH_COUNT) -timeout=${benchtimeout} -run=^a ./... | tee $@
.PHONY: benchstat
benchstat: $(TESTRESULTS)/benchstat.txt
# Compare against the base branch's results if present, else just report.
$(TESTRESULTS)/benchstat.txt: $(TESTRESULTS)/benchmark-results-$(HEAD_REF).txt | print-version $(BENCHSTAT)
	(test -s $(TESTRESULTS)/benchmark-results-$(BASE_REF).txt && benchstat -sort=-delta $(TESTRESULTS)/benchmark-results-$(BASE_REF).txt $< || benchstat $<) | tee $@
PACKAGES := $(shell go list -f '{{.Dir}}' ./... | grep -v /vendor/ | grep -v /cmd/ | sed -e "s@$$(pwd)@.@")
.PHONY: testall
testall: testrace fuzz-regtest bench
.PHONY: checkall
checkall: check all fuzz-targets
## make u a container
.PHONY: container
container: Dockerfile
	docker build -t mtail \
		--build-arg version=${version} \
		--build-arg commit_hash=${revision} \
		--build-arg build_date=$(shell date -Iseconds --utc) \
		.
## Run gosec
.PHONY: gosec
gosec: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | $(GOSEC)
	$(GOSEC) -tags fuzz ./...
###
## Fuzz testing
#
# These flags set compatibility with OSS-Fuzz
CXX = clang
CXXFLAGS ?= -fsanitize=fuzzer,address
LIB_FUZZING_ENGINE ?=
OUT ?= .
.PHONY: fuzz-targets
fuzz-targets: $(OUT)/vm-fuzzer
# Build the Go fuzz harness into an archive, then link it against libFuzzer.
$(OUT)/vm-fuzzer: $(GOFILES) | $(GOFUZZBUILD)
	go114-fuzz-build -o fuzzer.a ./internal/runtime
	$(CXX) $(CXXFLAGS) $(LIB_FUZZING_ENGINE) fuzzer.a -lpthread -o $(OUT)/vm-fuzzer
# Dictionary of language tokens to guide the fuzzer's mutations.
$(OUT)/vm-fuzzer.dict: mgen
	./mgen --dictionary | sort > $@
# Seed corpus: every example program plus the fuzz regression inputs.
$(OUT)/vm-fuzzer_seed_corpus.zip: $(wildcard examples/*.mtail) $(wildcard internal/runtime/fuzz/*.mtail)
	zip -j $@ $^
FUZZER_FLAGS=-rss_limit_mb=4096 -timeout=60s
.INTERMEDIATE: SEED/*
SEED: $(OUT)/vm-fuzzer_seed_corpus.zip
	mkdir -p SEED
	unzip -o -d SEED $<
.PHONY: fuzz
fuzz: SEED $(OUT)/vm-fuzzer $(OUT)/vm-fuzzer.dict
	mkdir -p CORPUS
	$(OUT)/vm-fuzzer $(FUZZER_FLAGS) -dict=$(OUT)/vm-fuzzer.dict CORPUS SEED
# Run the fuzzer once over the seed inputs only, as a regression check.
.PHONY: fuzz-regtest
fuzz-regtest: $(OUT)/vm-fuzzer SEED
	$(OUT)/vm-fuzzer $(FUZZER_FLAGS) $(shell ls SEED/*.mtail)
CRASH ?=
# Reproduce a crash found by the fuzzer, then re-run it through mtail with
# full compiler debugging enabled.
.PHONY: fuzz-repro
fuzz-repro: $(OUT)/vm-fuzzer mtail
	$(OUT)/vm-fuzzer $(FUZZER_FLAGS) $(CRASH) || true # Want to continue
	./mtail --logtostderr --vmodule=runtime=2,lexer=2,parser=2,checker=2,types=2,codegen=2 --mtailDebug=3 --dump_ast --dump_ast_types --dump_bytecode --compile_only --progs $(CRASH)
# make fuzz-min CRASH=example crash
.PHONY: fuzz-min
fuzz-min: $(OUT)/vm-fuzzer $(OUT)/vm-fuzzer.dict
	$(OUT)/vm-fuzzer -dict=$(OUT)/vm-fuzzer.dict -minimize_crash=1 -runs=10000 $(CRASH)
###
## dependency section
#
.PHONY: install_deps
install_deps: .dep-stamp
# Stamp file recording that module dependencies have been downloaded.
.dep-stamp: | print-version $(GOGENFILES)
	go mod download
	touch $@
###
## Coverage
#
.PHONY: coverage covrep
coverage: coverprofile
# BUGFIX: this recipe previously used ${timeout}, a variable that is never
# defined anywhere in this Makefile (the defined variables are test_timeout
# and testrace_timeout), which produced a malformed empty "-timeout=" flag.
coverprofile: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp
	go test -v -covermode=count -coverprofile=$@ -timeout=${test_timeout} $(PACKAGES)
coverage.html: coverprofile | print-version
	go tool cover -html=$< -o $@
covrep: coverage.html
	xdg-open $<
###
## Github issue tracking
#
GHI = $(GOBIN)/ghi
$(GHI):
	go install github.com/markbates/ghi@latest
issue-fetch: | $(GHI)
	ghi fetch
issue-list: | $(GHI)
	ghi list
ISSUE?=1
issue-show: | $(GHI)
	ghi show $(ISSUE)
mtail-3.0.0~rc54+git0ff5/README.md 0000664 0000000 0000000 00000011061 14600635717 0016243 0 ustar 00root root 0000000 0000000
# mtail - extract internal monitoring data from application logs for collection into a timeseries database
[](https://github.com/google/mtail/actions?query=workflow%3ACI+branch%3main)
[](http://godoc.org/github.com/google/mtail)
[](https://goreportcard.com/report/github.com/google/mtail)
[](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:mtail)
[](https://codecov.io/gh/google/mtail)
`mtail` is a tool for extracting metrics from application logs to be exported
into a timeseries database or timeseries calculator for alerting and
dashboarding.
It fills a monitoring niche by being the glue between applications that do not
export their own internal state (other than via logs) and existing monitoring
systems, such that system operators do not need to patch those applications to
instrument them or writing custom extraction code for every such application.
The extraction is controlled by [mtail programs](docs/Programming-Guide.md)
which define patterns and actions:
# simple line counter
counter lines_total
/$/ {
lines_total++
}
Metrics are exported for scraping by a collector as JSON or Prometheus format
over HTTP, or can be periodically sent to a collectd, StatsD, or Graphite
collector socket.
Read the [programming guide](docs/Programming-Guide.md) if you want to learn how
to write mtail programs.
Ask general questions on the users mailing list: https://groups.google.com/g/mtail-users
## Installation
There are various ways of installing **mtail**.
### Precompiled binaries
Precompiled binaries for released versions are available in the
[Releases page](https://github.com/google/mtail/releases) on Github. Using the
latest production release binary is the recommended way of installing **mtail**.
Windows, OSX and Linux binaries are available.
### Building from source
The simplest way to get `mtail` is to `go get` it directly.
`go get github.com/google/mtail/cmd/mtail`
This assumes you have a working Go environment with a recent Go version. Usually mtail is tested to work with the last two minor versions (e.g. Go 1.12 and Go 1.11).
If you want to fetch everything, you need to turn on Go Modules to succeed because of the way Go Modules have changed the way go get treats source trees with no Go code at the top level.
```
GO111MODULE=on go get -u github.com/google/mtail
cd $GOPATH/src/github.com/google/mtail
make install
```
If you develop the compiler you will need some additional tools
like `goyacc` to be able to rebuild the parser.
See the [Build instructions](docs/Building.md) for more details.
A `Dockerfile` is included in this repository for local development as an
alternative to installing Go in your environment, and takes care of all the
build dependency installation, if you don't care for that.
## Deployment
`mtail` works best when paired with a timeseries-based calculator and
alerting tool, like [Prometheus](http://prometheus.io).
> So what you do is you take the metrics from the log files and
> you bring them down to the monitoring system?
[It deals with the instrumentation so the engineers don't have
to!](http://www.imdb.com/title/tt0151804/quotes/?item=qt0386890) It has the
extraction skills! It is good at dealing with log files!!
## Read More
Full documentation at http://google.github.io/mtail/
Read more about writing `mtail` programs:
* [Programming Guide](docs/Programming-Guide.md)
* [Language Reference](docs/Language.md)
* [Metrics](docs/Metrics.md)
* [Managing internal state](docs/state.md)
* [Testing your programs](docs/Testing.md)
Read more about hacking on `mtail`
* [Building from source](docs/Building.md)
* [Contributing](CONTRIBUTING.md)
* [Style](docs/style.md)
Read more about deploying `mtail` and your programs in a monitoring environment
* [Deploying](docs/Deploying.md)
* [Interoperability](docs/Interoperability.md) with other systems
* [Troubleshooting](docs/Troubleshooting.md)
* [FAQ](docs/faq.md)
After that, if you have any questions, please email (and optionally join) the mailing list: https://groups.google.com/forum/#!forum/mtail-users or [file a new issue](https://github.com/google/mtail/issues/new).
mtail-3.0.0~rc54+git0ff5/TODO 0000664 0000000 0000000 00000016222 14600635717 0015460 0 ustar 00root root 0000000 0000000 Implement a standard library, search path:
Means we can provide standard syslog decorator.
Requires figuring out where we keep standard library definitions, and what the syntax for import looks like.
Can't put trailing newlines in cases in parser test, requires changes to expr stmt
parse tree/ast testing? - expected AST as result from parse/check instead of
merely getting a result. A similar version of this is in codegen_test.go:TestCodeGenFromAST
A mapping between progs and logs to reduce wasted processing- issue #35
Means we don't fan out log lines to every VM if reading from multiple sources.
Requires figuring out how to provide this configuration. Special syntax in a program? Not very flexible. A real config file? Been trying to avoid that. Commandline flag? Seems difficult to maintain.
bytecode like
[{push 1} {push 0} {cmp 1}
{jm 6} {push 0} {jmp 7} {push 1} {jnm 13}
{setmatched false} {mload 0} {dload 0} {inc } {setmatched true}]
can be expressed as
[{push 1} {push 0} {cmp 1}
{jm 9}
{setmatched false} {mload 0} {dload 0} {inc } {setmatched true}]
but jnm 13 is from the condExpr and the previous is from a comparison binary
expr; an optimizer is needed to collapse the bytecode to understand that
cmp, jm, push, jump, push, jnm in sequence like so is the same as a cmp, jm
and we need to worry about the jump table too
reversed casts: s2i,i2s pairs as well
count stack size and preallocate stack
-> counts of push/pop per instruction
-> test to keep p/p counts updated
: seems like a lot of work for not much return
# Won't do
X Use capture group references to feed back to declaring regular expression,
X noting unused caprefs,
X possibly flipping back to noncapturing (and renumbering the caprefs?)
X -> unlikely to implement, probably won't impact regexp speed
When using a const by itself as a match condition, then we get warnings about
the capture group names not existing.
const A /(?P<a>.*)/
A {
x[$a]++
}
... => $a not defined in scope.
Can't define string constants, like const STRPTIME_FORMAT "Jan _2"
Multiline const can't start with a newline, must be const FOO // +\n..., you might want to do this for long first fragments, e.g. const FOO\n /something/
Can't chain two matches in same expresison like getfilename() =~ 'name' &&
EXPR_RE because $0 is redefined
This seems like something you might want to do, and we are unlikely to want to use $0, but this is also true for the first capture group. Do we standardise on "the last pattern match wins"?
Can't set the timestamp when processing one log line and reuse it in another; must use the
caching state metric pattern, hidden gauge time. (I think this shows up in the original mysql example.)
Could one preserve the last parsed timestamp in VM state between runs? How does this interact with programs that never strptime because they have no timestamp in the log? #pragma notimestamp?
Get a list of non-stdlib deps
go list -f "{{if not .Standard}}{{.ImportPath}}{{end}}" $(go list -f '{{join .Deps "\n"}}' ./...)
This is just a neat thing to remember for Go.
Programs may not use mtail_ as a metric prefix. Should just document this.
Theory: Implicitly cast Int shouldn't get the S2i conversion applied to them. Do we need to name Implicit Int separate from Int and then not create s2i or other conversions for implicits. (and we need to keep the runtime conversions?)
if you comment out the MATCH_NETWORK clause in dhcpd.mtail it gets 30x faster... because the regexp no longer backtracks... why... hints are that we execute in an NFA regular expression because it's unanchored.
Avoid byte to string conversions in the tailer and vm FindStringSubmatch > https://dave.cheney.net/high-performance-go-workshop/dotgo-paris.html#strings_and_bytes . Pro: speed. Con, not sure how we manage utf-8 in decode.go?
Use FindSubmatchIndex to avoid copies? Not sure if there's a performance win here, but we want to avoid memcpy if we can.
Why is strings.Builder slower than bytes.Buffer when the latter's docstring recommends the former?
ci: rerun failed tests to see if they're flaky.
Find out if OpenTelemetry is better than OpenCensus when creating no-op trace spans.
Test that when path/* is the logpathpattern that we handle log rotation, e.g. log -> log.1
= how can this work, we can't tell the difference between log.1 being a rotation or a new log. This could work if we can have a tailer-level registry of filenames currently with a goroutine. But we don't know the name of the new file when filestream creates a new goroutine for the replacement; fd.Stat() doesn't return the new name of the file.
- Workaround: warn when '*' is the last of a glob pattern.
VM profiler, when enabled, times instructions so user gets feedback on where their program is slow.
Can we create a linter that checks for code patterns like 'path.Join' and warns against them? Can govet be made to do this?
Detect when a regular expression compiled doesn't have a onepass program, and report a compile warning. we can't do this today with the regexp API, because it's not an exported field, and the onepass compilation step is not an exported function. IF we can do this, we can warn the user that their regular expression has ambiguity and will backtrack. See MATCH_NETWORK above.
Do we have a precision problem that shold be solved by using math/big for literals in the AST. Extra credit: find out if the vm runtime should use big internally as well?
regular expression matching is expensive. prefilter on match prefix. for extra credit, filter on all substrings of the expressions, using aho-corasick.
once the vm refactoring has completed, move the VM execute method into per-opcode functions, and use the same state machine function as in lexer.NextToken() to simulate threaded code as we don't get tail recursion in Go. The plan is to see if execution speed is same or better -- expect moving to function calls to be slower unless inlined, but gain in readability and reuse.
refactor vm further to replace stack with registers, we need typed registers to remove the pop runtime type cast. new opcodes to do migration from stack to register based ops required
Once the tailer can read from sockets, I'll move it out of `internal/`.
Pass a Logger as an option to tailer and vm.
StatusHTML in vm reads expvars; can we not do that?
Move from expvar to OpenSomething metrics.
Should the exporter move into the metric package?
Should the waker move into the tailer package?
Benchmarks on GHA are too variable. Compute benchmarks old and new in same instance, per guidelines from "Software Microbenchmarking in the Cloud. How Bad is it Really?" Laaber et al.
Move loc and useCurrentYear out of VM and into Runtime env.
Move const folding into parser during AST build.
Const-fold identity functions.
Both tailer and logstream probably don't need to do URL parsing. Tailer could do it on the log path patterns before filling up the poll patterns list. Non-globs don't need repolling, and any with a scheme can already be constructed by TailPattern.
Trim unused string and regexp constants, as .e.g /s/ && 1 gets optimised away.
Collapse duplicate string and regexp constants.
mtail-3.0.0~rc54+git0ff5/benchmark_results.csv 0000664 0000000 0000000 00000002102 14600635717 0021210 0 ustar 00root root 0000000 0000000 1350190388,1,4,examples/sftp.em,500,118000,3.165639s,6.331278,236,37.27525469581339,26.82744915254237
1350190385,1,4,examples/rsyncd.em,100,23500,1.79889s,17.9889,235,13.063611449282613,76.54851063829787
1350190383,1,4,examples/linecount.em,50000,50000,2.356123s,0.047122,1,21.221302962536335,47.12246
1359593792,1,4,examples/dhcpd.em,1,50000,8.55385s,8553.85,50000,5.845321112715327,171.077
1359593784,1,4,examples/sftp.em,200,47200,1.516004s,7.58002,236,31.13448249476914,32.11872881355932
1359593782,1,4,examples/rsyncd.em,100,23500,2.167435s,21.67435,235,10.842308996578904,92.23127659574467
1359593779,1,4,examples/linecount.em,50000,50000,2.695952s,0.053919,1,18.546324266900893,53.91904
1378745369,1,4,examples/dhcpd.em,1,50000,8.342115s,8342.115,50000,5.993683855952598,166.8423
1378745360,1,4,examples/sftp.em,500,118000,3.574926s,7.149852,236,33.00767624280894,30.295983050847457
1378745356,1,4,examples/rsyncd.em,100,23500,1.769277s,17.69277,235,13.28226162438103,75.2883829787234
1378745354,1,4,examples/linecount.em,50000,50000,2.569769s,0.051395,1,19.457001777202542,51.39538
mtail-3.0.0~rc54+git0ff5/cmd/ 0000775 0000000 0000000 00000000000 14600635717 0015530 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/cmd/mdot/ 0000775 0000000 0000000 00000000000 14600635717 0016473 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/cmd/mdot/main.go 0000664 0000000 0000000 00000011734 14600635717 0017754 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
/*
Command mdot turns an mtail program AST into a graphviz graph on standard output.
To use, run it like (assuming your shell is in the same directory as this file)
go run github.com/google/mtail/cmd/mdot --prog ../../examples/dhcpd.mtail | xdot -
or
go run github.com/google/mtail/cmd/mdot --prog ../../examples/dhcpd.mtail --http_port 8080
to view the dot output visit http://localhost:8080
You'll need the graphviz `dot' command installed.
*/
package main
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/runtime/compiler/ast"
"github.com/google/mtail/internal/runtime/compiler/checker"
"github.com/google/mtail/internal/runtime/compiler/parser"
)
// Command-line flags.
var (
	prog     = flag.String("prog", "", "Name of the program source to parse.")
	httpPort = flag.String("http_port", "", "Port number to run HTTP server on.")
)
// dotter is an ast.Visitor that writes a graphviz (dot language) description
// of the walked AST to w.  It keeps a stack of node IDs so that each visited
// node can be connected by an edge to its parent.
type dotter struct {
	w        io.Writer
	id       int   // last node ID issued by nextID
	parentID []int // id of the parent node
}
// nextID returns a fresh, monotonically increasing node identifier.
func (d *dotter) nextID() int {
	next := d.id + 1
	d.id = next
	return next
}
// emitNode writes one graphviz node statement for the given AST node.  The
// label always starts with the node's Go type name (package prefix stripped);
// specific node kinds append detail and override shape and colour:
// declarations are green boxes, terminals and literals pink ellipses, and
// expressions blue boxes.  The node's type is attached as a tooltip and its
// source position, when known, as an external label.
//
// NOTE(review): attrs is a map iterated with range, so attribute order in the
// emitted dot is nondeterministic between runs — harmless to graphviz, but
// the output is not byte-stable.  Label text is also interpolated into a
// quoted string without escaping; a pattern or string literal containing a
// '"' would produce invalid dot.
func (d *dotter) emitNode(id int, node ast.Node) {
	attrs := map[string]string{
		"label":   strings.Split(fmt.Sprintf("%T", node), ".")[1] + "\n",
		"shape":   "box",
		"style":   "filled",
		"tooltip": node.Type().String(),
	}
	// The outer switch groups nodes by role to set shared attributes; the
	// inner switch re-dispatches on the concrete type to append detail.
	switch n := node.(type) {
	case *ast.VarDecl, *ast.DecoDecl:
		// Declarations.
		attrs["fillcolor"] = "lightgreen"
		switch n := n.(type) {
		case *ast.VarDecl:
			attrs["label"] += fmt.Sprintf("%s %s", n.Kind, n.Name)
		case *ast.DecoDecl:
			attrs["label"] += n.Name
		}
	case *ast.IDTerm, *ast.CaprefTerm:
		// Terminals: identifiers and capture-group references.
		attrs["fillcolor"] = "pink"
		attrs["shape"] = "ellipse"
		switch n := n.(type) {
		case *ast.IDTerm:
			attrs["label"] += n.Name
		case *ast.CaprefTerm:
			attrs["label"] += fmt.Sprintf("$%s", n.Name)
		}
	case *ast.IntLit, *ast.FloatLit, *ast.PatternLit, *ast.StringLit:
		// Literals share the terminal styling.
		attrs["fillcolor"] = "pink"
		attrs["shape"] = "ellipse"
		switch n := n.(type) {
		case *ast.IntLit:
			attrs["label"] += fmt.Sprintf("%d", n.I)
		case *ast.FloatLit:
			attrs["label"] += fmt.Sprintf("%g", n.F)
		case *ast.PatternLit:
			attrs["label"] += fmt.Sprintf("/%s/", n.Pattern)
		case *ast.StringLit:
			attrs["label"] += n.Text
		}
	case *ast.IndexedExpr, *ast.BinaryExpr, *ast.UnaryExpr, *ast.PatternExpr, *ast.BuiltinExpr:
		// Expressions; operators get their token name appended.
		attrs["fillcolor"] = "lightblue"
		switch n := n.(type) {
		case *ast.BinaryExpr:
			attrs["label"] += parser.Kind(n.Op).String()
		case *ast.UnaryExpr:
			attrs["label"] += parser.Kind(n.Op).String()
		case *ast.BuiltinExpr:
			attrs["label"] += n.Name
		}
	}
	pos := node.Pos()
	if pos != nil {
		attrs["xlabel"] = pos.String()
	}
	fmt.Fprintf(d.w, "n%d [", id)
	for k, v := range attrs {
		fmt.Fprintf(d.w, "%s=\"%s\" ", k, v)
	}
	fmt.Fprintf(d.w, "]\n")
}
// emitLine writes a directed edge from node src to node dst.
func (d *dotter) emitLine(src, dst int) {
	edge := fmt.Sprintf("n%d -> n%d\n", src, dst)
	fmt.Fprint(d.w, edge)
}
// VisitBefore emits a graph node for the visited AST node, connects it to the
// current parent (the top of the parent stack) if one exists, then pushes the
// new node's ID so its children attach to it.
func (d *dotter) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) {
	id := d.nextID()
	d.emitNode(id, node)
	if depth := len(d.parentID); depth > 0 {
		d.emitLine(d.parentID[depth-1], id)
	}
	d.parentID = append(d.parentID, id)
	return d, node
}
// VisitAfter pops the current node off the parent stack once all of its
// children have been visited.
func (d *dotter) VisitAfter(node ast.Node) ast.Node {
	top := len(d.parentID) - 1
	d.parentID = d.parentID[:top]
	return node
}
// makeDot parses and typechecks the mtail program in the file named by
// name, then writes a graphviz dot rendering of its AST to w.
func makeDot(name string, w io.Writer) error {
	f, err := os.Open(filepath.Clean(name))
	if err != nil {
		return err
	}
	// fix: the file handle was previously never closed. The file is
	// opened read-only so the Close error carries no useful signal.
	defer f.Close() //nolint:errcheck // read-only file
	n, err := parser.Parse(name, f)
	if err != nil {
		return err
	}
	n, err = checker.Check(n, 0, 0)
	if err != nil {
		return err
	}
	// fix: title the graph with the name parameter rather than the
	// global *prog flag, so the output matches the file actually
	// rendered. (The only caller passes *prog, so behaviour is the same.)
	fmt.Fprintf(w, "digraph \"%s\" {\n", name)
	dot := &dotter{w: w}
	ast.Walk(dot, n)
	fmt.Fprintf(w, "}\n")
	return nil
}
// main renders the AST of the mtail program named by -prog: either once
// as dot source to stdout (no -http_port), or continuously as an SVG
// served over HTTP by piping the dot source through the graphviz `dot'
// tool on each request.
func main() {
	flag.Parse()
	if *prog == "" {
		glog.Exitf("No -prog given")
	}
	// With no HTTP port configured, write the dot source once and exit.
	if *httpPort == "" {
		glog.Exit(makeDot(*prog, os.Stdout))
	}
	http.HandleFunc("/",
		func(w http.ResponseWriter, _ *http.Request) {
			dot := exec.Command("dot", "-Tsvg")
			in, err := dot.StdinPipe()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			out, err := dot.StdoutPipe()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			err = dot.Start()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			// Regenerate the dot source on each request so edits to the
			// program are picked up.
			err = makeDot(*prog, in)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			// Closing stdin signals EOF so `dot' can finish rendering.
			err = in.Close()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			w.Header().Add("Content-type", "image/svg+xml")
			w.WriteHeader(http.StatusOK)
			_, err = io.Copy(w, out)
			// NOTE(review): the status header has already been written
			// above, so these http.Error calls cannot change the response
			// code; they only append the error text to the body.
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
			}
			err = dot.Wait()
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
			}
		})
	http.HandleFunc("/favicon.ico", mtail.FaviconHandler)
	glog.Info(http.ListenAndServe(fmt.Sprintf(":%s", *httpPort), nil))
}
mtail-3.0.0~rc54+git0ff5/cmd/mfmt/ 0000775 0000000 0000000 00000000000 14600635717 0016473 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/cmd/mfmt/main.go 0000664 0000000 0000000 00000002134 14600635717 0017746 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
/*
Command mfmt formats mtail programs.
*/
package main
import (
"flag"
"fmt"
"io"
"os"
"github.com/golang/glog"
"github.com/google/mtail/internal/runtime/compiler/checker"
"github.com/google/mtail/internal/runtime/compiler/parser"
)
// Command-line flags: the program file to format, and whether to
// rewrite it in place instead of printing to stdout.
var (
	prog  = flag.String("prog", "", "Name of the mtail program text to format.")
	write = flag.Bool("write", false, "Write results to original file.")
)
// main formats the mtail program named by -prog: it parses and
// typechecks the program, then either rewrites the file in place
// (-write) or prints the formatted text to stdout. Any failure is fatal
// via glog.Exit.
func main() {
	flag.Parse()
	if *prog == "" {
		glog.Exitf("No -prog given")
	}
	// Open read-write so the same handle can be reused for -write.
	f, err := os.OpenFile(*prog, os.O_RDWR, 0)
	if err != nil {
		glog.Exit(err)
	}
	ast, err := parser.Parse(*prog, f)
	if err != nil {
		glog.Exit(err)
	}
	ast, err = checker.Check(ast, 0, 0)
	if err != nil {
		glog.Exit(err)
	}
	up := parser.Unparser{}
	out := up.Unparse(ast)
	if *write {
		// Truncate and rewind before rewriting the original file.
		if err := f.Truncate(0); err != nil {
			glog.Exit(err)
		}
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			glog.Exit(err)
		}
		if _, err := f.WriteString(out); err != nil {
			glog.Exit(err)
		}
		// fix: close the file explicitly and surface any error deferred
		// to close time; previously the handle was never closed after
		// the in-place rewrite.
		if err := f.Close(); err != nil {
			glog.Exit(err)
		}
	} else {
		fmt.Print(out)
	}
}
mtail-3.0.0~rc54+git0ff5/cmd/mgen/ 0000775 0000000 0000000 00000000000 14600635717 0016456 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/cmd/mgen/main.go 0000664 0000000 0000000 00000011241 14600635717 0017730 0 ustar 00root root 0000000 0000000 // Copyright 2013 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Command mgen generates mtail programs for fuzz testing by following a simple grammar.
package main
import (
"flag"
"fmt"
"math/rand"
"github.com/google/mtail/internal/runtime/compiler/parser"
)
// Command-line flags controlling generation: the PRNG seed (for
// reproducible output), how many grammar expansions to perform, and
// whether to emit a fuzz dictionary instead of a program.
var (
	randSeed      = flag.Int64("rand_seed", 1, "Seed to use for math.rand.")
	minIterations = flag.Int64("min_iterations", 5000, "Minimum number of iterations before stopping program generation.")
	dictionary    = flag.Bool("dictionary", false, "Generate a fuzz dictionary to stdout only.")
)
// node is one production in the generator grammar: either a set of
// alternative expansions, or, when alts is empty, a terminal string to
// emit verbatim.
type node struct {
	alts [][]string // alternative right-hand sides; one is picked at random
	term string     // terminal text emitted when there are no alternatives
}
// table maps each nonterminal of the generator grammar to its possible
// expansions, and each named terminal (CAPREF, REGEX, STRING, ID,
// NUMERIC) to a fixed literal. Symbols not present in the table at all
// are emitted verbatim by generateProgram.
var table = map[string]node{
	"start":     {[][]string{{"stmt_list"}}, ""},
	"stmt_list": {[][]string{{""}, {"stmt_list", "stmt"}}, ""},
	"stmt": {[][]string{
		{"cond", "{", "stmt_list", "}"},
		{"expr"},
		{"decl"},
		{"def_spec"},
		{"deco_spec"},
		{"next"},
		{"const", "ID", "pattern_expr"},
	}, ""},
	"expr":          {[][]string{{"assign_expr"}}, ""},
	"assign_expr":   {[][]string{{"rel_expr"}, {"unary_expr", "=", "rel_expr"}, {"unary_expr", "+=", "rel_expr"}}, ""},
	"rel_expr":      {[][]string{{"additive_expr"}, {"additive_expr", "relop", "additive_expr"}}, ""},
	"relop":         {[][]string{{"<"}, {">"}, {"<="}, {">="}, {"=="}, {"!="}}, ""},
	"additive_expr": {[][]string{{"unary_expr"}, {"additive_expr", "+", "unary_expr"}, {"additive_expr", "-", "unary_expr"}}, ""},
	"unary_expr":    {[][]string{{"postfix_expr"}, {"BUILTIN", "(", "arg_expr_list", ")"}}, ""},
	"arg_expr_list": {[][]string{{""}, {"assign_expr"}, {"arg_expr_list", ",", "assign_expr"}}, ""},
	"postfix_expr":  {[][]string{{"primary_expr"}, {"postfix_expr", "++"}, {"postfix_expr", "[", "expr", "]"}}, ""},
	"primary_expr":  {[][]string{{"ID"}, {"CAPREF"}, {"STRING"}, {"(", "expr", ")"}, {"NUMERIC"}}, ""},
	"cond":          {[][]string{{"pattern_expr"}, {"rel_expr"}}, ""},
	"pattern_expr":  {[][]string{{"REGEX"}, {"pattern_expr", "+", "REGEX"}, {"pattern_expr", "+", "ID"}}, ""},
	"decl":          {[][]string{{"hide_spec", "type_spec", "declarator"}}, ""},
	"hide_spec":     {[][]string{{""}, {"hidden"}}, ""},
	"declarator":    {[][]string{{"declarator", "by_spec"}, {"declarator", "as_spec"}, {"ID"}, {"STRING"}}, ""},
	"type_spec":     {[][]string{{"counter"}, {"gauge"}, {"timer"}, {"text"}, {"histogram"}}, ""},
	"by_spec":       {[][]string{{"by", "by_expr_list"}}, ""},
	"by_expr_list":  {[][]string{{"ID"}, {"STRING"}, {"by_expr_list", ",", "ID"}, {"by_expr_list", ",", "STRING"}}, ""},
	"as_spec":       {[][]string{{"as", "STRING"}}, ""},
	"def_spec":      {[][]string{{"def", "ID", "{", "stmt_list", "}"}}, ""},
	"deco_spec":     {[][]string{{"deco", "{", "stmt_list", "}"}}, ""},
	"BUILTIN":       {[][]string{{"strptime"}, {"timestamp"}, {"len"}, {"tolower"}}, ""},
	"CAPREF":        {[][]string{}, "$1"},
	"REGEX":         {[][]string{}, "/foo/"},
	"STRING":        {[][]string{}, "\"bar\""},
	"ID":            {[][]string{}, "quux"},
	"NUMERIC":       {[][]string{}, "37"},
}
// emitter prints words received on c to stdout, separating words with
// single spaces and wrapping lines at 80 columns. A literal "\n" token
// forces a line break; empty tokens are skipped.
func emitter(c chan string) {
	var l int // length of the current output line
	for w := range c {
		if w == "\n" {
			// fix: previously the "\n" token fell through and was also
			// printed as a word (yielding a doubled newline) and counted
			// toward the line length. Emit just the break and reset the
			// column counter.
			fmt.Println()
			l = 0
			continue
		}
		if w == "" {
			continue
		}
		if l+len(w)+1 >= 80 {
			// Word would overflow the line: break first.
			fmt.Println()
			fmt.Print(w)
			l = len(w)
		} else {
			if l != 0 {
				w = " " + w
			}
			l += len(w)
			fmt.Print(w)
		}
	}
}
// generateProgram walks the grammar in table from the "start" state,
// expanding nonterminals at random and sending terminals to an emitter
// goroutine, to produce a syntactically plausible mtail program on
// stdout. Generation stops once the state stack empties or at least
// -min_iterations expansions have been performed.
func generateProgram() {
	// Seeded explicitly so runs are reproducible with -rand_seed.
	rando := rand.New(rand.NewSource(*randSeed))
	c := make(chan string, 1)
	go emitter(c)
	// NOTE(review): nothing waits for the emitter goroutine to drain c
	// before main returns, so the tail of the output may be lost at
	// process exit — confirm whether a close(c) plus done-signal is needed.
	runs := *minIterations
	// Initial state
	states := []string{"start"}
	// While the state stack is not empty
	for len(states) > 0 && runs > 0 {
		// Pop the next state
		state := states[len(states)-1]
		states = states[:len(states)-1]
		// fmt.Println("state", state, "states", states)
		// Look for the state transition
		if n, ok := table[state]; ok {
			// If there are state transition alternatives
			// fmt.Println("n", n)
			if len(n.alts) > 0 {
				// Pick a state transition at random
				a := rando.Intn(len(n.alts))
				// fmt.Println("a", a, n.alts[a], len(n.alts[a]))
				// Push the states picked onto the stack (in reverse order)
				for i := 0; i < len(n.alts[a]); i++ {
					// fmt.Println("i", i, n.alts[a][len(n.alts[a])-i-1])
					states = append(states, n.alts[a][len(n.alts[a])-i-1])
				}
				// fmt.Println("states", states)
			} else {
				// If there is a terminal, emit it
				// fmt.Println("(term)", state, n.term)
				c <- n.term
			}
		} else {
			// If the state doesn't exist in the table, treat it as a terminal, and emit it.
			// fmt.Println("(state)", state, state)
			c <- state
		}
		runs--
	}
	c <- "\n"
}
// generateDictionary writes each token of the parser's dictionary to
// stdout as a double-quoted string, one per line, in the format used by
// fuzzer dictionaries.
func generateDictionary() {
	words := parser.Dictionary()
	for _, word := range words {
		fmt.Printf("\"%s\"\n", word)
	}
}
// main emits either a fuzz dictionary or a generated program to stdout,
// depending on the -dictionary flag.
func main() {
	flag.Parse()
	if !*dictionary {
		generateProgram()
		return
	}
	generateDictionary()
}
mtail-3.0.0~rc54+git0ff5/cmd/mtail/ 0000775 0000000 0000000 00000000000 14600635717 0016636 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/cmd/mtail/main.go 0000664 0000000 0000000 00000026006 14600635717 0020115 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package main
import (
"context"
"flag"
"fmt"
"os"
"os/signal"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/exporter"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/waker"
"go.opencensus.io/trace"
)
// seqStringFlag is a flag.Value that accumulates every value it is set
// with, splitting comma-separated arguments, so that the flag may be
// given several times and/or as a comma-separated list.
type seqStringFlag []string

// String renders the accumulated values for flag help output.
func (f *seqStringFlag) String() string {
	return fmt.Sprint(*f)
}

// Set appends each comma-separated element of value to the flag.
// It never fails.
func (f *seqStringFlag) Set(value string) error {
	*f = append(*f, strings.Split(value, ",")...)
	return nil
}

var logs seqStringFlag
var (
port = flag.String("port", "3903", "HTTP port to listen on.")
address = flag.String("address", "", "Host or IP address on which to bind HTTP listener")
unixSocket = flag.String("unix_socket", "", "UNIX Socket to listen on")
progs = flag.String("progs", "", "Name of the directory containing mtail programs")
ignoreRegexPattern = flag.String("ignore_filename_regex_pattern", "", "")
version = flag.Bool("version", false, "Print mtail version information.")
// Compiler behaviour flags.
oneShot = flag.Bool("one_shot", false, "Compile the programs, then read the contents of the provided logs from start until EOF, print the values of the metrics store in the given format and exit. This is a debugging flag only, not for production use.")
oneShotFormat = flag.String("one_shot_format", "json", "Format to use with -one_shot. This is a debugging flag only, not for production use. Supported formats: json, prometheus.")
compileOnly = flag.Bool("compile_only", false, "Compile programs only, do not load the virtual machine.")
dumpAst = flag.Bool("dump_ast", false, "Dump AST of programs after parse (to INFO log).")
dumpAstTypes = flag.Bool("dump_ast_types", false, "Dump AST of programs with type annotation after typecheck (to INFO log).")
dumpBytecode = flag.Bool("dump_bytecode", false, "Dump bytecode of programs (to INFO log).")
// VM Runtime behaviour flags.
syslogUseCurrentYear = flag.Bool("syslog_use_current_year", true, "Patch yearless timestamps with the present year.")
overrideTimezone = flag.String("override_timezone", "", "If set, use the provided timezone in timestamp conversion, instead of UTC.")
emitProgLabel = flag.Bool("emit_prog_label", true, "Emit the 'prog' label in variable exports.")
emitMetricTimestamp = flag.Bool("emit_metric_timestamp", false, "Emit the recorded timestamp of a metric. If disabled (the default) no explicit timestamp is sent to a collector.")
logRuntimeErrors = flag.Bool("vm_logs_runtime_errors", true, "Enables logging of runtime errors to the standard log. Set to false to only have the errors printed to the HTTP console.")
// Ops flags.
pollInterval = flag.Duration("poll_interval", 250*time.Millisecond, "Set the interval to poll each log file for data; must be positive, or zero to disable polling. With polling mode, only the files found at mtail startup will be polled.")
pollLogInterval = flag.Duration("poll_log_interval", 250*time.Millisecond, "Set the interval to find all matched log files for polling; must be positive, or zero to disable polling. With polling mode, only the files found at mtail startup will be polled.")
expiredMetricGcTickInterval = flag.Duration("expired_metrics_gc_interval", time.Hour, "interval between expired metric garbage collection runs")
staleLogGcTickInterval = flag.Duration("stale_log_gc_interval", time.Hour, "interval between stale log garbage collection runs")
metricPushInterval = flag.Duration("metric_push_interval", time.Minute, "interval between metric pushes to passive collectors")
maxRegexpLength = flag.Int("max_regexp_length", 1024, "The maximum length a mtail regexp expression can have. Excessively long patterns are likely to cause compilation and runtime performance problems.")
maxRecursionDepth = flag.Int("max_recursion_depth", 100, "The maximum length a mtail statement can be, as measured by parsed tokens. Excessively long mtail expressions are likely to cause compilation and runtime performance problems.")
// Debugging flags.
blockProfileRate = flag.Int("block_profile_rate", 0, "Nanoseconds of block time before goroutine blocking events reported. 0 turns off. See https://golang.org/pkg/runtime/#SetBlockProfileRate")
mutexProfileFraction = flag.Int("mutex_profile_fraction", 0, "Fraction of mutex contention events reported. 0 turns off. See http://golang.org/pkg/runtime/#SetMutexProfileFraction")
httpDebugEndpoints = flag.Bool("http_debugging_endpoint", true, "Enable debugging endpoints (/debug/*).")
httpInfoEndpoints = flag.Bool("http_info_endpoint", true, "Enable info endpoints (/progz,/varz).")
// Tracing.
jaegerEndpoint = flag.String("jaeger_endpoint", "", "If set, collector endpoint URL of jaeger thrift service")
traceSamplePeriod = flag.Int("trace_sample_period", 0, "Sample period for traces. If non-zero, every nth trace will be sampled.")
// Deprecated.
_ = flag.Bool("disable_fsnotify", true, "DEPRECATED: this flag is no longer in use.")
_ = flag.Int("metric_push_interval_seconds", 0, "DEPRECATED: use --metric_push_interval instead")
)
// init registers the repeatable -logs flag; flag.Var cannot appear in
// the var declaration block above because it returns no value.
func init() {
	flag.Var(&logs, "logs", "List of log files to monitor, separated by commas. This flag may be specified multiple times.")
}
var (
	// Branch as well as Version and Revision identifies where in the git
	// history the build came from, as supplied by the linker when compiled
	// with `make'. The defaults here indicate that the user did not use
	// `make' as instructed.
	Branch   = "invalid:-use-make-to-build"
	Version  = "invalid:-use-make-to-build"
	Revision = "invalid:-use-make-to-build"
)
// main parses and validates flags, assembles the mtail.Option list from
// them, runs the mtail server, and in one-shot mode dumps the collected
// metrics in the requested format before exiting.
func main() {
	buildInfo := mtail.BuildInfo{
		Branch:   Branch,
		Version:  Version,
		Revision: Revision,
	}
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s\n", buildInfo.String())
		fmt.Fprintf(os.Stderr, "\nUsage:\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	if *version {
		fmt.Println(buildInfo.String())
		os.Exit(0)
	}
	glog.Info(buildInfo.String())
	glog.Infof("Commandline: %q", os.Args)
	if len(flag.Args()) > 0 {
		glog.Exitf("Too many extra arguments specified: %q\n(the logs flag can be repeated, or the filenames separated by commas.)", flag.Args())
	}
	loc, err := time.LoadLocation(*overrideTimezone)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Couldn't parse timezone %q: %s", *overrideTimezone, err)
		os.Exit(1)
	}
	// Optional runtime profiling knobs.
	if *blockProfileRate > 0 {
		glog.Infof("Setting block profile rate to %d", *blockProfileRate)
		runtime.SetBlockProfileRate(*blockProfileRate)
	}
	if *mutexProfileFraction > 0 {
		glog.Infof("Setting mutex profile fraction to %d", *mutexProfileFraction)
		runtime.SetMutexProfileFraction(*mutexProfileFraction)
	}
	if *progs == "" {
		glog.Exitf("mtail requires programs that instruct it how to extract metrics from logs; please use the flag -progs to specify the directory containing the programs.")
	}
	// Logs are only required when programs will actually be run, not
	// when merely compiling or dumping.
	if !(*dumpBytecode || *dumpAst || *dumpAstTypes || *compileOnly) {
		if len(logs) == 0 {
			glog.Exitf("mtail requires the names of logs to follow in order to extract logs from them; please use the flag -logs one or more times to specify glob patterns describing these logs.")
		}
	}
	if *traceSamplePeriod > 0 {
		trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(1 / float64(*traceSamplePeriod))})
	}
	// NOTE(review): the flag help text says zero disables polling, but a
	// zero value is overridden with the 250ms default here — confirm
	// which behaviour is intended.
	if *pollInterval == 0 {
		glog.Infof("no poll log data interval specified; defaulting to 250ms poll")
		*pollInterval = time.Millisecond * 250
	}
	if *pollLogInterval == 0 {
		glog.Infof("no poll log pattern interval specified; defaulting to 250ms poll")
		*pollLogInterval = time.Millisecond * 250
	}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Cancel the context on SIGINT/SIGTERM for a graceful shutdown.
	sigint := make(chan os.Signal, 1)
	signal.Notify(sigint, os.Interrupt, syscall.SIGTERM)
	go func() {
		sig := <-sigint
		glog.Infof("Received %+v, exiting...", sig)
		cancel()
	}()
	// Translate flag values into mtail server options.
	opts := []mtail.Option{
		mtail.ProgramPath(*progs),
		mtail.LogPathPatterns(logs...),
		mtail.IgnoreRegexPattern(*ignoreRegexPattern),
		mtail.SetBuildInfo(buildInfo),
		mtail.OverrideLocation(loc),
		mtail.MetricPushInterval(*metricPushInterval),
		mtail.MaxRegexpLength(*maxRegexpLength),
		mtail.MaxRecursionDepth(*maxRecursionDepth),
	}
	eOpts := []exporter.Option{}
	if *logRuntimeErrors {
		opts = append(opts, mtail.LogRuntimeErrors)
	}
	if *staleLogGcTickInterval > 0 {
		staleLogGcWaker := waker.NewTimed(ctx, *staleLogGcTickInterval)
		opts = append(opts, mtail.StaleLogGcWaker(staleLogGcWaker))
	}
	if *pollInterval > 0 {
		logStreamPollWaker := waker.NewTimed(ctx, *pollInterval)
		logPatternPollWaker := waker.NewTimed(ctx, *pollLogInterval)
		opts = append(opts, mtail.LogPatternPollWaker(logPatternPollWaker), mtail.LogstreamPollWaker(logStreamPollWaker))
	}
	if *unixSocket == "" {
		opts = append(opts, mtail.BindAddress(*address, *port))
	} else {
		opts = append(opts, mtail.BindUnixSocket(*unixSocket))
	}
	if *oneShot {
		opts = append(opts, mtail.OneShot)
	}
	if *compileOnly {
		opts = append(opts, mtail.CompileOnly)
	}
	if *dumpAst {
		opts = append(opts, mtail.DumpAst)
	}
	if *dumpAstTypes {
		opts = append(opts, mtail.DumpAstTypes)
	}
	if *dumpBytecode {
		opts = append(opts, mtail.DumpBytecode)
	}
	if *httpDebugEndpoints {
		opts = append(opts, mtail.HTTPDebugEndpoints)
	}
	if *httpInfoEndpoints {
		opts = append(opts, mtail.HTTPInfoEndpoints)
	}
	if *syslogUseCurrentYear {
		opts = append(opts, mtail.SyslogUseCurrentYear)
	}
	if !*emitProgLabel {
		opts = append(opts, mtail.OmitProgLabel)
		eOpts = append(eOpts, exporter.OmitProgLabel())
	}
	if *emitMetricTimestamp {
		opts = append(opts, mtail.EmitMetricTimestamp)
		eOpts = append(eOpts, exporter.EmitTimestamp())
	}
	if *jaegerEndpoint != "" {
		opts = append(opts, mtail.JaegerReporter(*jaegerEndpoint))
	}
	store := metrics.NewStore()
	if *expiredMetricGcTickInterval > 0 {
		store.StartGcLoop(ctx, *expiredMetricGcTickInterval)
	}
	m, err := mtail.New(ctx, store, opts...)
	if err != nil {
		glog.Error(err)
		cancel()
		os.Exit(1) //nolint:gocritic // false positive
	}
	err = m.Run()
	if err != nil {
		glog.Error(err)
		cancel()
		os.Exit(1) //nolint:gocritic // false positive
	}
	// In one-shot mode Run has already consumed the logs; dump the
	// collected metrics in the requested format and exit.
	if *oneShot {
		switch *oneShotFormat {
		case "prometheus":
			var wg sync.WaitGroup
			e, err := exporter.New(ctx, &wg, store, eOpts...)
			if err != nil {
				glog.Error(err)
				cancel()
				wg.Wait()
				os.Exit(1) //nolint:gocritic // false positive
			}
			err = e.Write(os.Stdout)
			if err != nil {
				glog.Error(err)
				cancel()
				wg.Wait()
				os.Exit(1) //nolint:gocritic // false positive
			}
			cancel()
			wg.Wait()
			os.Exit(0) //nolint:gocritic // false positive
		case "json":
			err = store.WriteMetrics(os.Stdout)
			if err != nil {
				glog.Error(err)
				os.Exit(1) //nolint:gocritic // false positive
			}
			cancel()
			os.Exit(0) //nolint:gocritic // false positive
		default:
			glog.Errorf("unsupported format: %q", *oneShotFormat)
			cancel()
			os.Exit(1) //nolint:gocritic // false positive
		}
	}
}
mtail-3.0.0~rc54+git0ff5/docs/ 0000775 0000000 0000000 00000000000 14600635717 0015715 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/docs/Building.md 0000664 0000000 0000000 00000005142 14600635717 0017776 0 ustar 00root root 0000000 0000000 # Building mtail
`mtail` is implemented in [Go](http://golang.org).
You will need to install a recent Go.
## `go get`, quick and easy, no version information.
Fetch, build, and install the binary directly with `go get`
`go install github.com/google/mtail/cmd/mtail`
NOTE: If you do it this way, you won't have a supported version of `mtail`.
## The "Right Way"
[Clone](http://github.com/google/mtail) the source from GitHub into your `$GOPATH`. If you don't have a `$GOPATH`, see the next section.
```
git clone https://github.com/google/mtail
cd mtail
make test install
```
### Building
`mtail` uses a `Makefile` to build the source. This ensures the generated code is up to date and that the binary is tagged with release information.
Having fetched the source, use `make` from the top of the source tree. This will install all the dependencies, and then build `mtail`. This assumes that your Go environment is already set up -- see above for hints on setting it up.
The resulting binary will be in `$GOPATH/bin`.
The unit tests can be run with `make test`, which invokes `go test`. The slower race-detector tests can be run with `make testrace`.
### Cross-compilation
`goreleaser` is used to build the binaries available for download on the Releases page. If you want to build your own locally, fetch goreleaser and update the config file locally if necessary.
## No Go
You can still run `mtail` and its programmes with Docker.
```
docker build -t mtail .
docker run -it --rm mtail --help
```
`mtail` is not much use without programme files or logs to parse, you will need to mount a path containing them into the container, as is done with the `-v` flag in the example below:
```
docker run -it --rm -v examples/linecount.mtail:/progs/linecount.mtail -v /var/log:/logs mtail -logtostderr -one_shot -progs /progs/linecount.mtail -logs /logs/messages.log
```
Or, via Docker Compose; for example, this `docker-compose.yml` snippet shows the same bind mounts in the `volumes:` section:
```yaml
service:
mtail:
image: mtail
command:
- -logtostderr
- -one_shot
- -progs
- /progs/linecount.mtail
- -logs
- /logs/messages.log
volumes:
- type: bind
source: /var/log
target: /logs
read_only: true
- type: bind
source: examples/linecount.mtail
target: /progs/linecount.mtail
```
## Contributing
Please use `gofmt` to format your code before committing. Emacs' go-mode has a lovely [gofmt-before-save](http://golang.org/misc/emacs/go-mode.el) function.
Please read the [test writing](Testing.md#test-writing) section for `mtail` test style guidelines.
mtail-3.0.0~rc54+git0ff5/docs/Deploying.md 0000664 0000000 0000000 00000016731 14600635717 0020201 0 ustar 00root root 0000000 0000000 # Deploying `mtail`
## Introduction
mtail is intended to run one per machine, and serve as monitoring glue for multiple applications running on that machine. It runs one or more programs in a 1:1 mapping to those client applications.
## Configuration Overview
mtail is configured with commandline flags.
The `--help` flag will print a list of flags for configuring `mtail`.
(Flags may be prefixed with either `-` or `--`)
## Quickstart
Basic flags necessary to start `mtail`:
* `--logs` is a comma separated list of filenames to extract from, but can also be used multiple times, and each filename can be a [glob pattern](http://godoc.org/path/filepath#Match). Named pipes can be read from when passed as a filename to this flag.
* `--progs` is a directory path containing [mtail programs](Language.md). Programs must have the `.mtail` suffix.
mtail runs an HTTP server on port 3903, which can be changed with the `--port` flag.
# Details
## Launching mtail
```
mtail --progs /etc/mtail --logs /var/log/syslog --logs /var/log/ntp/peerstats
```
`mtail` will start to read the specified logs from their current end-of-file,
and read new updates appended to these logs as they arrive. It will attempt to
correctly handle log files that have been rotated by renaming or symlink
changes.
### Getting the logs in
Use `--logs` multiple times to pass in glob patterns that match the logs you
want to tail. This includes named pipes.
### Polling the file system
`mtail` polls matched log files every `--poll_log_interval`, or 250ms by default, the supplied `--logs` patterns for newly created or deleted log pathnames.
Known and active logs are read until EOF every `--poll_interval`, or 250ms by default.
Example:
```
mtail --progs /etc/mtail --logs /var/log/syslog --poll_interval 250ms --poll_log_interval 250ms
```
### Setting garbage collection intervals
`mtail` accumulates metrics and log files during its operation. By default, *every hour* both a garbage collection pass occurs looking for expired metrics, and stale log files.
An expired metric is any metric that hasn't been updated in a time specified by a `del after` form in a program.
A stale log file is any log being watched that hasn't been read from in 24 hours.
The interval between garbage collection runs can be changed on the commandline with the `--expired_metrics_gc_interval` and `--stale_log_gc_interval` flags, which accept a time duration string compatible with the Go [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) function.
### Runtime error log rate
If your programs deliberately fail to parse some log lines then you may end up generating lots of runtime errors which are normally logged at the standard INFO level, which can fill your disk.
You can disable this with `--novm_logs_runtime_errors` or `--vm_logs_runtime_errors=false` on the commandline, and then you will only be able to see the most recent runtime error in the HTTP status console.
### Launching under Docker
`mtail` can be run as a sidecar process if you expose an application container's logs with a volume.
`docker run -d --name myapp -v /var/log/myapp myapp`
for example exports a volume called `/var/log/myapp` (named the same as the
hypothetical path where `myapp`'s logs are written).
Then launch the `mtail` docker image and pass in the volume:
docker run -dP \
--name myapp-mtail \
--volumes-from myapp \
-v examples:/etc/mtail \
mtail --logs /var/log/myapp --progs /etc/mtail
This example fetches the volumes from the `myapp` container, and mounts them in
the mtail container (which we've called `myapp-mtail`). We also mount the
`examples` directory as `/etc/mtail` in the container. We launch `mtail` with
the `logs` and `progs` flags to point to our two mounted volumes.
The `-P` flag ensures `mtail-myapp`'s port 3903 is exposed for collection,
refer to `docker ps` to find out where it's mapped to on the host.
## Writing the programme
Read the [Programming Guide](Programming-Guide.md) for instructions on how to write an `mtail` program.
### Reloading programmes
`mtail` does not automatically reload programmes after it starts up. To ask `mtail` to scan for and reload programmes from the supplied `--progs` directory, send it a `SIGHUP` signal on UNIX-like systems.
For example, if configs are being delivered by a configuration management tool like Puppet, then program Puppet to send a SIGHUP when it has copied a new config file over.
```puppet
exec { 'reload_mtail_programmes':
command => "killall -HUP mtail",
refreshonly => true,
}
file { mtail_programme:
source => mtail_programme,
notify => Exec['reload_mtail_programmes'],
}
```
Alternatively, if you're using `scp` or some similar method to copy the programme files without a receiver, then either follow it with a `ssh $host 'killall -HUP mtail'` or use a tool like [`inotifywait`](https://linux.die.net/man/1/inotifywait) in a side process next to mtail to watch for changes and send the reload signal.
```shell
inotifywait -m /etc/mtail/progs | while read event; do killall -HUP mtail; done
```
## Getting the Metrics Out
### Pull based collection
Point your collection tool at `localhost:3903/json` for JSON format metrics.
Prometheus can be directed to the /metrics endpoint for Prometheus text-based format.
### Changing the listen address
The default port is `3903`, and can be changed with the `--port` commandline flag.
The default address is `""` on the TCP protocol, which means it will bind to all IP addresses on the system. This can be changed with the `--address` commandline flag.
```
mtail --address=127.0.0.1 --port=8080
```
Depending on your version of Go, the address "0.0.0.0" is treated by Go as dual-stack; see https://github.com/golang/go/issues/17615 and https://pkg.go.dev/net#Listen
### Push based collection
Use the `collectd_socketpath` or `graphite_host_port` flags to enable pushing to a collectd or graphite instance.
Configure collectd on the same machine to use the unixsock plugin, and set `collectd_socketpath` to that unix socket.
```
mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --collectd_socketpath=/var/run/collectd-unixsock
```
Set `graphite_host_port` to be the host:port of the carbon server.
```
mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --graphite_host_port=localhost:9999
```
Likewise, set `statsd_hostport` to the host:port of the statsd server.
Additionally, the flag `metric_push_interval_seconds` can be used to configure the push frequency. It defaults to 60, i.e. a push every minute.
## Setting a default timezone
The `--override_timezone` flag sets the timezone that `mtail` uses for timestamp conversion. By default, `mtail` assumes timestamps are in UTC.
To use the machine's local timezone, `--override_timezone=Local` can be used.
## Troubleshooting
Lots of state is logged to the log file, by default in `/tmp/mtail.INFO`. See [Troubleshooting](Troubleshooting.md) for more information.
N.B. Oneshot mode (the `one_shot` flag on the commandline) can be used to check
that a program is correctly reading metrics from a log, but with the following
caveats:
* Unlike normal operations, oneshot mode will read the logs from the start of
the file to the end, then close them -- it does not continuously tail the
file
* The metrics will be printed to standard out when the logs are finished being
read from.
* mtail will exit after the metrics are printed out.
This mode is useful for debugging the behaviour of `mtail` programs and
possibly for permissions checking.
mtail-3.0.0~rc54+git0ff5/docs/Interoperability.md 0000664 0000000 0000000 00000016116 14600635717 0021571 0 ustar 00root root 0000000 0000000 # Interoperability of `mtail` with other monitoring tools
## Introduction
`mtail` is only part of a monitoring ecosystem -- it fills the gap between applications that export no metrics of their own in a [common protocol](Metrics.md) and the timeseries database.
`mtail` is intended to complement other tools to build a complete system, and usually does not try to add functionality better provided by systems specifically designed for that function.
# Metric export and collection
mtail actively exports (i.e. pushes) to the following timeseries databases:
* [collectd](http://collectd.org/)
* [graphite](http://graphite.wikidot.com/start)
* [statsd](https://github.com/etsy/statsd)
mtail also is a passive exporter (i.e. pull, or scrape based) by:
* [Prometheus](http://prometheus.io)
* Google's Borgmon
*Recommendation*
Of the above, `mtail` recommends using Prometheus to extract the metrics from mtail as it is a rich monitoring tool and has a lot of interoperability itself. The `collectd`, `graphite`, and `statsd` options are less battle-tested and originate from an earlier time when the industry had not yet crystallised around a metric protocol.
No configuration is required to enable Prometheus export from `mtail`.
## Prometheus Exporter Metrics
Prometheus' [writing exporters documentation](https://prometheus.io/docs/instrumenting/writing_exporters/) describes useful metrics for a Prometheus exporter to export. `mtail` does not follow that guide, for these reasons.
The exporter model described in that document is for active proxies between an application and Prometheus. The expectation is that when Prometheus scrapes the proxy (the exporter) that it then performs its own scrape of the target application, and translates the results back into the Prometheus exposition format. The time taken to query the target application is what is exported as `X_scrape_duration_seconds` and its availability as `X_up`.
`mtail` doesn't work like that. It is reacting to the input log events, not scrapes, and so there is no concept of how long it takes to query the application or if it is available. There are things that, if you squint, look like applications in `mtail`, the virtual machine programs. They could be exporting their time to process a single line, and are `up` as long as they are not crashing on input. This doesn't translate well into the exporter metrics meanings though.
TODO(jaq): Instead, mtail will export a histogram of the runtime per line of each VM program.
`mtail` doesn't export `mtail_up` or `mtail scrape_duration_seconds` because they are exactly equivalent* to the [synthetic metrics](https://prometheus.io/docs/concepts/jobs_instances/) that Prometheus creates automatically.
\* The difference between a scrape duration measured in mtail versus Prometheus would differ in the network round trip time, TCP setup time, and send/receive queue time. For practical purposes you can ignore them as the usefulness of a scrape duration metric is not in its absolute value, but how it changes over time.
# Log Collection, Distribution, and Filtering {: #syslog}
`mtail` is not intended to be used as a replacement for `syslogd`. `mtail` can read from named pipes and unix domain sockets on systems that support them, but the intent is that a proper `syslogd` can manage the collection of those logs, filter out interesting ones if necessary, and forward them to `mtail` via a named pipe.
Both `rsyslogd` and `syslog-ng` are possible choices here.
It's probably not a good idea to have `mtail` listen directly to `/dev/log` or read from `/run/systemd/journal/syslog` unless you know what you're doing. `mtail` does not want to be in the business of API specialisation, but `syslog-ng` has done so with its [`system()` family of collector configuration options](https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.22/administration-guide/26#TOPIC-1209162).
* rsyslog supports forwarding to a [named pipe](https://www.rsyslog.com/doc/master/configuration/modules/ompipe.html) and to a [unix domain socket](https://www.rsyslog.com/doc/master/configuration/modules/omuxsock.html)
* syslog-ng supports output to [named pipe](https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.30/administration-guide/44#TOPIC-1595018) and [unix domain socket](https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.30/administration-guide/54#TOPIC-1595060)
Additionally, use a proper syslog to transmit and receive logs over the network. `mtail` does not provide any transport security, nor does TCP itself guarantee that no loss of data will occur: the [RELP spec](https://www.rsyslog.com/doc/v8-stable/tutorials/reliable_forwarding.html) exists for the latter.
*Recommendation*
Run `mtail` with a `--logs unix:///run/mtail.sock` flag to specify a single unix domain socket, or `mkfifo /run/mtail.pipe` to create a named pipe and `--logs /run/mtail.pipe` to share between `mtail` and the syslog daemon. Instruct the syslog daemon to forward syslog to the socket or pipe so named with one of the options described above (or as documented by your syslog daemon manual.)
# Logs Analysis
While `mtail` does a form of logs analysis, it does _not_ do any copying,
indexing, or searching of log files for data mining applications. It is only
intended for real- or near-time monitoring data for the purposes of performance
measurement and alerting.
Instead, see logs ingestion and analysis systems like
* [Logstash](https://www.elastic.co/products/logstash)
* [Graylog](https://www.graylog.org/)
if that is what you need.
*Recommendation*
`mtail` provides no recommendations here as there is no direct interoperation between `mtail` and logs analysis. The interface to logs analysis will be from the syslog daemon or application logger directly. If a logs analysis collector is receiving application logs, then `mtail` is either running concurrently reading those application logs as well, or the logs analysis collector is teeing to `mtail` in a manner similar to syslog daemons above.
# TLS/SSL {: #tls-ssl}
Sometimes one may wish to expose `mtail` directly to the internet, but would like to protect it from unauthorized access. `mtail` doesn't support SSL or HTTP authentication, and should be used with a VPN tunnel or reverse proxy instead.
Assuming a VPN tunnel is out of the question, then termination of SSL connections is possible with tools like [`nginx`](https://nginx.org/) and [`varnish`](https://varnish-cache.org/).
`mtail` can listen on either a TCP socket or a unix domain socket for HTTP requests; the latter is done with `--unix_socket` instead of the `--address` and `--port` flags.
Forwarding to a unix domain socket instead of TCP is possible with both [`nginx`](http://nginx.org/en/docs/http/ngx_http_upstream_module.html) and [`varnish`](https://varnish-cache.org/docs/trunk/whats-new/upgrading-6.0.html#upd-6-0-uds-backend).
*Recommendation*
If no VPN tunnel is possible, then use a reverse proxy to terminate HTTPS and then forward to `mtail` over a unix domain socket, by setting the `--unix_socket /run/mtail.http.sock` and then configuring the reverse proxy to use the unix socket as a backend.
mtail-3.0.0~rc54+git0ff5/docs/Language.md 0000664 0000000 0000000 00000044517 14600635717 0017775 0 ustar 00root root 0000000 0000000 # `mtail` Language Reference
## Description
As `mtail` is designed to tail log files and apply regular expressions to new
log lines to extract data, the language naturally follows this pattern-action
style.
It resembles another, more famous pattern-action language, that of AWK.
This page errs on the side of a language specification and reference. See the
[Programming Guide](Programming-Guide.md) for a gentler introduction to writing
`mtail` programs.
## Program Execution
`mtail` runs all programs on every line received by the log tailing subsystem.
The rough model of this looks like:
```
for line in lines:
for regex in regexes:
if match:
do something
```
Each program operates once on a single line of log data, and then terminates.
## Program Structure
An `mtail` program consists of exported variable definitions, pattern-action
statements, and optional decorator definitions.
```
exported variable
pattern {
action statements
}
def decorator {
pattern and action statements
}
```
## Exported Variables
`mtail`'s purpose is to extract information from logs and deliver them to a
monitoring system. Thus, variables must be named for export.
Variables, which have type `counter` or `gauge`, must be declared before their
use.
```
counter lines_total
gauge queue_length
```
They can be exported with a different name, with the `as` keyword, if one wants
to use characters that would cause a parse error. This example causes the metric
to be named `line-count` in the collecting monitoring system.
```
counter lines_total as "line-count"
```
Variables can be dimensioned with one or more axes, with the `by` keyword,
creating multidimensional data. Dimensions can be used for creating histograms,
as well.
```
counter bytes by operation, direction
counter latency_ms by bucket
```
Putting the `hidden` keyword at the start of the declaration means it won't be
exported, which can be useful for storing temporary information. This is the
only way to share state between each line being processed.
```
hidden counter login_failures
```
## Pattern/Action form.
`mtail` programs look a lot like `awk` programs. They consist of a conditional
expression followed by a brace-enclosed block of code:
```
COND {
ACTION
}
```
`COND` is a conditional expression. It can be a regular expression, which if
matched enters the action block, or a relational expression as you might
encounter in a C program's `if` statement (but without the `if`, it is
implicit.)
```
/foo/ {
ACTION1
}
variable > 0 {
ACTION2
}
/foo/ && variable > 0 {
ACTION3
}
```
In the above program, ACTION1 is taken on each line input if that line matches
the word `foo`, and ACTION2 is taken on each line if when that line is read, the
variable `variable` is greater than 0. ACTION3 occurs if both are true.
The action statements must be wrapped in curly braces, i.e. `{}`. `mtail`
programs have no single-line statement conditionals like C.
### Regular Expressions
`mtail` supports RE2-style regular expression syntax, but is limited by what is
supported by the Go implementation of
[Go's regexp/syntax](https://godoc.org/regexp/syntax).
#### Constant pattern fragments
To re-use parts of regular expressions, you can assign them to a `const` identifier:
```
const PREFIX /^\w+\W+\d+ /
PREFIX {
ACTION1
}
PREFIX + /foo/ {
ACTION2
}
```
In this example, ACTION1 is done for every line that starts with the prefix
regex, and ACTION2 is done for the subset of those lines that also contain
'foo'.
Pattern fragments like this don't need to be prefixes, they can be anywhere in the expression.
```
counter maybe_ipv4
const IPv4 /(?P<ip>\d+\.\d+\.\d+\.\d+)/
/something with an / + IPv4 + / address/ {
maybe_ipv4++
}
```
See [dhcpd.mtail](../examples/dhcpd.mtail) for more examples of this.
See also the section on decorators below for improving readability of
expressions that are only matched once.
### Conditionals
More complex expressions can be built up from relational expressions and other
pattern expressions.
#### Operators
The following relational operators are available in `mtail`:
* `<` less than
* `<=` less than or equal
* `>` greater than
* `>=` greater than or equal
* `==` is equal
* `!=` is not equal
* `=~` pattern match
* `!~` negated pattern match
* `||` logical or
* `&&` logical and
* `!` unary logical negation
The following arithmetic operators are available in `mtail`:
* `|` bitwise or
* `&` bitwise and
* `^` bitwise xor
* `+` addition
* `-` subtraction
* `*` multiplication
* `/` division
* `<<` bitwise shift left
* `>>` bitwise shift right
* `**` exponent
The following arithmetic operators act on exported variables.
* `=` assignment
* `++` increment
* `+=` increment by
* `--` decrement
#### `else` Clauses
When a conditional expression does not match, action can be taken as well:
```
/foo/ {
ACTION1
} else {
ACTION2
}
```
Else clauses can be nested. There is no ambiguity with the dangling-else
problem, as `mtail` programs must wrap all block statements in `{}`.
#### `otherwise` clauses
The `otherwise` keyword can be used as a conditional statement. It matches if no
preceding conditional in the current scope has matched. This behaves similarly
to the `default` clause in a C `switch` statement.
```
/foo/ {
/foo1/ {
ACTION1
}
/foo2/ {
ACTION2
}
otherwise {
ACTION3
}
}
```
In this example, ACTION3 will be executed if neither `/foo1/` nor `/foo2/` match
on the input, but `/foo/` does.
### Actions
#### Incrementing a Counter
The simplest `mtail` program merely counts lines read:
```
/$/ {
lines_total++
}
```
This program instructs `mtail` to increment the `lines_total` counter variable on
every line received (specifically anytime an end-of-line is matched.)
#### Capture Groups
Regular expressions in patterns can contain capture groups -- subexpressions
wrapped in parentheses. These can be referred to in the action block to extract
data from the line being matched.
For example, part of a program that can extract from `rsyncd` logs may want to
break down transfers by operation and module.
```
counter transfers_total by operation, module
/(?P<operation>\S+) (\S+) \[\S+\] (\S+) \(\S*\) \S+ (?P<bytes>\d+)/ {
transfers_total[$operation][$3]++
}
```
Or, the value of the counter can be increased by the value of a capture group:
```
counter bytes_total by operation, module
/(?P<operation>\S+) (\S+) \[\S+\] (\S+) \(\S*\) \S+ (?P<bytes>\d+)/ {
bytes_total[$operation][$3] += $bytes
}
```
Numeric capture groups address subexpressions in the match result as you might
expect from regular expression groups in other languages, like awk and perl --
e.g. the expression `$3` refers to the third capture group in the regular
expression.
Named capture groups can be referred to by their name as indicated in the
regular expression using the `?P<name>` notation, as popularised by the Python
regular expression library -- e.g. `$bytes` refers to `(?P<bytes>\d+)` in the
examples above.
Capture groups can be used in the same expression that defines them, for example
in this expression that matches and produces `$x`, then compares against that
value.
```
/(?P<x>\d+)/ && $x > 1 {
nonzero_positives++
}
```
#### Timestamps
It is also useful to timestamp a metric with the time the application thought an
event occurred. Logs typically prefix the log line with a timestamp string,
which can be extracted and then parsed into a timestamp internally, with the
`strptime` builtin function.
A regular expression that extracts the timestamp in boring old syslog format
looks like:
```
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 02 15:04:05")
...
}
```
Buyer beware! The format string used by `mtail` is the same as the [Go
time.Parse() format string](https://godoc.org/time#Parse), which is completely
unlike that used by C's strptime. The format string must always be the 2nd of
January 2006 at 3:04:05 PM. See the documentation for the **ANSIC** format in
the above link for more details. **NOTE** that *unlike* Go's `time.Parse()` (and
*like* C's) the format string is the *second* argument to this builtin function.
> NOTE: without a `strptime()` call, `mtail` will default to using the current
> system time for the timestamp of the event. This may be satisfactory for
> near-real-time logging.
#### Nested Actions
It is of course possible to nest more pattern-actions within actions. This lets
you factor out common parts of a match expression and deal with per-message
actions separately.
For example, parsing syslog timestamps is something you may only wish to do
once, as it's expensive to match (and difficult to read!)
```
counter foo
counter bar
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 02 15:04:05")
/foo/ {
foo++
}
/bar/ {
bar++
}
}
```
This will result in both foo and bar counters being timestamped with the current
log line's parsed time, once they match a line.
#### Decorated actions
Decorated actions are an inversion of nested actions. They allow the program to
define repetitive functions that perform the same extraction across many
different actions.
For example, most log file formats start with a timestamp prefix. To reduce
duplication of work, decorators can be used to factor out the common work of
extracting the timestamp. For example, to define a decorator, use the `def`
keyword:
```
def syslog {
/(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ {
strptime($date, "Jan 2 15:04:05")
next
}
}
```
The decorator definition starts and ends in a curly-braced block, and looks like
a normal pattern/action as above. The new part is the `next` keyword, which
indicates to `mtail` where to jump into the *decorated* block.
To use a decorator:
```
@syslog {
/some event/ {
variable++
}
}
```
The `@` notation, familiar to Python programmers, denotes that this block is
"wrapped" by the `syslog` decorator. The syslog decorator will be called on each
line first, which extracts the timestamp of the log line. Then, `next` causes
the wrapped block to execute, so then `mtail` matches the line against the
pattern `some event`, and if it does match, increments `variable`.
#### Types
`mtail` metrics have a *kind* and a *type*. The *kind* affects how the metric is recorded, and the *type* describes the data being recorded.
Ordinarily `mtail` doesn't treat kinds specially, except when they are being exported.
* `counter` assumes that the variable is a monotonically increasing measure,
so that computations on sampled data like rates can be performed without
loss. Use for counting events or summing up bytes transferred.
* `gauge` assumes that the variable can be set to any value at any time,
signalling that rate computations are risky. Use for measures like queue
length at a point in time.
* `histogram` is used to record frequency of events broken down by another dimension, for example by latency ranges. This kind does have special treatment within `mtail`.
The second dimension is the internal representation of a value, which is used by
`mtail` to attempt to generate efficient bytecode.
* Integer
* Float
* Bool
* String
Some of these types can only be used in certain locations -- for example, you
can't increment a counter by a string, but `mtail` will fall back to an attempt
to do so, logging an error if a runtime type conversion fails. Likewise, the
only type that a `histogram` can observe is a Float.
These types are usually inferred from use, but can be influenced by the
programmer with builtin functions. Read on.
#### Builtin functions
`mtail` contains some builtin functions for help with extracting information and
manipulating state.
There are "pure" builtin functions, in that they have no side effects on the
program state.
* `len(x)`, a function of one string argument, which returns the length of the
string argument `x`.
* `tolower(x)`, a function of one string argument, which returns the input `x`
in all lowercase.
* `subst(old, new, val)`, a function of three arguments which returns the
input `val` with all substrings or patterns `old` replaced by `new`. When
given a *string* for `old`, it is a direct proxy of the Go
[strings.ReplaceAll](https://golang.org/pkg/strings/#ReplaceAll) function.
`subst("old", "new", $val)`
When given a *regular expression pattern* for `old`, it uses
[regexp.ReplaceAllLiteralString](https://golang.org/pkg/regexp/#Regexp.ReplaceAllLiteralString).
`subst(/old/, "new", $val)`
Note the different quote characters in the first argument.
There are type coercion functions, useful for overriding the type inference made
by the compiler if it chooses badly. (If the choice is egregious, please file a
bug!)
* `int(x)`, a function of one argument performs type conversion to integer. If
`x` is a type that can be converted to integer, it does so. If the type of
`x` cannot be converted to an integer, a compile error is triggered. If the
value of `x` cannot be converted to an integer, then a runtime error is
triggered.
* `float(x)`, a function of one argument that performs type conversion to
floating point numbers. The same rules apply as for `int()` above.
* `string(x)`, a function of one argument that performs conversion to string
values.
* `strtol(x, y)`, a function of two arguments, which converts a string `x` to
an integer using base `y`. Useful for translating octal or hexadecimal
values in log messages.
A few builtin functions exist for manipulating the virtual machine state as side
effects for the metric export.
* `getfilename()`, a function of no arguments, which returns the filename from
which the current log line input came.
* `settime(x)`, a function of one integer argument, which sets the current
timestamp register.
* `strptime(x, y)`, a function of two string arguments, which parses the
timestamp in the string `x` with the parse format string in `y`, and sets
the current timestamp register. The parse format string must follow [Go's
time.Parse() format string](http://golang.org/src/pkg/time/format.go)
* `timestamp()`, a function of no arguments, which returns the current
timestamp. This is undefined if neither `settime` nor `strptime` has been
called previously.
The **current timestamp register** refers to `mtail`'s idea of the time
associated with the current log line. This timestamp is used when the variables
are exported to the upstream collector. The value defaults to the time that the
log line arrives in `mtail`, and can be changed with the `settime()` or
`strptime()` builtins.
User defined functions are not supported, but read on to Decorated Actions for
how to reuse common code.
#### Numerical capture groups and Metric type information
By limiting the pattern of a capturing group to only numeric characters, the
programmer can provide hints to `mtail` about the type of an expression. For
example, in the regular expression
`/(\d+)/`
the first capture group can only match digits, and so the compiler will infer
that this is an integer match.
`/(\d+\.\d+)/`
looks like it matches floating point numbers, and so the compiler will infer
that this is of type float.
> NOTE: In the expression above, the dot is escaped. A regular expression
> operator `.` matches every character and so the inference assumes that the
> type of '.' is a string.
The compiler performs type inference on the expressions that use the capture
groups, and the metrics they are ultimately assigned to, and will assign a type
(either integer or floating point) to the metrics exported.
Thus in a program like:
```
gauge i
gauge f
/(\d+)/ {
i = $1
}
/(\d+\.\d+)/ {
f = $1
}
```
the metric `i` will be of type Int and the metric `f` will be of type Float.
The advantage of limiting pattern matches to specific values is that `mtail` can
generate faster bytecode if it knows at compile-time the types to expect. If
`mtail` can't infer the value types, they default to `String` and `mtail` will
attempt a value conversion at runtime if necessary. Runtime conversion errors
will be emitted to the standard INFO log, and terminate program execution for
that log line.
#### Variable Storage Management
##### `del`
`mtail` performs no implicit garbage collection in the metric storage. The
program can hint to the virtual machine that a specific datum in a dimensioned
metric is no longer going to be used with the `del` keyword.
```
gauge duration by session
hidden gauge session_start by session
/end/ {
duration[$session] = timestamp() - session_start[$session]
del session_start[$session]
}
```
In this example, a hidden metric is used to record some internal state. It will
grow unbounded as the number of sessions increases. If the programmer knows that
the `/end/` pattern is the last time a session will be observed, then the datum
at `$session` will be freed, which keeps `mtail` memory usage under control and
will improve search time for finding dimensioned metrics.
`del` can be modified with the `after` keyword, signalling that the metric
should be deleted after some period of no activity. For example, the
expression
```
del session_start[$session] after 24h
```
would mean that the datum indexed by `$session` will be removed 24 hours after the last update is recorded.
The del-after form takes any time period supported by the go
[`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration) function.
Expiry is only processed once every hour, so durations shorter than 1h won't take effect until the next hour has passed.
This command only makes sense for dimensioned metrics.
##### `limit`
A size limit can be specified on a metric with the modifier `limit`.
```
counter bytes_total by operation limit 500
```
When the garbage collection run encounters a variable with size limit that is over its size limit, it will remove the oldest values until the whole metric is below its limit again. Oldest values are chosen by the timestamp of the datum.
This modifier only makes sense for dimensioned metrics.
### Stopping the program
The program runs from start to finish once per line, but sometimes you may want to stop the program early. For example, if the log filename does not match a pattern, or some stateful metric indicates work shouldn't be done.
For this purpose, the `stop` keyword terminates the program immediately.
The simplest and most useless mtail program is thus:
```
stop
```
But for more useful situations, perhaps stopping if the log filename doesn't match a pattern:
```
getfilename() !~ /apache.access.log/ {
stop
}
```
mtail-3.0.0~rc54+git0ff5/docs/Metrics.md 0000664 0000000 0000000 00000005412 14600635717 0017647 0 ustar 00root root 0000000 0000000 # Metrics in `mtail`
## Introduction
A metric is a data type that describes a measurement.
It has a **name**, and a **value**, and a **time** that the measurement was taken.
It also has **units**, so that measurements can be compared and calculated with.
It has a **class**, so that tools can automatically perform some aggregation operations on collections of measurements.
It has a **type**, describing the sort of data it contains: floating point or integer values.
Finally, it has some **labels**, so that additional information about the measurement can be added to assist queries later. Labels are key/value pairs, where the value may change for a specific measurement, but the keys remain constant across all measurements in a metric.
## Classes of Metrics
The class of a Metric can be:
* a monotonically increasing counter, that allows the calculation of rates of change
* a variable gauge, that records instantaneous values
Counters are very powerful as they are resistant to errors caused by sampling frequency. Typically used to accumulate events, they can show changes in behaviour through the calculation of rates, and rates of rates. They can be summed across a group and that sum also derived. Counter resets can indicate crashes or restarts.
Gauges are less powerful as their ability to report is dependent on the sampling rate -- spikes in the timeseries can be missed. They record queue lengths, resource usage and quota, and other sized measurements.
(N.B. Gauges can be simulated with two counters.)
## Types of data
`mtail` records either integer or floating point values as the value of a metric. By default, all metrics are integer, unless the compiler can infer a floating point type.
Inference is done through the type checking pass of the compiler. It uses knowledge of the expressions written in the program as well as heuristics on capturing groups in the regular expressions given.
For example, in the program:
```
counter a
/(\S+)/ {
a = $1
}
```
the compiler will assume that `a` is of an integer type. With more information about the matched text:
```
counter a
/(\d+\.\d+)/ {
a = $1
}
```
the compiler can figure out that the capturing group reference `$1` contains digit and decimal point characters, and is likely then a floating point type.
## Labelling
Labels are added as dimensions on a metric:
```
counter a by x, y, z
```
creates a three dimensional metric called `a`, with each dimension key `x`, `y`, `z`.
Setting a measurement by label is done with an indexed expression:
```
a[1, 2, 3]++
```
which has the effect of incrementing the metric a when x = 1, y = 2, and z = 3.
Dimensions, aka *labels* in the metric name, can be used to export rich data to
the metrics collector, for potential slicing and aggregation by each dimension.
mtail-3.0.0~rc54+git0ff5/docs/Programming-Guide.md 0000664 0000000 0000000 00000035407 14600635717 0021565 0 ustar 00root root 0000000 0000000 # `mtail` Programming Guide
## Introduction
`mtail` is very simple and thus limits what is possible with metric
manipulation, but is very good for getting values into the metrics. This page
describes some common patterns for writing useful `mtail` programs.
## Changing the exported variable name
`mtail` only lets you use "C"-style identifier names in the program text, but
you can rename the exported variable as it gets presented to the collection
system if you don't like that.
```
counter connection_time_total as "connection-time_total"
```
## Reusing pattern pieces
If the same pattern gets used over and over, then define a constant and avoid
having to check the spelling of every occurrence.
```
# Define some pattern constants for reuse in the patterns below.
const IP /\d+(\.\d+){3}/
const MATCH_IP /(?P<ip>/ + IP + /)/
...
# Duplicate lease
/uid lease / + MATCH_IP + / for client .* is duplicate on / {
duplicate_lease++
}
```
## Parse the log line timestamp
`mtail` attributes a timestamp to each event.
If no timestamp exists in the log and none explicitly parsed by the mtail program, then mtail will use the current system time as the time of the event.
Many log files include the timestamp of the event as reported by the logging program. To parse the timestamp, use the `strptime` function with
a [Go time.Parse layout string](https://golang.org/pkg/time/#Parse).
```
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ {
strptime($date, "Jan _2 15:04:05")
```
Don't try to disassemble timestamps into component parts (e.g. year, month, day) separately. Keep them in the same format as the log file presents them and change the strptime format string to match it.
```
/^/ +
/(?P<date>\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) / +
/.*/ +
/$/ {
strptime($date, "2006/01/02 15:04:05")
```
N.B. If no timestamp parsing is done, then the reported timestamp of the event
may add some latency to the measurement of when the event really occurred.
Between your program logging the event, and mtail reading it, there are many
moving parts: the log writer, some system calls perhaps, some disk IO, some
more system calls, some more disk IO, and then mtail's virtual machine
execution. While normally negligible, it is worth stating in case users notice
offsets in time between what mtail reports and the event really occurring. For
this reason, it's recommended to always use the log file's timestamp if one is
available.
## Repeating common timestamp parsing
The decorator syntax was designed with common timestamp parsing in mind. It
allows the code for getting the timestamp out of the log line to be reused and
make the rest of the program text more readable and thus maintainable.
```
# The `syslog' decorator defines a procedure. When a block of mtail code is
# "decorated", it is called before entering the block. The block is entered
# when the keyword `next' is reached.
def syslog {
/(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
# If the legacy_date regexp matched, try this format.
len($legacy_date) > 0 {
strptime($legacy_date, "Jan _2 15:04:05")
}
# If the RFC3339 style matched, parse it this way.
len($rfc3339_date) > 0 {
strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
}
# Call into the decorated block
next
}
}
```
This can be used around any blocks later in the program.
```
@syslog {
/foo/ {
...
}
/bar/ {
}
} # end @syslog decorator
```
Both the foo and bar pattern actions will have the syslog timestamp parsed from
them before being called.
### Timestamps with strange characters in them
Go's [time.Parse](https://golang.org/pkg/time/#Parse) does not like underscores in the format string, which may happen when one is attempting to parse a timestamp that does have underscores in the format. Go treats the underscore as placeholding an optional digit.
To work around this, you can use `subst()` to rewrite the timestamp before parsing:
```
/(\d{4}-\d{2}-\d{2}_\d{2}:\d{2}:\d{2}) / {
strptime(subst("_", " ", $1), "2006-01-02 15:04:05")
}
```
Note the position of the underscore in the regular expression match.
## Conditional structures
The `/pattern/ { action }` idiom is the normal conditional control flow structure in `mtail` programs.
If the pattern matches, then the actions in the block are executed. If the
pattern does not match, the block is skipped.
The `else` keyword allows the program to perform action if the pattern does not match.
```
/pattern/ {
action
} else {
alternative
}
```
The example above would execute the "alternative" block if the pattern did not
match the current line.
The `otherwise` keyword can be used to create control flow structure
reminiscent of the C `switch` statement. In a containing block, the
`otherwise` keyword indicates that this block should be executed only if no
other pattern in the same scope has matched.
```
{
/pattern1/ { _action1_ }
/pattern2/ { _action2_ }
otherwise { _action3_ }
}
```
In this example, "action3" would execute if both pattern1 and pattern2 did not
match the current line.
### Explicit matching
The above `/pattern/ { _action_ }` form implicitly matches the current input log line.
If one wants to match against another string variable, one can use the `=~` operator, or to negate the match the `!~`, like so:
```mtail
$1 =~ /GET/ {
...
}
```
## Storing intermediate state
Hidden metrics are metrics that can be used for internal state and are never
exported outside of `mtail`. For example if the time between pairs of log
lines needs to be computed, then a hidden metric can be used to record the
timestamp of the start of the pair.
**Note** that the `timestamp` builtin _requires_ that the program has set a log
line timestamp with `strptime` or `settime` before it is called.
```
hidden gauge connection_time by pid
...
# Connection starts
/connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ {
connections_total++
# Record the start time of the connection, using the log timestamp.
connection_time[$pid] = timestamp()
}
...
# Connection summary when session closed
/sent (?P<sent>\d+) bytes received (?P<received>\d+) bytes total size \d+/ {
# Sum total bytes across all sessions for this process
bytes_total["sent"] += $sent
bytes_total["received"] += $received
# Count total time spent with connections open, according to the log timestamp.
connection_time_total += timestamp() - connection_time[$pid]
# Delete the datum referenced in this dimensional metric. We assume that
# this will never happen again, and hint to the VM that we can garbage
# collect the memory used.
del connection_time[$pid]
}
```
In this example, the connection timestamp is recorded in the hidden variable
`connection_time` keyed by the "pid" of the connection. Later when the
connection end is logged, the delta between the current log timestamp and the
start timestamp is computed and added to the total connection time.
In this example, the average connection time can be computed in a collection
system by taking the ratio of the number of connections (`connections_total`)
over the time spent (`connection_time_total`). For example
in [Prometheus](http://prometheus.io) one might write:
```
connection_time_10s_moving_avg =
rate(connections_total[10s])
/ on job
rate(connection_time_total[10s])
```
Note also that the `del` keyword is used to signal to `mtail` that the
connection_time value is no longer needed. This will cause `mtail` to delete
the datum referenced by that label from this metric, keeping `mtail`'s memory
usage under control and speeding up labelset search time (by reducing the
search space!)
Alternatively, the statement `del connection_time[$pid] after 72h` would do the
same, but only if `connection_time[$pid]` is not changed for 72 hours. This
form is more convenient when the connection close event is lossy or difficult
to determine.
See [state](state.md) for more information.
## Computing moving averages
`mtail` deliberately does not implement complex mathematical functions. It
wants to process a log line as fast as it can. Many other products on the
market already do complex mathematical functions on timeseries data,
like [Prometheus](http://prometheus.io) and [Riemann](http://riemann.io), so
`mtail` defers that responsibility to them. (Do One Thing, and Do It Pretty
Good.)
But say you still want to do a moving average in `mtail`. First note that
`mtail` has no history available, only point in time data. You can update an
average with a weighting to make it an exponential moving average (EMA).
```
gauge average
/some (\d+) match/ {
# Use a smoothing constant 2/(N + 1) to make the average over the last N observations
average = 0.9 * $1 + 0.1 * average
}
```
However this doesn't take into account the likely situation that the matches arrive irregularly (the time interval between them is not constant.) Unfortunately the formula for this requires the exp() function (`e^N`) as described here: http://stackoverflow.com/questions/1023860/exponential-moving-average-sampled-at-varying-times . I recommend you defer this computation to the collection system.
## Histograms
Histograms are preferred over averages in many monitoring howtos, blogs, talks,
and rants, in order to give the operators better visibility into the behaviour
of a system.
`mtail` supports histograms as a first class metric kind, and should be created with a list of bucket boundaries:
```
histogram foo buckets 1, 2, 4, 8
```
creates a new histogram `foo` with buckets for ranges [0-1), [1-2), [2-4), [4-8), and from 8 to positive infinity.
> *NOTE: The 0-n and m-+Inf buckets are created automatically.*
You can put labels on a histogram as well:
```
histogram apache_http_request_time_seconds buckets 0.005, 0.01, 0.025, 0.05 by server_port, handler, request_method, request_status, request_protocol
```
At the moment all bucket boundaries (excepting 0 and positive infinity) need to be explicitly named (there is no shorthand form to create geometric progressions).
Assignment to the histogram records the observation:
```
###
# HTTP Requests with histogram buckets.
#
apache_http_request_time_seconds[$server_port][$handler][$request_method][$request_status][$request_protocol] = $time_us / 1000000
```
In tools like [Prometheus](http://prometheus.io) these can be manipulated in
aggregate for computing percentiles of response latency.
```
apache_http_request_time:rate10s = rate(apache_http_request_time_seconds_bucket[10s])
apache_http_request_time_count:rate10s = rate(apache_http_request_time_seconds_count[10s])
apache_http_request_time:percentiles =
apache_http_request_time:rate10s
/ on (job, port, handler, request_method, request_status, request_protocol)
apache_http_request_time_seconds_count:rate10s
```
This new timeseries can be plotted to see the percentile bands of each bucket,
for example to visualise the distribution of requests moving between buckets as
the performance of the server changes.
Further, these timeseries can be used
for
[Service Level](https://landing.google.com/sre/book/chapters/service-level-objectives.html)-based
alerting (a technique for declaring what a defensible service level is based on
the relative costs of engineering more reliability versus incident response,
maintenance costs, and other factors), as we can now see what percentage of
responses fall within and without a predefined service level:
```
apache_http_request_time:latency_sli =
apache_http_request_time:rate10s{le="200"}
/ on (job, port, handler, request_method, request_status, request_protocol)
apache_http_request_time_seconds_count:rate10s
ALERT LatencyTooHigh
IF apache_http_request_time:latency_sli < 0.555555555
LABELS { severity="page" }
ANNOTATIONS {
summary = "Latency is missing the service level objective"
description = "Latency service level indicator is {{ $value }}, which is below nine fives SLO."
}
```
In this example, prometheus computes a service level indicator of the ratio of
requests at or below the target of 200ms against the total count, and then
fires an alert if the indicator drops below nine fives.
## Parsing number fields that are sometimes not numbers
Some logs, for example Varnish and Apache access logs, use a hyphen rather than a zero.
You may be tempted to use a programme like
```
counter total
/^[a-z]+ ((?P<response_size>\d+)|-)$/ {
$response_size > 0 {
total = $response_size
}
}
```
to parse a log like
```
a 99
b -
```
except that `mtail` will issue a runtime error on the second line like `Runtime error: strconv.ParseInt: parsing "": invalid syntax`.
This is because in this programme the capture group is only matching on a set of digits, and is not defined when the alternate group matches (i.e. the hyphen).
Instead one can test the value of the surrounding capture group and do nothing if the value matches a hyphen:
```
counter total
/^[a-z]+ ((?P<response_size>\d+)|-)$/ {
$1 != "-" {
total = $response_size
}
}
```
`mtail` does not presently have a way to test if a capture group is defined or not.
## Parsing numbers with extra characters
Some logs contain human readable numbers, inserting thousands-separators (comma or full stop depending on your locale.) You can remove them with the `subst` function:
```
/sent (?P<sent>[\d,]+) bytes received (?P<received>[\d,]+) bytes/ {
# Sum total bytes across all sessions for this process
bytes_total["sent"] += int(subst(",", "", $sent))
bytes_total["received"] += int(subst(",", "", $received))
}
```
As `subst` is of type String, the type inference will assign a Text type to bytes total, so here we must explicitly instruct `mtail` that we are expecting this to be an Int by using the `int` cast function.
# Avoiding unnecessary work
You can stop the program if it's fed data from a log file you know you want to ignore:
```
getfilename() !~ /apache.access.?log/ {
stop
}
```
This will check to see if the input filename looks like
`/var/log/apache/accesslog` and not attempt any further pattern matching on the
log line if it doesn't.
# Canonicalising keys
Some logs like webserver logs describe common elements with unique identifiers
in them, which can result in lots of metric keys and no useful count if left
alone. To rewrite these capture groups, use `subst()` with a pattern as the
first argument:
```mtail
hidden text route
counter http_requests_total by method, route
/(?P<method>\S+) (?P<url>\S+)/ {
  route = subst(/\/\d+/, "/:num", $url)
  http_requests_total[$method][route]++
}
```
Here we replace any number part following a `/` in the `$url` capture group with
the literal string `/:num`, so we end up counting only the static part of a URL
route.
mtail-3.0.0~rc54+git0ff5/docs/Testing.md 0000664 0000000 0000000 00000004515 14600635717 0017661 0 ustar 00root root 0000000 0000000 # Testing `mtail` programmes
## Introduction
By default any compile errors are logged to the standard log `/tmp/mtail.INFO`
unless otherwise redirected. (You can emit to standard out with
`--logtostderr` flag.) Program errors are also printed on the HTTP status
handler, by default at porrt 3903.
If you want more debugging information, `mtail` provides a few flags to assist with testing your program in standalone mode.
# Details
## Compilation errors
The `compile_only` flag will run the `mtail` compiler, print any error messages, and then exit.
You can use this to check your programs are syntactically valid during the development process.
```
mtail --compile_only --progs ./progs
```
This could be added as a pre-commit hook to your source code repository.
## Testing programs
The `one_shot` flag will compile and run the `mtail` programs, then feed in any
logs specified from the beginning of the file (instead of tailing them), then
print to the log all metrics collected.
You can use this to check that your programs are giving the expected output
against some gold standard log file samples.
```
mtail --one_shot --progs ./progs --logs testdata/foo.log
```
### Continuous Testing
If you wish, send a PR containing your program, some sample input, and a golden
output to be run as a test in
http://github.com/google/mtail/blob/main/ex_test.go to ensure that mtail
never breaks your program (or that your program gets any updates if the
language changes.)
To have a syntax-only compile test, merely send in a PR with the program in the
examples directory.
The `TestExamplePrograms` behaves like the `one_shot` flag, and
`TestCompileExamplePrograms` tests that program syntax is correct.
# Test writing
Use the `testutil` module where possible.
Do not use time.Sleep; poll for events. The `TestServer` provides a `PollWatched()` method for this purpose. Even integration tests which write to disk can be fast and not require sleeps to synchronise.
Use the `if testing.Short()` signal in tests with disk access so that the `make smoke` command is fast.
Do not comment out tests, prefer to use the t.Skip() method indicating why it's not working if a test needs to be disabled. This keeps them visible and compilable.
# Troubleshooting
For more information about debugging mtail programs, see the tips under [Troubleshooting](Troubleshooting.md)
mtail-3.0.0~rc54+git0ff5/docs/Troubleshooting.md 0000664 0000000 0000000 00000012553 14600635717 0021434 0 ustar 00root root 0000000 0000000 # Troubleshooting `mtail` installations
This page gives an overview of some avenues to debug your `mtail` installation.
Also, see the [FAQ](faq.md).
## Reporting a problem
Please when reporting a problem, include the `mtail` version:
* the output of `mtail --version`
* the first lines of the INFO log (`/tmp/mtail.INFO` by default)
* the top of the status page (on HTTP port 3903 by default)
## `go get` or build problems
### `package github.com/google/mtail: no Go files`
You're using go 1.11 or higher, which now starts to use go modules, and doesn't like source code layouts like `mtail` which doesn't have any Go files in the top directory.
Either set `GO111MODULE=on` environment variable first, or `go get` the binary directly:
`go get github.com/google/mtail/cmd/mtail`
vs
```
GO111MODULE=on go get -u github.com/google/mtail
cd $GOPATH/src/github.com/google/mtail
make install
```
## Compilation problems
Compilation problems will be emitted to the standard INFO log
* which is visible either on stderr if `mtail` is run with the `--logtostderr` flag
* which is stored in the location provided by the `--log_dir` flag (usually, /tmp)
(The behaviour of glog is documented in https://github.com/golang/glog)
Errors for the most recent version of the program will also be displayed on the
standard status page (served over HTTP at port 3903 by default) in the *Program Loader* section.
If a program fails to compile, it will not be loaded. If an existing program
has been loaded, and a new version is written to disk (by you, or a
configuration management system) and that new version does not compile,
`mtail` will log the errors and not interrupt or restart the existing, older program.
The `--compile_only` flag will only attempt to compile the programs and not
execute them. This can be used for pre-commit testing, for example.
### Syntax trees, type information, and virtual machine bytecode
More detailed compiler debugging can be retrieved by using the `--dump_ast`, `--dump_ast_types`, and `--dump_bytecode`, all of which dump their state to the INFO log.
For example, type errors logged such as
`prog.mtail: Runtime error: conversion of "-0.000000912" to int failed: strconv.ParseInt: parsing "-0.000000912": invalid syntax` suggest an invalid type inference of `int` instead of `float` for some program symbol or expression. Use the `--dump_ast_types` flag to see the type annotated syntax tree of the program for more details.
When reporting a problem, please include the AST type dump.
## Memory or performance issues
`mtail` is a virtual machine emulator, and so strange performance issues can occur beyond the imagination of the author.
The standard Go profiling tool can help. Start with a cpu profile:
`go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/profile`
or a memory profile:
`go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/heap`
There are many good guides on using the profiling tool:
* https://software.intel.com/en-us/blogs/2014/05/10/debugging-performance-issues-in-go-programs is one such guide.
The goroutine stack dump can also help explain what is happening at the moment.
http://localhost:3903/debug/pprof/goroutine?debug=2 shows the full goroutine stack dump.
* `(*Watcher).readEvents` reads events from the filesystem
* `(*Tailer).run` processes log change events; `.read` reads the latest log lines
* `(*Loader).processEvents` handles filesystem event changes regarding new program text
* `(*Loader).processLines` handles new lines coming from the log tailer
* `(*MtailServer).WaitForShutdown` waits for the other components to terminate
* `(*Exporter).StartMetricPush` exists if there are any push collectors (e.g. Graphite) to push to
* `(*Exporter).HandlePrometheusMetrics` exists if an existing Prometheus pull collection is going on
There is one `(*VM).Run` stack per program. These are opaque to the goroutine
stack dump as they execute the bytecode. However, the second argument to `Run`
on the stack is the first four letters of the program name, encoded as ASCII.
You can transcode these back to their names by doing a conversion from the
int32 value in hex provided in the stack, e.g.: 0x61706163 -> 'apac' (probably
an apache log program); 0x7273796e -> 'rsyn' (probably an rsyncd log program)
Obvious problems seen in the goroutine stack dump are long-waiting goroutines, usually on mutexes.
(they show their block time in minutes, e.g. `goroutine 38 [semacquire, 1580
minutes]:`) which usually also manifest as a logjam (no pun intended) in the
loader, tailer, and watcher goroutines (in state 'chan send').
## Distributed Tracing
`mtail` can export traces to the [Jaeger](https://www.jaegertracing.io/) trace collector. Specify the Jaeger endpoint with the `--jaeger_endpoint` flag
```
mtail --jaeger_endpoint http://localhost:14268/api/traces
```
The `--trace_sample_period` flag can be used to set how often a trace is sampled and sent to the collector. Set it to `100` to collect one in 100 traces.
## Deployment problems
The INFO log at `/tmp/mtail.INFO` by default contains lots of information about
any errors encountered. Adding the `-v=2` flag raises the verbosity. See the
[glog](https://github.com/golang/glog) manual for more logging flag options.
The `one_shot` and `logtostderr` flags may come in helpful for quickly
launching mtail in non-daemon mode in order to flush out deployment issues like
permissions problems.
mtail-3.0.0~rc54+git0ff5/docs/debugging.md 0000664 0000000 0000000 00000010606 14600635717 0020175 0 ustar 00root root 0000000 0000000 # Tips for debugging `mtail`
## Parser bugs
Run a test with logtostderr and mtailDebug up to 3, and parser_test_debug enabled to see any AST results.
```
go test -run TestParserRoundTrip/decrement_counter --logtostderr --mtailDebug=3 --parser_test_debug
```
`mtailDebug` at 2 dumps the parser states being traversed, and 3 includes the lexer token stream as well.
## Improving parser syntax error messages
You can use this to improve error messages in the `%error` section of [`parser.y`](../internal/runtime/compiler/parser/parser.y), if you compare the "error recovery pops" messages with the state machine in the generated [`y.output`](../internal/runtime/compiler/parser/y.output).
```
go generate && go test -run TestParseInvalidPrograms/statement_with_no_effect --logtostderr --mtailDebug=3 --parser_test_debug
```
error log from test:
```
...
state-14 saw LSQUARE
error recovery pops state 14
error recovery pops state 102
error recovery pops state 46
error recovery pops state 14
error recovery pops state 2
error recovery pops state 0
```
This log says the lexer sent a LSQUARE token, and the parser was in state 14 when it saw it. The snippet below from `y.output` indicates state 14 is never expecting a LSQUARE, and the following lines in the log above show the state stack being popped -- 0, 2, 14, 46, 102, 14.
Walking backwards from state 0 (`$start`), we can get a list of nonterminal names to put in the state machine match expression used in the `%error` directive, and fill in the gaps with our knowledge of the intermediate states in our parse tree.
`y.output`:
```
state 14
conditional_statement: logical_expr.compound_statement ELSE compound_statement
conditional_statement: logical_expr.compound_statement
logical_expr: logical_expr.logical_op opt_nl bitwise_expr
AND shift 47
OR shift 48
MATCH shift 49
NOT_MATCH shift 50
LCURLY shift 46
. error
compound_statement goto 44
logical_op goto 45
```
State 14 to state 46 shifts a LCURLY operator, follow state 46 and we will find ourselves in `compound_statement`.
Add to `parser.y` the names of the states that ended up at the unexpected token, followed by the error message:
```
%error stmt_list stmt conditional_statement logical_expr compound_statement conditional_statement logical_expr LSQUARE : "unexpected indexing of an expression"
```
and instead of "syntax error", the parser now emits "unexpected indexing of an expression".
## Fuzzer crashes
Build the fuzzer locally with clang and libfuzzer:
```
make vm-fuzzer fuzz CXX=clang CXXFLAGS=-fsanitize=fuzzer,address LIB_FUZZING_ENGINE=
```
Then we can run the fuzzer with our example crash; make sure it has no weird characters because the upstream fuzz executor doesn't shell-escape arguments.
```
./vm-fuzzer crash.mtail
```
If the crash is big, we can try to minimise it:
```
make fuzz-min CRASH=crash.mtail
```
Sometimes the minimiser will hit a local minima, but still look big; for example it doesn't know how to shrink variable names.
We can reformat the crash with [`cmd/mfmt`](../cmd/mfmt/main.go):
```
make mfmt
./mfmt --prog crash.mtail --write
```
so it's easier to read -- it'll be bigger cos of the whitespace and the minimiser should shrink it back to original size if everything is working well.
The formatted mtail program should help make it obvious what's happening and let you manually attempt to rename or remove parts of the program yourself -- perhaps a whole variable declaration and usage doesn't need to exist, but the minimiser will take a long time to figure that out.
Once we have the smallest program we can add it to the crash corpus in [`internal/runtime/fuzz/`](../internal/runtime/fuzz/) and running `make fuzz` should run and fail on it straight away.
Or, variants of the program can be added to the various `*Invalid` tests in parts of the `vm` module, e.g. [`parser_test.go`](../internal/runtime/compiler/parser/parser_test.go) or [`checker_test.go`](../internal/runtime/compiler/checker/checker_test.go) depending on where in the compiler the defect is occurring.
If the crash is in `vm.go` then we can dump the program to see what AST and types, and bytecode it generates.
```
make mtail
./mtail --logtostderr --dump_ast_types --dump_bytecode --mtailDebug=3 --compile_only --progs crash.mtail
```
### Fuzzer crashes, part 2
Run the fuzz-repro target with the CRASH variable set, it'll do all of the above:
```
make fuzz-repro CRASH=bug/20720.mtail
```
mtail-3.0.0~rc54+git0ff5/docs/designs/ 0000775 0000000 0000000 00000000000 14600635717 0017351 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/docs/designs/poll.md 0000664 0000000 0000000 00000005164 14600635717 0020647 0 ustar 00root root 0000000 0000000 # Polling filesystem watcher
Original date: 2018-08-13
Status: obsolete
Last Updated: 2020-11-17
## Overview
Implement a hybrid polling and notify based filesystem watcher.
## Background
Tracking issue #169
`mtail` has a filesystem watcher which is used to watch the state of programme
files and log files; if they change, then programmes are reloaded and log files
are read. `mtail` uses the [fsnotify](https://github.com/fsnotify/fsnotify)
package to implement the filesystem watcher. fsnotify, which uses the
`inotify(7)` system in Linux, lets `mtail` offload the work of polling the
filesystem for changes to one where it is notified by the kernel instead,
reducing the amount of work done.
Some users want a polling option instead of fsnotify as their platforms don't
support fsnotify, e.g. mipsel (bug in fsnotify) or no kernel support? (using on
AIX).
This design attempts to determine how to support a hybrid watcher.
To the best of our ability, users should not have to configure poll- or fsnotify-based filesystem watching.
From Linux's inotify(7):
Inotify reports only events that a user-space program triggers through the filesystem API. As a result,
it does not catch remote events that occur on network filesystems. (Applications must fall back to
polling the filesystem to catch such events.) Furthermore, various pseudo-filesystems such as /proc,
/sys, and /dev/pts are not monitorable with inotify.
## design ideas
fsnotify watch add error, fallback to poll. How does fsnotify report errors about watches not being supported? E.g on NFS or with AIX?
poll implemented similar to fsnotify poll loop? if that, will that be duplicated work? Do we care enough to avoid nested polling loops? should this be pushed upstream?
how to let users override the choice? Argument listing poll-only filesystem path prefixes?
Could poll be on by default for all files, with a timeout if no events have been received from inotify in some timeout? This could be tricky, we don't need to poll files that are inotified. But, again from inotify(7):
Note that the event queue can overflow. In this case, events are lost. Robust applications should handle
the possibility of lost events gracefully. For example, it may be necessary to rebuild part or all of the
application cache. (One simple, but possibly expensive, approach is to close the inotify file descriptor,
empty the cache, create a new inotify file descriptor, and then re-create watches and cache entries for
the objects to be monitored.)
## references
https://github.com/fsnotify/fsnotify
inotify(7)
mtail-3.0.0~rc54+git0ff5/docs/faq.md 0000664 0000000 0000000 00000011031 14600635717 0017002 0 ustar 00root root 0000000 0000000 # FAQ
"Frequently" is probably an overstatement, but here's a collection of questions and answers that pop up on the mailing list and issues.
## I don't like a particular label on the metrics. How do I remove it?
All the labels are under your own control, except for the `prog` label which is used for namespace deconfliction -- i.e. multiple programs can be running in `mtail` and they should not be able to affect each other.
It is best if you do some post processing in your collection system and configure it to filter out the `prog` label, so that strange aggregations don't occur.
In Prometheus, this could be achieved like so:
```
metric_relabel_configs:
- target_label: prog
replacement: ''
```
(See [this comment](https://github.com/google/mtail/issues/59#issuecomment-303531070)).
## `mtail` isn't propagating the scraped timestamp to Prometheus
`mtail` lets you use the `settimestamp()` function to extract a timestamp from
a log file, and use that timestamp to carry to the monitoring system the
closest thing that `mtail` knows to be the actual time of the event, and not
the time at which `mtail` scraped the log.
However, Prometheus needs to track the existence of a metric in the time series
database in order to avoid showing very old data when querying the same metric
for multiple instances at a specific timestamp. Exposing the timestamp can lead
to triggering this staleness handling.
`mtail`, being a metric creator, falls under bbrazil's comment on the
prometheus-users list, in which he says ["It doesn't make sense to have
timestamps for direct instrumentation, only for proxying metrics from another
monitoring system with a custom
collector."](https://groups.google.com/forum/#!msg/prometheus-users/qgxKH6_gYzM/LyO5wGO6BwAJ).
The `mtail` timestamp handling is also broken for counters. The timestamp is
set to 0 (UNIX epoch) at startup. If no matches are made, the initial zero
count will never be ingested and the metric will only appear when first
incremented. To avoid this, `mtail` disables exporting timestamps to Prometheus
by default.
You can turn this behaviour back on with the `--emit_metric_timestamp`
commandline flag, and if you have slow moving counters, you should tune your
Prometheus' `query.lookback-delta` parameter. See also [Staleness under
Querying
Basics](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness)
in the Prometheus docs.
If you are looking to expose the timestamp of an event, for example the start time of
a process, you can create a timestamp metric. This is a metric that contains
the timestamp as the value:
```mtail
counter mtail_lines_read_count by filename
gauge mtail_file_lastread_timestamp by filename
/.*/ {
mtail_lines_read_count[getfilename()]++
mtail_file_lastread_timestamp[getfilename()] = timestamp()
}
```
## Why doesn't `mtail` persist variables and metric values between restarts?
`mtail` is intended to be stateless, deferring the problem of long term metric
storage to a timeseries database and collector like
[Prometheus](https://prometheus.io).
Partially this reason is technical -- not having to save checkpoints and restore them makes the program much simpler.
This means that mtail programs should prefer metrics that perform better in
stateless systems, like counters rather than gauges. Prometheus for example is
capable of handling counter resets in its rate and delta calculations, so mtail
not remembering the value of a metric should not be cause for concern.
Another reason is that failure is normal, and thus Prometheus handles these
counter restarts because they are normal. If `mtail` checkpointed its state,
filesystem and state file corruption will still occur, and in those edge cases
a counter reset would still be observed, and thus need to be handled
regardless.
So, given that the monitoring system needs to handle missing and resetting data
already in a distributed system, there is no compelling reason to implement
metric checkpointing in `mtail` as well. It just adds complexity for little
overall gain.
## Why doesn't `mtail` automatically reload programme files?
`mtail` will reload programme files when it receives a `SIGHUP` signal.
It's assumed that programmes do not change very often, so it relies on an external trigger rather than spend resources of its own polling for changes at all. `inotify` is not used either, as programme reloads would be the only use of that library, and the benefit does not seem worth the cost of including the extra dependency.
See the [Deployment](Deployment.md) guide for suggestions for "automatic" programme reloads.
mtail-3.0.0~rc54+git0ff5/docs/index.md 0000664 0000000 0000000 00000003176 14600635717 0017355 0 ustar 00root root 0000000 0000000 mtail - extract internal monitoring data from application logs for collection into a timeseries database
========================================================================================================
`mtail` is a tool for extracting metrics from application logs to be exported
into a timeseries database or timeseries calculator for alerting and
dashboarding.
It fills a monitoring niche by being the glue between applications that do not
export their own internal state (other than via logs) and existing monitoring
systems, such that system operators do not need to patch those applications to
instrument them or writing custom extraction code for every such application.
The extraction is controlled by [mtail programs](Programming-Guide.md)
which define patterns and actions:
# simple line counter
counter lines_total
/$/ {
lines_total++
}
Metrics are exported for scraping by a collector as JSON or Prometheus format
over HTTP, or can be periodically sent to a collectd, StatsD, or Graphite
collector socket.
Read the [programming guide](Programming-Guide.md) if you want to learn how
to write mtail programs.
Ask general questions on the users mailing list: https://groups.google.com/g/mtail-users
## Table of Contents
* [Building `mtail`](Building.md)
* [Deploying `mtail`](Deploying.md)
* [Interoperability](Interoperability.md)
* [Troubleshooting](Troubleshooting.md)
* [Programming Guide](Programming-Guide.md)
* [Language](Language.md)
* [Metrics](Metrics.md)
* [Tracking State](state.md)
* [Testing](Testing.md)
* [Contributing to `mtail`](style.md)
* [Debugging](debugging.md)
* [FAQ](faq.md)
mtail-3.0.0~rc54+git0ff5/docs/reading-y-output.md 0000664 0000000 0000000 00000014217 14600635717 0021461 0 ustar 00root root 0000000 0000000 # Reading `y.output`
A Yacc parser is a state machine that responds to an input stream of tokens, and has two actions:
1. **shift**, which pushes a new state on the stack
2. **reduce**, which pops a state off the stack and sets the lookahead token
[`y.output`](../internal/runtime/compiler/parser/y.output) is a semi-human, semi-machine readable description of the parser state machine. `mtail` automatically generates this during the build process with the go:generate directive in [`driver.go`](../internal/runtime/compiler/parser/driver.go)
```y.output
state 0
$accept: .start $end
stmt_list: . (2)
. reduce 2 (src line 96)
stmt_list goto 2
start goto 1
```
There are several parts to the state described here.
The first section are the grammar rules. The first grammar rule says that an input is accepted if we can match a start token, and then the end of the input, and we're currently (the `.`) before the start token. The second rule has a number `(2)` (as it is the second grammar rule in the input `parser.y`, which looks like `stmt_list: stmt_list | stmt_list stmt`). The second rule says we can be in a state where we have parsed a valid `stmt_list`.
The output always encloses grammar rules in parentheses, and state numbers are left unadorned.
The second section has the actions, and in this case there is only one that says "match any token and reduce with rule 2". Rule 2 refers to the one in parentheses above, so it says we can match any token, pop the state off the stack, and set the lookahead token to `stmt_list`. For our convenience it also tells us where in the source file this reduce has come from -- if we look at line 96 we'll see the grammar for parsing a `stmt_list`. (You might wonder why the line number is in the action that uses the rule, rather than the definition of the rule in the previous section, and then you'll be in good company.)
The last section indicates what happens when we enter this state from a reduce action, although the mechanics inside the machine are identical -- if the next lookahead token is a `stmt_list`, go to state 2, and if it's a `start`, go to state 1.
For homework, look at state 1 and state 2 and describe what they mean.
Here's another example:
```y.output
state 14
conditional_statement: logical_expr.compound_statement ELSE compound_statement
conditional_statement: logical_expr.compound_statement
logical_expr: logical_expr.logical_op opt_nl bitwise_expr
AND shift 48
OR shift 49
MATCH shift 50
NOT_MATCH shift 51
LCURLY shift 47
. error
compound_statement goto 45
logical_op goto 46
```
State 14 parses the conditional statement. If we get here, we've already parsed a `logical_expr`, and we're trying to figure out which way to go down the parse tree. We could find a `compound_statement`, or a `logical_op` next.
If we see an `AND`, `OR`, `MATCH`, or `NOT_MATCH` next, we **shift** to the next state, which means pushing the next state onto the stack -- the stack represents the path down the tree to get to this token. Knowing the parser, these tokens mean we're going to parse a `logical_op`, and the difference between each is just because the parser executes a different action for each.
```y.output
state 48
logical_op: AND. (26)
. reduce 26 (src line 202)
```
In state 48 we have recognised an `AND`, and then reduction of rule 26 says we put a `logical_op` at the front of the token stream and pop the stack (back to state 14).
The last couple of actions for state 14 say we can expect a `LCURLY` (token name for `{`, see [`lexer.go`](../internal/runtime/compiler/parser/lexer.go)) and then move to state 47. Or anything else (`.`) and we're now in an error state.
Run a parser test with debugging flags enabled, and we can see how the parser and lexer see the input:
```
go test -run TestParseInvalidPrograms/pattern_without_block --logtostderr --mtailDebug=3 --parser_test_debug
```
`mtailDebug` at 2 dumps the parser states being traversed, and 3 includes the lexer token stream as well.
The command above emits:
```
reduce 2 in:
state-0
lex DIV(57376)
reduce 112 in:
state-2
reduce 113 in:
state-60
lex REGEX(57365)
lex DIV(57376)
reduce 82 in:
state-156
reduce 69 in:
state-35
reduce 62 in:
state-32
lex NL(57408)
reduce 60 in:
state-29
reduce 54 in:
state-26
reduce 47 in:
state-33
reduce 43 in:
state-31
reduce 35 in:
state-28
reduce 30 in:
state-25
reduce 24 in:
state-21
state-14 saw NL
error recovery pops state 14
error recovery pops state 2
error recovery pops state 0
```
We can see we start by reducing rule 2 in state 0, and then read a `DIV` token. The trace doesn't show the **shift** actions, but we reduce through states 2, then 60. Note that state 60 is just prior to the parser asking for the next token, indicated by the `lex REGEX` line -- this is emitted by the lexer when it returns the next token. So we can go look at state 60 to see why we've stopped to ask for more input.
Alternatively, some grepping around for `"DIV shift"` (with two spaces) we can see we shift to state 60 from state 65 on a `DIV` token, which helps understand where the reduces start. Because `DIV` appears in both a regex and a division expression context, there are several matches to the grep.
The error recovery trace is interesting here, as it is a good example of what happens during the `. error` rule. State 14 saw a `NL` (newline) unexpectedly, so the `.` matches. Error recovery doesn't do anything other than pop the stack until empty, so we can see the parse tree at the point of error.
This knowledge can come in handy when improving the parser error messages, using the '%error' directive in `parser.y`. See [debugging](debugging.md) for how to use it.
Note also that knowing the reduce movements within the state machine is useful as the reduce is when the parser action is executed. For example in the phrase `stmt_list: | stmt_list stmt` in `parser.y` the `stmt_list stmt` action is executed only once the leaves of the tree have already been accepted in order to be able to construct the tree. Thus the action on the empty option of the expression is the one that creates the `ast.StmtList`, while the other side's action appends those `stmt` children to that `ast.StmtList`.
mtail-3.0.0~rc54+git0ff5/docs/references.md 0000664 0000000 0000000 00000002326 14600635717 0020363 0 ustar 00root root 0000000 0000000 [ANSI C Grammar](http://www.lysator.liu.se/c/ANSI-C-grammar-y.html)
[Awk Grammar](https://pubs.opengroup.org/onlinepubs/7908799/xcu/awk.html#tag_000_000_108_016)
[Original Awk Grammar](https://github.com/onetrueawk/awk/blob/master/awkgram.y)
[GAWK Grammar](http://git.savannah.gnu.org/cgit/gawk.git/tree/awkgram.y)
[Smalltalk-80: The Language and Its Implementation](http://web.archive.org/web/20070927190722/http://users.ipa.net/~dwighth/smalltalk/bluebook/bluebook_imp_toc.html)
Adele Goldberg and David Robson
[The Java Virtual Machine Specification](https://docs.oracle.com/javase/specs/jvms/se7/html/index.html) Lindholm Yellin Bracha Buckley
[Perl operators](https://perldoc.perl.org/perlop#Regexp-Quote-Like-Operators)
["Generating LR syntax error messages from examples", Jeffery, ACM TOPLAS Volume 24 Issue 5 Sep 2003.](https://dl.acm.org/doi/abs/10.1145/937563.937566)
[Hindley Milner in Scala](http://dysphoria.net/2009/06/28/hindley-milner-type-inference-in-scala/)
[The Hindley-Milner Algorithm](http://web.archive.org/web/20050911123640/http://www.cs.berkeley.edu/~nikitab/courses/cs263/hm.html) in perl
by Nikita Borisov
https://medium.com/@dhruvrajvanshi/type-inference-for-beginners-part-2-f39c33ca9513
mtail-3.0.0~rc54+git0ff5/docs/state.md 0000664 0000000 0000000 00000007174 14600635717 0017370 0 ustar 00root root 0000000 0000000 # Keeping state in mtail programs
The program is run on each log line from start to finish, with no loops. The only state emitted by the program is the content of the exported metrics. Metrics can be read by the program, though, so exported metrics are the place to keep state between lines of input.
It's often the case that a log line is printed by an application at the start of some session-like interaction, and another at the end. Often these sessions have a session identifier, and every intermediate event in the same session is tagged with that identifier. Using map-valued exported metrics is the way to store session information keyed by session identifier.
The example program [`rsyncd.mtail`](../examples/rsyncd.mtail) shows how to use a session tracking metric for measuring the total user session time.
counter connection_time_total
hidden gauge connection_time by pid
/connect from \S+/ {
connection_time[$pid] = timestamp()
del connection_time[$pid] after 72h
}
/sent .* bytes received .* bytes total size \d+/ {
connection_time_total += timestamp() - connection_time[$pid]
del connection_time[$pid]
}
`rsyncd` uses a child process for each session so the `pid` field of the log format contains the session identifier in this example.
## hidden metrics
A hidden metric is only visible to the mtail program, it is hidden from export. Internal state can be kept out of the metric collection system to avoid unnecessary memory and network costs.
Hidden metrics are declared by prepending the word `hidden` to the declaration:
hidden gauge connection_time by pid
## Removing session information at the end of the session
The maps can grow unbounded with a key for every session identifier created as the logs are read. If you see `mtail` consuming a lot of memory, it is likely that there's one or more of these maps consuming memory.
(You can remove the `hidden` keyword from the declaration, and let `mtail` reload the program without restarting and the contents of the session information metric will appear on the exported metrics page. Be warned, that if it's very big, even loading this page may take a long time and cause mtail to crash.)
`mtail` can't know when a map value is ready to be garbage collected, so you need to tell it. One way is to defer deletion of the key and its value if it is not updated for some duration of time. The other way is to immediately delete it when the key and value are no longer needed.
```
del connection_time[$pid] after 72h
```
Upon creation of a connection time entry, the `rsyncd.mtail` program instructs mtail to remove it 72 hours after it's no longer updated. This means that the programmer expects, in this case, that sessions typically do not last longer than 72 hours because `mtail` does not track the timestamps for all accesses of metrics, only writes to them.
```
del connection_time[$pid]
```
The other form indicates that when the session is closed, the key and value can be removed. The caveat here is that logs can be lossy due to problems with the application restarting, mtail restarting, or the log delivery system (e.g. syslog) losing the messages too. Thus it is recommended to use both forms in programs.
1. `del ... after` form when the metric is created, giving it an expiration time longer than the expected lifespan of the session.
1. `del` form when the session is ended, explicitly removing it before the expiration time is up.
It is not an error to delete a nonexistent key from a map.
Expiry is only processed once every hour, so durations shorter than 1h won't take effect until the next hour has passed.
mtail-3.0.0~rc54+git0ff5/docs/style.md 0000664 0000000 0000000 00000001536 14600635717 0017404 0 ustar 00root root 0000000 0000000 # Contribution style guide
## Table tests
Use the `t.Run` subtest form. This assists debugging by printing the name of
the table entry without additional parameters to t.Log and t.Error later on.
It also means that the `-run` and `-bench` flags can be used to filter a specific
test without excessive comment-and-rebuild cycles.
Prefer to construct the subtest's name from the test parameters with
`fmt.Sprintf`, otherwise use a `name` field.
When comparing results, use `deep.Equal`. The parameter order should always be
`expected`, then `observed`. This makes the diff output read like "the observed
value is not equal to the expected value."
If there is a non-nil diff result, emit it with `t.Error(diff)`. If multiple
diffs are emitted in a single test, prefix the emission with a `t.Log` of the
name of the result variable or function under test.
mtail-3.0.0~rc54+git0ff5/examples/ 0000775 0000000 0000000 00000000000 14600635717 0016603 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/examples/apache_combined.mtail 0000664 0000000 0000000 00000002234 14600635717 0022715 0 ustar 00root root 0000000 0000000 # Copyright 2015 Ben Kochie . All Rights Reserved.
# This file is available under the Apache license.
# Parser for the common apache "NCSA extended/combined" log format
# LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"
counter apache_http_requests_total by request_method, http_version, request_status
counter apache_http_bytes_total by request_method, http_version, request_status
/^/ +
/(?P<hostname>[0-9A-Za-z\.:-]+) / + # %h - remote host (name or address)
/(?P<remote_logname>[0-9A-Za-z-]+) / + # %l - remote logname, usually "-"
/(?P<remote_username>[0-9A-Za-z-]+) / + # %u - authenticated remote user
/\[(?P<timestamp>\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|-)\d{4})\] / + # %t - request timestamp
/"(?P<request_method>[A-Z]+) (?P<request_uri>\S+) (?P<http_version>HTTP\/[0-9\.]+)" / + # \"%r\" - first line of the request
/(?P<request_status>\d{3}) / + # %>s - final response status
/((?P<response_size>\d+)|-) / + # %b - response body bytes; "-" when zero
/"(?P<referer>\S+)" / + # \"%{Referer}i\"
/"(?P<user_agent>[[:print:]]+)"/ + # \"%{User-agent}i\"
/$/ {
  strptime($timestamp, "02/Jan/2006:15:04:05 -0700") # for tests
  apache_http_requests_total[$request_method][$http_version][$request_status]++
  # %b logs "-" for zero-length bodies, so $response_size only matches digits.
  $response_size > 0 {
    apache_http_bytes_total[$request_method][$http_version][$request_status] += $response_size
  }
}
mtail-3.0.0~rc54+git0ff5/examples/apache_common.mtail 0000664 0000000 0000000 00000003052 14600635717 0022424 0 ustar 00root root 0000000 0000000 # Parser for the common apache log format as follow.
# LogFormat "%h %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-agent}i\"
# https://httpd.apache.org/docs/2.4/mod/mod_log_config.html
counter apache_http_requests_total by request_method, http_version, status_code
counter apache_http_bytes_total by request_method, http_version, status_code
gauge apache_http_response_time by remote_host, request_method, request_uri, status_code, user_agent
gauge apache_http_response_size by remote_host, request_method, request_uri, status_code, user_agent
/^/ +
/(?P<remote_host>[0-9A-Za-z\.:-]+) / + # %h - remote host (name or address)
/(?P<remote_logname>[0-9A-Za-z-]+) / + # %l - remote logname, usually "-"
/(?P<remote_username>[0-9A-Za-z-]+) / + # %u - authenticated remote user
/\[(?P<timestamp>\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|-)\d{4})\] / + # %t - request timestamp
/"(?P<request_method>[A-Z]+) (?P<request_uri>\S+) (?P<http_version>HTTP\/[0-9\.]+)" / + # \"%r\" - first line of the request
/(?P<status_code>\d{3}) / + # %>s - final response status
/((?P<response_size>\d+)|-) / + # %b - response body bytes; "-" when zero
/(?P<response_time>\d+) / + # %D - time taken to serve the request, microseconds
/"(?P<referer>\S+)" / + # \"%{Referer}i\"
/"(?P<user_agent>[[:print:]]+)"/ + # \"%{User-agent}i\"
/$/ {
  strptime($timestamp, "02/Jan/2006:15:04:05 -0700") # for tests
  apache_http_requests_total[$request_method][$http_version][$status_code]++
  # %b logs "-" for zero-length bodies, so $response_size only matches digits.
  $response_size > 0 {
    apache_http_bytes_total[$request_method][$http_version][$status_code] += $response_size
    apache_http_response_size[$remote_host][$request_method][$request_uri][$status_code][$user_agent] += $response_size
  }
  apache_http_response_time[$remote_host][$request_method][$request_uri][$status_code][$user_agent] = $response_time
}
mtail-3.0.0~rc54+git0ff5/examples/apache_metrics.mtail 0000664 0000000 0000000 00000004700 14600635717 0022603 0 ustar 00root root 0000000 0000000 # Copyright 2015 Ben Kochie . All Rights Reserved.
# This file is available under the Apache license.
# Parser for a metrics-friendly apache log format
# LogFormat "%v:%p %R %m %>s %H conn=%X %D %O %I %k" metrics
counter http_connections_aborted_total by server_port, handler, method, code, protocol, connection_status
counter http_connections_closed_total by server_port, handler, method, code, protocol, connection_status
counter http_request_size_bytes_total by server_port, handler, method, code, protocol
counter http_response_size_bytes_total by server_port, handler, method, code, protocol
histogram http_request_duration_seconds by server_port, handler, method, code, protocol buckets 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15
/^/ +
/(?P<server_port>\S+) / + # %v:%p - The canonical ServerName of the server serving the request. : The canonical port of the server serving the request.
/(?P<handler>\S+) / + # %R - The handler generating the response (if any).
/(?P<method>[A-Z]+) / + # %m - The request method.
/(?P<code>\d{3}) / + # %>s - Status code.
/(?P<protocol>\S+) / + # %H - The request protocol.
/(?P<connection_status>conn=.) / + # %X - Connection status when response is completed
/(?P<time_us>\d+) / + # %D - The time taken to serve the request, in microseconds.
/(?P<sent_bytes>\d+) / + # %O - Bytes sent, including headers.
/(?P<received_bytes>\d+) / + # %I - Bytes received, including request and headers.
/(?P<keepalive>\d+)/ + # %k - Number of keepalive requests handled on this connection.  (Unreferenced by the actions below.)
/$/ {
  ###
  # HTTP Requests with histogram buckets.
  #
  http_request_duration_seconds[$server_port][$handler][$method][$code][$protocol] = $time_us / 1000000.0
  ###
  # Sent/Received bytes.
  http_response_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $sent_bytes
  http_request_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $received_bytes
  ### Connection status when response is completed:
  # X = Connection aborted before the response completed.
  # + = Connection may be kept alive after the response is sent.
  # - = Connection will be closed after the response is sent.
  / conn=X / {
    http_connections_aborted_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++
  }
  # Will not include all closed connections. :-(
  / conn=- / {
    http_connections_closed_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++
  }
}
mtail-3.0.0~rc54+git0ff5/examples/dhcpd.mtail 0000664 0000000 0000000 00000011067 14600635717 0020722 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# Define the exported metric names. The `by' keyword indicates the metric has
# dimensions. For example, `request_total' counts the frequency of each
# request's "command". The name `command' will be exported as the label name
# for the metric. The command provided in the code below will be exported as
# the label value.
counter request_total by command
counter config_file_errors
counter peer_disconnects
counter dhcpdiscovers by mac
counter bind_xid_mismatch
counter duplicate_lease
counter bad_udp_checksum
counter unknown_subnet
counter dhcpdiscover_nofree by network
counter unknown_lease by ip
counter update_rejected
counter failover_peer_timeout
counter ip_already_in_use
counter ip_abandoned by reason
counter invalid_state_transition
counter negative_poolreq by pool
counter lease_conflicts
# The `syslog' decorator defines a procedure. When a block of mtail code is
# "decorated", it is called before entering the block. The block is entered
# when the keyword `next' is reached.
def syslog {
  # Match either a legacy syslog date ("Jan  2 15:04:05") or an RFC3339 date,
  # then the hostname, program name, optional PID, and the message payload.
  /^(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
  /\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
    # If the legacy_date regexp matched, try this format.
    len($legacy_date) > 0 {
      strptime($legacy_date, "Jan _2 15:04:05")
    }
    # If the RFC3339 style matched, parse it this way.  The layout uses a
    # 24-hour hour ("15") and a colon in the zone offset to agree with the
    # \d{2}:\d{2}:\d{2} time and [+-]\d{2}:\d{2} zone captured above.
    len($rfc3339_date) > 0 {
      strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
    }
    # Call into the decorated block
    next
  }
}
# Define some pattern constants for reuse in the patterns below.
# Reusable pattern constants; the group names here are referenced by the
# rules below as $ip, $network, and $mac.
const IP /\d+(\.\d+){3}/
const MATCH_IP /(?P<ip>/ + IP + /)/
const MATCH_NETWORK /(?P<network>\d+(\.\d+){1,3}\/\d+)/
const MATCH_MAC /(?P<mac>([\da-f]{2}:){5}[\da-f]{2})/
@syslog {
# Request
$message =~ /^(balanced|balancing|BOOTREPLY|BOOTREQUEST|DHCPACK|DHCPDECLINE|DHCPDISCOVER|DHCPINFORM|DHCPNAK|DHCPOFFER|DHCPRELEASE|DHCPREQUEST)/ {
# The lowercased name of the command matched in the regex is used to
# count the frequency of each command. An external collector can use
# this to compute the rate of each command independently.
request_total[tolower($1)]++
# DHCP Discover
$message =~ /^DHCPDISCOVER from / + MATCH_MAC {
# Counts the discovery requests per mac address, which can help
# identify bad clients on the network.
dhcpdiscovers[$mac]++
/network / + MATCH_NETWORK + /: no free leases/ {
# If the range is full, your clients may be having a bad time.
dhcpdiscover_nofree[$network]++
}
}
}
# Config file errors
/Configuration file errors encountered -- exiting/ {
# Counting config parse errors can be useful for detecting bad config
# pushes that made it to production.
config_file_errors++
}
# Peer disconnects
/peer ([^:]+): disconnected/ {
peer_disconnects++
}
# XID mismatches
# The acknowledging peer's name is captured but unreferenced; the group only
# needs a valid name.
/bind update on / + IP + / got ack from (?P<peer>\w+): xid mismatch./ {
  bind_xid_mismatch++
}
# Duplicate lease
/uid lease / + MATCH_IP + / for client / + MATCH_MAC + / is duplicate on / + MATCH_NETWORK {
duplicate_lease++
}
# Bad UDP Checksum
/(?P<count>\d+) bad udp checksums in \d+ packets/ {
  bad_udp_checksum += $count
}
# Unknown subnet
/DHCPDISCOVER from / + MATCH_MAC + / via / + IP + /: unknown network segment/ {
unknown_subnet++
}
# Unknown lease
/DHCPREQUEST for / + IP + /\(/ + IP + /\) from / + MATCH_MAC + / via / + IP + /: unknown lease / + MATCH_IP {
unknown_lease[$ip]++
}
# Update rejected
/bind update on \S+ from \S+ rejected: incoming update is less critical than the outgoing update/ {
update_rejected++
}
/timeout waiting for failover peer \S+/ {
failover_peer_timeout++
}
/ICMP Echo reply while lease / + IP + /valid/ {
ip_already_in_use++
}
/unexpected ICMP Echo reply from / + IP {
ip_already_in_use++
}
/Abandoning IP address / + IP + /: (?P<reason>.*)/ {
  ip_abandoned[$reason]++
}
/bind update on \S+ from \S+ rejected: / + IP + /: invalid state transition/ {
invalid_state_transition++
}
/peer (?P<pool>[^:]+): Got POOLREQ, answering negatively!/ {
  negative_poolreq[$pool]++
}
/Lease conflict at/ {
lease_conflicts++
}
}
mtail-3.0.0~rc54+git0ff5/examples/histogram.mtail 0000664 0000000 0000000 00000002476 14600635717 0021641 0 ustar 00root root 0000000 0000000 # use mtail to extract the values you want in your histogram, and any labels like 'httpcode' and it will create the buckets and histogram metrics for you.
# this example might be something you put on a web server that logs latency. ex;
# GET /foo/bar.html latency=1s httpcode=200
# GET /foo/baz.html latency=0s httpcode=200
# would produce this:
# webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="1"} 1
# webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="2"} 1
# webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="4"} 1
# webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="8"} 1
# webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="+Inf"} 1
# webserver_latency_by_code_sum{httpcode="200",prog="software_errors.mtail"} 1
# webserver_latency_by_code_count{httpcode="200",prog="software_errors.mtail"} 2
#
histogram webserver_latency_by_code by code buckets 0, 1, 2, 4, 8
# Extract the latency value and the HTTP status code; the code becomes the
# 'httpcode' label dimension of the histogram.
/latency=(?P<latency>\d+)s httpcode=(?P<httpcode>\d+)/ {
  webserver_latency_by_code[$httpcode] = $latency
}
# or if you don't need the http code label/dimension furthering the example, just use this
histogram webserver_latency buckets 0, 1, 2, 4, 8
# Unlabelled variant: every observed latency goes into the same histogram.
/latency=(?P<latency>\d+)/ {
  webserver_latency = $latency
}
mtail-3.0.0~rc54+git0ff5/examples/lighttpd.mtail 0000664 0000000 0000000 00000002246 14600635717 0021456 0 ustar 00root root 0000000 0000000 # Copyright 2010 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# mtail module for a lighttpd server
counter request by status
counter time_taken by status
counter bytes_out by subtotal, status
counter bytes_in by status
counter requests by proxy_cache
# One access-log line.  The names for $access_time, $url, $status, $bytes_out,
# $bytes_body, $bytes_in, $time_taken, $proxied_for and $request_ip are
# required by the actions below; the remaining group names are best guesses
# from the field positions — TODO confirm against the accesslog.format in use.
const ACCESSLOG_RE // +
  /(?P<request_ip>\S+) (?P<remote_logname>\S+) (?P<remote_user>\S+)/ +
  / \[(?P<access_time>[^\]]+)\] "(?P<request_method>\S+) (?P<url>.+?) / +
  /(?P<protocol>\S+)" (?P<status>\d+) (?P<bytes_out>\d+) (?P<bytes_body>\d+)/ +
  / (?P<bytes_in>\d+) (?P<time_taken>\d+) "(?P<referer>[^"]+)" / +
  /"(?P<proxied_for>[^"]+)"/
# /var/log/lighttpd/access.log
getfilename() =~ /lighttpd.access.log/ {
// + ACCESSLOG_RE {
# Parse an accesslog entry.
$url == "/healthz" {
# nothing
}
otherwise {
strptime($access_time, "02/Jan/2006:15:04:05 -0700")
request[$status]++
time_taken[$status] += $time_taken
bytes_out["resp_body", $status] += $bytes_body
bytes_out["resp_header", $status] += $bytes_out - $bytes_body
bytes_in[$status] += $bytes_in
$proxied_for != "-" {
requests[$request_ip]++
}
}
}
}
mtail-3.0.0~rc54+git0ff5/examples/linecount.mtail 0000664 0000000 0000000 00000000333 14600635717 0021632 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# The most basic of mtail programmes -- count the number of lines read.
counter lines_total
# /$/ matches the end of every line, so this action fires once per line read.
/$/ {
lines_total++
}
mtail-3.0.0~rc54+git0ff5/examples/mysql_slowqueries.mtail 0000664 0000000 0000000 00000005036 14600635717 0023446 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# mysql-slowqueries -- mtail module tracking slow mysql queries
hidden text user
hidden text host
hidden text query_type
hidden text service
hidden gauge tmp_query_time
hidden gauge tmp_lock_time
hidden gauge partial
hidden gauge time
counter query_time by type, server, service, user
counter lock_time by type, server, service, user
counter query_time_overall_sum
counter query_time_total_count
counter lock_time_overall_sum
counter lock_time_total_count
# Example lines from the file and regex to match them:
# # User@Host: dbuser[dbuser] @ host [192.0.2.87]
const USER_HOST /^# User@Host: ([a-zA-Z]+)\[[a-zA-Z]+\] @ ([^\. ]+)/
# # Query_time: 30 Lock_time: 0 Rows_sent: 0 Rows_examined: 0
const QUERY_TIME /^# Query_time: (\d+)\s*Lock_time: (\d+)/
# UPDATE ... # outbox;
const FULL_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .* # (.*);$/
# Not all queries have helpful comments at the end
const UNINSTRUMENTED_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .*;$/
# If the query gets split up, the service may end up on another line
const PARTIAL_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .*[^;]$/
# This one has the potential to catch too many things, so it can only be a last
# resort match.
const END_QUERY_LINE /.*;$/
/^# Time: (\d{6} .\d:\d\d:\d\d)/ {
strptime($1, "060102 3:04:05")
time = timestamp()
}
/^SET timestamp=(\d+);/ {
time = $1
}
settime(time)
// + USER_HOST {
user = $1
host = $2
}
# break if no user set yet
user == "" {
stop
}
// + QUERY_TIME {
tmp_query_time = $1
tmp_lock_time = $2
query_time_overall_sum += tmp_query_time
query_time_total_count++
lock_time_overall_sum += tmp_lock_time
lock_time_total_count++
}
// + FULL_QUERY_LINE {
# We should have everything we need now.
query_type = tolower($1)
service = $2
query_time[query_type, host, service, user] += tmp_query_time
lock_time[query_type, host, service, user] += tmp_lock_time
}
// + UNINSTRUMENTED_QUERY_LINE {
# We should have everything we need now.
query_type = tolower($1)
service = "n/a"
query_time[query_type, host, service, user] += tmp_query_time
lock_time[query_type, host, service, user] += tmp_lock_time
}
// + PARTIAL_QUERY_LINE {
query_type = tolower($1)
partial = 1
}
// + END_QUERY_LINE && partial == 1 {
partial = 0
/.*# (.*)$/ {
service = $1
}
otherwise {
service = "n/a"
}
query_time[query_type, host, service, user] += tmp_query_time
lock_time[query_type, host, service, user] += tmp_lock_time
}
mtail-3.0.0~rc54+git0ff5/examples/nocode.mtail 0000664 0000000 0000000 00000000361 14600635717 0021102 0 ustar 00root root 0000000 0000000 # This is an example mtail programme for exporting no code instrumentation
#
# No code has no instrumentation, thus requires an external program to sift
# and export metrics from other sources; in this case with mtail from any log
# files.
mtail-3.0.0~rc54+git0ff5/examples/ntpd.mtail 0000664 0000000 0000000 00000003171 14600635717 0020602 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
# Syslog decorator
def syslog {
  # Match either a legacy syslog date ("Jan  2 15:04:05") or an RFC3339 date,
  # then the hostname, program name, optional PID, and the message payload.
  /^(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
  /\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
    len($legacy_date) > 0 {
      strptime($legacy_date, "Jan _2 15:04:05")
    }
    # The layout uses a 24-hour hour ("15") and a colon in the zone offset to
    # agree with the \d{2}:\d{2}:\d{2} time and [+-]\d{2}:\d{2} zone above.
    len($rfc3339_date) > 0 {
      strptime($rfc3339_date, "2006-01-02T15:04:05-07:00")
    }
    next
  }
}
@syslog {
counter int_syscalls
/select\(.*\) error: Interrupted system call/ {
int_syscalls++
}
counter recvbuf_overflows
gauge last_recvbuf
/too many recvbufs allocated \((\d+)\)/ {
recvbuf_overflows++
last_recvbuf = $1
}
counter exits
/ntpd exiting on signal 15/ {
exits++
}
counter starts
/x?ntpd .* \w+\s+\w+\s+\d+\s+\d+:\d+:\d+\s+\w+\s+\d+\s+\(\d\)/ {
starts++
}
gauge sync_status
/kernel time sync (status (change)?|enabled|disabled) (?P<status>\d+)/ {
  sync_status = $status
}
# PLL status change.
#
# Described here: http://obswww.unige.ch/~bartho/xntp_faq/faq3Care.htm#araee
counter pll_changes
gauge pll_status
/kernel pll status change (?P<status>\d+)/ {
  pll_changes++
  pll_status = $status
}
counter peer_syncs
/synchronized to (\d+\.\d+\.\d+\.\d+|LOCAL\(\d\)), stratum(=| )(\d+)/ {
peer_syncs++
}
counter driftfile_errors
/can't open .*drift.*: No such file or directory/ {
driftfile_errors++
}
counter sync_lost_total
/synchronisation lost/ {
sync_lost_total++
}
} # end syslog
mtail-3.0.0~rc54+git0ff5/examples/ntpd_peerstats.mtail 0000664 0000000 0000000 00000002015 14600635717 0022670 0 ustar 00root root 0000000 0000000 # Peerstats log handling
gauge peer_status by peer
gauge peer_select by peer
gauge peer_count by peer
gauge peer_code by peer
gauge peer_offset by peer
gauge peer_delay by peer
gauge peer_dispersion by peer
counter num_peerstats by peer
# TODO(jaq) seconds is int, not float
/^(?P<days>\d+) (?P<seconds>\d+)\.\d+ (?P<peer>\d+\.\d+\.\d+\.\d+) (?P<status>[0-9a-f]+) (?P<offset>-?\d+\.\d+) (?P<delay>\d+\.\d+) (?P<dispersion>\d+\.\d+)/ {
  # Unix epoch in MJD is 40587.
  settime(($days - 40587) * 86400 + $seconds)
  peer_offset[$peer] = $offset
  peer_delay[$peer] = $delay
  peer_dispersion[$peer] = $dispersion
  # Peer status word decoding, MSB-first bit numbering:
  # http://www.cis.udel.edu/~mills/ntp/html/decode.html#peer
  # bits 0-4
  peer_status[$peer] = (strtol($status, 16) >> (16 - 5)) & ((2 ** 5) - 1)
  # bits 5-7
  peer_select[$peer] = (strtol($status, 16) >> (16 - 8)) & ((2 ** 3) - 1)
  # bits 8-11 (shift by 16-12 and mask 4 bits)
  peer_count[$peer] = (strtol($status, 16) >> (16 - 12)) & ((2 ** 4) - 1)
  # bits 12-15
  peer_code[$peer] = strtol($status, 16) & ((2 ** 4) - 1)
  num_peerstats[$peer]++
}
mtail-3.0.0~rc54+git0ff5/examples/postfix.mtail 0000664 0000000 0000000 00000014616 14600635717 0021337 0 ustar 00root root 0000000 0000000 # vim:ts=2:sw=2:et:ai:sts=2:cinoptions=(0
# Copyright 2017 Martina Ferrari. All Rights Reserved.
# This file is available under the Apache license.
#
# Syslog parser for Postfix, based on the parsing rules from:
# https://github.com/kumina/postfix_exporter
# Copyright 2017 Kumina, https://kumina.nl/
# Available under the Apache license.

# "delays=a/b/c/d" fields of a delivery report: a = before queue manager,
# b = in queue manager, c = connection setup, d = transmission.
const DELIVERY_DELAY_LINE /.*, relay=(?P<relay>\S+), .*,/ +
  / delays=(?P<bqm>[0-9\.]+)\/(?P<qm>[0-9\.]+)\/(?P<cs>[0-9\.]+)\/(?P<tx>[0-9\.]+),\s/
const SMTP_TLS_LINE /(\S+) TLS connection established to \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/
const SMTPD_TLS_LINE /(\S+) TLS connection established from \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/
const QMGR_INSERT_LINE /:.*, size=(?P<size>\d+), nrcpt=(?P<nrcpt>\d+)/
const QMGR_REMOVE_LINE /: removed$/

# Match the syslog preamble (legacy or RFC 3339 timestamp), hostname and the
# postfix process name; everything after the colon is the message payload.
/^(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ +
/\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+postfix\/(?P<application>[\w\.\/-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ {
  len($legacy_date) > 0 {
    strptime($2, "Jan _2 15:04:05")
  }
  len($rfc3339_date) > 0 {
    strptime($rfc3339_date, "2006-01-02T03:04:05-0700")
  }

  # Total number of messages processed by cleanup.
  counter postfix_cleanup_messages_processed_total
  # Total number of messages rejected by cleanup.
  counter postfix_cleanup_messages_rejected_total
  $application == "cleanup" {
    /: message-id=</ {
      postfix_cleanup_messages_processed_total++
    }
    /: reject: / {
      postfix_cleanup_messages_rejected_total++
    }
  }

  # LMTP message processing time in seconds.
  # Bucket list restored to match the documenting comment and the sibling
  # pipe/smtp histograms (the extracted copy dropped the 1e0 bucket).
  histogram postfix_lmtp_delivery_delay_seconds by stage buckets 0.001, 0.01, 0.1, 1, 10, 100, 1e3
  # buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
  $application == "lmtp" {
    // + DELIVERY_DELAY_LINE {
      # 1st field: before_queue_manager
      postfix_lmtp_delivery_delay_seconds["before_queue_manager"] = $bqm
      # 2nd field: queue_manager
      postfix_lmtp_delivery_delay_seconds["queue_manager"] = $qm
      # 3rd field: connection_setup
      postfix_lmtp_delivery_delay_seconds["connection_setup"] = $cs
      # 4th field: transmission
      postfix_lmtp_delivery_delay_seconds["transmission"] = $tx
    }
  }

  # Pipe message processing time in seconds.
  histogram postfix_pipe_delivery_delay_seconds by relay, stage buckets 0.001, 0.01, 0.1, 1, 10, 100, 1e3
  # buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
  $application == "pipe" {
    // + DELIVERY_DELAY_LINE {
      # 1st field: before_queue_manager
      postfix_pipe_delivery_delay_seconds[$relay]["before_queue_manager"] = $bqm
      # 2nd field: queue_manager
      postfix_pipe_delivery_delay_seconds[$relay]["queue_manager"] = $qm
      # 3rd field: connection_setup
      postfix_pipe_delivery_delay_seconds[$relay]["connection_setup"] = $cs
      # 4th field: transmission
      postfix_pipe_delivery_delay_seconds[$relay]["transmission"] = $tx
    }
  }

  # Number of recipients per message inserted into the mail queues.
  histogram postfix_qmgr_messages_inserted_recipients buckets 1, 2, 4, 7, 16, 32, 64, 128
  # buckets: 1, 2, 4, 8, 16, 32, 64, 128
  # Size of messages inserted into the mail queues in bytes.
  histogram postfix_qmgr_messages_inserted_size_bytes buckets 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9
  # buckets: 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9
  # Total number of messages removed from mail queues.
  counter postfix_qmgr_messages_removed_total
  $application == "qmgr" {
    // + QMGR_INSERT_LINE {
      postfix_qmgr_messages_inserted_recipients = $nrcpt
      postfix_qmgr_messages_inserted_size_bytes = $size
    }
    // + QMGR_REMOVE_LINE {
      postfix_qmgr_messages_removed_total++
    }
  }

  # SMTP message processing time in seconds.
  histogram postfix_smtp_delivery_delay_seconds by stage buckets 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
  # buckets: 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3
  # Total number of outgoing TLS connections.
  counter postfix_smtp_tls_connections_total by trust, protocol, cipher, secret_bits, algorithm_bits
  $application == "smtp" {
    // + DELIVERY_DELAY_LINE {
      # 1st field: before_queue_manager
      postfix_smtp_delivery_delay_seconds["before_queue_manager"] = $bqm
      # 2nd field: queue_manager
      postfix_smtp_delivery_delay_seconds["queue_manager"] = $qm
      # 3rd field: connection_setup
      postfix_smtp_delivery_delay_seconds["connection_setup"] = $cs
      # 4th field: transmission
      postfix_smtp_delivery_delay_seconds["transmission"] = $tx
    }
    // + SMTP_TLS_LINE {
      postfix_smtp_tls_connections_total[$1][$2][$3][$4][$5]++
    }
  }

  # Total number of incoming connections.
  counter postfix_smtpd_connects_total
  # Total number of incoming disconnections.
  counter postfix_smtpd_disconnects_total
  # Total number of connections for which forward-confirmed DNS cannot be resolved.
  counter postfix_smtpd_forward_confirmed_reverse_dns_errors_total
  # Total number of connections lost.
  counter postfix_smtpd_connections_lost_total by after_stage
  # Total number of messages processed.
  counter postfix_smtpd_messages_processed_total by sasl_username
  # Total number of NOQUEUE rejects.
  counter postfix_smtpd_messages_rejected_total by code
  # Total number of SASL authentication failures.
  counter postfix_smtpd_sasl_authentication_failures_total
  # Total number of incoming TLS connections.
  counter postfix_smtpd_tls_connections_total by trust, protocol, cipher, secret_bits, algorithm_bits
  $application =~ /smtpd/ {
    / connect from / {
      postfix_smtpd_connects_total++
    }
    / disconnect from / {
      postfix_smtpd_disconnects_total++
    }
    / warning: hostname \S+ does not resolve to address / {
      postfix_smtpd_forward_confirmed_reverse_dns_errors_total++
    }
    / lost connection after (\w+) from / {
      postfix_smtpd_connections_lost_total[$1]++
    }
    /: client=/ {
      /, sasl_username=(\S+)/ {
        postfix_smtpd_messages_processed_total[$1]++
      } else {
        postfix_smtpd_messages_processed_total[""]++
      }
    }
    /NOQUEUE: reject: RCPT from \S+: (\d+) / {
      postfix_smtpd_messages_rejected_total[$1]++
    }
    /warning: \S+: SASL \S+ authentication failed: / {
      postfix_smtpd_sasl_authentication_failures_total++
    }
    // + SMTPD_TLS_LINE {
      postfix_smtpd_tls_connections_total[$1][$2][$3][$4][$5]++
    }
  }
}
mtail-3.0.0~rc54+git0ff5/examples/rails.mtail 0000664 0000000 0000000 00000002167 14600635717 0020753 0 ustar 00root root 0000000 0000000 # Copyright 2017 Pablo Carranza . All Rights Reserved.
# This file is available under the Apache license.
#
# Rails production log parsing
counter rails_requests_started_total
counter rails_requests_started by verb
counter rails_requests_completed_total
counter rails_requests_completed by status
histogram rails_requests_completed_seconds by status buckets 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 15.0

# Capture name restored to match `$verb` below.
/^Started (?P<verb>[A-Z]+) .*/ {
  ###
  # Started HTTP requests by verb (GET, POST, etc.)
  #
  rails_requests_started_total++
  rails_requests_started[$verb]++
}

# Capture names restored to match `$status` and `$request_seconds`; the
# duration is logged in milliseconds and converted to seconds below.
/^Completed (?P<status>\d{3}) .+ in (?P<request_seconds>\d+)ms .*$/ {
  ###
  # Total numer of completed requests by status
  #
  rails_requests_completed_total++
  rails_requests_completed[$status]++
  ###
  # Completed requests by status with histogram buckets
  #
  # These statements "fall through", so the histogram is cumulative. The
  # collecting system can compute the percentile bands by taking the ratio of
  # each bucket value over the final bucket.
  rails_requests_completed_seconds[$status] = $request_seconds / 1000.0
}
mtail-3.0.0~rc54+git0ff5/examples/rsyncd.mtail 0000664 0000000 0000000 00000003221 14600635717 0021133 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter bytes_total by operation
# total connections, and total connection time can be used to compute the
# average connection time.
counter connections_total
counter connection_time_total as "connection-time_total"
# See which modules are popular.
counter transfers_total by operation, module
# Use this gauge to measure duration between start and end time per connection.
# It is never used externally, so mark as `hidden'.
hidden gauge connection_time by pid

# rsyncd log preamble: "YYYY/MM/DD HH:MM:SS [pid] ".
/^(?P<date>\d+\/\d+\/\d+ \d+:\d+:\d+) \[(?P<pid>\d+)\] / {
  strptime($date, "2006/01/02 15:04:05")

  # Transfer log
  # %o %h [%a] %m (%u) %f %l
  /(?P<operation>\S+) (\S+) \[\S+\] (?P<module>\S+) \(\S*\) \S+ (?P<bytes>\d+)/ {
    transfers_total[$operation, $module]++
  }

  # Connection starts
  /connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ {
    connections_total++

    # Record the start time of the connection, using the log timestamp.
    connection_time[$pid] = timestamp()
  }

  # Connection summary when session closed
  /sent (?P<sent>\d+) bytes received (?P<received>\d+) bytes total size \d+/ {
    # Sum total bytes across all sessions for this process
    bytes_total["sent"] += $sent
    bytes_total["received"] += $received

    # Count total time spent with connections open, according to the log timestamp.
    connection_time_total += timestamp() - connection_time[$pid]

    # Delete the datum referenced in this dimensional metric. We assume that
    # this will never happen again, and hint to the VM that we can garbage
    # collect the memory used.
    del connection_time[$pid]
  }
}
mtail-3.0.0~rc54+git0ff5/examples/sftp.mtail 0000664 0000000 0000000 00000002246 14600635717 0020613 0 ustar 00root root 0000000 0000000 # Copyright 2008 Google Inc. All Rights Reserved.
# This file is available under the Apache license.
counter login_count by username
counter logout_count by username
counter bytes_read
counter files_read
counter bytes_written
counter files_written
counter user_bytes_read by username
counter user_files_read by username
counter user_bytes_written by username
counter user_files_written by username

# Syslog preamble for sftp-server messages; capture names restored to match
# the `$var` references in the actions below.
/^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ {
  strptime($date, "Jan _2 15:04:05")

  /session opened for local user (?P<username>\w+)/ {
    login_count[$username]++
  }

  /session closed for local user (?P<username>\w+)/ {
    logout_count[$username]++
  }

  /close "[^"]+" bytes read (?P<read>\d+) written (?P<written>\d+)/ {
    $read != 0 {
      bytes_read += $read
      files_read++
    }
    $written != 0 {
      bytes_written += $written
      files_written++
    }

    # Attribute the transfer to a user based on the home directory in the path.
    /close "\/home\/(?P<username>[^\/]+)\/[^"]+"/ {
      $read != 0 {
        user_bytes_read[$username] += $read
        user_files_read[$username]++
      }
      $written != 0 {
        user_bytes_written[$username] += $written
        user_files_written[$username]++
      }
    }
  }
}
mtail-3.0.0~rc54+git0ff5/examples/timer.mtail 0000664 0000000 0000000 00000000336 14600635717 0020755 0 ustar 00root root 0000000 0000000 # `timer` is the same as gauge but has special meaning for statsd export.
# Otherwise just use a gauge.
timer request_time_ms by vhost

# Capture names restored to match `$vhost` and `$latency_s` below.
/(?P<vhost>\S+) (?P<latency_s>\d+)/ {
  request_time_ms[$vhost] = $latency_s / 1000
}
mtail-3.0.0~rc54+git0ff5/examples/vsftpd.mtail 0000664 0000000 0000000 00000005410 14600635717 0021141 0 ustar 00root root 0000000 0000000 # Copyright 2011 Google Inc. All Rights Reserved.
# This file is available under the Apache license.

# A mtail module for monitoring vsftpd logs
#
# Configure your vsftpd to write the xferlog as well as vsftpd.log
hidden text direction
counter bytes_transferred by direction
counter transfer_time by direction
counter transfers by direction
counter connects
counter logins
counter uploads
counter commands by command
counter responses by response
hidden gauge sessions by client
counter session_time

# Decorator: parse the timestamp shared by both log formats, then run the
# decorated block on the same line.
def vsftpd_timestamp {
  # Mon Feb 21 15:21:32 2011
  /^\w+\s(?P<date>\w+\s+\d+\s\d+:\d+:\d+\s\d+)/ {
    strptime($date, "Jan _2 15:04:05 2006")
    next
  }
}

# wu-ftpd style xferlog record; capture names restored to match the `$var`
# references in the actions below.
const XFERLOG_RE // +
  # e.g. 1 172.18.115.36 528
  # time spent transferring
  /\s(?P<transfertime>\d+)/ +
  # remote host
  /\s\d+\.\d+\.\d+\.\d+/ +
  # bytes transferred
  /\s(?P<bytestransferred>\d+)/ +
  # filename
  /\s(?P<filename>\S+)/ +
  # e.g. b _ i a anonymous@ ftp 0 * c
  # transfertype
  /\s\S/ +
  # special action flag
  /\s\S/ +
  # direction
  /\s(?P<direction>\S)/ +
  # access mode
  /\s\S/ +
  # username
  /\s\S+/ +
  # service name
  /\s\S+/ +
  # authentication method
  /\s\d/ +
  # authenticated id
  /\s\S+/ +
  # completion status
  /\s(?P<completionstatus>\S)/

const VSFTPD_LOG_RE // +
  / \[pid \d+\]/ +
  /( \[\w+\])?/ +
  / (?P<command>CONNECT|OK LOGIN|OK UPLOAD|FTP (command|response)):/ +
  / Client "(?P<client>\d+\.\d+\.\d+\.\d+)"/ +
  /(, (?P<payload>.*))?/

const PAYLOAD_RESPONSE_RE /^"(\d{3})[" -]/
const PAYLOAD_COMMAND_RE /^"(\w{4})[" -]/

@vsftpd_timestamp {
  getfilename() =~ /xferlog/ {
    // + XFERLOG_RE {
      # Handles log entries from the wuftpd format xferlog.
      $direction == "i" {
        direction = "incoming"
      }
      $direction == "o" {
        direction = "outgoing"
      }
      $completionstatus == "c" {
        transfers[direction]++
      }
      transfer_time[direction] += $transfertime
      bytes_transferred[direction] += $bytestransferred
    }
  }

  getfilename() =~ /vsftpd.log/ {
    // + VSFTPD_LOG_RE {
      # Handle vsftpd.log log file.
      $command == "CONNECT" {
        sessions[$client] = timestamp()
        del sessions[$client] after 168h
        connects++
      }
      $command == "OK LOGIN" {
        logins++
      }
      $command == "OK UPLOAD" {
        uploads++
      }
      $command == "FTP command" {
        $payload =~ // + PAYLOAD_COMMAND_RE {
          commands[$1]++
          $1 == "QUIT" {
            session_time += timestamp() - sessions[$client]
            del sessions[$client]
          }
        }
      }
      $command == "FTP response" {
        $payload =~ // + PAYLOAD_RESPONSE_RE {
          responses[$1]++
        }
      }
    }
  }
}
mtail-3.0.0~rc54+git0ff5/go.mod 0000664 0000000 0000000 00000001773 14600635717 0016103 0 ustar 00root root 0000000 0000000 module github.com/google/mtail
go 1.21.1
require (
contrib.go.opencensus.io/exporter/jaeger v0.2.1
github.com/golang/glog v1.2.0
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/google/go-cmp v0.6.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/common v0.51.1
go.opencensus.io v0.24.0
golang.org/x/sys v0.18.0
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
golang.org/x/sync v0.3.0 // indirect
google.golang.org/api v0.105.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.56.3 // indirect
google.golang.org/protobuf v1.33.0 // indirect
)
mtail-3.0.0~rc54+git0ff5/go.sum 0000664 0000000 0000000 00000103371 14600635717 0016125 0 ustar 00root root 0000000 0000000 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI=
contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.105.0 h1:t6P9Jj+6XTn4U9I2wycQai6Q/Kz7iOT+QzjJ3G2V4x8=
google.golang.org/api v0.105.0/go.mod h1:qh7eD5FJks5+BcE+cjBIm6Gz8vioK7EHvnlniqXBnqI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
mtail-3.0.0~rc54+git0ff5/hooks/ 0000775 0000000 0000000 00000000000 14600635717 0016110 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/hooks/build 0000775 0000000 0000000 00000000660 14600635717 0017137 0 ustar 00root root 0000000 0000000 #!/bin/bash
# $IMAGE_NAME var is injected into the build so the tag is correct.
# Fail fast on any command error and on unset variables (e.g. a missing
# $IMAGE_NAME would otherwise silently produce a broken `docker build` call).
set -euo pipefail

echo "Build hook running"
# Quote every substitution so values containing spaces (e.g. an unusual
# remote URL) cannot be word-split into separate arguments.
docker build \
  --build-arg version="$(git describe --tags --always)" \
  --build-arg commit_hash="$(git rev-parse HEAD)" \
  --build-arg vcs_url="$(git config --get remote.origin.url)" \
  --build-arg vcs_branch="$(git rev-parse --abbrev-ref HEAD)" \
  --build-arg build_date="$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
  -t "$IMAGE_NAME" .
mtail-3.0.0~rc54+git0ff5/hooks/post_checkout 0000775 0000000 0000000 00000000152 14600635717 0020706 0 ustar 00root root 0000000 0000000 #!/bin/bash
echo "Unshallowing to get correct tags to work."
# `git fetch --unshallow` errors out on an already-complete repository, so
# only pass it when the clone is actually shallow; otherwise just fetch tags.
if [ -f "$(git rev-parse --git-dir)/shallow" ]; then
  git fetch --tags --unshallow --quiet origin
else
  git fetch --tags --quiet origin
fi
mtail-3.0.0~rc54+git0ff5/hooks/post_push 0000775 0000000 0000000 00000002105 14600635717 0020060 0 ustar 00root root 0000000 0000000 #!/bin/bash
# hooks/post_push
# https://docs.docker.com/docker-cloud/builds/advanced/
# https://semver.org/

# add_tag: tag the already-pushed $IMAGE_NAME with the given tag and push it.
function add_tag() {
  echo "Adding tag ${1}"
  docker tag "$IMAGE_NAME" "$DOCKER_REPO:$1"
  docker push "$DOCKER_REPO:$1"
}

TAG=$(git describe --tag --match "v*")

# The version is everything before the first '-' (e.g. v3.0.0); the optional
# pre-release component (e.g. rc54) is the second '-'-separated field.
VERSION=${TAG%%-*}
VERSION=${VERSION#v}
MAJOR=$(echo "$VERSION" | awk -F'.' '{print $1}')
MINOR=$(echo "$VERSION" | awk -F'.' '{print $2}')
PATCH=$(echo "$VERSION" | awk -F'.' '{print $3}')
PRLS=$(echo "$TAG" | awk -F'-' '{print $2}')

num='^[0-9]+$'
pre='^[0-9A-Za-z\.]+$'

echo "Current Build: ${TAG}"
# Push progressively more specific tags: MAJOR, MAJOR.MINOR,
# MAJOR.MINOR.PATCH, and MAJOR.MINOR.PATCH-PRERELEASE when present.
if [[ -n "$MAJOR" && "$MAJOR" =~ $num ]]; then
  add_tag "${MAJOR}"
  if [[ -n "$MINOR" && "$MINOR" =~ $num ]]; then
    add_tag "${MAJOR}.${MINOR}"
    if [[ -n "$PATCH" && "$PATCH" =~ $num ]]; then
      add_tag "${MAJOR}.${MINOR}.${PATCH}"
      # A purely numeric second field is git-describe's commit count, not a
      # pre-release identifier, so require non-numeric alphanumeric text.
      if [[ -n "$PRLS" && ! "$PRLS" =~ $num && "$PRLS" =~ $pre ]]; then
        add_tag "${MAJOR}.${MINOR}.${PATCH}-${PRLS}"
      fi
    fi
  fi
fi
exit $?
mtail-3.0.0~rc54+git0ff5/internal/ 0000775 0000000 0000000 00000000000 14600635717 0016601 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/internal/exporter/ 0000775 0000000 0000000 00000000000 14600635717 0020451 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/internal/exporter/collectd.go 0000664 0000000 0000000 00000002515 14600635717 0022574 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"strings"
"time"
"github.com/google/mtail/internal/metrics"
)
const (
	// collectdFormat is the PUTVAL line template.  The verbs are, in order:
	// hostname, prefix flag, program name, collectd type, metric name with
	// labels, push interval in seconds, timestamp, and value.
	// See https://collectd.org/wiki/index.php/Plain_text_protocol#PUTVAL
	collectdFormat = "PUTVAL \"%s/%smtail-%s/%s-%s\" interval=%d %s:%s\n"
)
var (
	// collectdSocketPath, when non-empty, enables pushing to collectd over
	// the named unix socket.
	collectdSocketPath = flag.String("collectd_socketpath", "",
		"Path to collectd unixsock to write metrics to.")
	// collectdPrefix is inserted before the "mtail-" plugin component of
	// each PUTVAL identifier.
	collectdPrefix = flag.String("collectd_prefix", "",
		"Prefix to use for collectd metrics.")
	// Counters of attempted and successful collectd exports.
	collectdExportTotal   = expvar.NewInt("collectd_export_total")
	collectdExportSuccess = expvar.NewInt("collectd_export_success")
)
// metricToCollectd encodes the metric data in the collectd text protocol format. The
// metric lock is held before entering this function.
// l carries the datum (timestamp and value) for one label combination of m;
// interval tells collectd how often to expect a new value.
func metricToCollectd(hostname string, m *metrics.Metric, l *metrics.LabelSet, interval time.Duration) string {
	return fmt.Sprintf(collectdFormat,
		hostname,
		*collectdPrefix,
		m.Program,
		kindToCollectdType(m.Kind),
		// Labels are folded into the name with "-" separators; any "-" in
		// a label key or value is rewritten to "_" to keep the identifier
		// unambiguous.
		formatLabels(m.Name, l.Labels, "-", "-", "_"),
		int64(interval.Seconds()),
		l.Datum.TimeString(),
		l.Datum.ValueString())
}
// kindToCollectdType maps an mtail metric kind to a collectd data-source
// type name.  Timers have no collectd counterpart and are reported as
// gauges; all other kinds use their own lowercased kind name.
func kindToCollectdType(kind metrics.Kind) string {
	if kind == metrics.Timer {
		return "gauge"
	}
	return strings.ToLower(kind.String())
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/export.go 0000664 0000000 0000000 00000015526 14600635717 0022332 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Package exporter provides the interface for getting metrics out of mtail,
// into your monitoring system of choice.
package exporter
import (
"context"
"expvar"
"flag"
"fmt"
"io"
"net"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/pkg/errors"
)
// Commandline Flags.
var (
	// writeDeadline bounds both the dial and the write of each metric push.
	writeDeadline = flag.Duration("metric_push_write_deadline", 10*time.Second, "Time to wait for a push to succeed before exiting with an error.")
)
// Exporter manages the export of metrics to passive and active collectors.
type Exporter struct {
	ctx            context.Context // governs shutdown of the push loop
	wg             sync.WaitGroup  // tracks the Exporter's own goroutines
	store          *metrics.Store  // source of the metrics to export
	pushInterval   time.Duration   // period between pushes; <= 0 disables the push loop
	hostname       string          // hostname reported in exported metrics
	omitProgLabel  bool            // if set, don't put program names in metric labels
	emitTimestamp  bool            // if set, send metric timestamps to collectors
	exportDisabled bool            // if set, no export loop is started
	pushTargets    []pushOptions   // registered push destinations
	initDone       chan struct{}   // closed once New has finished initializing
}
// Option configures a new Exporter.
type Option func(*Exporter) error

// Hostname specifies the mtail hostname to use in exported metrics.
func Hostname(hostname string) Option {
	return func(e *Exporter) error {
		e.hostname = hostname
		return nil
	}
}

// OmitProgLabel sets the Exporter to not put program names in metric labels.
func OmitProgLabel() Option {
	return func(e *Exporter) error {
		e.omitProgLabel = true
		return nil
	}
}

// EmitTimestamp instructs the exporter to send metric's timestamps to collectors.
func EmitTimestamp() Option {
	return func(e *Exporter) error {
		e.emitTimestamp = true
		return nil
	}
}

// PushInterval sets the period between pushes to the registered push targets.
func PushInterval(opt time.Duration) Option {
	return func(e *Exporter) error {
		e.pushInterval = opt
		return nil
	}
}

// DisableExport prevents the Exporter from starting its export loop.
func DisableExport() Option {
	return func(e *Exporter) error {
		e.exportDisabled = true
		return nil
	}
}
var (
	// ErrNeedsStore is returned by New when the metrics.Store is nil.
	ErrNeedsStore = errors.New("exporter needs a Store")
	// ErrNeedsWaitgroup is returned by New when the WaitGroup is nil.
	ErrNeedsWaitgroup = errors.New("exporter needs a WaitGroup")
)
// New creates a new Exporter.
// store and wg must be non-nil.  Options are applied first, then any push
// targets enabled by the collectd/graphite/statsd flags are registered and
// the periodic push loop is started.  A goroutine accounted for in wg
// manages shutdown: it waits for ctx cancellation, then for the Exporter's
// own goroutines to finish.
func New(ctx context.Context, wg *sync.WaitGroup, store *metrics.Store, options ...Option) (*Exporter, error) {
	if store == nil {
		return nil, ErrNeedsStore
	}
	if wg == nil {
		return nil, ErrNeedsWaitgroup
	}
	e := &Exporter{
		ctx:      ctx,
		store:    store,
		initDone: make(chan struct{}),
	}
	// Closing initDone releases the shutdown and push goroutines, which
	// wait on it so they never observe a partially-constructed Exporter.
	defer close(e.initDone)
	if err := e.SetOption(options...); err != nil {
		return nil, err
	}
	// defaults after options have been set
	if e.hostname == "" {
		var err error
		e.hostname, err = os.Hostname()
		if err != nil {
			return nil, errors.Wrap(err, "getting hostname")
		}
	}
	// One push target per push service enabled on the command line.
	if *collectdSocketPath != "" {
		o := pushOptions{"unix", *collectdSocketPath, metricToCollectd, collectdExportTotal, collectdExportSuccess}
		e.RegisterPushExport(o)
	}
	if *graphiteHostPort != "" {
		o := pushOptions{"tcp", *graphiteHostPort, metricToGraphite, graphiteExportTotal, graphiteExportSuccess}
		e.RegisterPushExport(o)
	}
	if *statsdHostPort != "" {
		o := pushOptions{"udp", *statsdHostPort, metricToStatsd, statsdExportTotal, statsdExportSuccess}
		e.RegisterPushExport(o)
	}
	e.StartMetricPush()
	wg.Add(1)
	// This routine manages shutdown of the Exporter.
	go func() {
		defer wg.Done()
		<-e.initDone
		// Wait for the context to be completed before waiting for subroutines.
		if !e.exportDisabled {
			<-e.ctx.Done()
		}
		e.wg.Wait()
	}()
	return e, nil
}
// SetOption takes one or more option functions and applies them in order to
// the Exporter, stopping at and returning the first error encountered.
func (e *Exporter) SetOption(options ...Option) error {
	for i := range options {
		if err := options[i](e); err != nil {
			return err
		}
	}
	return nil
}
// formatLabels converts a metric name and key-value map of labels to a single
// string for exporting to the correct output format for each export target.
// ksep and sep mark what to use for key/val separator, and between label
// separators respectively.  If not empty, rep is used to replace cases of
// ksep and sep in the original strings.
func formatLabels(name string, m map[string]string, ksep, sep, rep string) string {
	if len(m) == 0 {
		return name
	}
	// Emit labels in sorted key order so output is deterministic.
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	// clean scrubs any occurrence of the separators out of a key or value.
	clean := func(s string) string {
		return strings.ReplaceAll(strings.ReplaceAll(s, ksep, rep), sep, rep)
	}
	pairs := make([]string, 0, len(keys))
	for _, k := range keys {
		pairs = append(pairs, clean(k)+ksep+clean(m[k]))
	}
	return name + sep + strings.Join(pairs, sep)
}
// Format a LabelSet into a string to be written to one of the timeseries
// sockets.
// The arguments are the exporter hostname, the metric, one of its label
// sets, and the push interval.
type formatter func(string, *metrics.Metric, *metrics.LabelSet, time.Duration) string
// writeSocketMetrics writes every non-Text metric in the store to c in the
// wire format produced by f, counting attempts in exportTotal and successful
// writes in exportSuccess.  It returns the first write error encountered.
func (e *Exporter) writeSocketMetrics(c io.Writer, f formatter, exportTotal *expvar.Int, exportSuccess *expvar.Int) error {
	return e.store.Range(func(m *metrics.Metric) error {
		m.RLock()
		// Don't try to send text metrics to any push service.
		if m.Kind == metrics.Text {
			m.RUnlock()
			return nil
		}
		exportTotal.Add(1)
		lc := make(chan *metrics.LabelSet)
		go m.EmitLabelSets(lc)
		// On a write error, remember it but keep draining lc: returning
		// early would leave the EmitLabelSets goroutine blocked on its
		// send forever and would skip the RUnlock below, leaking the
		// metric's read lock.
		var werr error
		for l := range lc {
			if werr != nil {
				continue
			}
			line := f(e.hostname, m, l, e.pushInterval)
			n, err := fmt.Fprint(c, line)
			glog.V(2).Infof("Sent %d bytes\n", n)
			if err == nil {
				exportSuccess.Add(1)
			} else {
				werr = errors.Errorf("write error: %s", err)
			}
		}
		m.RUnlock()
		return werr
	})
}
// PushMetrics sends metrics to each of the configured services.
// Dial, deadline, write and close failures are logged but do not stop the
// push to the remaining targets.
func (e *Exporter) PushMetrics() {
	for _, target := range e.pushTargets {
		glog.V(2).Infof("pushing to %s", target.addr)
		conn, err := net.DialTimeout(target.net, target.addr, *writeDeadline)
		if err != nil {
			glog.Infof("pusher dial error: %s", err)
			continue
		}
		// Bound the whole write with the same deadline used for dialling.
		err = conn.SetDeadline(time.Now().Add(*writeDeadline))
		if err != nil {
			glog.Infof("Couldn't set deadline on connection: %s", err)
		}
		err = e.writeSocketMetrics(conn, target.f, target.total, target.success)
		if err != nil {
			glog.Infof("pusher write error: %s", err)
		}
		err = conn.Close()
		if err != nil {
			glog.Infof("connection close failed: %s", err)
		}
	}
}
// StartMetricPush pushes metrics to the configured services each interval.
// It is a no-op when exporting is disabled, when no push targets are
// registered, or when the push interval is not positive.  The push
// goroutine waits for initialization to finish, then runs until the
// Exporter's context is cancelled.
func (e *Exporter) StartMetricPush() {
	if e.exportDisabled {
		glog.Info("Export loop disabled.")
		return
	}
	if len(e.pushTargets) == 0 {
		return
	}
	if e.pushInterval <= 0 {
		return
	}
	e.wg.Add(1)
	go func() {
		defer e.wg.Done()
		// Don't push until New has finished constructing the Exporter.
		<-e.initDone
		glog.Info("Started metric push.")
		ticker := time.NewTicker(e.pushInterval)
		defer ticker.Stop()
		for {
			select {
			case <-e.ctx.Done():
				return
			case <-ticker.C:
				e.PushMetrics()
			}
		}
	}()
}
// pushOptions describes one push export destination.
type pushOptions struct {
	net, addr      string      // Dial()able network type and address
	f              formatter   // wire format used for this destination
	total, success *expvar.Int // counters of attempted and successful exports
}
// RegisterPushExport adds a push export connection to the Exporter.  Items in
// the list must describe a Dial()able connection and will have all the
// metrics pushed to them each pushInterval.
func (e *Exporter) RegisterPushExport(p pushOptions) {
	e.pushTargets = append(e.pushTargets, p)
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/export_test.go 0000664 0000000 0000000 00000016340 14600635717 0023364 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"context"
"errors"
"reflect"
"sort"
"strings"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
// prefix is the value assigned to the *_prefix flags in the tests below.
const prefix = "prefix"

// TestCreateExporter checks that New succeeds with valid arguments and
// propagates the error from a failing Option.
func TestCreateExporter(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	store := metrics.NewStore()
	_, err := New(ctx, &wg, store)
	if err != nil {
		t.Errorf("New(ctx, wg, store) unexpected error: %v", err)
	}
	cancel()
	wg.Wait()
	ctx, cancel = context.WithCancel(context.Background())
	// An option that always fails must make New fail.
	failopt := func(*Exporter) error {
		return errors.New("busted") // nolint:goerr113
	}
	_, err = New(ctx, &wg, store, failopt)
	if err == nil {
		t.Errorf("unexpected success")
	}
	cancel()
	wg.Wait()
}
// TestNewErrors checks that New rejects a nil WaitGroup and a nil Store.
func TestNewErrors(t *testing.T) {
	ctx := context.Background()
	store := metrics.NewStore()
	var wg sync.WaitGroup
	_, err := New(ctx, nil, store)
	if err == nil {
		t.Error("New(ctx, nil, store) expecting error, received nil")
	}
	_, err = New(ctx, &wg, nil)
	if err == nil {
		t.Error("New(ctx, wg, nil) expecting error, received nil")
	}
}
// FakeSocketWrite renders every label set of metric m through formatter f,
// exactly as the push exporters would, and returns the lines sorted
// lexically for deterministic comparison.
func FakeSocketWrite(f formatter, m *metrics.Metric) []string {
	const interval = 60 * time.Second
	lines := make([]string, 0)
	lc := make(chan *metrics.LabelSet)
	go m.EmitLabelSets(lc)
	for ls := range lc {
		lines = append(lines, f("gunstar", m, ls, interval))
	}
	sort.Strings(lines)
	return lines
}
// TestMetricToCollectd checks the collectd PUTVAL rendering of scalar,
// dimensioned and timer metrics, with and without the prefix flag.
func TestMetricToCollectd(t *testing.T) {
	*collectdPrefix = ""
	ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
	if terr != nil {
		t.Errorf("time parse error: %s", terr)
	}
	ms := metrics.NewStore()
	// A scalar counter with no labels.
	scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
	d, _ := scalarMetric.GetDatum()
	datum.SetInt(d, 37, ts)
	testutil.FatalIfErr(t, ms.Add(scalarMetric))
	r := FakeSocketWrite(metricToCollectd, scalarMetric)
	expected := []string{"PUTVAL \"gunstar/mtail-prog/counter-foo\" interval=60 1343124840:37\n"}
	testutil.ExpectNoDiff(t, expected, r)
	// A gauge with one label and two label values produces one line each.
	dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "label")
	d, _ = dimensionedMetric.GetDatum("quux")
	datum.SetInt(d, 37, ts)
	d, _ = dimensionedMetric.GetDatum("snuh")
	datum.SetInt(d, 37, ts)
	ms.ClearMetrics()
	testutil.FatalIfErr(t, ms.Add(dimensionedMetric))
	r = FakeSocketWrite(metricToCollectd, dimensionedMetric)
	expected = []string{
		"PUTVAL \"gunstar/mtail-prog/gauge-bar-label-quux\" interval=60 1343124840:37\n",
		"PUTVAL \"gunstar/mtail-prog/gauge-bar-label-snuh\" interval=60 1343124840:37\n",
	}
	testutil.ExpectNoDiff(t, expected, r)
	// Timers are reported as collectd gauges.
	timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int)
	d, _ = timingMetric.GetDatum()
	datum.SetInt(d, 123, ts)
	testutil.FatalIfErr(t, ms.Add(timingMetric))
	r = FakeSocketWrite(metricToCollectd, timingMetric)
	expected = []string{"PUTVAL \"gunstar/mtail-prog/gauge-foo\" interval=60 1343124840:123\n"}
	testutil.ExpectNoDiff(t, expected, r)
	// The collectd_prefix flag is inserted ahead of the plugin name.
	*collectdPrefix = prefix
	r = FakeSocketWrite(metricToCollectd, timingMetric)
	expected = []string{"PUTVAL \"gunstar/prefixmtail-prog/gauge-foo\" interval=60 1343124840:123\n"}
	testutil.ExpectNoDiff(t, expected, r)
}
// TestMetricToGraphite checks the graphite plaintext rendering of scalar,
// dimensioned and histogram metrics, with and without the prefix flag.
func TestMetricToGraphite(t *testing.T) {
	*graphitePrefix = ""
	ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
	if terr != nil {
		t.Errorf("time parse error: %s", terr)
	}
	scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
	d, _ := scalarMetric.GetDatum()
	datum.SetInt(d, 37, ts)
	r := FakeSocketWrite(metricToGraphite, scalarMetric)
	expected := []string{"prog.foo 37 1343124840\n"}
	testutil.ExpectNoDiff(t, expected, r)
	// Dots in label values are rewritten so they don't create extra
	// graphite path components.
	dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "host")
	d, _ = dimensionedMetric.GetDatum("quux.com")
	datum.SetInt(d, 37, ts)
	d, _ = dimensionedMetric.GetDatum("snuh.teevee")
	datum.SetInt(d, 37, ts)
	r = FakeSocketWrite(metricToGraphite, dimensionedMetric)
	expected = []string{
		"prog.bar.host.quux_com 37 1343124840\n",
		"prog.bar.host.snuh_teevee 37 1343124840\n",
	}
	testutil.ExpectNoDiff(t, expected, r)
	// A histogram emits sum, per-bucket counts, and total count lines.
	histogramMetric := metrics.NewMetric("hist", "prog", metrics.Histogram, metrics.Buckets, "xxx")
	lv := &metrics.LabelValue{Labels: []string{"bar"}, Value: datum.MakeBuckets([]datum.Range{{0, 10}, {10, 20}}, time.Unix(0, 0))}
	histogramMetric.AppendLabelValue(lv)
	d, _ = histogramMetric.GetDatum("bar")
	datum.SetFloat(d, 1, ts)
	datum.SetFloat(d, 5, ts)
	datum.SetFloat(d, 15, ts)
	datum.SetFloat(d, 12, ts)
	datum.SetFloat(d, 19, ts)
	datum.SetFloat(d, 1000, ts)
	r = FakeSocketWrite(metricToGraphite, histogramMetric)
	// The histogram is emitted as one multi-line string; split and sort it
	// for a stable comparison.
	r = strings.Split(strings.TrimSuffix(r[0], "\n"), "\n")
	sort.Strings(r)
	expected = []string{
		"prog.hist.xxx.bar 1052 1343124840",
		"prog.hist.xxx.bar.bin_10 2 1343124840",
		"prog.hist.xxx.bar.bin_20 3 1343124840",
		"prog.hist.xxx.bar.bin_inf 1 1343124840",
		"prog.hist.xxx.bar.count 6 1343124840",
	}
	testutil.ExpectNoDiff(t, expected, r)
	// The graphite_prefix flag is prepended to the metric path.
	*graphitePrefix = prefix
	r = FakeSocketWrite(metricToGraphite, dimensionedMetric)
	expected = []string{
		"prefixprog.bar.host.quux_com 37 1343124840\n",
		"prefixprog.bar.host.snuh_teevee 37 1343124840\n",
	}
	testutil.ExpectNoDiff(t, expected, r)
}
// TestMetricToStatsd verifies the statsd text protocol encoding for
// counter, gauge (including multi-label key ordering), and timer metrics,
// with and without a configured prefix.
func TestMetricToStatsd(t *testing.T) {
	*statsdPrefix = ""

	ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00")
	if terr != nil {
		// All expectations depend on this timestamp; don't continue with
		// the zero time.
		t.Fatalf("time parse error: %s", terr)
	}

	scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int)
	d, _ := scalarMetric.GetDatum()
	datum.SetInt(d, 37, ts)
	r := FakeSocketWrite(metricToStatsd, scalarMetric)
	expected := []string{"prog.foo:37|c"}
	if !reflect.DeepEqual(expected, r) {
		t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
	}

	dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "l")
	d, _ = dimensionedMetric.GetDatum("quux")
	datum.SetInt(d, 37, ts)
	d, _ = dimensionedMetric.GetDatum("snuh")
	datum.SetInt(d, 42, ts)
	r = FakeSocketWrite(metricToStatsd, dimensionedMetric)
	expected = []string{
		"prog.bar.l.quux:37|g",
		"prog.bar.l.snuh:42|g",
	}
	if !reflect.DeepEqual(expected, r) {
		t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
	}

	// Label keys are emitted sorted ("a", "b", "c"), not in declaration order.
	multiLabelMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "c", "a", "b")
	d, _ = multiLabelMetric.GetDatum("x", "z", "y")
	datum.SetInt(d, 37, ts)
	r = FakeSocketWrite(metricToStatsd, multiLabelMetric)
	expected = []string{"prog.bar.a.z.b.y.c.x:37|g"}
	if !reflect.DeepEqual(expected, r) {
		t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
	}

	timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int)
	d, _ = timingMetric.GetDatum()
	datum.SetInt(d, 37, ts)
	r = FakeSocketWrite(metricToStatsd, timingMetric)
	expected = []string{"prog.foo:37|ms"}
	if !reflect.DeepEqual(expected, r) {
		t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
	}

	// With a prefix configured, it is prepended verbatim.
	*statsdPrefix = prefix
	r = FakeSocketWrite(metricToStatsd, timingMetric)
	expected = []string{"prefixprog.foo:37|ms"}
	if !reflect.DeepEqual(expected, r) {
		t.Errorf("prefixed string didn't match:\n\texpected: %v\n\treceived: %v", expected, r)
	}
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/graphite.go 0000664 0000000 0000000 00000004434 14600635717 0022610 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"math"
"net/http"
"strings"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
)
var (
	// graphiteHostPort, when set, enables pushing metrics to a graphite
	// carbon server at that address.
	graphiteHostPort = flag.String("graphite_host_port", "",
		"Host:port to graphite carbon server to write metrics to.")
	// graphitePrefix is prepended verbatim to every metric name exported
	// to graphite.
	graphitePrefix = flag.String("graphite_prefix", "",
		"Prefix to use for graphite metrics.")

	// Export attempt and success counters, published via expvar.
	graphiteExportTotal   = expvar.NewInt("graphite_export_total")
	graphiteExportSuccess = expvar.NewInt("graphite_export_success")
)
// HandleGraphite exports every metric in the store in graphite text
// protocol over HTTP, aborting early if the request is cancelled.
func (e *Exporter) HandleGraphite(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Content-type", "text/plain")
	ctx := r.Context()
	err := e.store.Range(func(m *metrics.Metric) error {
		// Bail out promptly if the client has gone away.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		m.RLock()
		defer m.RUnlock()
		graphiteExportTotal.Add(1)
		lsc := make(chan *metrics.LabelSet)
		go m.EmitLabelSets(lsc)
		for ls := range lsc {
			fmt.Fprint(w, metricToGraphite(e.hostname, m, ls, 0))
		}
		return nil
	})
	if err != nil {
		http.Error(w, fmt.Sprintf("%s", err), http.StatusInternalServerError)
	}
}
// metricToGraphite encodes a metric in the graphite text protocol format. The
// metric lock is held before entering this function.
//
// Histogram metrics additionally emit one line per bucket (".bin_<max>") and
// an observation count (".count") before the final line, whose value is the
// sum of all observations.
func metricToGraphite(_ string, m *metrics.Metric, l *metrics.LabelSet, _ time.Duration) string {
	var b strings.Builder
	// The flattened metric name is shared by every line emitted below.
	name := formatLabels(m.Name, l.Labels, ".", ".", "_")
	if m.Kind == metrics.Histogram && m.Type == metrics.Buckets {
		// Use the datum belonging to this label set, not the metric's first
		// label value, so dimensioned histograms export the correct buckets
		// for each label set.
		buckets := datum.GetBuckets(l.Datum)
		for r, c := range buckets.GetBuckets() {
			var binName string
			if math.IsInf(r.Max, 1) {
				binName = "inf"
			} else {
				binName = fmt.Sprintf("%v", r.Max)
			}
			fmt.Fprintf(&b, "%s%s.%s.bin_%s %v %v\n",
				*graphitePrefix,
				m.Program,
				name,
				binName,
				c,
				l.Datum.TimeString())
		}
		fmt.Fprintf(&b, "%s%s.%s.count %v %v\n",
			*graphitePrefix,
			m.Program,
			name,
			buckets.GetCount(),
			l.Datum.TimeString())
	}
	fmt.Fprintf(&b, "%s%s.%s %v %v\n",
		*graphitePrefix,
		m.Program,
		name,
		l.Datum.ValueString(),
		l.Datum.TimeString())
	return b.String()
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/graphite_test.go 0000664 0000000 0000000 00000003145 14600635717 0023645 0 ustar 00root root 0000000 0000000 // Copyright 2021 Adam Romanek
// This file is available under the Apache license.
package exporter
import (
"context"
"io"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
// handleGraphiteTests drives TestHandleGraphite: each case lists metrics to
// load into the store and the exact graphite-protocol body the handler
// should write (the "foobar." prefix is configured by the test).
var handleGraphiteTests = []struct {
	name     string
	metrics  []*metrics.Metric
	expected string
}{
	{
		"empty",
		[]*metrics.Metric{},
		"",
	},
	{
		// A single unlabelled counter: "<prefix><prog>.<name> <value> <time>\n".
		"single",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
			},
		},
		"foobar.test.foo 1 0\n",
	},
}
// TestHandleGraphite exercises the graphite HTTP handler end to end over
// the handleGraphiteTests table.
func TestHandleGraphite(t *testing.T) {
	*graphitePrefix = "foobar."
	for _, tc := range handleGraphiteTests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			var wg sync.WaitGroup
			ms := metrics.NewStore()
			for _, metric := range tc.metrics {
				testutil.FatalIfErr(t, ms.Add(metric))
			}
			e, err := New(ctx, &wg, ms, Hostname("gunstar"))
			testutil.FatalIfErr(t, err)
			response := httptest.NewRecorder()
			e.HandleGraphite(response, &http.Request{})
			if response.Code != 200 {
				t.Errorf("response code not 200: %d", response.Code)
			}
			b, err := io.ReadAll(response.Body)
			if err != nil {
				t.Errorf("failed to read response: %s", err)
			}
			// Both sides are plain strings, so no cmp options are needed;
			// the IgnoreUnexported(sync.RWMutex{}) option previously passed
			// here had no effect on a string comparison.
			testutil.ExpectNoDiff(t, tc.expected, string(b))
			cancel()
			wg.Wait()
		})
	}
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/json.go 0000664 0000000 0000000 00000001436 14600635717 0021755 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"encoding/json"
"expvar"
"net/http"
"github.com/golang/glog"
)
// exportJSONErrors counts failed JSON marshallings, published via expvar.
var exportJSONErrors = expvar.NewInt("exporter_json_errors")
// HandleJSON serves the whole metric store as indented JSON over HTTP.
func (e *Exporter) HandleJSON(w http.ResponseWriter, _ *http.Request) {
	buf, err := json.MarshalIndent(e.store, "", " ")
	if err != nil {
		// Count the failure and report it to both the log and the client.
		exportJSONErrors.Add(1)
		glog.Info("error marshalling metrics into json:", err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("content-type", "application/json")
	if _, werr := w.Write(buf); werr != nil {
		glog.Error(werr)
		http.Error(w, werr.Error(), http.StatusInternalServerError)
	}
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/json_test.go 0000664 0000000 0000000 00000006050 14600635717 0023011 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"context"
"io"
"math"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
var handleJSONTests = []struct {
name string
metrics []*metrics.Metric
expected string
}{
{
"empty",
[]*metrics.Metric{},
"[]",
},
{
"single",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`[
{
"Name": "foo",
"Program": "test",
"Kind": 1,
"Type": 0,
"LabelValues": [
{
"Value": {
"Value": 1,
"Time": 0
}
}
]
}
]`,
},
{
"dimensioned",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
Keys: []string{"a", "b"},
LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`[
{
"Name": "foo",
"Program": "test",
"Kind": 1,
"Type": 0,
"Keys": [
"a",
"b"
],
"LabelValues": [
{
"Labels": [
"1",
"2"
],
"Value": {
"Value": 1,
"Time": 0
}
}
]
}
]`,
},
{
"histogram",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Histogram,
Keys: []string{"a", "b"},
LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
Buckets: []datum.Range{{Min: 0, Max: math.Inf(1)}},
},
},
`[
{
"Name": "foo",
"Program": "test",
"Kind": 5,
"Type": 0,
"Keys": [
"a",
"b"
],
"LabelValues": [
{
"Labels": [
"1",
"2"
],
"Value": {
"Value": 1,
"Time": 0
}
}
],
"Buckets": [
{
"Min": "0",
"Max": "+Inf"
}
]
}
]`,
},
}
// TestHandleJSON drives the JSON HTTP handler over the test case table and
// diffs the response body against the golden JSON.
func TestHandleJSON(t *testing.T) {
	for _, tc := range handleJSONTests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			var wg sync.WaitGroup
			ms := metrics.NewStore()
			for _, m := range tc.metrics {
				testutil.FatalIfErr(t, ms.Add(m))
			}
			e, err := New(ctx, &wg, ms, Hostname("gunstar"))
			testutil.FatalIfErr(t, err)
			rec := httptest.NewRecorder()
			e.HandleJSON(rec, &http.Request{})
			if rec.Code != 200 {
				t.Errorf("response code not 200: %d", rec.Code)
			}
			body, rerr := io.ReadAll(rec.Body)
			if rerr != nil {
				t.Errorf("failed to read response: %s", rerr)
			}
			testutil.ExpectNoDiff(t, tc.expected, string(body), testutil.IgnoreUnexported(sync.RWMutex{}))
			cancel()
			wg.Wait()
		})
	}
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/prometheus.go 0000664 0000000 0000000 00000006671 14600635717 0023205 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"fmt"
"io"
"strings"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/expfmt"
)
// metricExportTotal counts label sets exported to Prometheus, published via
// expvar.
var metricExportTotal = expvar.NewInt("metric_export_total")
// noHyphens rewrites hyphens to underscores so a metric name is legal in
// the Prometheus exposition format.
func noHyphens(s string) string {
	return strings.Map(func(r rune) rune {
		if r == '-' {
			return '_'
		}
		return r
	}, s)
}
// Describe implements the prometheus.Collector interface.
//
// The metric store is dynamic, so descriptors are derived from the metrics
// actually collected rather than declared up front.
func (e *Exporter) Describe(c chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(e, c)
}
// Collect implements the prometheus.Collector interface.
//
// Every label set of every non-text metric in the store is converted into a
// constant prometheus.Metric and sent on c; Histogram kinds become constant
// histograms, all other kinds use the value type from promTypeForKind.
func (e *Exporter) Collect(c chan<- prometheus.Metric) {
	// The help string ("defined at <source>") is only refreshed when the
	// metric name changes, so same-named metrics from different programs
	// share the first-seen source location.
	lastMetric := ""
	lastSource := ""
	/* #nosec G104 this Range callback always returns nil */
	e.store.Range(func(m *metrics.Metric) error {
		m.RLock()
		// We don't have a way of converting text metrics to prometheus format.
		if m.Kind == metrics.Text {
			m.RUnlock()
			return nil
		}
		metricExportTotal.Add(1)
		lsc := make(chan *metrics.LabelSet)
		go m.EmitLabelSets(lsc)
		for ls := range lsc {
			if lastMetric != m.Name {
				glog.V(2).Infof("setting source to %s", m.Source)
				lastSource = m.Source
				lastMetric = m.Name
			}
			// Build parallel key/value slices; the optional prog label
			// comes first, followed by the metric's own labels.
			var keys []string
			var vals []string
			if !e.omitProgLabel {
				keys = append(keys, "prog")
				vals = append(vals, m.Program)
			}
			for k, v := range ls.Labels {
				keys = append(keys, k)
				vals = append(vals, v)
			}
			var pM prometheus.Metric
			var err error
			if m.Kind == metrics.Histogram {
				pM, err = prometheus.NewConstHistogram(
					prometheus.NewDesc(noHyphens(m.Name),
						fmt.Sprintf("defined at %s", lastSource), keys, nil),
					datum.GetBucketsCount(ls.Datum),
					datum.GetBucketsSum(ls.Datum),
					datum.GetBucketsCumByMax(ls.Datum),
					vals...)
			} else {
				pM, err = prometheus.NewConstMetric(
					prometheus.NewDesc(noHyphens(m.Name),
						fmt.Sprintf("defined at %s", lastSource), keys, nil),
					promTypeForKind(m.Kind),
					promValueForDatum(ls.Datum),
					vals...)
			}
			if err != nil {
				glog.Warning(err)
				// NOTE(review): returning here exits the Range callback
				// while still holding m's read lock and while the
				// EmitLabelSets goroutine may still be sending on lsc;
				// this looks like it should be `continue` — confirm.
				return nil
			}
			// By default no timestamp is emitted to Prometheus. Setting a
			// timestamp is not recommended. It can lead to unexpected results
			// if the timestamp is not updated or moved forward enough to avoid
			// triggering Prometheus staleness handling.
			// Read more in docs/faq.md
			if e.emitTimestamp {
				c <- prometheus.NewMetricWithTimestamp(ls.Datum.TimeUTC(), pM)
			} else {
				c <- pM
			}
		}
		m.RUnlock()
		return nil
	})
}
// Write dumps the current metric store to w in the Prometheus text
// exposition format.
//
// A throwaway registry is used so that gathering and encoding go through
// the same collection path as the HTTP handler.
func (e *Exporter) Write(w io.Writer) error {
	reg := prometheus.NewRegistry()
	if err := reg.Register(e); err != nil {
		return err
	}
	mfs, err := reg.Gather()
	if err != nil {
		return err
	}
	enc := expfmt.NewEncoder(w, expfmt.NewFormat(expfmt.TypeTextPlain))
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			return err
		}
	}
	return nil
}
// promTypeForKind maps an internal metric kind onto the Prometheus value
// type used to export it; unrecognised kinds are exported untyped.
func promTypeForKind(k metrics.Kind) prometheus.ValueType {
	switch k {
	case metrics.Counter:
		return prometheus.CounterValue
	case metrics.Gauge, metrics.Timer:
		// Timers are exported as gauges: the sample is the latest value.
		return prometheus.GaugeValue
	default:
		return prometheus.UntypedValue
	}
}
// promValueForDatum extracts a float64 sample value from an Int or Float
// datum; any other datum type yields zero.
func promValueForDatum(d datum.Datum) float64 {
	if i, ok := d.(*datum.Int); ok {
		return float64(i.Get())
	}
	if f, ok := d.(*datum.Float); ok {
		return f.Get()
	}
	return 0.
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/prometheus_test.go 0000664 0000000 0000000 00000017046 14600635717 0024242 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"bytes"
"context"
"math"
"strings"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
promtest "github.com/prometheus/client_golang/prometheus/testutil"
)
var handlePrometheusTests = []struct {
name string
progLabel bool
metrics []*metrics.Metric
expected string
}{
{
"empty",
false,
[]*metrics.Metric{},
"",
},
{
"single",
false,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`# HELP foo defined at
# TYPE foo counter
foo{} 1
`,
},
{
"with prog label",
true,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`# HELP foo defined at
# TYPE foo counter
foo{prog="test"} 1
`,
},
{
"dimensioned",
false,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
Keys: []string{"a", "b"},
LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`# HELP foo defined at
# TYPE foo counter
foo{a="1",b="2"} 1
`,
},
{
"gauge",
false,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Gauge,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`# HELP foo defined at
# TYPE foo gauge
foo{} 1
`,
},
{
"timer",
false,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Timer,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`# HELP foo defined at
# TYPE foo gauge
foo{} 1
`,
},
{
"text",
false,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Text,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeString("hi", time.Unix(0, 0))}},
},
},
"",
},
{
"quotes",
false,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
Keys: []string{"a"},
LabelValues: []*metrics.LabelValue{{Labels: []string{"str\"bang\"blah"}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`# HELP foo defined at
# TYPE foo counter
foo{a="str\"bang\"blah"} 1
`,
},
{
"help",
false,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
Source: "location.mtail:37",
},
},
`# HELP foo defined at location.mtail:37
# TYPE foo counter
foo{} 1
`,
},
{
"2 help with label",
true,
[]*metrics.Metric{
{
Name: "foo",
Program: "test2",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
Source: "location.mtail:37",
},
{
Name: "foo",
Program: "test1",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
Source: "different.mtail:37",
},
},
`# HELP foo defined at location.mtail:37
# TYPE foo counter
foo{prog="test2"} 1
foo{prog="test1"} 1
`,
},
{
"histo",
true,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Histogram,
Keys: []string{"a"},
LabelValues: []*metrics.LabelValue{{Labels: []string{"bar"}, Value: datum.MakeBuckets([]datum.Range{{0, 1}, {1, 2}}, time.Unix(0, 0))}},
Source: "location.mtail:37",
},
},
`# HELP foo defined at location.mtail:37
# TYPE foo histogram
foo_bucket{a="bar",prog="test",le="1"} 0
foo_bucket{a="bar",prog="test",le="2"} 0
foo_bucket{a="bar",prog="test",le="+Inf"} 0
foo_sum{a="bar",prog="test"} 0
foo_count{a="bar",prog="test"} 0
`,
},
{
"histo-count-eq-inf",
true,
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Histogram,
Keys: []string{"a"},
LabelValues: []*metrics.LabelValue{
{
Labels: []string{"bar"},
Value: &datum.Buckets{
Buckets: []datum.BucketCount{
{
Range: datum.Range{Min: 0, Max: 1},
Count: 1,
},
{
Range: datum.Range{Min: 1, Max: 2},
Count: 1,
},
{
Range: datum.Range{Min: 2, Max: math.Inf(+1)},
Count: 2,
},
},
Count: 4,
Sum: 5,
},
},
},
Source: "location.mtail:37",
},
},
`# HELP foo defined at location.mtail:37
# TYPE foo histogram
foo_bucket{a="bar",prog="test",le="1"} 1
foo_bucket{a="bar",prog="test",le="2"} 2
foo_bucket{a="bar",prog="test",le="+Inf"} 4
foo_sum{a="bar",prog="test"} 5
foo_count{a="bar",prog="test"} 4
`,
},
}
// TestHandlePrometheus checks the collector's output against the expected
// exposition-format text for each entry of the test case table.
func TestHandlePrometheus(t *testing.T) {
	for _, tc := range handlePrometheusTests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			var wg sync.WaitGroup
			ms := metrics.NewStore()
			for _, m := range tc.metrics {
				testutil.FatalIfErr(t, ms.Add(m))
			}
			opts := []Option{Hostname("gunstar")}
			if !tc.progLabel {
				opts = append(opts, OmitProgLabel())
			}
			e, err := New(ctx, &wg, ms, opts...)
			testutil.FatalIfErr(t, err)
			if cerr := promtest.CollectAndCompare(e, strings.NewReader(tc.expected)); cerr != nil {
				t.Error(cerr)
			}
			cancel()
			wg.Wait()
		})
	}
}
var writePrometheusTests = []struct {
name string
metrics []*metrics.Metric
expected string
}{
{
"empty",
[]*metrics.Metric{},
"",
},
{
"single",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
},
`# HELP foo defined at
# TYPE foo counter
foo 1
`,
},
{
"multi",
[]*metrics.Metric{
{
Name: "foo",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}},
},
{
Name: "bar",
Program: "test",
Kind: metrics.Counter,
LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(2, time.Unix(0, 0))}},
},
},
`# HELP bar defined at
# TYPE bar counter
bar 2
# HELP foo defined at
# TYPE foo counter
foo 1
`,
},
}
// TestWritePrometheus checks the one-shot Write path against golden
// exposition-format output.
func TestWritePrometheus(t *testing.T) {
	for _, tc := range writePrometheusTests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			var wg sync.WaitGroup
			ms := metrics.NewStore()
			for _, m := range tc.metrics {
				testutil.FatalIfErr(t, ms.Add(m))
			}
			e, err := New(ctx, &wg, ms, Hostname("gunstar"), OmitProgLabel())
			testutil.FatalIfErr(t, err)
			var buf bytes.Buffer
			testutil.FatalIfErr(t, e.Write(&buf))
			testutil.ExpectNoDiff(t, tc.expected, buf.String())
			cancel()
			wg.Wait()
		})
	}
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/statsd.go 0000664 0000000 0000000 00000002102 14600635717 0022275 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"flag"
"fmt"
"time"
"github.com/google/mtail/internal/metrics"
)
var (
	// statsdHostPort, when set, enables pushing metrics to a statsd server
	// at that address.
	statsdHostPort = flag.String("statsd_hostport", "",
		"Host:port to statsd server to write metrics to.")
	// statsdPrefix is prepended verbatim to every metric name exported to
	// statsd.
	statsdPrefix = flag.String("statsd_prefix", "",
		"Prefix to use for statsd metrics.")

	// Export attempt and success counters, published via expvar.
	statsdExportTotal   = expvar.NewInt("statsd_export_total")
	statsdExportSuccess = expvar.NewInt("statsd_export_success")
)
// metricToStatsd encodes a metric in the statsd text protocol format. The
// metric lock is held before entering this function.
//
// Only counter, gauge, and timer kinds carry a statsd type code; any other
// kind is emitted with an empty type suffix.
func metricToStatsd(_ string, m *metrics.Metric, l *metrics.LabelSet, _ time.Duration) string {
	var kind string
	switch m.Kind {
	case metrics.Counter:
		kind = "c" // StatsD Counter
	case metrics.Gauge:
		kind = "g" // StatsD Gauge
	case metrics.Timer:
		kind = "ms" // StatsD Timer
	}
	return fmt.Sprintf("%s%s.%s:%s|%s",
		*statsdPrefix,
		m.Program,
		formatLabels(m.Name, l.Labels, ".", ".", "_"),
		l.Datum.ValueString(), kind)
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/varz.go 0000664 0000000 0000000 00000002626 14600635717 0021770 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"expvar"
"fmt"
"net/http"
"sort"
"strings"
"github.com/google/mtail/internal/metrics"
)
// exportVarzTotal counts metrics exported via the varz handler, published
// via expvar.
var exportVarzTotal = expvar.NewInt("exporter_varz_total")

// varzFormat renders one line: "name{label=value,...} value".
const varzFormat = "%s{%s} %s\n"
// HandleVarz exports the metrics in Varz format via HTTP, aborting early if
// the request is cancelled.
func (e *Exporter) HandleVarz(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Content-type", "text/plain")
	ctx := r.Context()
	err := e.store.Range(func(m *metrics.Metric) error {
		// Bail out promptly if the client has gone away.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		m.RLock()
		defer m.RUnlock()
		exportVarzTotal.Add(1)
		lsc := make(chan *metrics.LabelSet)
		go m.EmitLabelSets(lsc)
		for ls := range lsc {
			fmt.Fprint(w, metricToVarz(m, ls, e.omitProgLabel, e.hostname))
		}
		return nil
	})
	if err != nil {
		http.Error(w, fmt.Sprintf("%s", err), http.StatusInternalServerError)
	}
}
// metricToVarz renders one label set of a metric as a single varz text
// line.  The metric's own labels are emitted in sorted order, followed by
// the optional prog label and the instance (hostname) label.
func metricToVarz(m *metrics.Metric, l *metrics.LabelSet, omitProgLabel bool, hostname string) string {
	labels := make([]string, 0, len(l.Labels)+2)
	for k, v := range l.Labels {
		labels = append(labels, fmt.Sprintf("%s=%s", k, v))
	}
	sort.Strings(labels)
	if !omitProgLabel {
		labels = append(labels, "prog="+m.Program)
	}
	labels = append(labels, "instance="+hostname)
	return fmt.Sprintf(varzFormat, m.Name, strings.Join(labels, ","), l.Datum.ValueString())
}
mtail-3.0.0~rc54+git0ff5/internal/exporter/varz_test.go 0000664 0000000 0000000 00000004243 14600635717 0023024 0 ustar 00root root 0000000 0000000 // Copyright 2015 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package exporter
import (
"context"
"io"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
// handleVarzTests drives TestHandleVarz: metrics to load into the store and
// the exact varz text the handler should emit (the instance label comes
// from the test's Hostname option).
var handleVarzTests = []struct {
	name     string
	metrics  []*metrics.Metric
	expected string
}{
	{
		"empty",
		[]*metrics.Metric{},
		"",
	},
	{
		"single",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}},
			},
		},
		`foo{prog=test,instance=gunstar} 1
`,
	},
	{
		// Metric labels appear sorted, before the prog and instance labels.
		"dimensioned",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Counter,
				Keys:        []string{"a", "b"},
				LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}},
			},
		},
		`foo{a=1,b=2,prog=test,instance=gunstar} 1
`,
	},
	{
		// Unlike the Prometheus exporter, varz can render text metrics.
		"text",
		[]*metrics.Metric{
			{
				Name:        "foo",
				Program:     "test",
				Kind:        metrics.Text,
				LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeString("hi", time.Unix(1397586900, 0))}},
			},
		},
		`foo{prog=test,instance=gunstar} hi
`,
	},
}
// TestHandleVarz drives the varz HTTP handler over the test case table and
// diffs the response body against the golden text.
func TestHandleVarz(t *testing.T) {
	for _, tc := range handleVarzTests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			var wg sync.WaitGroup
			ms := metrics.NewStore()
			for _, m := range tc.metrics {
				testutil.FatalIfErr(t, ms.Add(m))
			}
			e, err := New(ctx, &wg, ms, Hostname("gunstar"))
			testutil.FatalIfErr(t, err)
			rec := httptest.NewRecorder()
			e.HandleVarz(rec, &http.Request{})
			if rec.Code != 200 {
				t.Errorf("response code not 200: %d", rec.Code)
			}
			body, rerr := io.ReadAll(rec.Body)
			if rerr != nil {
				t.Errorf("failed to read response: %s", rerr)
			}
			testutil.ExpectNoDiff(t, tc.expected, string(body))
			cancel()
			wg.Wait()
		})
	}
}
mtail-3.0.0~rc54+git0ff5/internal/logline/ 0000775 0000000 0000000 00000000000 14600635717 0020232 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/internal/logline/logline.go 0000664 0000000 0000000 00000001046 14600635717 0022213 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package logline
import "context"
// LogLine carries one line of log text together with where it came from:
// the filename it was read out of, and the context of the read.
type LogLine struct {
	Context context.Context

	Filename string // The log filename that this line was read from

	Line string // The text of the log line itself up to the newline.
}

// New creates a new LogLine object.
func New(ctx context.Context, filename string, line string) *LogLine {
	return &LogLine{Context: ctx, Filename: filename, Line: line}
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/ 0000775 0000000 0000000 00000000000 14600635717 0020247 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/ 0000775 0000000 0000000 00000000000 14600635717 0021361 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/buckets.go 0000664 0000000 0000000 00000003531 14600635717 0023352 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"fmt"
"strconv"
"sync"
"sync/atomic"
"time"
)
// Range describes a half-open interval (Min, Max] of observation values.
type Range struct {
	Min float64
	Max float64
}

// BucketCount pairs a Range with the number of observations that fell in it.
type BucketCount struct {
	Range Range
	Count uint64
}

// Contains reports whether v lies within the half-open interval (Min, Max]:
// the lower bound is exclusive, the upper bound inclusive.
func (r *Range) Contains(v float64) bool {
	return v > r.Min && v <= r.Max
}
// Buckets is a histogram datum: a list of counting buckets plus a running
// count and sum of all observations.  The embedded RWMutex guards Buckets,
// Count and Sum; the timestamp in BaseDatum is handled atomically.
type Buckets struct {
	BaseDatum
	sync.RWMutex
	Buckets []BucketCount
	Count   uint64
	Sum     float64
}

// ValueString returns the sum of all observed values formatted as a string.
func (d *Buckets) ValueString() string {
	return fmt.Sprintf("%g", d.GetSum())
}
// Observe records a single observation v at time ts: the first bucket whose
// upper bound admits v is incremented, along with the total count and sum.
// A value above every bucket's Max only affects Count and Sum; buckets made
// with MakeBuckets always include a +Inf bucket, so in practice every value
// lands somewhere.
func (d *Buckets) Observe(v float64, ts time.Time) {
	d.Lock()
	defer d.Unlock()
	for i, b := range d.Buckets {
		if v <= b.Range.Max {
			// Bucket counts are not cumulative here: only the first
			// matching bucket is incremented.
			d.Buckets[i].Count++
			break
		}
	}
	d.Count++
	d.Sum += v
	d.stamp(ts)
}
// GetCount returns the total number of observations recorded, under a read
// lock.
func (d *Buckets) GetCount() uint64 {
	d.RLock()
	defer d.RUnlock()
	return d.Count
}

// GetSum returns the sum of all observed values, under a read lock.
func (d *Buckets) GetSum() float64 {
	d.RLock()
	defer d.RUnlock()
	return d.Sum
}

// AddBucket appends a new, empty counting bucket covering range r.
func (d *Buckets) AddBucket(r Range) {
	d.Lock()
	defer d.Unlock()
	d.Buckets = append(d.Buckets, BucketCount{r, 0})
}

// GetBuckets returns a snapshot of the per-range observation counts.
func (d *Buckets) GetBuckets() map[Range]uint64 {
	d.RLock()
	defer d.RUnlock()
	b := make(map[Range]uint64)
	for _, bc := range d.Buckets {
		b[bc.Range] = bc.Count
	}
	return b
}
// MarshalJSON renders the histogram as a JSON object with per-bucket counts
// keyed by each bucket's upper bound, plus the total Count, Sum, and
// timestamp.
// NOTE(review): two buckets sharing the same Max would collapse onto one
// key here — presumed not to occur in practice; confirm.
func (d *Buckets) MarshalJSON() ([]byte, error) {
	d.RLock()
	defer d.RUnlock()
	bs := make(map[string]uint64)
	for _, b := range d.Buckets {
		bs[strconv.FormatFloat(b.Range.Max, 'g', -1, 64)] = b.Count
	}
	j := struct {
		Buckets map[string]uint64
		Count   uint64
		Sum     float64
		Time    int64
	}{bs, d.Count, d.Sum, atomic.LoadInt64(&d.Time)}
	return json.Marshal(j)
}

// MarshalJSON renders a Range with Min and Max formatted as strings, so
// that infinite bounds (e.g. "+Inf") survive the trip through JSON, which
// has no numeric representation for them.
func (r *Range) MarshalJSON() ([]byte, error) {
	j := struct {
		Min string
		Max string
	}{fmt.Sprintf("%v", r.Min), fmt.Sprintf("%v", r.Max)}
	return json.Marshal(j)
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/buckets_test.go 0000664 0000000 0000000 00000002166 14600635717 0024414 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum_test
import (
"math"
"testing"
"testing/quick"
"time"
"github.com/google/mtail/internal/metrics/datum"
)
// TestBucketContains property-tests Range.Contains.  The interval is
// half-open (lo, hi]: exclusive of Min and inclusive of Max, matching how
// Buckets.Observe assigns an observation to the first bucket with
// v <= Range.Max.  The previous oracle used [min, max), which disagreed
// with Contains at both endpoints; random floats never hit the endpoints,
// so the disagreement was latent.
func TestBucketContains(t *testing.T) {
	if err := quick.Check(func(lo, hi, val float64) bool {
		r := &datum.Range{Min: lo, Max: hi}
		truth := val > lo && val <= hi
		return truth == r.Contains(val)
	}, nil); err != nil {
		t.Error(err)
	}
}
// TestMakeBucket checks that a histogram datum accumulates sum, count, and
// cumulative-by-max buckets, and that MakeBuckets added an implicit +Inf
// bucket on top of the three explicit ranges.
func TestMakeBucket(t *testing.T) {
	ranges := []datum.Range{
		{0, 1},
		{1, 2},
		{2, 4},
	}
	b := datum.MakeBuckets(ranges, time.Unix(37, 42))
	ts := time.Unix(37, 31)
	datum.Observe(b, 2, ts)
	if s := datum.GetBucketsSum(b); s != 2 {
		t.Errorf("sum not 2, got %v", s)
	}
	if c := datum.GetBucketsCount(b); c != 1 {
		t.Errorf("count not 1, got %v", c)
	}
	bs := datum.GetBucketsCumByMax(b)
	if c := datum.GetBucketsCount(b); c != bs[math.Inf(+1)] {
		t.Errorf("Inf bucket does not equal total observation count: %v vs %v", bs[math.Inf(+1)], c)
	}
	if len(bs) != len(ranges)+1 {
		t.Errorf("missing buckets from BucketsByMax: expected %d, got %v", len(ranges)+1, len(bs))
	}
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/datum.go 0000664 0000000 0000000 00000014364 14600635717 0023032 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"fmt"
"math"
"sort"
"sync/atomic"
"time"
)
// Datum is an interface for metric datums, with a type, value and timestamp to be exported.
type Datum interface {
	// // Type returns the Datum type.
	// Type() metrics.Type

	// ValueString returns the value of a Datum as a string.
	ValueString() string

	// TimeString returns the timestamp of a Datum as a string, in whole
	// seconds since the Unix epoch.
	TimeString() string

	// TimeUTC returns the timestamp of the Datum as time.Time in UTC.
	TimeUTC() time.Time
}
// BaseDatum is a struct used to record timestamps across all Datum implementations.
type BaseDatum struct {
Time int64 // nanoseconds since unix epoch
}
var zeroTime time.Time
func (d *BaseDatum) stamp(timestamp time.Time) {
if timestamp.IsZero() {
atomic.StoreInt64(&d.Time, time.Now().UTC().UnixNano())
} else {
atomic.StoreInt64(&d.Time, timestamp.UnixNano())
}
}
// TimeString returns the timestamp of this Datum as a string.
func (d *BaseDatum) TimeString() string {
return fmt.Sprintf("%d", atomic.LoadInt64(&d.Time)/1e9)
}
func (d *BaseDatum) TimeUTC() time.Time {
tNsec := atomic.LoadInt64(&d.Time)
return time.Unix(tNsec/1e9, tNsec%1e9)
}
// NewInt creates a new zero integer datum.
func NewInt() Datum {
	return MakeInt(0, zeroTime)
}

// NewFloat creates a new zero floating-point datum.
func NewFloat() Datum {
	return MakeFloat(0., zeroTime)
}

// NewString creates a new zero string datum.
func NewString() Datum {
	return MakeString("", zeroTime)
}

// NewBuckets creates a new zero buckets datum with the given ranges.
func NewBuckets(buckets []Range) Datum {
	return MakeBuckets(buckets, zeroTime)
}

// MakeInt creates a new integer datum with the provided value and timestamp.
func MakeInt(v int64, ts time.Time) Datum {
	d := &Int{}
	d.Set(v, ts)
	return d
}

// MakeFloat creates a new floating-point datum with the provided value and timestamp.
func MakeFloat(v float64, ts time.Time) Datum {
	d := &Float{}
	d.Set(v, ts)
	return d
}

// MakeString creates a new string datum with the provided value and timestamp.
func MakeString(v string, ts time.Time) Datum {
	d := &String{}
	d.Set(v, ts)
	return d
}
// MakeBuckets creates a new bucket datum with the provided list of ranges and
// timestamp. If no +inf bucket is provided, one is created.
// The implicit +inf bucket starts at the highest finite upper bound seen
// (or 0 for an empty list), so every observation can be bucketed.  Note
// that the timestamp argument is currently ignored.
func MakeBuckets(buckets []Range, _ time.Time) Datum {
	d := &Buckets{}
	// Track whether the caller already supplied a +inf bucket, and the
	// largest finite upper bound, which becomes the lower bound of the
	// implicit +inf bucket.
	seenInf := false
	highest := 0.0
	for _, b := range buckets {
		d.AddBucket(b)
		if math.IsInf(b.Max, +1) {
			seenInf = true
		} else if b.Max > highest {
			highest = b.Max
		}
	}
	if !seenInf {
		d.AddBucket(Range{highest, math.Inf(+1)})
	}
	return d
}
// GetInt returns the integer value of a datum, or panics if the datum is
// not an *Int.
func GetInt(d Datum) int64 {
	switch d := d.(type) {
	case *Int:
		return d.Get()
	default:
		panic(fmt.Sprintf("datum %v is not an Int", d))
	}
}

// GetFloat returns the floating-point value of a datum, or panics if the
// datum is not a *Float.
func GetFloat(d Datum) float64 {
	switch d := d.(type) {
	case *Float:
		return d.Get()
	default:
		panic(fmt.Sprintf("datum %v is not a Float", d))
	}
}

// GetString returns the string value of a datum, or panics if the datum is
// not a *String.
func GetString(d Datum) string {
	switch d := d.(type) {
	case *String:
		return d.Get()
	default:
		panic(fmt.Sprintf("datum %v is not a String", d))
	}
}

// SetInt sets an integer datum to the provided value and timestamp, or panics if the Datum is not an IntDatum.
// A *Buckets datum is also accepted: the value is recorded as a histogram
// observation.
func SetInt(d Datum, v int64, ts time.Time) {
	switch d := d.(type) {
	case *Int:
		d.Set(v, ts)
	case *Buckets:
		d.Observe(float64(v), ts)
	default:
		panic(fmt.Sprintf("datum %v is not an Int", d))
	}
}

// SetFloat sets a floating-point Datum to the provided value and timestamp, or panics if the Datum is not a FloatDatum.
// A *Buckets datum is also accepted: the value is recorded as a histogram
// observation.
func SetFloat(d Datum, v float64, ts time.Time) {
	switch d := d.(type) {
	case *Float:
		d.Set(v, ts)
	case *Buckets:
		d.Observe(v, ts)
	default:
		panic(fmt.Sprintf("datum %v is not a Float", d))
	}
}

// SetString sets a string Datum to the provided value and timestamp, or
// panics if the Datum is not a String Datum.
func SetString(d Datum, v string, ts time.Time) {
	switch d := d.(type) {
	case *String:
		d.Set(v, ts)
	default:
		panic(fmt.Sprintf("datum %v is not a String", d))
	}
}
// IncIntBy increments an integer Datum by the provided value, at time ts, or panics if the Datum is not an IntDatum.
func IncIntBy(d Datum, v int64, ts time.Time) {
	switch d := d.(type) {
	case *Int:
		d.IncBy(v, ts)
	default:
		panic(fmt.Sprintf("datum %v is not an Int", d))
	}
}

// DecIntBy decrements an integer Datum by the provided value, at time ts, or panics if the Datum is not an IntDatum.
func DecIntBy(d Datum, v int64, ts time.Time) {
	switch d := d.(type) {
	case *Int:
		d.DecBy(v, ts)
	default:
		panic(fmt.Sprintf("datum %v is not an Int", d))
	}
}
// GetBuckets returns d as a *Buckets, or panics if d is not a Buckets datum.
func GetBuckets(d Datum) *Buckets {
	switch d := d.(type) {
	case *Buckets:
		return d
	default:
		panic(fmt.Sprintf("datum %v is not a Buckets", d))
	}
}

// Observe records an observation v at time ts in d, or panics if d is not a BucketsDatum.
func Observe(d Datum, v float64, ts time.Time) {
	switch d := d.(type) {
	case *Buckets:
		d.Observe(v, ts)
	default:
		panic(fmt.Sprintf("datum %v is not a Buckets", d))
	}
}
// GetBucketsCount returns the total count of observations in d, or panics if d is not a BucketsDatum.
func GetBucketsCount(d Datum) uint64 {
	switch d := d.(type) {
	case *Buckets:
		return d.GetCount()
	default:
		panic(fmt.Sprintf("datum %v is not a Buckets", d))
	}
}

// GetBucketsSum returns the sum of observations in d, or panics if d is not a BucketsDatum.
func GetBucketsSum(d Datum) float64 {
	switch d := d.(type) {
	case *Buckets:
		return d.GetSum()
	default:
		panic(fmt.Sprintf("datum %v is not a Buckets", d))
	}
}
// GetBucketsCumByMax returns a map of cumulative bucket observations keyed by
// their upper bounds, or panics if d is not a BucketsDatum.
func GetBucketsCumByMax(d Datum) map[float64]uint64 {
	b, ok := d.(*Buckets)
	if !ok {
		panic(fmt.Sprintf("datum %v is not a Buckets", d))
	}
	raw := b.GetBuckets()
	// Presize both containers; one entry per bucket upper bound.
	buckets := make(map[float64]uint64, len(raw))
	maxes := make([]float64, 0, len(raw))
	for r, c := range raw {
		maxes = append(maxes, r.Max)
		buckets[r.Max] = c
	}
	// Accumulate counts in ascending order of upper bound so each entry
	// holds the count of all observations at or below that bound.
	sort.Float64s(maxes)
	cum := uint64(0)
	for _, m := range maxes {
		cum += buckets[m]
		buckets[m] = cum
	}
	return buckets
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/datum_test.go 0000664 0000000 0000000 00000002554 14600635717 0024067 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"testing"
"time"
"github.com/google/mtail/internal/testutil"
)
// TestDatumSetAndValue checks that the Make* constructors store both value
// and timestamp, and that the string renderings are as expected.
func TestDatumSetAndValue(t *testing.T) {
	d := MakeInt(12, time.Unix(37, 42))
	if r := GetInt(d); r != 12 {
		t.Errorf("d didn't return 12, got %v", r)
	}
	if r := d.ValueString(); r != "12" {
		t.Errorf("d value is not 12, got %v", r)
	}
	if r := d.TimeString(); r != "37" {
		t.Errorf("d Time not correct, got %v", r)
	}
	d = MakeFloat(1.2, time.Unix(37, 42))
	if r := GetFloat(d); r != 1.2 {
		t.Errorf("d didn't return 1.2, got %v", r)
	}
	if r := d.ValueString(); r != "1.2" {
		t.Errorf("d value is not 1.2, got %v", r)
	}
	if r := d.TimeString(); r != "37" {
		t.Errorf("d Time not correct, got %v", r)
	}
}
// datumJSONTests pairs datums with their expected JSON encoding; the Time
// field is the timestamp in nanoseconds since the Unix epoch.
var datumJSONTests = []struct {
	datum Datum
	expected string
}{
	{
		MakeInt(37, time.Unix(42, 12)),
		`{"Value":37,"Time":42000000012}`,
	},
	{
		MakeFloat(37.1, time.Unix(42, 12)),
		`{"Value":37.1,"Time":42000000012}`,
	},
}
// TestMarshalJSON verifies the JSON encoding of each datum in datumJSONTests.
func TestMarshalJSON(t *testing.T) {
	// This is not a round trip test because only the LabelValue knows how to unmarshal a Datum.
	for i, tc := range datumJSONTests {
		b, err := json.Marshal(tc.datum)
		if err != nil {
			t.Errorf("%d: Marshal failed: %v", i, err)
		}
		testutil.ExpectNoDiff(t, tc.expected, string(b))
	}
mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/float.go 0000664 0000000 0000000 00000001675 14600635717 0023026 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"fmt"
"math"
"sync/atomic"
"time"
)
// Float describes a floating point value at a given timestamp.
type Float struct {
	BaseDatum
	// Valuebits holds the value as its IEEE-754 bit pattern so it can be
	// read and written atomically via the sync/atomic uint64 operations.
	Valuebits uint64
}

// ValueString returns the value of the Float as a string.
func (d *Float) ValueString() string {
	return fmt.Sprintf("%g", d.Get())
}

// Set sets value of the Float at the timestamp ts.
func (d *Float) Set(v float64, ts time.Time) {
	atomic.StoreUint64(&d.Valuebits, math.Float64bits(v))
	d.stamp(ts)
}

// Get returns the floating-point value.
func (d *Float) Get() float64 {
	return math.Float64frombits(atomic.LoadUint64(&d.Valuebits))
}

// MarshalJSON returns a JSON encoding of the Float.
// Value and Time are loaded with separate atomic reads, so the pair is not
// guaranteed to be from a single Set call.
func (d *Float) MarshalJSON() ([]byte, error) {
	j := struct {
		Value float64
		Time int64
	}{d.Get(), atomic.LoadInt64(&d.Time)}
	return json.Marshal(j)
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/int.go 0000664 0000000 0000000 00000002404 14600635717 0022502 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"fmt"
"sync/atomic"
"time"
)
// Int describes an integer value at a given timestamp.
type Int struct {
	BaseDatum
	// Value is accessed only via sync/atomic int64 operations.
	Value int64
}

// Set sets the value of the Int to the value at timestamp.
func (d *Int) Set(value int64, timestamp time.Time) {
	atomic.StoreInt64(&d.Value, value)
	d.stamp(timestamp)
}

// IncBy increments the Int's value by the value provided, at timestamp.
func (d *Int) IncBy(delta int64, timestamp time.Time) {
	atomic.AddInt64(&d.Value, delta)
	d.stamp(timestamp)
}

// DecBy decrements the Int's value by the value provided, at timestamp.
func (d *Int) DecBy(delta int64, timestamp time.Time) {
	atomic.AddInt64(&d.Value, -delta)
	d.stamp(timestamp)
}

// Get returns the value of the Int.
func (d *Int) Get() int64 {
	return atomic.LoadInt64(&d.Value)
}

// ValueString returns the value of the Int as a string.
func (d *Int) ValueString() string {
	return fmt.Sprintf("%d", atomic.LoadInt64(&d.Value))
}

// MarshalJSON returns a JSON encoding of the Int.
// Value and Time are loaded with separate atomic reads, so the pair is not
// guaranteed to be from a single Set call.
func (d *Int) MarshalJSON() ([]byte, error) {
	j := struct {
		Value int64
		Time int64
	}{d.Get(), atomic.LoadInt64(&d.Time)}
	return json.Marshal(j)
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/int_test.go 0000664 0000000 0000000 00000001236 14600635717 0023543 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"testing"
"time"
)
// BenchmarkIncrementScalarInt measures the cost of an atomic IncBy on an Int.
func BenchmarkIncrementScalarInt(b *testing.B) {
	d := &Int{}
	ts := time.Now().UTC()
	for i := 0; i < b.N; i++ {
		d.IncBy(1, ts)
	}
}

// BenchmarkDecrementScalarInt measures the cost of an atomic DecBy on an Int.
func BenchmarkDecrementScalarInt(b *testing.B) {
	d := &Int{}
	ts := time.Now().UTC()
	for i := 0; i < b.N; i++ {
		d.DecBy(1, ts)
	}
}
// TestDecrementScalarInt checks that DecBy undoes an IncBy on an Int datum.
func TestDecrementScalarInt(t *testing.T) {
	var d Int
	ts := time.Now().UTC()
	d.IncBy(1, ts)
	if got := d.Get(); got != 1 {
		t.Errorf("expected 1, got %d", got)
	}
	d.DecBy(1, ts)
	if got := d.Get(); got != 0 {
		t.Errorf("expected 0, got %d", got)
	}
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/string.go 0000664 0000000 0000000 00000001670 14600635717 0023222 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package datum
import (
"encoding/json"
"sync"
"sync/atomic"
"time"
)
// String describes a string value at a given timestamp.
type String struct {
	BaseDatum
	// mu guards Value; strings cannot be stored atomically so a lock is
	// used instead of the sync/atomic pattern of Int and Float.
	mu sync.RWMutex
	Value string
}

// Set sets the value of the String to the value at timestamp.
func (d *String) Set(value string, timestamp time.Time) {
	d.mu.Lock()
	d.Value = value
	d.stamp(timestamp)
	d.mu.Unlock()
}

// Get returns the value of the String.
func (d *String) Get() string {
	d.mu.RLock()
	defer d.mu.RUnlock()
	return d.Value
}

// ValueString returns the value of the String as a string.
func (d *String) ValueString() string {
	return d.Get()
}

// MarshalJSON returns a JSON encoding of the String.
// Time is read atomically outside the mutex, so the Value/Time pair is not
// guaranteed to be from a single Set call.
func (d *String) MarshalJSON() ([]byte, error) {
	j := struct {
		Value string
		Time int64
	}{d.Get(), atomic.LoadInt64(&d.Time)}
	return json.Marshal(j)
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/metric.go 0000664 0000000 0000000 00000020324 14600635717 0022062 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
// Package metrics provides storage for metrics being recorded by mtail
// programs.
package metrics
import (
"encoding/json"
"fmt"
"math/rand"
"reflect"
"strings"
"sync"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics/datum"
"github.com/pkg/errors"
)
// Kind enumerates the types of metrics supported.
type Kind int

const (
	_ Kind = iota
	// Counter is a monotonically nondecreasing metric.
	Counter
	// Gauge is a Kind that can take on any value, and may be set
	// discontinuously from its previous value.
	Gauge
	// Timer is a specialisation of Gauge that can be used to store time
	// intervals, such as latency and durations. It enables certain behaviour
	// in exporters that handle time intervals such as StatsD.
	Timer
	// Text is a special metric type for free text, usually for operating as a 'hidden' metric, as often these values cannot be exported.
	Text
	// Histogram is a Kind that observes a value and stores the value
	// in a bucket.
	Histogram
	endKind // end of enumeration for testing
)

// kindNames maps each Kind to its display name.
var kindNames = map[Kind]string{
	Counter:   "Counter",
	Gauge:     "Gauge",
	Timer:     "Timer",
	Text:      "Text",
	Histogram: "Histogram",
}

// String returns the name of the Kind, or "Unknown" for values outside the
// enumeration.
func (m Kind) String() string {
	if name, ok := kindNames[m]; ok {
		return name
	}
	return "Unknown"
}

// Generate implements the quick.Generator interface for Kind.
func (Kind) Generate(rand *rand.Rand, _ int) reflect.Value {
	return reflect.ValueOf(Kind(rand.Intn(int(endKind))))
}
// LabelValue is an object that names a Datum value with a list of label
// strings.
type LabelValue struct {
	Labels []string `json:",omitempty"`
	Value datum.Datum
	// After this time of inactivity, the LabelValue is removed from the metric.
	// A zero or negative Expiry means the LabelValue never expires.
	Expiry time.Duration `json:",omitempty"`
}
// Metric is an object that describes a metric, with its name, the creator and
// owner program name, its Kind, a sequence of Keys that may be used to
// add dimension to the metric, and a list of LabelValues that contain data for
// labels in each dimension of the Keys.
type Metric struct {
	sync.RWMutex
	Name string // Name
	Program string // Instantiating program
	Kind Kind
	Type Type
	Hidden bool `json:",omitempty"`
	Keys []string `json:",omitempty"`
	LabelValues []*LabelValue `json:",omitempty"`
	// labelValuesMap indexes LabelValues by the key produced by
	// buildLabelValueKey, for O(1) lookup; it must be kept in sync with
	// the LabelValues slice.
	labelValuesMap map[string]*LabelValue
	Source string `json:",omitempty"`
	Buckets []datum.Range `json:",omitempty"`
	// Limit caps the number of LabelValues; see RemoveOldestDatum and Store.Gc.
	Limit int `json:",omitempty"`
}
// NewMetric returns a new empty metric of dimension len(keys).
func NewMetric(name string, prog string, kind Kind, typ Type, keys ...string) *Metric {
	m := newMetric(len(keys))
	m.Name = name
	m.Program = prog
	m.Kind = kind
	m.Type = typ
	// Copy rather than alias the caller's variadic slice.
	copy(m.Keys, keys)
	return m
}

// newMetric returns a new empty Metric with storage for keyLen keys.
func newMetric(keyLen int) *Metric {
	return &Metric{
		Keys: make([]string, keyLen),
		LabelValues: make([]*LabelValue, 0),
		labelValuesMap: make(map[string]*LabelValue),
	}
}
// buildLabelValueKey returns a unique map key for the given label tuple.
// Each label has its "-" characters escaped as "\-" and is followed by a
// bare "-" separator.
func buildLabelValueKey(labels []string) string {
	var b strings.Builder
	for _, label := range labels {
		b.WriteString(strings.ReplaceAll(label, "-", "\\-"))
		b.WriteString("-")
	}
	return b.String()
}
// AppendLabelValue adds a LabelValue to the Metric and indexes it in the
// labelValuesMap. It returns an error if the number of labels does not match
// the metric's number of keys.
// NOTE(review): this method does not lock the Metric itself; callers such as
// GetDatum hold the lock — confirm all call sites do the same.
func (m *Metric) AppendLabelValue(lv *LabelValue) error {
	if len(lv.Labels) != len(m.Keys) {
		return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", lv.Labels, m)
	}
	m.LabelValues = append(m.LabelValues, lv)
	k := buildLabelValueKey(lv.Labels)
	m.labelValuesMap[k] = lv
	return nil
}
// FindLabelValueOrNil returns the LabelValue indexed by the given label
// strings, or nil if none exists.
func (m *Metric) FindLabelValueOrNil(labelvalues []string) *LabelValue {
	if lv, ok := m.labelValuesMap[buildLabelValueKey(labelvalues)]; ok {
		return lv
	}
	return nil
}
// GetDatum returns the datum named by a sequence of string label values from a
// Metric. If the sequence of label values does not yet exist, it is created.
func (m *Metric) GetDatum(labelvalues ...string) (d datum.Datum, err error) {
	if len(labelvalues) != len(m.Keys) {
		return nil, errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m)
	}
	m.Lock()
	defer m.Unlock()
	if lv := m.FindLabelValueOrNil(labelvalues); lv != nil {
		d = lv.Value
	} else {
		// TODO Check m.Limit and expire old data
		// Create a zero datum matching the metric's declared Type.
		// NOTE(review): if m.Type matches none of these cases, d stays
		// nil and a nil-valued LabelValue is stored — confirm intended.
		switch m.Type {
		case Int:
			d = datum.NewInt()
		case Float:
			d = datum.NewFloat()
		case String:
			d = datum.NewString()
		case Buckets:
			buckets := m.Buckets
			if buckets == nil {
				buckets = make([]datum.Range, 0)
			}
			d = datum.NewBuckets(buckets)
		}
		lv := &LabelValue{Labels: labelvalues, Value: d}
		if err := m.AppendLabelValue(lv); err != nil {
			return nil, err
		}
	}
	return d, nil
}
// RemoveOldestDatum scans the Metric's LabelValues for the Datum with the oldest timestamp, and removes it.
// NOTE(review): the scan reads m.LabelValues without holding the metric lock;
// RemoveDatum locks internally. Confirm callers serialise access.
func (m *Metric) RemoveOldestDatum() {
	var oldestLV *LabelValue
	for _, lv := range m.LabelValues {
		if oldestLV == nil || lv.Value.TimeUTC().Before(oldestLV.Value.TimeUTC()) {
			oldestLV = lv
		}
	}
	if oldestLV != nil {
		glog.V(1).Infof("removeOldest: removing oldest LV: %v", oldestLV)
		err := m.RemoveDatum(oldestLV.Labels...)
		if err != nil {
			glog.Warning(err)
		}
	}
}
// RemoveDatum removes the Datum described by labelvalues from the Metric m.
// Removing a nonexistent datum is not an error.
func (m *Metric) RemoveDatum(labelvalues ...string) error {
	if len(labelvalues) != len(m.Keys) {
		return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m)
	}
	m.Lock()
	defer m.Unlock()
	k := buildLabelValueKey(labelvalues)
	olv, ok := m.labelValuesMap[k]
	if !ok {
		return nil
	}
	// Drop the entry from both the slice and the index map.
	for i, lv := range m.LabelValues {
		if lv == olv {
			m.LabelValues = append(m.LabelValues[:i], m.LabelValues[i+1:]...)
			delete(m.labelValuesMap, k)
			break
		}
	}
	return nil
}
// ExpireDatum sets the expiry duration on the datum named by labelvalues.
// It returns an error if the label count does not match the metric's keys or
// if no such datum exists. The datum is actually removed later by Store.Gc.
func (m *Metric) ExpireDatum(expiry time.Duration, labelvalues ...string) error {
	if len(labelvalues) != len(m.Keys) {
		return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m)
	}
	m.Lock()
	defer m.Unlock()
	if lv := m.FindLabelValueOrNil(labelvalues); lv != nil {
		lv.Expiry = expiry
		return nil
	}
	return errors.Errorf("No datum for given labelvalues %q", labelvalues)
}
// LabelSet is an object that maps the keys of a Metric to the labels naming a
// Datum, for use when enumerating Datums from a Metric.
type LabelSet struct {
	Labels map[string]string
	Datum datum.Datum
}
// zip builds a map pairing each key with the value at the same index.
// Extra keys are ignored; it panics if values is longer than keys.
func zip(keys []string, values []string) map[string]string {
	// Presize: the result has exactly one entry per value.
	r := make(map[string]string, len(values))
	for i, v := range values {
		r[keys[i]] = v
	}
	return r
}
// EmitLabelSets enumerates the LabelSets corresponding to the LabelValues of a
// Metric. It emits them onto the provided channel, then closes the channel to
// signal completion. Callers typically range over c in another goroutine.
func (m *Metric) EmitLabelSets(c chan *LabelSet) {
	for _, lv := range m.LabelValues {
		ls := &LabelSet{zip(m.Keys, lv.Labels), lv.Value}
		c <- ls
	}
	close(c)
}
// UnmarshalJSON converts a JSON byte string into a LabelValue.
// The embedded datum is always reconstructed as an Int datum: the Value field
// is decoded as int64 and Time as nanoseconds since the Unix epoch.
// NOTE(review): Float/String/Buckets datums appear not to round-trip through
// this method — confirm that is acceptable to callers.
func (lv *LabelValue) UnmarshalJSON(b []byte) error {
	var obj map[string]*json.RawMessage
	err := json.Unmarshal(b, &obj)
	if err != nil {
		return err
	}
	// Labels is optional in the encoding; default to an empty list.
	labels := make([]string, 0)
	if _, ok := obj["Labels"]; ok {
		err = json.Unmarshal(*obj["Labels"], &labels)
		if err != nil {
			return err
		}
	}
	lv.Labels = labels
	var valObj map[string]*json.RawMessage
	err = json.Unmarshal(*obj["Value"], &valObj)
	if err != nil {
		return err
	}
	var t int64
	err = json.Unmarshal(*valObj["Time"], &t)
	if err != nil {
		return err
	}
	var i int64
	err = json.Unmarshal(*valObj["Value"], &i)
	if err != nil {
		return err
	}
	// Split nanoseconds into the (sec, nsec) pair time.Unix expects.
	lv.Value = datum.MakeInt(i, time.Unix(t/1e9, t%1e9))
	return nil
}
// String returns a human-readable summary of the Metric, taking the read
// lock to snapshot its fields.
func (m *Metric) String() string {
	m.RLock()
	defer m.RUnlock()
	return fmt.Sprintf("Metric: name=%s program=%s kind=%v type=%s hidden=%v keys=%v labelvalues=%v source=%s buckets=%v", m.Name, m.Program, m.Kind, m.Type, m.Hidden, m.Keys, m.LabelValues, m.Source, m.Buckets)
}
// SetSource sets the source of a metric, describing where in user programmes it was defined.
func (m *Metric) SetSource(source string) {
	m.Lock()
	m.Source = source
	m.Unlock()
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/metric_test.go 0000664 0000000 0000000 00000015273 14600635717 0023130 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"encoding/json"
"fmt"
"math/rand"
"reflect"
"sync"
"testing"
"testing/quick"
"time"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
// TestKindType checks Kind.String for each named Kind and the unknown case.
func TestKindType(t *testing.T) {
	for _, tc := range []struct {
		kind Kind
		want string
	}{
		{Kind(0), "Unknown"},
		{Counter, "Counter"},
		{Gauge, "Gauge"},
		{Timer, "Timer"},
	} {
		if got := tc.kind.String(); got != tc.want {
			t.Errorf("Kind.String() returned %q not %s", got, tc.want)
		}
	}
}
// TestScalarMetric exercises a zero-dimension metric: increment, lookup, and
// the error path for a labelled GetDatum on an unlabelled metric.
func TestScalarMetric(t *testing.T) {
	v := NewMetric("test", "prog", Counter, Int)
	d, err := v.GetDatum()
	if err != nil {
		t.Errorf("no datum: %s", err)
	}
	datum.IncIntBy(d, 1, time.Now().UTC())
	lv := v.FindLabelValueOrNil([]string{})
	if lv == nil {
		t.Fatal("couldn't find labelvalue")
	}
	newD := lv.Value
	if newD == nil {
		t.Error("new_d is nil")
	}
	if newD.ValueString() != "1" {
		t.Error("value not 1")
	}
	d2, err := v.GetDatum("a", "b")
	if err == nil {
		t.Errorf("datum with keys should have returned no value, got %v", d2)
	}
}
// TestDimensionedMetric exercises metrics with one, two and three label keys.
func TestDimensionedMetric(t *testing.T) {
	v := NewMetric("test", "prog", Counter, Int, "foo")
	d, _ := v.GetDatum("a")
	datum.IncIntBy(d, 1, time.Now().UTC())
	if v.FindLabelValueOrNil([]string{"a"}).Value.ValueString() != "1" {
		t.Errorf("fail")
	}
	v = NewMetric("test", "prog", Counter, Int, "foo", "bar")
	d, _ = v.GetDatum("a", "b")
	datum.IncIntBy(d, 1, time.Now().UTC())
	if v.FindLabelValueOrNil([]string{"a", "b"}).Value.ValueString() != "1" {
		t.Errorf("fail")
	}
	v = NewMetric("test", "prog", Counter, Int, "foo", "bar", "quux")
	d, _ = v.GetDatum("a", "b", "c")
	datum.IncIntBy(d, 1, time.Now().UTC())
	if v.FindLabelValueOrNil([]string{"a", "b", "c"}).Value.ValueString() != "1" {
		t.Errorf("fail")
	}
}
// labelSetTests pairs label value tuples with the key/value maps expected
// from EmitLabelSets for a metric keyed on (foo, bar, quux).
var labelSetTests = []struct {
	values []string
	expectedLabels map[string]string
}{
	{
		[]string{"a", "b", "c"},
		map[string]string{"foo": "a", "bar": "b", "quux": "c"},
	},
	{
		[]string{"a", "b", "d"},
		map[string]string{"foo": "a", "bar": "b", "quux": "d"},
	},
}
// TestEmitLabelSet checks that EmitLabelSets zips metric keys with stored
// label values as expected.
func TestEmitLabelSet(t *testing.T) {
	ts := time.Now().UTC()
	for _, tc := range labelSetTests {
		tc := tc // capture for the subtest closure
		t.Run(fmt.Sprintf("%v", tc.values), func(t *testing.T) {
			m := NewMetric("test", "prog", Gauge, Int, "foo", "bar", "quux")
			d, _ := m.GetDatum(tc.values...)
			datum.SetInt(d, 37, ts)
			c := make(chan *LabelSet)
			go m.EmitLabelSets(c)
			ls := <-c
			testutil.ExpectNoDiff(t, tc.expectedLabels, ls.Labels)
		})
	}
}
// TestFindLabelValueOrNil checks lookup behaviour before and after GetDatum
// creates a label value, for both unlabelled and labelled metrics.
func TestFindLabelValueOrNil(t *testing.T) {
	m0 := NewMetric("foo", "prog", Counter, Int)
	if r0 := m0.FindLabelValueOrNil([]string{}); r0 != nil {
		t.Errorf("m0 should be nil: %v", r0)
	}
	d, err := m0.GetDatum()
	if err != nil {
		t.Errorf("Bad datum %v: %v\n", d, err)
	}
	if r1 := m0.FindLabelValueOrNil([]string{}); r1 == nil {
		t.Errorf("m0 should not be nil: %v", r1)
	}
	m1 := NewMetric("bar", "prog", Counter, Int, "a")
	d1, err1 := m1.GetDatum("1")
	if err1 != nil {
		t.Errorf("err1 %v: %v\n", d1, err1)
	}
	if r2 := m1.FindLabelValueOrNil([]string{"0"}); r2 != nil {
		t.Errorf("r2 should be nil")
	}
	if r3 := m1.FindLabelValueOrNil([]string{"1"}); r3 == nil {
		t.Errorf("r3 should be non nil")
	}
}
// TestAppendLabelValue checks that an appended LabelValue is returned by a
// subsequent GetDatum with the same labels.
func TestAppendLabelValue(t *testing.T) {
	m := NewMetric("foo", "prog", Counter, Int, "bar")
	l := []string{"test"}
	d0 := datum.MakeInt(66, time.Unix(0, 0))
	lv := &LabelValue{Labels: l, Value: d0}
	err := m.AppendLabelValue(lv)
	if err != nil {
		t.Errorf("Bad append %v: %v\n", d0, err)
	}
	d1, err := m.GetDatum(l...)
	if err != nil {
		t.Errorf("Bad datum %v: %v\n", d1, err)
	}
	testutil.ExpectNoDiff(t, d0, d1)
}
// timeGenerator returns a pseudo-random UTC time for property-based tests.
// NOTE(review): rand.Intn(31) yields days 0-30 and rand.Intn(9999) years
// 0-9998; time.Date normalizes out-of-range components (day 0 becomes the
// last day of the previous month), which appears acceptable for a fuzz
// generator — confirm.
func timeGenerator(rand *rand.Rand) time.Time {
	months := []time.Month{
		time.January, time.February, time.March,
		time.April, time.May, time.June,
		time.July, time.August, time.September,
		time.October, time.November, time.December,
	}
	return time.Date(
		rand.Intn(9999),
		months[rand.Intn(len(months))],
		rand.Intn(31),
		rand.Intn(24),
		rand.Intn(60),
		rand.Intn(60),
		int(rand.Int31()),
		time.UTC,
	)
}
// TestMetricJSONRoundTrip uses testing/quick to marshal randomly generated
// metrics to JSON and back, comparing the result to the original (ignoring
// unexported fields such as the mutex and labelValuesMap).
func TestMetricJSONRoundTrip(t *testing.T) {
	rand := rand.New(rand.NewSource(0))
	f := func(name, prog string, kind Kind, keys []string, val, _, _ int64) bool {
		m := NewMetric(name, prog, kind, Int, keys...)
		labels := make([]string, 0)
		for range keys {
			if l, ok := quick.Value(reflect.TypeOf(name), rand); ok {
				labels = append(labels, l.String())
			} else {
				t.Errorf("failed to create value for labels")
				break
			}
		}
		d, _ := m.GetDatum(labels...)
		datum.SetInt(d, val, timeGenerator(rand))
		j, e := json.Marshal(m)
		if e != nil {
			t.Errorf("json.Marshal failed: %s\n", e)
			return false
		}
		r := newMetric(0)
		e = json.Unmarshal(j, &r)
		if e != nil {
			t.Errorf("json.Unmarshal failed: %s\n", e)
			return false
		}
		return testutil.ExpectNoDiff(t, m, r, testutil.IgnoreUnexported(sync.RWMutex{}, Metric{}))
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}
// TestTimer checks that Timer metrics compare equal when freshly made and
// that incrementing one behaves like any integer datum.
func TestTimer(t *testing.T) {
	m := NewMetric("test", "prog", Timer, Int)
	n := NewMetric("test", "prog", Timer, Int)
	testutil.ExpectNoDiff(t, m, n, testutil.IgnoreUnexported(sync.RWMutex{}, Metric{}))
	d, _ := m.GetDatum()
	datum.IncIntBy(d, 1, time.Now().UTC())
	lv := m.FindLabelValueOrNil([]string{})
	if lv == nil {
		t.Fatal("couldn't find labelvalue")
	}
	newD := lv.Value
	if newD == nil {
		t.Errorf("new_d is nil")
	}
	if newD.ValueString() != "1" {
		t.Errorf("value not 1")
	}
}
// TestRemoveMetricLabelValue checks that RemoveDatum deletes a label value
// previously created by GetDatum.
func TestRemoveMetricLabelValue(t *testing.T) {
	m := NewMetric("test", "prog", Counter, Int, "a", "b", "c")
	_, e := m.GetDatum("a", "a", "a")
	if e != nil {
		t.Errorf("GetDatum failed: %s", e)
	}
	lv := m.FindLabelValueOrNil([]string{"a", "a", "a"})
	if lv == nil {
		t.Errorf("couldn't find labelvalue")
	}
	e = m.RemoveDatum("a", "a", "a")
	if e != nil {
		t.Errorf("couldn't remove datum: %s", e)
	}
	lv = m.FindLabelValueOrNil([]string{"a", "a", "a"})
	if lv != nil {
		t.Errorf("label value still exists")
	}
}
// TestMetricLabelValueRemovePastLimit checks that RemoveOldestDatum keeps the
// number of label values bounded and drops the oldest entry first.
func TestMetricLabelValueRemovePastLimit(t *testing.T) {
	m := NewMetric("test", "prog", Counter, Int, "foo")
	m.Limit = 1
	_, err := m.GetDatum("a")
	testutil.FatalIfErr(t, err)
	m.RemoveOldestDatum()
	_, err = m.GetDatum("b")
	testutil.FatalIfErr(t, err)
	m.RemoveOldestDatum()
	_, err = m.GetDatum("c")
	testutil.FatalIfErr(t, err)
	m.RemoveOldestDatum()
	if len(m.LabelValues) > 2 {
		t.Errorf("Expected 2 labelvalues got %#v", m.LabelValues)
	}
	// "a" was the oldest datum, so it should have been evicted first.
	if x := m.FindLabelValueOrNil([]string{"a"}); x != nil {
		t.Errorf("found label a which is unexpected: %#v", x)
	}
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/store.go 0000664 0000000 0000000 00000012547 14600635717 0021743 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"context"
"encoding/json"
"io"
"reflect"
"sync"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
)
// Store contains Metrics.
// Two locks protect it: searchMu is read-locked for iteration and insertion
// scans and write-locked for structural modification, while insertMu
// serialises Add and ClearMetrics against each other.
type Store struct {
	searchMu sync.RWMutex // read for iterate and insert, write for delete
	insertMu sync.Mutex // locked for insert and delete, unlocked for iterate
	Metrics map[string][]*Metric
}
// NewStore returns a new, empty metric Store.
func NewStore() *Store {
	s := &Store{}
	s.ClearMetrics()
	return s
}
// Add is used to add one metric to the Store.
//
// A metric with the same name but a different Kind is rejected. A duplicate
// (same program, type and source) has its existing label values copied into
// m and the old metric is discarded after m is appended.
func (s *Store) Add(m *Metric) error {
	s.insertMu.Lock()
	defer s.insertMu.Unlock()
	s.searchMu.RLock()
	glog.V(1).Infof("Adding a new metric %v", m)
	dupeIndex := -1
	if len(s.Metrics[m.Name]) > 0 {
		t := s.Metrics[m.Name][0].Kind
		if m.Kind != t {
			s.searchMu.RUnlock()
			return errors.Errorf("metric %s has different kind %v to existing %v", m.Name, m.Kind, t)
		}
		// To avoid duplicate metrics:
		// - copy old LabelValues into new metric;
		// - discard old metric.
		for i, v := range s.Metrics[m.Name] {
			if v.Program != m.Program {
				continue
			}
			if v.Type != m.Type {
				continue
			}
			if v.Source != m.Source {
				continue
			}
			dupeIndex = i
			glog.V(2).Infof("v keys: %v m.keys: %v", v.Keys, m.Keys)
			// If a set of label keys has changed, discard
			// old metric completely, w/o even copying old
			// data, as they are now incompatible.
			if len(v.Keys) != len(m.Keys) || !reflect.DeepEqual(v.Keys, m.Keys) {
				break
			}
			glog.V(2).Infof("v buckets: %v m.buckets: %v", v.Buckets, m.Buckets)
			// Otherwise, copy everything into the new metric
			glog.V(2).Infof("Found duped metric: %d", dupeIndex)
			for j, oldLabel := range v.LabelValues {
				glog.V(2).Infof("Labels: %d %s", j, oldLabel.Labels)
				d, err := v.GetDatum(oldLabel.Labels...)
				if err != nil {
					// BUGFIX: release the read lock before returning;
					// these error paths previously leaked searchMu.
					s.searchMu.RUnlock()
					return err
				}
				if err = m.RemoveDatum(oldLabel.Labels...); err != nil {
					s.searchMu.RUnlock()
					return err
				}
				lv := &LabelValue{Labels: oldLabel.Labels, Value: d}
				if err := m.AppendLabelValue(lv); err != nil {
					s.searchMu.RUnlock()
					return err
				}
			}
		}
	}
	s.searchMu.RUnlock()
	// We're in modify mode now so lock out search
	s.searchMu.Lock()
	s.Metrics[m.Name] = append(s.Metrics[m.Name], m)
	if dupeIndex >= 0 {
		glog.V(2).Infof("removing original, keeping its clone")
		s.Metrics[m.Name] = append(s.Metrics[m.Name][0:dupeIndex], s.Metrics[m.Name][dupeIndex+1:]...)
	}
	s.searchMu.Unlock()
	return nil
}
// FindMetricOrNil returns a metric in a store, or returns nil if not found.
func (s *Store) FindMetricOrNil(name, prog string) *Metric {
	s.searchMu.RLock()
	defer s.searchMu.RUnlock()
	// Ranging over a missing map entry yields a nil slice, which is safe.
	for _, m := range s.Metrics[name] {
		if m.Program == prog {
			return m
		}
	}
	return nil
}
// ClearMetrics empties the store of all metrics.
// Both locks are taken so neither inserts nor searches can observe the
// store mid-reset.
func (s *Store) ClearMetrics() {
	s.insertMu.Lock()
	defer s.insertMu.Unlock()
	s.searchMu.Lock()
	defer s.searchMu.Unlock()
	s.Metrics = make(map[string][]*Metric)
}
// MarshalJSON returns a JSON byte string representing the Store.
// Metrics are flattened into a single list; map iteration order is random,
// so the output ordering is not deterministic.
func (s *Store) MarshalJSON() (b []byte, err error) {
	s.searchMu.RLock()
	defer s.searchMu.RUnlock()
	ms := make([]*Metric, 0)
	for _, ml := range s.Metrics {
		ms = append(ms, ml...)
	}
	return json.Marshal(ms)
}
// Range calls f sequentially for each Metric present in the store.
// The Metric is not locked when f is called.
// If f returns non nil error, Range stops the iteration.
// This looks a lot like sync.Map, ay.
func (s *Store) Range(f func(*Metric) error) error {
	// The read lock is held for the whole iteration, so f must not call
	// back into methods that take the write lock (e.g. Add, ClearMetrics).
	s.searchMu.RLock()
	defer s.searchMu.RUnlock()
	for _, ml := range s.Metrics {
		for _, m := range ml {
			if err := f(m); err != nil {
				return err
			}
		}
	}
	return nil
}
// Gc iterates through the Store looking for metrics that can be tidied up,
// if they are passed their expiry or sized greater than their limit.
func (s *Store) Gc() error {
	glog.Info("Running Store.Expire()")
	now := time.Now()
	return s.Range(func(m *Metric) error {
		// Trim the metric back to its Limit by dropping the oldest datum
		// repeatedly. When len == Limit the inner loop is a no-op.
		if m.Limit > 0 && len(m.LabelValues) >= m.Limit {
			for i := len(m.LabelValues); i > m.Limit; i-- {
				m.RemoveOldestDatum()
			}
		}
		// Remove label values whose last update is older than their expiry.
		// Expiry <= 0 means never expire.
		for i := 0; i < len(m.LabelValues); i++ {
			lv := m.LabelValues[i]
			if lv.Expiry <= 0 {
				continue
			}
			if now.Sub(lv.Value.TimeUTC()) > lv.Expiry {
				err := m.RemoveDatum(lv.Labels...)
				if err != nil {
					return err
				}
				// RemoveDatum shifts the remaining elements left, so
				// revisit the current index on the next iteration.
				i--
			}
		}
		return nil
	})
}
// StartGcLoop runs a permanent goroutine to expire metrics every duration.
// A non-positive duration disables expiration entirely. The goroutine exits
// when ctx is cancelled.
func (s *Store) StartGcLoop(ctx context.Context, duration time.Duration) {
	if duration <= 0 {
		glog.Infof("Metric store expiration disabled")
		return
	}
	go func() {
		glog.Infof("Starting metric store expiry loop every %s", duration.String())
		ticker := time.NewTicker(duration)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Gc errors are logged but do not stop the loop.
				if err := s.Gc(); err != nil {
					glog.Info(err)
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}
// WriteMetrics dumps the current state of the metrics store in JSON format to
// the io.Writer.
// The read lock is released before writing so a slow writer does not block
// the store.
func (s *Store) WriteMetrics(w io.Writer) error {
	s.searchMu.RLock()
	b, err := json.MarshalIndent(s.Metrics, "", " ")
	s.searchMu.RUnlock()
	if err != nil {
		return errors.Wrap(err, "failed to marshal metrics into json")
	}
	_, err = w.Write(b)
	if err != nil {
		return errors.Wrap(err, "failed to write metrics")
	}
	return nil
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/store_bench_test.go 0000664 0000000 0000000 00000011506 14600635717 0024133 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"fmt"
"math"
"math/rand"
"reflect"
"testing"
"testing/quick"
)
const (
	// Upper bounds (as powers of two) for the size sweeps in the
	// benchmarks below.
	maxItemsLog2 = 10
	maxLabelsLog2 = 13
)
// newRandMetric makes a new, randomly filled Metric.
// Each field is generated via testing/quick; i is only used to identify the
// failing iteration in fatal messages.
func newRandMetric(tb testing.TB, rand *rand.Rand, i int) *Metric {
	tb.Helper()
	nameVal, ok := quick.Value(reflect.TypeOf(""), rand)
	if !ok {
		tb.Fatalf("%d: can't make a name", i)
	}
	progVal, ok := quick.Value(reflect.TypeOf(""), rand)
	if !ok {
		tb.Fatalf("%d: can't make a prog", i)
	}
	kindVal, ok := quick.Value(reflect.TypeOf(Counter), rand)
	if !ok {
		tb.Fatalf("%d: can't make a kind", i)
	}
	typeVal, ok := quick.Value(reflect.TypeOf(Int), rand)
	if !ok {
		tb.Fatalf("%d: can't make a type", i)
	}
	keysVal, ok := quick.Value(reflect.TypeOf([]string{}), rand)
	if !ok {
		tb.Fatalf("%d: can't make a key list", i)
	}
	return NewMetric(nameVal.Interface().(string),
		progVal.Interface().(string),
		kindVal.Interface().(Kind),
		typeVal.Interface().(Type),
		keysVal.Interface().([]string)...)
}
// bench describes one Store benchmark case: an optional setup phase and the
// timed body.
type bench struct {
	name string
	setup func(b *testing.B, rand *rand.Rand, items int, m *[]*Metric, s *Store)
	b func(b *testing.B, items int, m []*Metric, s *Store)
}

// fillMetric populates m with `items` randomly generated metrics.
func fillMetric(b *testing.B, rand *rand.Rand, items int, m *[]*Metric, _ *Store) {
	b.Helper()
	for i := 0; i < items; i++ {
		(*m)[i] = newRandMetric(b, rand, i)
	}
}

// addToStore adds the first `items` metrics in m to s.
// NOTE(review): the error from s.Add is discarded — acceptable in a
// benchmark, but failures go unnoticed.
func addToStore(b *testing.B, items int, m []*Metric, s *Store) {
	b.Helper()
	for j := 0; j < items; j++ {
		s.Add(m[j])
	}
}
// BenchmarkStore sweeps the Add and Iterate operations over item counts of
// 2^0 .. 2^maxItemsLog2, in all combinations of {serial, parallel} and
// {with, without} a Gc after each round.
func BenchmarkStore(b *testing.B) {
	benches := []bench{
		{
			name: "Add",
			setup: fillMetric,
			b: addToStore,
		},
		{
			name: "Iterate",
			setup: func(b *testing.B, rand *rand.Rand, items int, m *[]*Metric, s *Store) {
				b.Helper()
				fillMetric(b, rand, items, m, s)
				addToStore(b, items, *m, s)
			},
			b: func(b *testing.B, _ int, _ []*Metric, s *Store) {
				b.Helper()
				s.Range(func(*Metric) error {
					return nil
				})
			},
		},
	}
	rand := rand.New(rand.NewSource(99))
	for _, bench := range benches {
		bench := bench // capture for the closures below
		for _, gc := range []bool{false, true} {
			gc := gc
			gcStr := ""
			if gc {
				gcStr = "WithGc"
			}
			for _, parallel := range []bool{false, true} {
				parallel := parallel
				parallelStr := ""
				if parallel {
					parallelStr = "Parallel"
				}
				for i := 0.; i <= maxItemsLog2; i++ {
					items := int(math.Pow(2, i))
					b.Run(fmt.Sprintf("%s%s%s-%d", bench.name, gcStr, parallelStr, items), func(b *testing.B) {
						s := NewStore()
						m := make([]*Metric, items)
						if bench.setup != nil {
							bench.setup(b, rand, items, &m, s)
						}
						b.ResetTimer()
						if parallel {
							b.RunParallel(func(pb *testing.PB) {
								for pb.Next() {
									bench.b(b, items, m, s)
								}
							})
						} else {
							for n := 0; n < b.N; n++ {
								bench.b(b, items, m, s)
								if gc {
									s.Gc()
								}
							}
						}
					})
				}
			}
		}
	}
}
func newRandLabels(tb testing.TB, rand *rand.Rand, i int) []string {
tb.Helper()
lv := make([]string, i)
for j := 0; j < i; j++ {
val, ok := quick.Value(reflect.TypeOf(""), rand)
if !ok {
tb.Fatalf("%d-%d: can't make a label", i, j)
}
lv[j] = val.Interface().(string)
}
return lv
}
func fillLabel(b *testing.B, rand *rand.Rand, items, keys int, lvs *[][]string, _ *Metric) {
b.Helper()
for i := 0; i < items; i++ {
(*lvs)[i] = newRandLabels(b, rand, keys)
}
}
// getDatum looks up (or creates) a datum in m for each of the first
// items label value sets in *lvs.
func getDatum(b *testing.B, items int, lvs *[][]string, m *Metric) {
	b.Helper()
	all := *lvs
	for _, lv := range all[:items] {
		m.GetDatum(lv...)
	}
}
// metricBench describes one Metric benchmark case: an optional setup
// phase that prepares label value sets, and the operation to measure.
type metricBench struct {
	name string
	// setup prepares lvs with items sets of keys random label values each.
	setup func(b *testing.B, rand *rand.Rand, items, keys int, lvs *[][]string, m *Metric)
	// b is the benchmarked operation.
	b func(b *testing.B, items int, lv *[][]string, m *Metric)
}
// BenchmarkMetric measures Metric operations (currently GetDatum) with
// exponentially increasing numbers of label value sets, serially and
// in parallel.
func BenchmarkMetric(b *testing.B) {
	maxKeys := 4
	benches := []metricBench{
		{
			name:  "GetDatum",
			setup: fillLabel,
			b:     getDatum,
		},
	}
	// Fixed seed keeps the generated label values stable between runs.
	rand := rand.New(rand.NewSource(99))
	for _, bench := range benches {
		bench := bench // capture the range variable for the subtest closure
		for _, parallel := range []bool{false, true} {
			parallel := parallel
			parallelStr := ""
			if parallel {
				parallelStr = "Parallel"
			}
			for i := 1; i <= maxLabelsLog2; i++ {
				items := int(math.Pow(2, float64(i)))
				lv := newRandLabels(b, rand, maxKeys)
				b.Run(fmt.Sprintf("%s%s-%d", bench.name, parallelStr, items), func(b *testing.B) {
					m := NewMetric("test", "prog", Counter, Int, lv...)
					lvs := make([][]string, items)
					if bench.setup != nil {
						bench.setup(b, rand, items, maxKeys, &lvs, m)
					}
					b.ResetTimer() // exclude the setup phase from the measurement
					if parallel {
						b.RunParallel(func(pb *testing.PB) {
							for pb.Next() {
								bench.b(b, items, &lvs, m)
							}
						})
					} else {
						for n := 0; n < b.N; n++ {
							bench.b(b, items, &lvs, m)
						}
					}
				})
			}
		}
	}
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/store_test.go 0000664 0000000 0000000 00000011777 14600635717 0023006 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"strconv"
"testing"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
// TestMatchingKind checks that a store rejects a metric whose kind
// conflicts with an already-registered metric of the same name.
func TestMatchingKind(t *testing.T) {
	s := NewStore()
	testutil.FatalIfErr(t, s.Add(NewMetric("foo", "prog", Counter, Int)))
	if err := s.Add(NewMetric("foo", "prog1", Gauge, Int)); err == nil {
		t.Fatal("should be err")
	}
}
// TestDuplicateMetric exercises Store.Add's deduplication behaviour: a
// metric that matches an existing one by name, program, kind and type
// replaces it (regardless of keys), while a metric differing in type
// or program is added alongside.
func TestDuplicateMetric(t *testing.T) {
	expectedMetrics := 0
	s := NewStore()
	// Same name/prog/kind/type with different keys: stored once.
	_ = s.Add(NewMetric("foo", "prog", Counter, Int, "user", "host"))
	_ = s.Add(NewMetric("foo", "prog", Counter, Int))
	expectedMetrics++
	if len(s.Metrics["foo"]) != expectedMetrics {
		t.Fatalf("should not add duplicate metric. Store: %v", s)
	}
	_ = s.Add(NewMetric("foo", "prog", Counter, Float))
	glog.Infof("Store: %v", s)
	expectedMetrics++
	if len(s.Metrics["foo"]) != expectedMetrics {
		t.Fatalf("should add metric of a different type: %v", s)
	}
	// Same name/prog/kind/type again, different keys: replaces, not adds.
	_ = s.Add(NewMetric("foo", "prog", Counter, Int, "user", "host", "zone", "domain"))
	glog.Infof("Store: %v", s)
	if len(s.Metrics["foo"]) != expectedMetrics {
		t.Fatalf("should not add duplicate metric, but replace the old one. Store: %v", s)
	}
	_ = s.Add(NewMetric("foo", "prog1", Counter, Int))
	glog.Infof("Store: %v", s)
	expectedMetrics++
	if len(s.Metrics["foo"]) != expectedMetrics {
		t.Fatalf("should add metric with a different prog: %v", s)
	}
	_ = s.Add(NewMetric("foo", "prog1", Counter, Float))
	glog.Infof("Store: %v", s)
	expectedMetrics++
	if len(s.Metrics["foo"]) != expectedMetrics {
		t.Fatalf("should add metric of a different type: %v", s)
	}
}
// A program can add a metric with the same name and of different type.
// Prometheus behavior in this case is undefined. @see
// https://github.com/google/mtail/issues/130
func TestAddMetricDifferentType(t *testing.T) {
	s := NewStore()
	testutil.FatalIfErr(t, s.Add(NewMetric("foo", "prog", Counter, Int)))
	// Duplicate metric of different type from *the same program.
	testutil.FatalIfErr(t, s.Add(NewMetric("foo", "prog", Counter, Float)))
	want := 2
	if got := len(s.Metrics["foo"]); got != want {
		t.Fatalf("should have %d metrics of different Type: %v", want, s.Metrics)
	}
	// Duplicate metric of different type from a different program.
	testutil.FatalIfErr(t, s.Add(NewMetric("foo", "prog1", Counter, Float)))
	want++
	if got := len(s.Metrics["foo"]); got != want {
		t.Fatalf("should have %d metrics of different Type: %v", want, s.Metrics)
	}
}
// TestExpireOldDatum checks that Store.Gc removes only label values
// whose datum is older than their Expiry, leaving the rest untouched.
func TestExpireOldDatum(t *testing.T) {
	s := NewStore()
	m := NewMetric("foo", "prog", Counter, Int, "a", "b", "c")
	testutil.FatalIfErr(t, s.Add(m))
	d, err := m.GetDatum("1", "2", "3")
	if err != nil {
		t.Error(err)
	}
	// Hour-old datum with a one-minute expiry: a Gc candidate.
	datum.SetInt(d, 1, time.Now().Add(-time.Hour))
	lv := m.FindLabelValueOrNil([]string{"1", "2", "3"})
	if lv == nil {
		t.Fatal("couldn't find lv")
	}
	lv.Expiry = time.Minute
	d, err = m.GetDatum("4", "5", "6")
	if err != nil {
		t.Error(err)
	}
	// Also hour-old, but with no expiry set: must survive Gc.
	datum.SetInt(d, 1, time.Now().Add(-time.Hour))
	lv = m.FindLabelValueOrNil([]string{"4", "5", "6"})
	if lv == nil {
		t.Errorf("couldn't find lv")
	}
	testutil.FatalIfErr(t, s.Gc())
	lv = m.FindLabelValueOrNil([]string{"1", "2", "3"})
	if lv != nil {
		t.Errorf("lv not expired: %#v", lv)
		t.Logf("Store: %#v", s)
	}
	lv = m.FindLabelValueOrNil([]string{"4", "5", "6"})
	if lv == nil {
		t.Errorf("lv expired")
		t.Logf("Store: %#v", s)
	}
}
// TestExpireOversizeDatum checks that Gc enforces Metric.Limit by
// evicting the oldest label values once the metric grows past its cap.
func TestExpireOversizeDatum(t *testing.T) {
	s := NewStore()
	m := NewMetric("foo", "prog", Counter, Int, "foo")
	m.Limit = 1 // cap the number of retained label values
	testutil.FatalIfErr(t, s.Add(m))
	_, err := m.GetDatum("a")
	testutil.FatalIfErr(t, err)
	testutil.FatalIfErr(t, s.Gc())
	_, err = m.GetDatum("b")
	testutil.FatalIfErr(t, err)
	testutil.FatalIfErr(t, s.Gc())
	_, err = m.GetDatum("c")
	testutil.FatalIfErr(t, err)
	testutil.FatalIfErr(t, s.Gc())
	if len(m.LabelValues) > 2 {
		t.Errorf("Expected 2 labelvalues got %#v", m.LabelValues)
	}
	// The oldest label value, "a", must have been evicted.
	if x := m.FindLabelValueOrNil([]string{"a"}); x != nil {
		t.Errorf("found label a which is unexpected: %#v", x)
	}
}
// TestExpireManyMetrics creates ten label values with hour-old data,
// sets an expiry on all but the first, and checks Gc removes only the
// expiring ones.
func TestExpireManyMetrics(t *testing.T) {
	s := NewStore()
	m := NewMetric("foo", "prog", Counter, Int, "id")
	testutil.FatalIfErr(t, s.Add(m))
	d, err := m.GetDatum("0")
	if err != nil {
		t.Error(err)
	}
	// "0" is old but has no expiry, so it must survive Gc.
	datum.SetInt(d, 1, time.Now().Add(-time.Hour))
	lv := m.FindLabelValueOrNil([]string{"0"})
	if lv == nil {
		t.Fatal("couldn't find lv")
	}
	for i := 1; i < 10; i++ {
		d, err := m.GetDatum(strconv.Itoa(i))
		if err != nil {
			t.Error(err)
		}
		datum.SetInt(d, 1, time.Now().Add(-time.Hour))
		lv = m.FindLabelValueOrNil([]string{strconv.Itoa(i)})
		if lv == nil {
			t.Fatal("couldn't find lv")
		}
		lv.Expiry = time.Minute // hour-old datum + 1m expiry: a Gc target
	}
	testutil.FatalIfErr(t, s.Gc())
	lv = m.FindLabelValueOrNil([]string{"8"})
	if lv != nil {
		t.Errorf("lv not expired: %#v", lv)
		t.Logf("Store: %#v", s)
	}
	lv = m.FindLabelValueOrNil([]string{"0"})
	if lv == nil {
		t.Errorf("lv expired")
		t.Logf("Store: %#v", s)
	}
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/testing.go 0000664 0000000 0000000 00000002525 14600635717 0022257 0 ustar 00root root 0000000 0000000 // Copyright 2021 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
// MetricSlice is a sortable slice of Metric pointers; it implements
// sort.Interface using this package's Less ordering.
type MetricSlice []*Metric

func (s MetricSlice) Len() int      { return len(s) }
func (s MetricSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s MetricSlice) Less(i, j int) bool {
	return Less(s[i], s[j])
}
// Less reports whether m1 orders before m2, for sorting metrics in
// tests.  Metrics compare by Name, Program, Kind and Type, then by
// Keys (length first, then element-wise), then by LabelValues (count
// first, then each label set by length and element-wise).  The datum
// values themselves are deliberately not compared.
func Less(m1, m2 *Metric) bool {
	if m1.Name < m2.Name {
		return true
	}
	if m1.Name > m2.Name {
		return false
	}
	if m1.Program < m2.Program {
		return true
	}
	if m1.Program > m2.Program {
		return false
	}
	if m1.Kind < m2.Kind {
		return true
	}
	if m1.Kind > m2.Kind {
		return false
	}
	if m1.Type < m2.Type {
		return true
	}
	if m1.Type > m2.Type {
		return false
	}
	if len(m1.Keys) < len(m2.Keys) {
		return true
	}
	if len(m1.Keys) > len(m2.Keys) {
		return false
	}
	for x, k := range m1.Keys {
		if k < m2.Keys[x] {
			return true
		}
		if k > m2.Keys[x] {
			return false
		}
	}
	// Compare the LabelValues counts before indexing into
	// m2.LabelValues: the previous version indexed m2.LabelValues[x]
	// for every element of m1.LabelValues and panicked with an index
	// out of range whenever m1 had more label values than m2.
	if len(m1.LabelValues) < len(m2.LabelValues) {
		return true
	}
	if len(m1.LabelValues) > len(m2.LabelValues) {
		return false
	}
	for x, lv := range m1.LabelValues {
		if len(lv.Labels) < len(m2.LabelValues[x].Labels) {
			return true
		}
		if len(lv.Labels) > len(m2.LabelValues[x].Labels) {
			return false
		}
		for y, k := range lv.Labels {
			if k < m2.LabelValues[x].Labels[y] {
				return true
			}
			if k > m2.LabelValues[x].Labels[y] {
				return false
			}
		}
		// if lv.Value < m2.LabelValues[x].Value {
		// 	return true
		// }
	}
	return false
}
mtail-3.0.0~rc54+git0ff5/internal/metrics/type.go 0000664 0000000 0000000 00000001653 14600635717 0021564 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package metrics
import (
"math/rand"
"reflect"
)
// Type describes the type of value stored in a Datum.
type Type int

const (
	// Int indicates this metric is an integer metric type.
	Int Type = iota
	// Float indicates this metric is a floating-point metric type.
	Float
	// String indicates this metric contains printable string values.
	String
	// Buckets indicates this metric is a histogram metric type.
	Buckets

	endType // end of enumeration for testing
)

// String returns the human-readable name of the type, or "?" when the
// value is outside the known enumeration.
func (t Type) String() string {
	names := [...]string{Int: "Int", Float: "Float", String: "String", Buckets: "Buckets"}
	if t < 0 || int(t) >= len(names) {
		return "?"
	}
	return names[t]
}

// Generate implements the quick.Generator interface for Type.
func (Type) Generate(rand *rand.Rand, _ int) reflect.Value {
	return reflect.ValueOf(Type(rand.Intn(int(endType))))
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/ 0000775 0000000 0000000 00000000000 14600635717 0017707 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/internal/mtail/basic_tail_integration_test.go 0000664 0000000 0000000 00000003336 14600635717 0025777 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail_test
import (
"fmt"
"os"
"path/filepath"
"sync"
"testing"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
// TestBasicTail starts a server tailing a glob over a temp dir, writes
// three lines into a new log file, and expects both the per-file line
// counter and the log count expvars to advance accordingly.
func TestBasicTail(t *testing.T) {
	testutil.SkipIfShort(t)
	if testing.Verbose() {
		testutil.SetFlag(t, "vmodule", "tail=2,log_watcher=2")
	}
	logDir := testutil.TestTempDir(t)
	m, stopM := mtail.TestStartServer(t, 1, mtail.LogPathPatterns(logDir+"/*"), mtail.ProgramPath("../../examples/linecount.mtail"))
	defer stopM()
	logFile := filepath.Join(logDir, "log")
	// Expect 3 lines counted against this file, and one new log found.
	lineCountCheck := m.ExpectMapExpvarDeltaWithDeadline("log_lines_total", logFile, 3)
	logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1)
	f := testutil.TestOpenFile(t, logFile)
	defer f.Close()
	m.PollWatched(1) // Force sync to EOF
	for i := 1; i <= 3; i++ {
		testutil.WriteString(t, f, fmt.Sprintf("%d\n", i))
	}
	m.PollWatched(1) // Expect to read 3 lines here.
	// Run both expvar checks concurrently and wait for both to finish.
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		lineCountCheck()
	}()
	go func() {
		defer wg.Done()
		logCountCheck()
	}()
	wg.Wait()
}
// TestNewLogDoesNotMatchIsIgnored creates a file that does not match
// the server's log path pattern and expects the log count to stay flat.
func TestNewLogDoesNotMatchIsIgnored(t *testing.T) {
	testutil.SkipIfShort(t)
	workdir := testutil.TestTempDir(t)
	// Start mtail watching only "log" in the workdir.
	m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log")))
	defer stopM()
	logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 0)
	// Touch a file whose name does not match the pattern.
	f, err := os.Create(filepath.Join(workdir, "log1"))
	testutil.FatalIfErr(t, err)
	defer f.Close()
	m.PollWatched(0) // No streams so don't wait for any.
	logCountCheck()
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/buildinfo.go 0000664 0000000 0000000 00000001030 14600635717 0022203 0 ustar 00root root 0000000 0000000 // Copyright 2020 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail
import (
"fmt"
"runtime"
)
// BuildInfo records the compile-time information for use when reporting the mtail version.
type BuildInfo struct {
	Branch   string
	Version  string
	Revision string
}

// String renders the build information together with the Go runtime
// version, architecture and operating system of this binary.
func (b BuildInfo) String() string {
	return "mtail version " + b.Version +
		" git revision " + b.Revision +
		" go version " + runtime.Version() +
		" go arch " + runtime.GOARCH +
		" go os " + runtime.GOOS
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/compile_only_integration_test.go 0000664 0000000 0000000 00000001630 14600635717 0026371 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail_test
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
// TestBadProgramFailsCompilation writes a syntactically invalid mtail
// program and expects mtail.New in compile-only mode to reject it with
// a "compile failed" error.
func TestBadProgramFailsCompilation(t *testing.T) {
	testutil.SkipIfShort(t)
	progDir := testutil.TestTempDir(t)
	err := os.WriteFile(filepath.Join(progDir, "bad.mtail"), []byte("asdfasdf\n"), 0o666)
	testutil.FatalIfErr(t, err)
	ctx := context.Background()
	// Compile-only fails program compilation at server start, not after it's running.
	_, err = mtail.New(ctx, metrics.NewStore(), mtail.ProgramPath(progDir), mtail.CompileOnly)
	// Fatal, not Error: the previous version continued past a nil err
	// and then dereferenced err.Error(), panicking with a nil pointer.
	if err == nil {
		t.Fatal("expected error from mtail")
	}
	if !strings.Contains(err.Error(), "compile failed") {
		t.Error("compile failed not reported")
	}
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/examples_integration_test.go 0000664 0000000 0000000 00000012134 14600635717 0025517 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail_test
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/mtail/golden"
"github.com/google/mtail/internal/testutil"
"github.com/google/mtail/internal/waker"
)
// exampleTimeout bounds how long each example-program test may run.
const exampleTimeout = 10 * time.Second

// exampleProgramTests pairs each example mtail program with a sample
// log input and a golden file holding the metrics expected after
// processing that log.
var exampleProgramTests = []struct {
	programfile string // Example program file.
	logfile     string // Sample log input.
	goldenfile  string // Expected metrics after processing.
}{
	{
		"examples/rsyncd.mtail",
		"testdata/rsyncd.log",
		"testdata/rsyncd.golden",
	},
	{
		"examples/sftp.mtail",
		"testdata/sftp_chroot.log",
		"testdata/sftp_chroot.golden",
	},
	{
		"examples/dhcpd.mtail",
		"testdata/anonymised_dhcpd_log",
		"testdata/anonymised_dhcpd_log.golden",
	},
	{
		"examples/ntpd.mtail",
		"testdata/ntp4",
		"testdata/ntp4.golden",
	},
	{
		"examples/ntpd_peerstats.mtail",
		"testdata/xntp3_peerstats",
		"testdata/xntp3_peerstats.golden",
	},
	{
		"examples/apache_combined.mtail",
		"testdata/apache-combined.log",
		"testdata/apache-combined.golden",
	},
	{
		"examples/apache_common.mtail",
		"testdata/apache-common.log",
		"testdata/apache-common.golden",
	},
	{
		"examples/vsftpd.mtail",
		"testdata/vsftpd_log",
		"testdata/vsftpd_log.golden",
	},
	{
		"examples/vsftpd.mtail",
		"testdata/vsftpd_xferlog",
		"testdata/vsftpd_xferlog.golden",
	},
	{
		"examples/lighttpd.mtail",
		"testdata/lighttpd_access.log",
		"testdata/lighttpd_accesslog.golden",
	},
	{
		"examples/mysql_slowqueries.mtail",
		"testdata/mysql_slowqueries.log",
		"testdata/mysql_slowqueries.golden",
	},
}
// TestExamplePrograms runs each example program over its sample log in
// oneshot mode and diffs the resulting metric store against the golden
// file.
func TestExamplePrograms(t *testing.T) {
	testutil.SkipIfShort(t)
	for _, tc := range exampleProgramTests {
		tc := tc // capture the range variable for the subtest closure
		t.Run(fmt.Sprintf("%s on %s", tc.programfile, tc.logfile),
			testutil.TimeoutTest(exampleTimeout, func(t *testing.T) { //nolint:thelper
				ctx, cancel := context.WithCancel(context.Background())
				waker, _ := waker.NewTest(ctx, 0) // oneshot means we should never need to wake the stream
				store := metrics.NewStore()
				programFile := filepath.Join("../..", tc.programfile)
				mtail, err := mtail.New(ctx, store, mtail.ProgramPath(programFile), mtail.LogPathPatterns(tc.logfile), mtail.OneShot, mtail.OmitMetricSource, mtail.DumpAstTypes, mtail.DumpBytecode, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker))
				testutil.FatalIfErr(t, err)
				var wg sync.WaitGroup
				wg.Add(1)
				go func() {
					defer wg.Done()
					testutil.FatalIfErr(t, mtail.Run())
				}()
				// Oneshot mode means we can wait for shutdown before cancelling.
				wg.Wait()
				cancel()
				g, err := os.Open(tc.goldenfile)
				testutil.FatalIfErr(t, err)
				defer g.Close()
				goldenStore := golden.ReadTestData(g, tc.programfile)
				var storeList metrics.MetricSlice
				store.Range(func(m *metrics.Metric) error {
					storeList = append(storeList, m)
					return nil
				})
				// Compare order-insensitively, ignoring unexported fields.
				testutil.ExpectNoDiff(t, goldenStore, storeList, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{}))
			}))
	}
}
// This test only compiles examples, but has coverage over all examples
// provided. This ensures we ship at least syntactically correct examples.
func TestCompileExamplePrograms(t *testing.T) {
	testutil.SkipIfShort(t)
	matches, err := filepath.Glob("../../examples/*.mtail")
	testutil.FatalIfErr(t, err)
	for _, tc := range matches {
		tc := tc // capture the range variable for the subtest closure
		name := filepath.Base(tc)
		t.Run(name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			s := metrics.NewStore()
			mtail, err := mtail.New(ctx, s, mtail.ProgramPath(tc), mtail.CompileOnly, mtail.OmitMetricSource, mtail.DumpAstTypes, mtail.DumpBytecode)
			testutil.FatalIfErr(t, err)
			// Ensure that run shuts down for CompileOnly
			testutil.FatalIfErr(t, mtail.Run())
			cancel()
		})
	}
}
// BenchmarkProgram measures end-to-end throughput of each example
// program: every iteration appends the entire sample log to the tailed
// file and wakes the poll-based stream once.
func BenchmarkProgram(b *testing.B) {
	for _, bm := range exampleProgramTests {
		bm := bm // capture the range variable for the subtest closure
		b.Run(fmt.Sprintf("%s on %s", bm.programfile, bm.logfile), func(b *testing.B) {
			b.ReportAllocs()
			logDir := testutil.TestTempDir(b)
			logFile := filepath.Join(logDir, "test.log")
			log := testutil.TestOpenFile(b, logFile)
			ctx, cancel := context.WithCancel(context.Background())
			waker, awaken := waker.NewTest(ctx, 1)
			store := metrics.NewStore()
			programFile := filepath.Join("../..", bm.programfile)
			mtail, err := mtail.New(ctx, store, mtail.ProgramPath(programFile), mtail.LogPathPatterns(log.Name()), mtail.LogstreamPollWaker(waker))
			testutil.FatalIfErr(b, err)
			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				defer wg.Done()
				testutil.FatalIfErr(b, mtail.Run())
			}()
			var total int64
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				l, err := os.Open(bm.logfile)
				testutil.FatalIfErr(b, err)
				count, err := io.Copy(log, l)
				testutil.FatalIfErr(b, err)
				total += count
				awaken(1)
			}
			cancel()
			wg.Wait()
			b.StopTimer()
			b.SetBytes(total) // report throughput as bytes of log processed
		})
	}
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/examples_integration_unix_test.go 0000664 0000000 0000000 00000015317 14600635717 0026570 0 ustar 00root root 0000000 0000000 // Copyright 2011 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
//go:build unix
// +build unix
package mtail_test
import (
"context"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
"github.com/google/mtail/internal/waker"
"golang.org/x/sys/unix"
)
// TestFilePipeStreamComparison is a unix-specific test since unix.Mkfifo is not defined on Windows.
// It runs two mtail instances over the same log bytes -- one reading
// the file directly, one reading a fifo fed from the file -- and
// expects the resulting metric stores to match.
func TestFilePipeStreamComparison(t *testing.T) {
	testutil.SkipIfShort(t)
	for _, tc := range exampleProgramTests {
		tc := tc // capture the range variable for the subtest closure
		t.Run(fmt.Sprintf("%s on %s", tc.programfile, tc.logfile),
			testutil.TimeoutTest(exampleTimeout, func(t *testing.T) { //nolint:thelper
				ctx, cancel := context.WithCancel(context.Background())
				waker := waker.NewTestAlways()
				fileStore, pipeStore := metrics.NewStore(), metrics.NewStore()
				programFile := filepath.Join("../..", tc.programfile)
				// Set up the pipe
				tmpDir := testutil.TestTempDir(t)
				pipeName := filepath.Join(tmpDir, filepath.Base(tc.logfile))
				testutil.FatalIfErr(t, unix.Mkfifo(pipeName, 0o600))
				var wg sync.WaitGroup
				wg.Add(3)
				// This goroutine copies bytes from the source file into the
				// fifo, once the fifo has been opened for read.
				// NOTE(review): testutil.FatalIfErr presumably calls
				// t.Fatal, which the testing package forbids from any
				// goroutine other than the test's own -- TODO confirm and
				// consider converting these to t.Error + return.
				go func() {
					defer wg.Done()
					source, err := os.OpenFile(tc.logfile, os.O_RDONLY, 0)
					testutil.FatalIfErr(t, err)
					// not NONBLOCK to wait for pipeMtail to start reading the pipe
					pipe, err := os.OpenFile(pipeName, os.O_WRONLY, os.ModeNamedPipe)
					testutil.FatalIfErr(t, err)
					n, err := io.Copy(pipe, source)
					testutil.FatalIfErr(t, err)
					glog.Infof("Copied %d bytes into pipe", n)
					source.Close()
					pipe.Close()
				}()
				// Two mtails both alike in dignity.
				go func() {
					defer wg.Done()
					fileMtail, err := mtail.New(ctx, fileStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(tc.logfile), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker))
					if err != nil {
						t.Error(err)
					}
					if err := fileMtail.Run(); err != nil {
						t.Error(err)
					}
				}()
				go func() {
					defer wg.Done()
					pipeMtail, err := mtail.New(ctx, pipeStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(pipeName), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker))
					testutil.FatalIfErr(t, err)
					if err := pipeMtail.Run(); err != nil {
						t.Error(err)
					}
				}()
				// Oneshot mode means we can wait for shutdown before cancelling.
				wg.Wait()
				cancel()
				var pipeMetrics, fileMetrics metrics.MetricSlice
				pipeStore.Range(func(m *metrics.Metric) error {
					pipeMetrics = append(pipeMetrics, m)
					return nil
				})
				fileStore.Range(func(m *metrics.Metric) error {
					fileMetrics = append(fileMetrics, m)
					return nil
				})
				// Ignore the datum.Time field as well, as the results will be unstable otherwise.
				testutil.ExpectNoDiff(t, fileMetrics, pipeMetrics, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{}), testutil.IgnoreFields(datum.BaseDatum{}, "Time"))
			}))
	}
}
// TestFileSocketStreamComparison is a unix-specific test currently because on Windows, the constructed URL will
// be of the form unix://C:\\path, and this will be interpreted as protocol unix on host C and port \\path.
// It runs two mtail instances over the same log bytes -- one reading
// the file directly, one reading a unix/unixgram socket fed from the
// file -- and expects the resulting metric stores to match.
func TestFileSocketStreamComparison(t *testing.T) {
	testutil.SkipIfShort(t)
	for _, scheme := range []string{"unixgram", "unix"} {
		scheme := scheme
		for _, tc := range exampleProgramTests {
			tc := tc
			t.Run(fmt.Sprintf("%s on %s://%s", tc.programfile, scheme, tc.logfile),
				testutil.TimeoutTest(exampleTimeout, func(t *testing.T) { //nolint:thelper
					ctx, cancel := context.WithCancel(context.Background())
					waker := waker.NewTestAlways()
					fileStore, sockStore := metrics.NewStore(), metrics.NewStore()
					programFile := filepath.Join("../..", tc.programfile)
					// Set up the socket path in a fresh temp dir.
					tmpDir := testutil.TestTempDir(t)
					sockName := filepath.Join(tmpDir, filepath.Base(tc.logfile))
					var wg sync.WaitGroup
					wg.Add(3)
					// One mtail tails the plain file.
					go func() {
						defer wg.Done()
						fileMtail, err := mtail.New(ctx, fileStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(tc.logfile), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker))
						if err != nil {
							t.Error(err)
						}
						if err := fileMtail.Run(); err != nil {
							t.Error(err)
						}
					}()
					// The other reads the same bytes from the socket.
					sockMtail, err := mtail.New(ctx, sockStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(scheme+"://"+sockName), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker))
					testutil.FatalIfErr(t, err)
					go func() {
						defer wg.Done()
						if err := sockMtail.Run(); err != nil {
							t.Error(err)
						}
					}()
					// Copy the sample log into the socket.  Errors inside
					// this goroutine are reported with t.Error + return, not
					// t.Fatal: the testing package forbids calling Fatal
					// from any goroutine other than the one running the
					// test function.
					go func() {
						defer wg.Done()
						source, err := os.OpenFile(tc.logfile, os.O_RDONLY, 0)
						if err != nil {
							t.Error(err)
							return
						}
						defer source.Close()
						// Keyed fields in the composite literal, as required
						// by the go vet composites check for imported types.
						s, err := net.DialUnix(scheme, nil, &net.UnixAddr{Name: sockName, Net: scheme})
						if err != nil {
							t.Error(err)
							return
						}
						defer s.Close()
						n, err := io.Copy(s, source)
						if err != nil {
							t.Error(err)
							return
						}
						glog.Infof("Copied %d bytes into socket", n)
						if scheme == "unixgram" {
							// Write zero bytes after Stop is called to signal that this is the "end of the stream".
							for {
								_, err = s.Write([]byte{})
								if err == nil {
									glog.Infof("Zero bytes written to socket to signal EOF")
									break
								}
								var netErr net.Error
								if errors.As(err, &netErr) && netErr.Timeout() {
									glog.Infof("Write timeout")
									time.Sleep(1 * time.Second)
								} else {
									t.Error(err)
									return
								}
							}
						}
					}()
					// Oneshot mode means we can wait for shutdown before cancelling.
					wg.Wait()
					cancel()
					var sockMetrics, fileMetrics metrics.MetricSlice
					sockStore.Range(func(m *metrics.Metric) error {
						sockMetrics = append(sockMetrics, m)
						return nil
					})
					fileStore.Range(func(m *metrics.Metric) error {
						fileMetrics = append(fileMetrics, m)
						return nil
					})
					// Ignore the datum.Time field as well, as the results will be unstable otherwise.
					testutil.ExpectNoDiff(t, fileMetrics, sockMetrics, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{}), testutil.IgnoreFields(datum.BaseDatum{}, "Time"))
				}))
		}
	}
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/exec_integration_test.go 0000664 0000000 0000000 00000001674 14600635717 0024634 0 ustar 00root root 0000000 0000000 // Copyright 2024 Google Inc. ll Rights Reserved.
// This file is available under the Apache license.
package mtail_test
import (
"context"
"errors"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/golang/glog"
)
// mtailPath is the path to the locally built mtail binary, or empty if
// it was not found at init time (TestExecMtail skips in that case).
var mtailPath string

// init locates the mtail binary built at the repository root so that
// TestExecMtail can execute it.
func init() {
	path, err := exec.LookPath(filepath.Join("..", "..", "mtail"))
	// exec.ErrDot flags resolution via a relative path; that is expected
	// and safe here, so clear it.
	if errors.Is(err, exec.ErrDot) {
		err = nil
	}
	if err != nil {
		glog.Infof("exec_integration_test init(): %v", err)
	}
	mtailPath = path
}
// TestExecMtail runs the built mtail binary over the example programs
// in one-shot mode and fails if the process exits with an error.
func TestExecMtail(t *testing.T) {
	if mtailPath == "" {
		t.Log("mtail binary not found, skipping")
		t.Skip()
	}
	args := []string{
		"-progs", "../../examples",
		"-logs", "testdata/rsyncd.log",
		"-one_shot",
		"-one_shot_format=prometheus",
	}
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	out, err := exec.CommandContext(ctx, mtailPath, args...).CombinedOutput()
	if err != nil {
		t.Logf("%s", out)
		t.Error(err)
	}
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/ 0000775 0000000 0000000 00000000000 14600635717 0021157 5 ustar 00root root 0000000 0000000 mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/reader.go 0000664 0000000 0000000 00000010102 14600635717 0022742 0 ustar 00root root 0000000 0000000 // Copyright 2016 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package golden
import (
"bufio"
"io"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
)
var varRe = regexp.MustCompile(`^(counter|gauge|timer|text|histogram) ([^ ]+)(?: {([^}]+)})?(?: (\S+))?(?: (.+))?`)
// ReadTestData loads a "golden" test data file from a programfile and returns as a slice of Metrics.
func ReadTestData(file io.Reader, programfile string) metrics.MetricSlice {
	store := metrics.NewStore()
	prog := filepath.Base(programfile)
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		glog.V(2).Infof("'%s'\n", scanner.Text())
		match := varRe.FindStringSubmatch(scanner.Text())
		glog.V(2).Infof("len match: %d\n", len(match))
		// Skip lines that don't look like metric declarations.
		if len(match) == 0 {
			continue
		}
		// Parse the optional {key=value,...} label block into parallel
		// keys and vals slices; a literal "" value becomes an empty string.
		keys := make([]string, 0)
		vals := make([]string, 0)
		if match[3] != "" {
			for _, pair := range strings.Split(match[3], ",") {
				glog.V(2).Infof("pair: %s\n", pair)
				kv := strings.Split(pair, "=")
				keys = append(keys, kv[0])
				if kv[1] != "" {
					if kv[1] == `""` {
						vals = append(vals, "")
					} else {
						vals = append(vals, kv[1])
					}
				}
			}
		}
		var kind metrics.Kind
		switch match[1] {
		case "counter":
			kind = metrics.Counter
		case "gauge":
			kind = metrics.Gauge
		case "timer":
			kind = metrics.Timer
		case "text":
			kind = metrics.Text
		case "histogram":
			kind = metrics.Histogram
		}
		glog.V(2).Infof("match[4]: %q", match[4])
		// Infer the value's type: try int first, then float; anything
		// else (including a float that parses to 0.0) is kept as a string.
		typ := metrics.Int
		var (
			ival int64
			fval float64
			sval string
			err  error
		)
		if match[4] != "" {
			ival, err = strconv.ParseInt(match[4], 10, 64)
			if err != nil {
				fval, err = strconv.ParseFloat(match[4], 64)
				typ = metrics.Float
				if err != nil || fval == 0.0 {
					sval = match[4]
					typ = metrics.String
				}
			}
			glog.V(2).Infof("type is %q", typ)
		}
		// Parse the optional timestamp: RFC3339 first, falling back to
		// nanoseconds since the epoch.
		var timestamp time.Time
		glog.V(2).Infof("match 5: %q\n", match[5])
		if match[5] != "" {
			timestamp, err = time.Parse(time.RFC3339, match[5])
			if err != nil {
				j, err := strconv.ParseInt(match[5], 10, 64)
				if err == nil {
					timestamp = time.Unix(j/1000000000, j%1000000000)
				} else {
					glog.V(2).Info(err)
				}
			}
		}
		glog.V(2).Infof("timestamp is %s which is %v in unix", timestamp.Format(time.RFC3339), timestamp.Unix())
		// Now we have enough information to get or create a metric.
		m := store.FindMetricOrNil(match[2], prog)
		if m != nil {
			// An existing metric of a different type can't hold this
			// value; skip the line.
			if m.Type != typ {
				glog.V(2).Infof("The type of the fetched metric is not %s: %s", typ, m)
				continue
			}
		} else {
			m = metrics.NewMetric(match[2], prog, kind, typ, keys...)
			if kind == metrics.Counter && len(keys) == 0 {
				d, err := m.GetDatum()
				if err != nil {
					glog.Fatal(err)
				}
				// Initialize to zero at the zero time.
				switch typ {
				case metrics.Int:
					datum.SetInt(d, 0, time.Unix(0, 0))
				case metrics.Float:
					datum.SetFloat(d, 0, time.Unix(0, 0))
				}
			}
			glog.V(2).Infof("making a new %v\n", m)
			if err := store.Add(m); err != nil {
				glog.Infof("Failed to add metric %v to store: %s", m, err)
			}
		}
		// Record the parsed value against the metric's label values.
		if match[4] != "" {
			d, err := m.GetDatum(vals...)
			if err != nil {
				glog.V(2).Infof("Failed to get datum: %s", err)
				continue
			}
			glog.V(2).Infof("got datum %v", d)
			switch typ {
			case metrics.Int:
				glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, ival, timestamp)
				datum.SetInt(d, ival, timestamp)
			case metrics.Float:
				glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, fval, timestamp)
				datum.SetFloat(d, fval, timestamp)
			case metrics.String:
				glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, sval, timestamp)
				datum.SetString(d, sval, timestamp)
			}
		}
		glog.V(2).Infof("Metric is now %s", m)
	}
	// Flatten the store into a slice for callers to compare against.
	storeList := make([]*metrics.Metric, 0)
	/* #nosec G104 -- Always returns nil. nolint:errcheck */
	store.Range(func(m *metrics.Metric) error {
		storeList = append(storeList, m)
		return nil
	})
	return storeList
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/reader_test.go 0000664 0000000 0000000 00000005516 14600635717 0024016 0 ustar 00root root 0000000 0000000 package golden
import (
"os"
"sync"
"testing"
"time"
"github.com/google/mtail/internal/metrics"
"github.com/google/mtail/internal/metrics/datum"
"github.com/google/mtail/internal/testutil"
)
// expectedMetrics is the parsed form of reader_test.golden, against
// which TestReadTestData diffs the output of ReadTestData.
var expectedMetrics = metrics.MetricSlice{
	{
		Name:    "bytes_total",
		Program: "reader_test",
		Kind:    metrics.Counter,
		Keys:    []string{"operation"},
		LabelValues: []*metrics.LabelValue{
			{
				Labels: []string{"sent"},
				Value:  datum.MakeInt(62793673, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC)),
			},
			{
				Labels: []string{"received"},
				Value:  datum.MakeInt(975017, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC)),
			},
		},
	},
	{
		Name:    "connections_total",
		Program: "reader_test",
		Kind:    metrics.Counter,
		Keys:    []string{},
		LabelValues: []*metrics.LabelValue{
			{
				Value: datum.MakeInt(52, time.Date(2011, 2, 22, 21, 54, 13, 0, time.UTC)),
			},
		},
	},
	{
		Name:    "connection-time_total",
		Program: "reader_test",
		Kind:    metrics.Counter,
		Keys:    []string{},
		LabelValues: []*metrics.LabelValue{
			{
				Value: datum.MakeInt(1181011, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC)),
			},
		},
	},
	{
		Name:    "transfers_total",
		Program: "reader_test",
		Kind:    metrics.Counter,
		Keys:    []string{"operation", "module"},
		LabelValues: []*metrics.LabelValue{
			{
				Labels: []string{"send", "module"},
				Value:  datum.MakeInt(2, time.Date(2011, 2, 23, 5, 50, 32, 0, time.UTC)),
			},
			{
				Labels: []string{"send", "repo"},
				Value:  datum.MakeInt(25, time.Date(2011, 2, 23, 5, 51, 14, 0, time.UTC)),
			},
		},
	},
	{
		Name:        "foo",
		Program:     "reader_test",
		Kind:        metrics.Gauge,
		Keys:        []string{"label"},
		LabelValues: []*metrics.LabelValue{},
	},
	{
		Name:    "bar",
		Program: "reader_test",
		Kind:    metrics.Counter,
		Keys:    []string{},
		LabelValues: []*metrics.LabelValue{
			{
				// Keyless counters are initialised to zero at the zero time.
				Value: datum.MakeInt(0, time.Unix(0, 0)),
			},
		},
	},
	{
		Name:    "floaty",
		Program: "reader_test",
		Kind:    metrics.Gauge,
		Type:    metrics.Float,
		Keys:    []string{},
		LabelValues: []*metrics.LabelValue{
			{
				Labels: []string{},
				Value:  datum.MakeFloat(37.1, time.Date(2017, 6, 15, 18, 9, 37, 0, time.UTC)),
			},
		},
	},
	{
		Name:    "stringy",
		Program: "reader_test",
		Kind:    metrics.Text,
		Type:    metrics.String,
		Keys:    []string{},
		LabelValues: []*metrics.LabelValue{
			{
				Labels: []string{},
				Value:  datum.MakeString("hi", time.Date(2018, 6, 16, 18, 4, 0, 0, time.UTC)),
			},
		},
	},
}
// TestReadTestData parses the checked-in golden file and diffs the
// result against the hand-written expectedMetrics fixture.
func TestReadTestData(t *testing.T) {
	f, err := os.Open("reader_test.golden")
	testutil.FatalIfErr(t, err)
	defer f.Close()
	got := ReadTestData(f, "reader_test")
	testutil.ExpectNoDiff(t, expectedMetrics, got, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{}))
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/reader_test.golden 0000664 0000000 0000000 00000000773 14600635717 0024661 0 ustar 00root root 0000000 0000000 counter bytes_total {operation=sent} 62793673 2011-02-23T05:54:10Z
counter bytes_total {operation=received} 975017 2011-02-23T05:54:10Z
counter connections_total 52 2011-02-22T21:54:13Z
counter connection-time_total 1181011 2011-02-23T05:54:10Z
counter transfers_total {operation=send,module=module} 2 2011-02-23T05:50:32Z
counter transfers_total {operation=send,module=repo} 25 2011-02-23T05:51:14Z
gauge foo {label=}
counter bar
gauge floaty 37.1 2017-06-15T18:09:37Z
text stringy hi 2018-06-16T18:04:00Z
mtail-3.0.0~rc54+git0ff5/internal/mtail/httpstatus.go 0000664 0000000 0000000 00000004653 14600635717 0022471 0 ustar 00root root 0000000 0000000 // Copyright 2020 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail
import (
"html/template"
"net/http"
"github.com/golang/glog"
)
// statusTemplate is the head of the HTML status page served at the root
// path; it is followed by any loader/tailer status and then
// statusTemplateEnd.  NOTE(review): the markup here looks stripped of its
// HTML tags in this listing — confirm the template text against the
// upstream source before editing it.
const statusTemplate = `
mtail on {{.BindAddress}}
mtail on {{.BindAddress}}
Build: {{.BuildInfo}}
Metrics: json, graphite, prometheus
Info: {{ if .HTTPInfoEndpoints }}varz, progz tracez
{{ else }} disabled {{ end }}
Debug: {{ if .HTTPDebugEndpoints }}debug/pprof, debug/vars{{ else }} disabled {{ end }}
`

// statusTemplateEnd closes the status page after the optional status
// sections have been written.
const statusTemplateEnd = `
`
// ServeHTTP satisfies the http.Handler interface, and is used to serve the
// root page of mtail for online status reporting.
func (m *Server) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
	// Parse both page templates up front so any template error can still be
	// reported with a 500 before the header is written.
	headTmpl, err := template.New("status").Parse(statusTemplate)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	tailTmpl, err := template.New("statusend").Parse(statusTemplateEnd)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Values interpolated into the page templates.
	data := struct {
		BindAddress        string
		BuildInfo          string
		HTTPDebugEndpoints bool
		HTTPInfoEndpoints  bool
	}{
		BindAddress:        m.listener.Addr().String(),
		BuildInfo:          m.buildInfo.String(),
		HTTPDebugEndpoints: m.httpDebugEndpoints,
		HTTPInfoEndpoints:  m.httpInfoEndpoints,
	}
	w.Header().Add("Content-type", "text/html")
	w.WriteHeader(http.StatusOK)
	if err = headTmpl.Execute(w, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
	if m.httpInfoEndpoints {
		// Interleave loader and tailer status between the page head and tail.
		if err = m.r.WriteStatusHTML(w); err != nil {
			glog.Warningf("Error while writing loader status: %s", err)
		}
		if err = m.t.WriteStatusHTML(w); err != nil {
			glog.Warningf("Error while writing tailer status: %s", err)
		}
	}
	if err = tailTmpl.Execute(w, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// FaviconHandler is used to serve up the favicon.ico for mtail's http server.
func FaviconHandler(w http.ResponseWriter, _ *http.Request) {
	h := w.Header()
	h.Set("Content-Type", "image/x-icon")
	h.Set("Cache-Control", "public, max-age=7776000")
	_, err := w.Write(logoFavicon)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/log_deletion_integration_unix_test.go 0000664 0000000 0000000 00000002176 14600635717 0027415 0 ustar 00root root 0000000 0000000 // Copyright 2020 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
//go:build unix
// +build unix
package mtail_test
import (
"os"
"path/filepath"
"testing"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
// TestLogDeletion is a unix-only test because on Windows files with open read handles cannot be deleted.
func TestLogDeletion(t *testing.T) {
	testutil.SkipIfShort(t)
	tmpDir := testutil.TestTempDir(t)

	// Create an empty log file for the server to track.
	logPath := filepath.Join(tmpDir, "log")
	f := testutil.TestOpenFile(t, logPath)
	defer f.Close()

	m, stopM := mtail.TestStartServer(t, 1, mtail.LogPathPatterns(logPath))
	defer stopM()

	// Deleting the log should register one close and drop the log count by one.
	closeCheck := m.ExpectMapExpvarDeltaWithDeadline("log_closes_total", logPath, 1)
	countCheck := m.ExpectExpvarDeltaWithDeadline("log_count", -1)

	m.PollWatched(1) // Force sync to EOF
	glog.Info("remove")
	testutil.FatalIfErr(t, os.Remove(logPath))

	m.PollWatched(0) // one pass to stop
	closeCheck()
	m.PollWatched(0) // one pass to remove completed stream
	countCheck()
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/log_glob_integration_test.go 0000664 0000000 0000000 00000011775 14600635717 0025477 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail_test
import (
"expvar"
"os"
"path/filepath"
"testing"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
// TestGlobBeforeStart verifies that log files matching the glob pattern
// which already exist when the server starts are tracked, and that
// non-matching files are not.
func TestGlobBeforeStart(t *testing.T) {
	testutil.SkipIfShort(t)

	workdir := testutil.TestTempDir(t)

	globTests := []struct {
		name     string
		expected bool
	}{
		{
			filepath.Join(workdir, "log1"),
			true,
		},
		{
			filepath.Join(workdir, "log2"),
			true,
		},
		{
			filepath.Join(workdir, "1log"),
			false,
		},
	}
	var count int64
	for _, tt := range globTests {
		log := testutil.TestOpenFile(t, tt.name)
		if tt.expected {
			count++
		}
		testutil.WriteString(t, log, "\n")
		log.Close()
	}
	m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*")))
	stopM()

	// r is an expvar.Var interface value (*expvar.Int), not an integer, so
	// print it with %v as the sibling tests do; %d would render fmt noise
	// instead of the count.
	if r := m.GetExpvar("log_count"); r.(*expvar.Int).Value() != count {
		t.Errorf("Expecting log count of %d, received %v", count, r)
	}
}
// TestGlobAfterStart verifies that log files created after the server has
// started are picked up when they match the glob pattern.
func TestGlobAfterStart(t *testing.T) {
	testutil.SkipIfShort(t)

	workdir := testutil.TestTempDir(t)

	cases := []struct {
		path    string
		matched bool
	}{
		{filepath.Join(workdir, "log1"), true},
		{filepath.Join(workdir, "log2"), true},
		{filepath.Join(workdir, "1log"), false},
	}

	m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*")))
	defer stopM()

	m.PollWatched(0) // Force sync to EOF

	// Count how many of the created files should be tracked.
	var want int64
	for _, tc := range cases {
		if tc.matched {
			want++
		}
	}
	logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", want)
	for _, tc := range cases {
		f := testutil.TestOpenFile(t, tc.path)
		defer f.Close()
		m.PollWatched(0) // Force sync to EOF
	}
	logCountCheck()
}
// TestGlobIgnoreFolder verifies that directories matching the log glob and
// files matching the ignore regex are not counted as tracked logs.
func TestGlobIgnoreFolder(t *testing.T) {
	testutil.SkipIfShort(t)

	workdir := testutil.TestTempDir(t)

	globTests := []struct {
		name     string
		isFolder bool
		expected bool
	}{
		{
			filepath.Join(workdir, "log1"),
			false,
			true,
		},
		{
			filepath.Join(workdir, "logarchive"),
			true,
			false,
		},
		{
			filepath.Join(workdir, "log2.gz"),
			false,
			false,
		},
	}
	var count int64
	for _, tt := range globTests {
		if tt.isFolder {
			testutil.FatalIfErr(t, os.Mkdir(tt.name, 0o700))
			continue
		}
		log, err := os.Create(tt.name)
		// Check the error before registering the deferred Close: if Create
		// failed, log would be nil and the defer would run on a nil handle.
		testutil.FatalIfErr(t, err)
		defer log.Close()
		// Folders were skipped above, so only the expected flag matters here.
		if tt.expected {
			count++
		}
		testutil.WriteString(t, log, "\n")
	}

	m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*")), mtail.IgnoreRegexPattern("\\.gz"))
	stopM()

	if r := m.GetExpvar("log_count"); r.(*expvar.Int).Value() != count {
		t.Errorf("Expecting log count of %d, received %v", count, r)
	}
}
// TestFilenameRegexIgnore verifies that the ignore regex excludes matching
// filenames from the log count while non-matching names are still tracked.
func TestFilenameRegexIgnore(t *testing.T) {
	testutil.SkipIfShort(t)

	workdir := testutil.TestTempDir(t)

	cases := []struct {
		path    string
		counted bool
	}{
		{filepath.Join(workdir, "log1"), true},
		{filepath.Join(workdir, "log1.gz"), false},
		{filepath.Join(workdir, "log2gz"), true},
	}

	var want int64
	for _, tc := range cases {
		f, err := os.Create(tc.path)
		testutil.FatalIfErr(t, err)
		defer f.Close()
		if tc.counted {
			want++
		}
		testutil.WriteString(t, f, "\n")
	}

	m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*")), mtail.IgnoreRegexPattern("\\.gz"))
	stopM()

	if r := m.GetExpvar("log_count"); r.(*expvar.Int).Value() != want {
		t.Errorf("Log count not matching, expected: %d received: %v", want, r)
	}
}
// TestGlobRelativeAfterStart verifies that relative glob patterns are
// resolved against the current working directory while the server runs.
func TestGlobRelativeAfterStart(t *testing.T) {
	testutil.SkipIfShort(t)

	tmpDir := testutil.TestTempDir(t)

	logDir := filepath.Join(tmpDir, "logs")
	progDir := filepath.Join(tmpDir, "progs")
	testutil.FatalIfErr(t, os.Mkdir(logDir, 0o700))
	testutil.FatalIfErr(t, os.Mkdir(progDir, 0o700))

	// Move to logdir to make relative paths
	testutil.Chdir(t, logDir)

	m, stopM := mtail.TestStartServer(t, 1, mtail.ProgramPath(progDir), mtail.LogPathPatterns("log.*"))
	defer stopM()

	{
		// First matching file: expect the log count to rise by one.
		check := m.ExpectExpvarDeltaWithDeadline("log_count", 1)

		f := testutil.TestOpenFile(t, filepath.Join(logDir, "log.1.txt"))
		defer f.Close()
		m.PollWatched(1) // Force sync to EOF

		testutil.WriteString(t, f, "line 1\n")
		m.PollWatched(1)

		check()
	}

	{
		// Second matching file: the count rises again.
		check := m.ExpectExpvarDeltaWithDeadline("log_count", 1)

		f := testutil.TestOpenFile(t, filepath.Join(logDir, "log.2.txt"))
		defer f.Close()
		m.PollWatched(2)

		testutil.WriteString(t, f, "line 1\n")
		m.PollWatched(2)

		check()
	}

	{
		// Reopening an already-tracked file must not change the count.
		check := m.ExpectExpvarDeltaWithDeadline("log_count", 0)

		f := testutil.TestOpenFile(t, filepath.Join(logDir, "log.2.txt"))
		defer f.Close()
		m.PollWatched(2)

		testutil.WriteString(t, f, "line 2\n")
		m.PollWatched(2)

		check()
	}
	glog.Infof("end")
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/log_rotation_integration_test.go 0000664 0000000 0000000 00000004515 14600635717 0026405 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail_test
import (
"fmt"
"os"
"path/filepath"
"sync"
"testing"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
// TestLogSoftLinkChange verifies that mtail keeps following a symlinked log
// path when the link is retargeted from one backing file to another, both
// with and without simulating a race between link removal and recreation.
func TestLogSoftLinkChange(t *testing.T) {
	testutil.SkipIfShort(t)
	for _, tc := range []bool{false, true} {
		tc := tc // capture range variable for the subtest closure
		name := "disabled"
		if tc {
			name = "enabled"
		}
		t.Run(fmt.Sprintf("race simulation %s", name), func(t *testing.T) {
			workdir := testutil.TestTempDir(t)
			logFilepath := filepath.Join(workdir, "log")
			m, stopM := mtail.TestStartServer(t, 1, mtail.LogPathPatterns(logFilepath))
			defer stopM()
			// Overall expectations: one tracked log, and two opens — one per
			// symlink target.
			logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1)
			logOpensTotalCheck := m.ExpectMapExpvarDeltaWithDeadline("log_opens_total", logFilepath, 2)
			// First backing file, pointed to by the symlink.
			trueLog1 := testutil.TestOpenFile(t, logFilepath+".true1")
			defer trueLog1.Close()
			testutil.FatalIfErr(t, os.Symlink(logFilepath+".true1", logFilepath))
			glog.Info("symlinked")
			m.PollWatched(1)
			inputLines := []string{"hi1", "hi2", "hi3"}
			for _, x := range inputLines {
				testutil.WriteString(t, trueLog1, x+"\n")
			}
			m.PollWatched(1)
			// Second backing file; not yet linked, so not yet tracked.
			trueLog2 := testutil.TestOpenFile(t, logFilepath+".true2")
			defer trueLog2.Close()
			m.PollWatched(1)
			logClosedCheck := m.ExpectMapExpvarDeltaWithDeadline("log_closes_total", logFilepath, 1)
			logCompletedCheck := m.ExpectExpvarDeltaWithDeadline("log_count", -1)
			// Remove the link; in race-simulation mode, let the tailer observe
			// the removal and garbage-collect the stream before relinking.
			testutil.FatalIfErr(t, os.Remove(logFilepath))
			if tc {
				m.PollWatched(0)    // simulate race condition with this poll.
				logClosedCheck()    // sync when filestream closes fd
				m.PollWatched(0)    // invoke the GC
				logCompletedCheck() // sync to when the logstream is removed from tailer
			}
			// Retarget the symlink at the second backing file and write again.
			testutil.FatalIfErr(t, os.Symlink(logFilepath+".true2", logFilepath))
			m.PollWatched(1)
			for _, x := range inputLines {
				testutil.WriteString(t, trueLog2, x+"\n")
			}
			m.PollWatched(1)
			// Wait for both expvar deltas concurrently.
			var wg sync.WaitGroup
			wg.Add(2)
			go func() {
				defer wg.Done()
				logCountCheck()
			}()
			go func() {
				defer wg.Done()
				logOpensTotalCheck()
			}()
			wg.Wait()
			// Both backing files must still exist afterwards.
			_, err := os.Stat(logFilepath + ".true1")
			testutil.FatalIfErr(t, err)
			_, err = os.Stat(logFilepath + ".true2")
			testutil.FatalIfErr(t, err)
		})
	}
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/log_rotation_integration_unix_test.go 0000664 0000000 0000000 00000004752 14600635717 0027453 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
//go:build unix
// +build unix
package mtail_test
import (
"fmt"
"os"
"path/filepath"
"sync"
"testing"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
// TestLogRotation is a unix-specific test because on Windows, files cannot be removed
// or renamed while there is an open read handle on them. Instead, log rotation would
// have to be implemented by copying and then truncating the original file. That test
// case is already covered by TestLogTruncation.
func TestLogRotation(t *testing.T) {
	testutil.SkipIfShort(t)
	for _, tc := range []bool{false, true} {
		tc := tc // capture range variable for the subtest closure
		name := "disabled"
		if tc {
			name = "enabled"
		}
		t.Run(fmt.Sprintf("race simulation %s", name), func(t *testing.T) {
			tmpDir := testutil.TestTempDir(t)
			logDir := filepath.Join(tmpDir, "logs")
			progDir := filepath.Join(tmpDir, "progs")
			err := os.Mkdir(logDir, 0o700)
			testutil.FatalIfErr(t, err)
			err = os.Mkdir(progDir, 0o700)
			testutil.FatalIfErr(t, err)
			logFile := filepath.Join(logDir, "log")
			f := testutil.TestOpenFile(t, logFile)
			defer f.Close()
			m, stopM := mtail.TestStartServer(t, 1, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+"/log"))
			defer stopM()
			// Expect one re-open after rotation, and three lines in total
			// (two before the rename, one after).
			logOpensTotalCheck := m.ExpectMapExpvarDeltaWithDeadline("log_opens_total", logFile, 1)
			logLinesTotalCheck := m.ExpectMapExpvarDeltaWithDeadline("log_lines_total", logFile, 3)
			testutil.WriteString(t, f, "line 1\n")
			m.PollWatched(1)
			testutil.WriteString(t, f, "line 2\n")
			m.PollWatched(1)
			logClosedCheck := m.ExpectMapExpvarDeltaWithDeadline("log_closes_total", logFile, 1)
			logCompletedCheck := m.ExpectExpvarDeltaWithDeadline("log_count", -1)
			glog.Info("rename")
			// Rotate the file out from under the tailer.
			err = os.Rename(logFile, logFile+".1")
			testutil.FatalIfErr(t, err)
			if tc {
				m.PollWatched(0)    // simulate race condition with this poll.
				logClosedCheck()    // sync when filestream closes fd
				m.PollWatched(0)    // invoke the GC
				logCompletedCheck() // sync to when the logstream is removed from tailer
			}
			glog.Info("create")
			// Recreate the log at the original path, as a rotator would.
			f = testutil.TestOpenFile(t, logFile)
			m.PollWatched(1)
			testutil.WriteString(t, f, "line 1\n")
			m.PollWatched(1)
			// Wait for both expvar deltas concurrently.
			var wg sync.WaitGroup
			wg.Add(2)
			go func() {
				defer wg.Done()
				logLinesTotalCheck()
			}()
			go func() {
				defer wg.Done()
				logOpensTotalCheck()
			}()
			wg.Wait()
		})
	}
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/log_truncation_integration_test.go 0000664 0000000 0000000 00000003020 14600635717 0026722 0 ustar 00root root 0000000 0000000 // Copyright 2019 Google Inc. All Rights Reserved.
// This file is available under the Apache license.
package mtail_test
import (
"os"
"path/filepath"
"testing"
"github.com/golang/glog"
"github.com/google/mtail/internal/mtail"
"github.com/google/mtail/internal/testutil"
)
// TestLogTruncation verifies that truncating a tracked log and writing new
// content is observed as the same log with additional lines.
func TestLogTruncation(t *testing.T) {
	testutil.SkipIfShort(t)

	tmpDir := testutil.TestTempDir(t)

	logDir := filepath.Join(tmpDir, "logs")
	progDir := filepath.Join(tmpDir, "progs")
	testutil.FatalIfErr(t, os.Mkdir(logDir, 0o700))
	testutil.FatalIfErr(t, os.Mkdir(progDir, 0o700))

	m, stopM := mtail.TestStartServer(t, 1, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+"/log"))
	defer stopM()

	countCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1)
	linesCheck := m.ExpectExpvarDeltaWithDeadline("lines_total", 2)

	logFile := filepath.Join(logDir, "log")
	f := testutil.TestOpenFile(t, logFile)
	defer f.Close()
	m.PollWatched(1)

	testutil.WriteString(t, f, "line 1\n")
	m.PollWatched(1)
	// After the last barrier, the filestream may not race ahead of the test
	// here, so we need to ensure that a whole filestream loop occurs and that
	// the file offset advances for this test to succeed, hence the second
	// barrier here.
	m.PollWatched(1)

	testutil.FatalIfErr(t, f.Close())

	glog.Info("truncate")
	var err error
	f, err = os.OpenFile(logFile, os.O_TRUNC|os.O_WRONLY, 0o600)
	testutil.FatalIfErr(t, err)
	defer f.Close()
	m.PollWatched(1)

	testutil.WriteString(t, f, "2\n")
	m.PollWatched(1)

	linesCheck()
	countCheck()
}
mtail-3.0.0~rc54+git0ff5/internal/mtail/logo.ico 0000664 0000000 0000000 00000076446 14600635717 0021364 0 ustar 00root root 0000000 0000000 @@ (B F 00 % nB h h x ( @ @ ۘ4 ۘ4 ۘ4ڗ3ڗ3 ה0 ۘ4 ۘ4%ۘ4ۘ4ڗ3ܙ5 ۘ4 ڗ3 ۘ4 ۘ4%ۘ4ۘ4ڗ3ڗ3ڗ3 ڗ3 ڗ3 ۘ4 ۘ4%ۘ4ۘ4ۘ4ڗ3ڗ3ڗ3 ۘ4 ۘ4%ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ۘ4 ܙ5 ۘ4 ۘ4 ۘ4%ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ۘ4 ֓/ ۘ4 ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3 ؕ1 ۘ1 ڙ/ۘ3ۘ4ۘ4!ۘ4Aۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ۘ4 ڗ3 ڗ3 ۠ ܗ; ۙ2ۗ36ۘ3lۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ژ3 ڗ3 ۘ3 ۗ3ۙ3*ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ڗ3 ۚ2 ݜ0ۘ3Bۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ژ3 &