pax_global_header00006660000000000000000000000064146006357170014523gustar00rootroot0000000000000052 comment=0ff5a18c11af1970b0ef16a31423c66623ac24e9 mtail-3.0.0~rc54+git0ff5/000077500000000000000000000000001460063571700147655ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/.github/000077500000000000000000000000001460063571700163255ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/.github/CODEOWNERS000066400000000000000000000000121460063571700177110ustar00rootroot00000000000000* @jaqx0r mtail-3.0.0~rc54+git0ff5/.github/ISSUE_TEMPLATE/000077500000000000000000000000001460063571700205105ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000013541460063571700232050ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: '' labels: '' assignees: '' --- Thanks for discovering a problem in `mtail`! When reporting bugs in mtail behaviour, please be as detailed as possible; describe the problem, what you wanted to have happen, what you observed instead. If your problem is with the way an `mtail` program is behaving, please attach or include inline any mtail programs that demonstrate the bug, any log files that mtail was processing, and the observed output. If your problem is with `mtail`, please include the commandline you started it with, and the INFO log. See also [Reporting a problem](https://github.com/google/mtail/blob/main/docs/Troubleshooting.md#reporting-a-problem). Thanks! 
mtail-3.0.0~rc54+git0ff5/.github/dependabot.yml000066400000000000000000000003151460063571700211540ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" - package-ecosystem: "gomod" directory: "/" schedule: interval: "daily" mtail-3.0.0~rc54+git0ff5/.github/workflows/000077500000000000000000000000001460063571700203625ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/.github/workflows/auto-review.yml000066400000000000000000000026241460063571700233600ustar00rootroot00000000000000# This is a single-maintainer project but I want to require reviews before # merge, which means that I need a bot to review my own work. name: Automatic pull request approvals on: merge_group: pull_request_target: types: - opened - reopened - synchronize - ready_for_review check_suite: types: - completed jobs: auto-approve: runs-on: ubuntu-latest if: > github.event.pull_request.head.repo.full_name == github.repository && github.event.pull_request.draft == false && ( github.event.action == 'opened' || github.event.action == 'reopened' || github.event.action == 'synchronize' ) && ( github.actor == 'jaqx0r' ) permissions: # wait on check checks: read # create review pull-requests: write steps: - uses: lewagon/wait-on-check-action@v1.3.3 with: ref: ${{ github.event.pull_request.head.sha }} repo-token: ${{ github.token }} check-regexp: "test.*" wait-interval: 60 - uses: "actions/github-script@v7" with: github-token: ${{ github.token }} script: | await github.rest.pulls.createReview({ event: "APPROVE", owner: context.repo.owner, pull_number: context.payload.pull_request.number, repo: context.repo.repo, }) mtail-3.0.0~rc54+git0ff5/.github/workflows/automerge.yml000066400000000000000000000042461460063571700231030ustar00rootroot00000000000000# We "trust" dependabot updates once they pass tests. # (this still requires all other checks to pass!) 
# This doesn't work on forked repos per the discussion in # https://github.com/pascalgn/automerge-action/issues/46 so don't attempt to # add people other than dependabot to the if field below. name: dependabot-auto-merge on: pull_request_target: types: # Dependabot will label the PR - labeled # Dependabot has rebased the PR - synchronize jobs: enable-automerge: if: github.event.pull_request.user.login == 'dependabot[bot]' && contains(github.event.pull_request.labels.*.name, 'dependencies') runs-on: ubuntu-latest permissions: # enable-automerge is a graphql query, not REST, so isn't documented, # except in a mention in # https://github.blog/changelog/2021-02-04-pull-request-auto-merge-is-now-generally-available/ # which says "can only be enabled by users with permissino to merge"; the # REST documentation says you need contents: write to perform a merge. # https://github.community/t/what-permission-does-a-github-action-need-to-call-graphql-enablepullrequestautomerge/197708 # says this is it contents: write steps: # Enable auto-merge *before* issuing an approval. 
- uses: alexwilson/enable-github-automerge-action@main with: github-token: "${{ secrets.GITHUB_TOKEN }}" wait-on-checks: needs: enable-automerge runs-on: ubuntu-latest permissions: # wait-on-check requires only checks read checks: read steps: - uses: lewagon/wait-on-check-action@v1.3.3 with: ref: ${{ github.event.pull_request.head.sha }} check-regexp: "test.*" repo-token: ${{ secrets.GITHUB_TOKEN }} wait-interval: 60 approve: needs: wait-on-checks runs-on: ubuntu-latest permissions: # https://github.com/hmarr/auto-approve-action/issues/183 says # auto-approve-action requires write on pull-requests pull-requests: write steps: - uses: hmarr/auto-approve-action@f0939ea97e9205ef24d872e76833fa908a770363 with: github-token: "${{ secrets.GITHUB_TOKEN }}" mtail-3.0.0~rc54+git0ff5/.github/workflows/ci-done.yml000066400000000000000000000033671460063571700224340ustar00rootroot00000000000000name: Comment CI test results on PR on: workflow_run: workflows: ["CI"] types: - completed jobs: comment: strategy: matrix: # Sync with matrix in ci.yml runs-on: [ubuntu-latest] runs-on: ${{ matrix.runs-on }} permissions: # list and download actions: read # post results as comment pull-requests: write # publish creates a check run checks: write steps: - uses: actions/github-script@v7 with: script: | var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ owner: context.repo.owner, repo: context.repo.repo, run_id: ${{github.event.workflow_run.id }}, }); var matchArtifact = artifacts.data.artifacts.filter((artifact) => { return artifact.name == "test-results-${{ matrix.runs-on }}" })[0]; var download = await github.rest.actions.downloadArtifact({ owner: context.repo.owner, repo: context.repo.repo, artifact_id: matchArtifact.id, archive_format: 'zip', }); var fs = require('fs'); fs.writeFileSync('${{github.workspace}}/test-results.zip', Buffer.from(download.data)); - id: unpack run: | mkdir -p test-results unzip -d test-results test-results.zip echo "sha=$(cat 
test-results/sha-number)" >> $GITHUB_OUTPUT - uses: docker://ghcr.io/enricomi/publish-unit-test-result-action:v1.6 with: commit: ${{ steps.unpack.outputs.sha }} check_name: Unit Test Results github_token: ${{ secrets.GITHUB_TOKEN }} files: "**/test-results/**/*.xml" mtail-3.0.0~rc54+git0ff5/.github/workflows/ci.yml000066400000000000000000000065421460063571700215070ustar00rootroot00000000000000name: CI on: push: tags: - v* branches: - main pull_request: merge_group: env: GOPROXY: "https://proxy.golang.org" permissions: # none-all, which doesn't exist, but # https://docs.github.com/en/actions/reference/authentication-in-a-workflow#using-the-github_token-in-a-workflow # implies that the token still gets created. Elsewhere we learn that any # permission not mentioned here gets turned to `none`. actions: none jobs: test: strategy: matrix: # macos-latest is slow and has weird test failures with unixgram message sizes, so it's been disabled. # Sync with matrix in ci-done.yml runs-on: [ubuntu-latest, windows-latest] runs-on: ${{ matrix.runs-on }} steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version-file: 'go.mod' cache: true - name: install deps run: go mod download - name: build run: make --debug all - name: test run: | mkdir -p test-results # Don't use GITHUB_SHA as we need the head of the branch, not the # secret merge commit of the PR itself. 
https://help.github.com/en/actions/automating-your-workflow-with-github-actions/events-that-trigger-workflows#pull-request-event-pull_request if [[ ${{ github.event_name }} == 'pull_request' ]]; then echo ${{ github.event.pull_request.head.sha }} > test-results/sha-number else echo ${{ github.sha }} > test-results/sha-number fi make --debug junit-regtest TESTCOVERPROFILE=coverprofile shell: bash - uses: codecov/codecov-action@v4 if: always() with: file: coverprofile - uses: actions/upload-artifact@v4 if: always() with: name: test-results-${{ matrix.runs-on }} path: test-results/ container: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: fetch-depth: 0 # so that we get tags - run: make --debug container gosec: runs-on: ubuntu-latest # gosec is regularly broken and reporting false positives, don't let it interfere continue-on-error: true permissions: security-events: write steps: - uses: actions/checkout@v4 - uses: securego/gosec@master with: # we let the report trigger content trigger a failure using the GitHub Security features. args: '-no-fail -fmt sarif -out results.sarif -tags fuzz ./...' 
- uses: github/codeql-action/upload-sarif@v3 with: # Path to SARIF file relative to the root of the repository sarif_file: results.sarif fuzz: runs-on: ubuntu-latest container: image: gcr.io/oss-fuzz-base/base-builder steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version: '^1.x' - uses: actions/cache@v4 id: cache with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go- - name: install deps if: steps.cache.output.cache-hit != 'true' run: make --debug install_deps - name: local fuzz regtest run: make --debug CXX=clang LIB_FUZZING_ENGINE=-fsanitize=fuzzer fuzz-regtest mtail-3.0.0~rc54+git0ff5/.github/workflows/codeql-analysis.yml000066400000000000000000000046161460063571700242040ustar00rootroot00000000000000# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ main ] pull_request: # The branches below must be a subset of the branches above branches: [ main ] schedule: - cron: '34 6 * * 3' permissions: # https://github.com/github/codeql-action/issues/464 security-events: write jobs: analyze: name: Analyze runs-on: ubuntu-latest strategy: fail-fast: false matrix: language: [ 'go' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] # Learn more: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 mtail-3.0.0~rc54+git0ff5/.github/workflows/golangci-lint.yml000066400000000000000000000012641460063571700236370ustar00rootroot00000000000000name: golangci-lint on: push: tags: - v* branches: - main pull_request: permissions: # golangci-lint does annotations, not comments # No-one knows what an annotation is, but I suspect it's printing file:line: msg to stdout. # https://github.community/t/what-are-annotations/16173/2 checks: none jobs: golangci: name: lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: golangci/golangci-lint-action@v4 with: # Required: the version of golangci-lint is required and must be # specified without patch version: we always use the latest patch # version. 
version: v1.57 mtail-3.0.0~rc54+git0ff5/.github/workflows/oss-fuzz.yml000066400000000000000000000015171460063571700227110ustar00rootroot00000000000000name: OSS-Fuzz on: pull_request: paths: - '**.go' - 'internal/runtime/compiler/parser/parser.y' - 'Makefile' - 'Dockerfile' permissions: # Secret code for "the github token should have no tokens at all" actions: none jobs: oss-fuzz: runs-on: ubuntu-latest steps: - name: Build Fuzzers id: build uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master with: oss-fuzz-project-name: 'mtail' dry-run: false - name: Run Fuzzers uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master with: oss-fuzz-project-name: 'mtail' dry-run: false - name: Upload Crash uses: actions/upload-artifact@v4 if: failure() && steps.build.outcome == 'success' with: name: artifacts path: ./out/artifacts mtail-3.0.0~rc54+git0ff5/.github/workflows/release.yml000066400000000000000000000041561460063571700225330ustar00rootroot00000000000000name: release on: # Test that it works on pull_request or merge group; # goreleaser goes into snapshot mode if not a tag; # docker image will be built but not pushed for pull requests or merge group events. 
pull_request: merge_group: push: tags: - v* env: # Use docker.io for Docker Hub if empty REGISTRY: ghcr.io # github.repository as / IMAGE_NAME: ${{ github.repository }} jobs: goreleaser: runs-on: ubuntu-latest permissions: # goreleaser writes to the releases api contents: write env: flags: "" steps: - if: ${{ !startsWith(github.ref, 'refs/tags/v') }} run: echo "flags=--snapshot" >> $GITHUB_ENV - uses: actions/checkout@v4 with: fetch-depth: 0 - run: git fetch --force --tags - uses: actions/setup-go@v5 with: go-version-file: 'go.mod' cache: true - uses: goreleaser/goreleaser-action@v5 with: version: latest args: release --rm-dist ${{ env.flags }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} docker-release: runs-on: ubuntu-latest permissions: # docker writes packages to container registry packages: write steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - run: git fetch --force --tags - name: Log in to the Container registry uses: docker/login-action@v3.1.0 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@v5.5.1 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - name: Build and push Docker image (image is not pushed on pull request) uses: docker/build-push-action@v5.3.0 with: context: . 
push: ${{ github.event_name != 'pull_request' && github.event_name != 'merge_group' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} mtail-3.0.0~rc54+git0ff5/.github/workflows/stale.yml000066400000000000000000000011211460063571700222100ustar00rootroot00000000000000name: "Close stale issues" on: schedule: - cron: "30 1 * * *" permissions: pull-requests: write issues: write jobs: stale: runs-on: ubuntu-latest steps: - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'This issue has been waiting for more information for more than 60 days and will be closed in 7 if no update is provided.' stale-pr-message: 'This PR has been waiting for an update for more than 60 days and wlil be closed in 7 if no update is provided.' only-labels: 'more-info-needed' mtail-3.0.0~rc54+git0ff5/.gitlab-ci.yml000066400000000000000000000007661460063571700174320ustar00rootroot00000000000000image: golang:latest stages: - test - build before_script: - mkdir ${CI_PROJECT_DIR}/build - mkdir -p ${GOPATH}/src/github.com/google/ - ln -s $(pwd) ${GOPATH}/src/github.com/google/mtail - cd ${GOPATH}/src/github.com/google/mtail test: stage: test allow_failure: true script: - make install_deps - make test build: stage: build script: - PREFIX=${CI_PROJECT_DIR}/build make install artifacts: expire_in: 1 week when: on_success paths: - build mtail-3.0.0~rc54+git0ff5/.golangci.yml000066400000000000000000000053471460063571700173620ustar00rootroot00000000000000run: tests: true build-tags: - integration - fuzz # fail if go.mod needs changing modules-download-mode: readonly linters-settings: govet: enable-all: true disable: - composites # same as exhaustruct below - fieldalignment asasalint: exclude: - glog\.Infof linters: presets: - bugs - error - format - import - module - performance - test - unused - metalinter enable: - exportloopref # A general rule is if the lint author can't be bothered supplying automated # fixes for obvious lint 
warnings, I'm not bothered using their tool. disable: - cyclop # boo cyclomatic complexity - dupl # exclude test code - depguard - errcheck # handled by gosec, lots of false posi - exhaustive # this false-positives for switches with a default - exhaustivestruct # too noisy, labelling fields is not my jam - exhaustruct # above, renamed - forbidigo # exclude non prod tools - forcetypeassert # too many at the moment - funlen # My tests will be as long as they need to be thanks - gci - gochecknoglobals # Flags are fine, as are test tables. - gochecknoinits # How dare you tell me not to use inits. - gocognit # boo cyclomatic complexity - gocyclo # boo cyclomatic complexity - godox # TODOs are fine - golint # deprecated - gomnd # magic numbers in test tables are fine actually - gosec # run independently - ifshort # buggy, false positives - interfacer # deprecated - lll # go says long lines are ok, and this is trivially automatable - maligned # deprecated - musttag # don't agree with the premise - nakedret # weird thing to report on - nestif # cognitive complexity - nlreturn # Not a fan of this one, looks messy - nolintlint # broken on gocritic - paralleltest # i had a good reason for this - perfsprint - testpackage # need to test internal methods - unparam # too noisy - whitespace # broken by goyacc - wrapcheck # not sure this is necessary - wsl # wsl doesn't explain any of its recommendations issues: # Show everything. 
max-issues-per-linter: 0 max-same-issues: 0 exclude-use-default: true exclude: # `gofix` should fix this if it really cared - 'composite literal uses unkeyed fields' # I like common patterns of shadowing: ctx and err - 'declaration of "ctx" shadows declaration' - 'declaration of "err" shadows declaration' # goyacc generated error in three locations - 'this value of `mtailDollar.* is never used' # Incorrectly reports undeclared in same package - "undeclared name:" # Disagree with capitalisation of identifier names - "ST1003:" mtail-3.0.0~rc54+git0ff5/.goreleaser.yml000066400000000000000000000011141460063571700177130ustar00rootroot00000000000000before: hooks: - go mod download builds: - id: mtail main: ./cmd/mtail/main.go binary: mtail env: - CGO_ENABLED=0 goos: - linux - windows - darwin ldflags: - -X main.Branch={{.Branch}} - -X main.Version={{.Version}} - -X main.Revision={{.Commit}} gcflags: # I love errors. - -e checksum: name_template: 'checksums.txt' snapshot: name_template: "{{ .Tag }}-next" changelog: filters: exclude: - '^docs:' - '^test:' - '^Merge' release: github: name_template: v{{.Version}} mtail-3.0.0~rc54+git0ff5/CODE_OF_CONDUCT.md000066400000000000000000000066201460063571700175700ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting Paraphrasing [@pinksoulstudios](https://www.pinksoulstudios.com/shop/we-believe-reclaimed-wood-sign): >In this project we believe >Black Lives Matter >Women's Rights are Human Rights >No human is illegal >Science is real >Love is love >KINDNESS IS EVERYTHING ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at jaq@google.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ mtail-3.0.0~rc54+git0ff5/CONTRIBUTING.md000066400000000000000000000034031460063571700172160ustar00rootroot00000000000000Want to contribute? Great! First, read this page (including the small print at the end). ### Before you contribute Before we can use your code, you must sign the [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1) (CLA), which you can do online. The CLA is necessary mainly because you own the copyright to your changes, even after your contribution becomes part of our codebase, so we need your permission to use and distribute your code. We also need to be sure of various other things—for instance that you'll tell us if you know that your code infringes on other people's patents. You don't have to sign the CLA until after you've submitted your code for review and a member has approved it, but you must do it before we can put your code into our codebase. 
Before you start working on a larger contribution, you should get in touch with us first through the issue tracker with your idea so that we can help out and possibly guide you. Coordinating up front makes it much easier to avoid frustration later on. ### Code reviews All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Please read the [style guide](docs/style.md) for tips on the project coding guidelines. ### Response Time This repository is maintained as a best effort service. Response times to issues and PRs may vary with the availability of the maintainers. We appreciate your patience. PRs with unit tests will be merged promptly. All other requests (issues and PRs) may take longer to be responded to. ### The small print Contributions made by corporations are covered by a different agreement than the one above, the Software Grant and Corporate Contributor License Agreement. mtail-3.0.0~rc54+git0ff5/Dockerfile000066400000000000000000000023161460063571700167610ustar00rootroot00000000000000FROM golang:alpine AS builder RUN apk add --update git make WORKDIR /go/src/github.com/google/mtail COPY . 
/go/src/github.com/google/mtail RUN make depclean && make install_deps && PREFIX=/go make STATIC=y -B install FROM scratch COPY --from=builder /go/bin/mtail /usr/bin/mtail ENTRYPOINT ["/usr/bin/mtail"] EXPOSE 3903 WORKDIR /tmp ARG version=0.0.0-local ARG build_date=unknown ARG commit_hash=unknown ARG vcs_url=unknown ARG vcs_branch=unknown LABEL org.opencontainers.image.ref.name="google/mtail" \ org.opencontainers.image.vendor="Google" \ org.opencontainers.image.title="mtail" \ org.opencontainers.image.description="extract internal monitoring data from application logs for collection in a timeseries database" \ org.opencontainers.image.authors="Jamie Wilkinson (@jaqx0r)" \ org.opencontainers.image.licenses="Apache-2.0" \ org.opencontainers.image.version=$version \ org.opencontainers.image.revision=$commit_hash \ org.opencontainers.image.source=$vcs_url \ org.opencontainers.image.documentation="https://google.github.io/mtail/" \ org.opencontainers.image.created=$build_date \ org.opencontainers.image.url="https://github.com/google/mtail" mtail-3.0.0~rc54+git0ff5/ISSUE_TEMPLATE.md000066400000000000000000000012051460063571700174700ustar00rootroot00000000000000Thanks for discovering a problem in `mtail`! When reporting bugs in mtail behaviour, please be as detailed as possible; describe the problem, what you wanted to have happen, what you observed instead. If your problem is with the way an `mtail` program is behaving, please attach or include inline any mtail programs that demonstrate the bug, any log files that mtail was processing, and the observed output. If your problem is with `mtail`, please include the commandline you started it with, and the INFO log. See also [Reporting a problem](https://github.com/google/mtail/blob/main/docs/Troubleshooting.md#reporting-a-problem). Thanks! 
mtail-3.0.0~rc54+git0ff5/LICENSE000066400000000000000000000261361460063571700160020ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mtail-3.0.0~rc54+git0ff5/Makefile000066400000000000000000000232211460063571700164250ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # This file is available under the Apache license. export GO111MODULE ?= on # Build these. TARGETS = mtail mgen mdot mfmt GO_TEST_FLAGS ?= BENCH_COUNT ?= 1 BASE_REF ?= main HEAD_REF ?= $(shell git symbolic-ref HEAD -q --short) BASE_REF := $(subst /,-,$(BASE_REF)) HEAD_REF := $(subst /,-,$(HEAD_REF)) all: $(TARGETS) # Install them here PREFIX ?= usr/local # Place to store dependencies. DEPDIR = .d # Can't use a dependency rule here. $(shell install -d $(DEPDIR)) # This rule finds all non-standard-library dependencies of each target and emits them to a makefile include. 
# Thanks mrtazz: https://unwiredcouch.com/2016/05/31/go-make.html MAKEDEPEND = echo "$@: $$(go list -f '{{if not .Standard}}{{.Dir}}{{end}}' $$(go list -f '{{ join .Deps "\n" }}' $<) | sed -e 's@$$@/*.go@' | tr "\n" " " )" > $(DEPDIR)/$@.d # This rule allows the dependencies to not exist yet, for the first run. $(DEPDIR)/%.d: ; .PRECIOUS: $(DEPDIR)/%.d # This instruction loads any dependency includes for our targets. -include $(patsubst %,$(DEPDIR)/%.d,$(TARGETS)) # Set the timeout for tests. test_timeout := 20s testrace_timeout := 4m ifeq ($(CI),true) test_timeout := 100s testrace_timeout := 20m endif # Let the benchmarks run for a long time. The timeout is for the total time of # all benchmarks, not per bench. benchtimeout := 120m GOFILES=$(shell find . -name '*.go' -a ! -name '*_test.go') GOTESTFILES=$(shell find . -name '*_test.go') GOGENFILES=internal/runtime/compiler/parser/parser.go\ internal/mtail/logo.ico.go CLEANFILES+=\ internal/runtime/compiler/parser/parser.go\ internal/runtime/compiler/parser/y.output\ internal/mtail/logo.ico.go\ internal/mtail/logo.ico\ # A place to install tool dependencies. 
GOBIN ?= $(firstword $(subst :, ,$(shell go env GOPATH)))/bin export PATH := $(GOBIN):$(PATH) TOGO = $(GOBIN)/togo $(TOGO): go install github.com/flazz/togo@latest GOYACC = $(GOBIN)/goyacc $(GOYACC): go install golang.org/x/tools/cmd/goyacc@latest GOFUZZBUILD = $(GOBIN)/go114-fuzz-build $(GOFUZZBUILD): go install github.com/mdempsky/go114-fuzz-build@latest GOFUZZ = $(GOBIN)/go-fuzz $(GOFUZZ): go install github.com/dvyukov/go-fuzz/go-fuzz@latest GOTESTSUM = $(GOBIN)/gotestsum $(GOTESTSUM): go install gotest.tools/gotestsum@latest BENCHSTAT = $(GOBIN)/benchstat $(BENCHSTAT): go install golang.org/x/perf/cmd/benchstat@latest GOSEC = $(GOBIN)/gosec $(GOSEC): go install github.com/securego/gosec/v2/cmd/gosec@latest .PHONY: clean covclean crossclean depclean veryclean clean: covclean crossclean rm -f $(CLEANFILES) covclean: rm -f *.coverprofile coverage.html $(COVERPROFILES) crossclean: rm -rf build depclean: rm -f .d/* .*dep-stamp veryclean: clean depclean # This version should match the one in .github/workflows/golangci-lint.yml GOLANGCILINT_VERSION=$(shell grep 'version: v' .github/workflows/golangci-lint.yml | cut -f2 -d: | tr -d ' ') # lint .PHONY: lint lint: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) mkdir -p $(HOME)/.cache/golangci-lint/$(GOLANGCILINT_VERSION) podman run --rm -v $(shell pwd):/app -v $(HOME)/.cache/golangci-lint/$(GOLANGCILINT_VERSION):/root/.cache -w /app docker.io/golangci/golangci-lint:$(GOLANGCILINT_VERSION) golangci-lint run -v branch := $(shell git rev-parse --abbrev-ref HEAD) version := $(shell git describe --tags --always --dirty) revision := $(shell git rev-parse HEAD) release := $(shell git describe --tags --always --dirty | cut -d"-" -f 1,2) GO_LDFLAGS := -X main.Branch=${branch} -X main.Version=${version} -X main.Revision=${revision} ifeq ($(STATIC),y) # -s Omit symbol table and debug info # -w Omit DWARF symbol table # -extldflags -static and CGO_ENABLED=0 to make pure static GO_LDFLAGS += -w -s -extldflags "-static" export CGO_ENABLED=0 
endif # Show all errors, not just limit to 10. GO_GCFLAGS = -e # Very specific static pattern rule to only do this for commandline targets. # Each commandline must be in a 'main.go' in their respective directory. The # MAKEDEPEND rule generates a list of dependencies for the next make run -- the # first time the rule executes because the target doesn't exist, subsequent # runs can read the dependencies and update iff they change. $(TARGETS): %: cmd/%/main.go $(DEPDIR)/%.d | print-version .dep-stamp $(MAKEDEPEND) go build -gcflags "$(GO_GCFLAGS)" -ldflags "$(GO_LDFLAGS)" -o $@ $< internal/runtime/compiler/parser/parser.go: internal/runtime/compiler/parser/parser.y | $(GOYACC) go generate -x ./$(@D) internal/mtail/logo.ico: logo.png /usr/bin/convert $< -define icon:auto-resize=64,48,32,16 $@ || touch $@ internal/mtail/logo.ico.go: | internal/mtail/logo.ico $(TOGO) togo -pkg mtail -name logoFavicon -input internal/mtail/logo.ico ### ## Emit the current toolchain version at the start of every goal, if that goal depends on this. # .PHONY: print-version print-version: which go go version go env ### ## Install rules # # Would subst all $(TARGETS) except other binaries are just for development. INSTALLED_TARGETS = $(PREFIX)/bin/mtail .PHONY: install install: $(INSTALLED_TARGETS) $(PREFIX)/bin/%: % install -d $(@D) install -m 755 $< $@ .PHONY: test check check test: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -timeout ${test_timeout} ./... .PHONY: testrace testrace: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -timeout ${testrace_timeout} -race -v ./... .PHONY: smoke smoke: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -timeout 1s -test.short ./... 
.PHONY: regtest regtest: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp go test $(GO_TEST_FLAGS) -gcflags "$(GO_GCFLAGS)" -v -timeout=${testrace_timeout} ./... TESTRESULTS ?= test-results TESTCOVERPROFILE ?= out.coverprofile .PHONY: junit-regtest junit-regtest: $(TESTRESULTS)/test-output.xml $(TESTCOVERPROFILE) $(TESTRESULTS)/test-output.xml $(TESTCOVERPROFILE): $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version .dep-stamp $(GOTESTSUM) mkdir -p $(TESTRESULTS) gotestsum --debug --junitfile $(TESTRESULTS)/test-output.xml -- $(GO_TEST_FLAGS) -p=1 -cpu=1,2,4 -race -count=1 -parallel=1 -coverprofile=$(TESTCOVERPROFILE) --covermode=atomic -v -timeout=30m -gcflags "$(GO_GCFLAGS)" ./... .PHONY: bench bench: $(TESTRESULTS)/benchmark-results-$(HEAD_REF).txt $(TESTRESULTS)/benchmark-results-$(HEAD_REF).txt: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version .dep-stamp mkdir -p $(TESTRESULTS) go test -cpu 1,2,4 -bench=. -count=$(BENCH_COUNT) -timeout=${benchtimeout} -run=^a ./... | tee $@ .PHONY: benchstat benchstat: $(TESTRESULTS)/benchstat.txt $(TESTRESULTS)/benchstat.txt: $(TESTRESULTS)/benchmark-results-$(HEAD_REF).txt | print-version $(BENCHSTAT) (test -s $(TESTRESULTS)/benchmark-results-$(BASE_REF).txt && benchstat -sort=-delta $(TESTRESULTS)/benchmark-results-$(BASE_REF).txt $< || benchstat $<) | tee $@ PACKAGES := $(shell go list -f '{{.Dir}}' ./... | grep -v /vendor/ | grep -v /cmd/ | sed -e "s@$$(pwd)@.@") .PHONY: testall testall: testrace fuzz-regtest bench .PHONY: checkall checkall: check all fuzz-targets ## make u a container .PHONY: container container: Dockerfile docker build -t mtail \ --build-arg version=${version} \ --build-arg commit_hash=${revision} \ --build-arg build_date=$(shell date -Iseconds --utc) \ . ## Run gosec .PHONY: gosec gosec: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | $(GOSEC) $(GOSEC) -tags fuzz ./... 
### ## Fuzz testing # # These flags set compatibility with OSS-Fuzz CXX = clang CXXFLAGS ?= -fsanitize=fuzzer,address LIB_FUZZING_ENGINE ?= OUT ?= . .PHONY: fuzz-targets fuzz-targets: $(OUT)/vm-fuzzer $(OUT)/vm-fuzzer: $(GOFILES) | $(GOFUZZBUILD) go114-fuzz-build -o fuzzer.a ./internal/runtime $(CXX) $(CXXFLAGS) $(LIB_FUZZING_ENGINE) fuzzer.a -lpthread -o $(OUT)/vm-fuzzer $(OUT)/vm-fuzzer.dict: mgen ./mgen --dictionary | sort > $@ $(OUT)/vm-fuzzer_seed_corpus.zip: $(wildcard examples/*.mtail) $(wildcard internal/runtime/fuzz/*.mtail) zip -j $@ $^ FUZZER_FLAGS=-rss_limit_mb=4096 -timeout=60s .INTERMEDIATE: SEED/* SEED: $(OUT)/vm-fuzzer_seed_corpus.zip mkdir -p SEED unzip -o -d SEED $< .PHONY: fuzz fuzz: SEED $(OUT)/vm-fuzzer $(OUT)/vm-fuzzer.dict mkdir -p CORPUS $(OUT)/vm-fuzzer $(FUZZER_FLAGS) -dict=$(OUT)/vm-fuzzer.dict CORPUS SEED .PHONY: fuzz-regtest fuzz-regtest: $(OUT)/vm-fuzzer SEED $(OUT)/vm-fuzzer $(FUZZER_FLAGS) $(shell ls SEED/*.mtail) CRASH ?= .PHONY: fuzz-repro fuzz-repro: $(OUT)/vm-fuzzer mtail $(OUT)/vm-fuzzer $(FUZZER_FLAGS) $(CRASH) || true # Want to continue ./mtail --logtostderr --vmodule=runtime=2,lexer=2,parser=2,checker=2,types=2,codegen=2 --mtailDebug=3 --dump_ast --dump_ast_types --dump_bytecode --compile_only --progs $(CRASH) # make fuzz-min CRASH=example crash .PHONY: fuzz-min fuzz-min: $(OUT)/vm-fuzzer $(OUT)/vm-fuzzer.dict $(OUT)/vm-fuzzer -dict=$(OUT)/vm-fuzzer.dict -minimize_crash=1 -runs=10000 $(CRASH) ### ## dependency section # .PHONY: install_deps install_deps: .dep-stamp .dep-stamp: | print-version $(GOGENFILES) go mod download touch $@ ### ## Coverage # .PHONY: coverage covrep coverage: coverprofile coverprofile: $(GOFILES) $(GOGENFILES) $(GOTESTFILES) | print-version $(LOGO_GO) .dep-stamp go test -v -covermode=count -coverprofile=$@ -timeout=${timeout} $(PACKAGES) coverage.html: coverprofile | print-version go tool cover -html=$< -o $@ covrep: coverage.html xdg-open $< ### ## Github issue tracking # GHI = $(GOBIN)/ghi $(GHI): go 
install github.com/markbates/ghi@latest issue-fetch: | $(GHI) ghi fetch issue-list: | $(GHI) ghi list ISSUE?=1 issue-show: | $(GHI) ghi show $(ISSUE) mtail-3.0.0~rc54+git0ff5/README.md000066400000000000000000000110611460063571700162430ustar00rootroot00000000000000mtail # mtail - extract internal monitoring data from application logs for collection into a timeseries database [![ci](https://github.com/google/mtail/workflows/CI/badge.svg)](https://github.com/google/mtail/actions?query=workflow%3ACI+branch%3main) [![GoDoc](https://godoc.org/github.com/google/mtail?status.png)](http://godoc.org/github.com/google/mtail) [![Go Report Card](https://goreportcard.com/badge/github.com/google/mtail)](https://goreportcard.com/report/github.com/google/mtail) [![OSS-Fuzz](https://oss-fuzz-build-logs.storage.googleapis.com/badges/mtail.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:mtail) [![codecov](https://codecov.io/gh/google/mtail/branch/main/graph/badge.svg)](https://codecov.io/gh/google/mtail) `mtail` is a tool for extracting metrics from application logs to be exported into a timeseries database or timeseries calculator for alerting and dashboarding. It fills a monitoring niche by being the glue between applications that do not export their own internal state (other than via logs) and existing monitoring systems, such that system operators do not need to patch those applications to instrument them or writing custom extraction code for every such application. The extraction is controlled by [mtail programs](docs/Programming-Guide.md) which define patterns and actions: # simple line counter counter lines_total /$/ { lines_total++ } Metrics are exported for scraping by a collector as JSON or Prometheus format over HTTP, or can be periodically sent to a collectd, StatsD, or Graphite collector socket. Read the [programming guide](docs/Programming-Guide.md) if you want to learn how to write mtail programs. 
Ask general questions on the users mailing list: https://groups.google.com/g/mtail-users ## Installation There are various ways of installing **mtail**. ### Precompiled binaries Precompiled binaries for released versions are available in the [Releases page](https://github.com/google/mtail/releases) on Github. Using the latest production release binary is the recommended way of installing **mtail**. Windows, OSX and Linux binaries are available. ### Building from source The simplest way to get `mtail` is to `go get` it directly. `go get github.com/google/mtail/cmd/mtail` This assumes you have a working Go environment with a recent Go version. Usually mtail is tested to work with the last two minor versions (e.g. Go 1.12 and Go 1.11). If you want to fetch everything, you need to turn on Go Modules to succeed because of the way Go Modules have changed the way go get treats source trees with no Go code at the top level. ``` GO111MODULE=on go get -u github.com/google/mtail cd $GOPATH/src/github.com/google/mtail make install ``` If you develop the compiler you will need some additional tools like `goyacc` to be able to rebuild the parser. See the [Build instructions](docs/Building.md) for more details. A `Dockerfile` is included in this repository for local development as an alternative to installing Go in your environment, and takes care of all the build dependency installation, if you don't care for that. ## Deployment `mtail` works best when paired with a timeseries-based calculator and alerting tool, like [Prometheus](http://prometheus.io). > So what you do is you take the metrics from the log files and > you bring them down to the monitoring system? [It deals with the instrumentation so the engineers don't have to!](http://www.imdb.com/title/tt0151804/quotes/?item=qt0386890) It has the extraction skills! It is good at dealing with log files!! 
## Read More Full documentation at http://google.github.io/mtail/ Read more about writing `mtail` programs: * [Programming Guide](docs/Programming-Guide.md) * [Language Reference](docs/Language.md) * [Metrics](docs/Metrics.md) * [Managing internal state](docs/state.md) * [Testing your programs](docs/Testing.md) Read more about hacking on `mtail` * [Building from source](docs/Building.md) * [Contributing](CONTRIBUTING.md) * [Style](docs/style.md) Read more about deploying `mtail` and your programs in a monitoring environment * [Deploying](docs/Deploying.md) * [Interoperability](docs/Interoperability.md) with other systems * [Troubleshooting](docs/Troubleshooting.md) * [FAQ](docs/faq.md) After that, if you have any questions, please email (and optionally join) the mailing list: https://groups.google.com/forum/#!forum/mtail-users or [file a new issue](https://github.com/google/mtail/issues/new). mtail-3.0.0~rc54+git0ff5/TODO000066400000000000000000000162221460063571700154600ustar00rootroot00000000000000Implement a standard library, search path: Means we can provide standard syslog decorator. Requires figuring out where we keep standard library definitions, and what the syntax for import looks like. Can't put trailing newlines in cases in parser test, requires changes to expr stmt parse tree/ast testing? - expected AST as result from parse/check instead of merely getting a result. A similar version of this is in codegen_test.go:TestCodeGenFromAST A mapping between progs and logs to reduce wasted processing- issue #35 Means we don't fan out log lines to every VM if reading from multiple sources. Requires figuring out how to provide this configuration. Special syntax in a program? Not very flexible. A real config file? Been trying to avoid that. Commandline flag? Seems difficult to maintain. 
bytecode like [{push 1} {push 0} {cmp 1} {jm 6} {push 0} {jmp 7} {push 1} {jnm 13} {setmatched false} {mload 0} {dload 0} {inc } {setmatched true}] can be expressed as [{push 1} {push 0} {cmp 1} {jm 9} {setmatched false} {mload 0} {dload 0} {inc } {setmatched true}] but jnm 13 is from the condExpr and the previous is from a comparison binary expr; an optimizer is needed to collapse the bytecode to undersand that cmp, jm, push, jump, push, jnm in sequence like so is the same as a cmp, jm and we need to worry about the jump table too reversed casts: s2i,i2s pairs as well count stack size and preallocate stack -> counts of push/pop per instruction -> test to keep p/p counts updated : seems like a lot of work for not much return # Won't do X Use capture group references to feed back to declaring regular expression, X noting unused caprefs, X possibly flipping back to noncapturing (and renumbering the caprefs?) X -> unlikely to implement, probably won't impact regexp speed When using a const by itself as a match condition, then we get warnings about the capture group names not existing. const A /(?.*)/ A { x[$a]++ } ... => $a not defined in scope. Can't define string constants, like const STRPTIME_FORMAT "Jan _2" Multline const can't startwith a newline, must be const FOO // +\n..., yuo might want to do this for long first fragments, e.e.g const FOO\n /somethign/ Can't chain two matches in same expresison like getfilename() =~ 'name' && EXPR_RE because $0 is redefined This seems like somethign you might weant to do, and we are unlikely to want to use $0, but this is also true for the first capture group. Do we standardise on "the last pattern match wins"? Can't set the timestamp when processing one log line and reuse it in another; must use the caching state metric pattern, hidden gauge time. (I think this shows up in the original mysql example.) Could one preserve the last parsed timestamp in VM state between runs? 
How does this interact with programs that never strptime because they have no timestamp in the log? #pragma notimestamp? Get a list of non-stdlib deps go list -f "{{if not .Standard}}{{.ImportPath}}{{end}}" $(go list -f '{{join .Deps "\n"}}' ./...) This is just a neat thing to remember for Go. Programs may not use mtail_ as a metric prefix. Should just document this. Theory: Implicitly cast Int shouldn't get the S2i conversion applied to them. Do we need to name Implicit Int separate from Int and then not create s2i or other conversions for implicits. (and we need to keep the runtime conversions?) if you comment out the MATCH_NETWORK clase in dhcpd.mtail it gets 30x faster... because the regexp no longer backtracks... why... hints are that we exeute in an NFA regular expression becayuse it's unanchored. Avoid byte to string conversions in the tailer and vm FindStringSubmatch > https://dave.cheney.net/high-performance-go-workshop/dotgo-paris.html#strings_and_bytes . Pro: speed. Con, not sure how we manage utf-8 in decode.go? Use FindSubmatchIndex to avoid copies? Not sure if there's a performance win here, but we want to avoid memcpy if we can. Why is strings.Builder slower than bytes.Buffer when the latter's docstring recommends the former? ci: rerun failed tests to see if they're flaky. Find out if OpenTelemetry is better than OpenCensus when creating no-op trace spans. Test that when path/* is the logpathpattern that we handle log rotation, e.g. log -> log.1 = how can this work, we can't tell the difference between log.1 being a rotation or a new log. This could work if we can have a tailer-level registry of filenames currently with a goroutine. But we don't know the name of the new file when filestream creates a new goroutine for the replacement; fd.Stat() doesn't return the new name of the file. - Workaround: warn when '*' is the last of a glob pattern. VM profiler, when enabled, times instructions so user gets feedback on where their program is slow. 
Can we create a linter that checks for code patterns like 'path.Join' and warns against them? Can govet be made to do this? Detect when a regular expression compiled doesn't have a onepass program, and report a compile warning. we can't do this today with the regexp API, because it's not an exported field, and the onepass compilation step is not an exported function. IF we can do this, we can warn the user that their regular expression has ambiguity and will backtrack. See MATCH_NETWORK above. Do we have a precision problem that shold be solved by using math/big for literals in the AST. Extra credit: find out if the vm runtime should use big internally as well? regular expression matching is expensive. prefilter on match prefix. for extra credit, filter on all substrings of the expressions, using aho-corasick. once the vm refactoring has completed, move the VM execute method into per-opcode functions, and use the same state machine function as in lexer.NextToken() to simulate threaded code as we don't get tail recursion in Go. The plan is to see if execution speed is same or better -- expect moving to function calls to be slower unless inlined, but gain in readability and reuse. refactor vm further to replace stack with registers, we need typed registers to remove the pop runtime type cast. new opcodes to do migration from stack to register based ops required Once the tailer can read from sockets, I'll move it out of `internal/`. Pass a Logger as an option to tailer and vm. StatusHTML in vm reads expvars; can we not do that? Move from expvar to OpenSomething metrics. Should the exporter move into the metric package? Should the waker move into the tailer package? Benchmarks on GHA are too variable. Compute benchmarks old and new in same instance, per guidelines from "Software Microbenchmarking in the Cloud. How Bad is it Really?" Laaber et al. Move loc and useCurrentYear out of VM and into Runtime env. Move const folding into parser during AST build. 
Const-fold identity functions. Both tailer and logstream probably don't need to do URL parsing. Tailer could do it on the log path patterns before filling up the poll patterns list. Non-globs don't need repolling, and any with a scheme can already be constructed by TailPattern. Trim unused string and regexp constants, as .e.g /s/ && 1 gets optimised away. Collapse duplicate string and regexp constants. mtail-3.0.0~rc54+git0ff5/benchmark_results.csv000066400000000000000000000021021460063571700212100ustar00rootroot000000000000001350190388,1,4,examples/sftp.em,500,118000,3.165639s,6.331278,236,37.27525469581339,26.82744915254237 1350190385,1,4,examples/rsyncd.em,100,23500,1.79889s,17.9889,235,13.063611449282613,76.54851063829787 1350190383,1,4,examples/linecount.em,50000,50000,2.356123s,0.047122,1,21.221302962536335,47.12246 1359593792,1,4,examples/dhcpd.em,1,50000,8.55385s,8553.85,50000,5.845321112715327,171.077 1359593784,1,4,examples/sftp.em,200,47200,1.516004s,7.58002,236,31.13448249476914,32.11872881355932 1359593782,1,4,examples/rsyncd.em,100,23500,2.167435s,21.67435,235,10.842308996578904,92.23127659574467 1359593779,1,4,examples/linecount.em,50000,50000,2.695952s,0.053919,1,18.546324266900893,53.91904 1378745369,1,4,examples/dhcpd.em,1,50000,8.342115s,8342.115,50000,5.993683855952598,166.8423 1378745360,1,4,examples/sftp.em,500,118000,3.574926s,7.149852,236,33.00767624280894,30.295983050847457 1378745356,1,4,examples/rsyncd.em,100,23500,1.769277s,17.69277,235,13.28226162438103,75.2883829787234 1378745354,1,4,examples/linecount.em,50000,50000,2.569769s,0.051395,1,19.457001777202542,51.39538 mtail-3.0.0~rc54+git0ff5/cmd/000077500000000000000000000000001460063571700155305ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/cmd/mdot/000077500000000000000000000000001460063571700164735ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/cmd/mdot/main.go000066400000000000000000000117341460063571700177540ustar00rootroot00000000000000// Copyright 2018 Google Inc. 
All Rights Reserved. // This file is available under the Apache license. /* Command mdot turns an mtail program AST into a graphviz graph on standard output. To use, run it like (assuming your shell is in the same directory as this file) go run github.com/google/mtail/cmd/mdot --prog ../../examples/dhcpd.mtail | xdot - or go run github.com/google/mtail/cmd/mdot --prog ../../examples/dhcpd.mtail --http_port 8080 to view the dot output visit http://localhost:8080 You'll need the graphviz `dot' command installed. */ package main import ( "flag" "fmt" "io" "net/http" "os" "os/exec" "path/filepath" "strings" "github.com/golang/glog" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/checker" "github.com/google/mtail/internal/runtime/compiler/parser" ) var ( prog = flag.String("prog", "", "Name of the program source to parse.") httpPort = flag.String("http_port", "", "Port number to run HTTP server on.") ) type dotter struct { w io.Writer id int parentID []int // id of the parent node } func (d *dotter) nextID() int { d.id++ return d.id } func (d *dotter) emitNode(id int, node ast.Node) { attrs := map[string]string{ "label": strings.Split(fmt.Sprintf("%T", node), ".")[1] + "\n", "shape": "box", "style": "filled", "tooltip": node.Type().String(), } switch n := node.(type) { case *ast.VarDecl, *ast.DecoDecl: attrs["fillcolor"] = "lightgreen" switch n := n.(type) { case *ast.VarDecl: attrs["label"] += fmt.Sprintf("%s %s", n.Kind, n.Name) case *ast.DecoDecl: attrs["label"] += n.Name } case *ast.IDTerm, *ast.CaprefTerm: attrs["fillcolor"] = "pink" attrs["shape"] = "ellipse" switch n := n.(type) { case *ast.IDTerm: attrs["label"] += n.Name case *ast.CaprefTerm: attrs["label"] += fmt.Sprintf("$%s", n.Name) } case *ast.IntLit, *ast.FloatLit, *ast.PatternLit, *ast.StringLit: attrs["fillcolor"] = "pink" attrs["shape"] = "ellipse" switch n := n.(type) { case *ast.IntLit: attrs["label"] 
+= fmt.Sprintf("%d", n.I) case *ast.FloatLit: attrs["label"] += fmt.Sprintf("%g", n.F) case *ast.PatternLit: attrs["label"] += fmt.Sprintf("/%s/", n.Pattern) case *ast.StringLit: attrs["label"] += n.Text } case *ast.IndexedExpr, *ast.BinaryExpr, *ast.UnaryExpr, *ast.PatternExpr, *ast.BuiltinExpr: attrs["fillcolor"] = "lightblue" switch n := n.(type) { case *ast.BinaryExpr: attrs["label"] += parser.Kind(n.Op).String() case *ast.UnaryExpr: attrs["label"] += parser.Kind(n.Op).String() case *ast.BuiltinExpr: attrs["label"] += n.Name } } pos := node.Pos() if pos != nil { attrs["xlabel"] = pos.String() } fmt.Fprintf(d.w, "n%d [", id) for k, v := range attrs { fmt.Fprintf(d.w, "%s=\"%s\" ", k, v) } fmt.Fprintf(d.w, "]\n") } func (d *dotter) emitLine(src, dst int) { fmt.Fprintf(d.w, "n%d -> n%d\n", src, dst) } func (d *dotter) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) { id := d.nextID() d.emitNode(id, node) if len(d.parentID) > 0 { parentID := d.parentID[len(d.parentID)-1] d.emitLine(parentID, id) } d.parentID = append(d.parentID, id) return d, node } func (d *dotter) VisitAfter(node ast.Node) ast.Node { d.parentID = d.parentID[:len(d.parentID)-1] return node } func makeDot(name string, w io.Writer) error { f, err := os.Open(filepath.Clean(name)) if err != nil { return err } n, err := parser.Parse(name, f) if err != nil { return err } n, err = checker.Check(n, 0, 0) if err != nil { return err } fmt.Fprintf(w, "digraph \"%s\" {\n", *prog) dot := &dotter{w: w} ast.Walk(dot, n) fmt.Fprintf(w, "}\n") return nil } func main() { flag.Parse() if *prog == "" { glog.Exitf("No -prog given") } if *httpPort == "" { glog.Exit(makeDot(*prog, os.Stdout)) } http.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { dot := exec.Command("dot", "-Tsvg") in, err := dot.StdinPipe() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } out, err := dot.StdoutPipe() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } 
err = dot.Start() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } err = makeDot(*prog, in) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } err = in.Close() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Add("Content-type", "image/svg+xml") w.WriteHeader(http.StatusOK) _, err = io.Copy(w, out) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } err = dot.Wait() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } }) http.HandleFunc("/favicon.ico", mtail.FaviconHandler) glog.Info(http.ListenAndServe(fmt.Sprintf(":%s", *httpPort), nil)) } mtail-3.0.0~rc54+git0ff5/cmd/mfmt/000077500000000000000000000000001460063571700164735ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/cmd/mfmt/main.go000066400000000000000000000021341460063571700177460ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // This file is available under the Apache license. /* Command mfmt formats mtail programs. 
*/ package main import ( "flag" "fmt" "io" "os" "github.com/golang/glog" "github.com/google/mtail/internal/runtime/compiler/checker" "github.com/google/mtail/internal/runtime/compiler/parser" ) var ( prog = flag.String("prog", "", "Name of the mtail program text to format.") write = flag.Bool("write", false, "Write results to original file.") ) func main() { flag.Parse() if *prog == "" { glog.Exitf("No -prog given") } f, err := os.OpenFile(*prog, os.O_RDWR, 0) if err != nil { glog.Exit(err) } ast, err := parser.Parse(*prog, f) if err != nil { glog.Exit(err) } ast, err = checker.Check(ast, 0, 0) if err != nil { glog.Exit(err) } up := parser.Unparser{} out := up.Unparse(ast) if *write { if err := f.Truncate(0); err != nil { glog.Exit(err) } if _, err := f.Seek(0, io.SeekStart); err != nil { glog.Exit(err) } if _, err := f.WriteString(out); err != nil { glog.Exit(err) } } else { fmt.Print(out) } } mtail-3.0.0~rc54+git0ff5/cmd/mgen/000077500000000000000000000000001460063571700164565ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/cmd/mgen/main.go000066400000000000000000000112411460063571700177300ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Command mgen generates mtail programs for fuzz testing by following a simple grammar. 
package main import ( "flag" "fmt" "math/rand" "github.com/google/mtail/internal/runtime/compiler/parser" ) var ( randSeed = flag.Int64("rand_seed", 1, "Seed to use for math.rand.") minIterations = flag.Int64("min_iterations", 5000, "Minimum number of iterations before stopping program generation.") dictionary = flag.Bool("dictionary", false, "Generate a fuzz dictionary to stdout only.") ) type node struct { alts [][]string term string } var table = map[string]node{ "start": {[][]string{{"stmt_list"}}, ""}, "stmt_list": {[][]string{{""}, {"stmt_list", "stmt"}}, ""}, "stmt": {[][]string{ {"cond", "{", "stmt_list", "}"}, {"expr"}, {"decl"}, {"def_spec"}, {"deco_spec"}, {"next"}, {"const", "ID", "pattern_expr"}, }, ""}, "expr": {[][]string{{"assign_expr"}}, ""}, "assign_expr": {[][]string{{"rel_expr"}, {"unary_expr", "=", "rel_expr"}, {"unary_expr", "+=", "rel_expr"}}, ""}, "rel_expr": {[][]string{{"additive_expr"}, {"additive_expr", "relop", "additive_expr"}}, ""}, "relop": {[][]string{{"<"}, {">"}, {"<="}, {">="}, {"=="}, {"!="}}, ""}, "additive_expr": {[][]string{{"unary_expr"}, {"additive_expr", "+", "unary_expr"}, {"additive_expr", "-", "unary_expr"}}, ""}, "unary_expr": {[][]string{{"postfix_expr"}, {"BUILTIN", "(", "arg_expr_list", ")"}}, ""}, "arg_expr_list": {[][]string{{""}, {"assign_expr"}, {"arg_expr_list", ",", "assign_expr"}}, ""}, "postfix_expr": {[][]string{{"primary_expr"}, {"postfix_expr", "++"}, {"postfix_expr", "[", "expr", "]"}}, ""}, "primary_expr": {[][]string{{"ID"}, {"CAPREF"}, {"STRING"}, {"(", "expr", ")"}, {"NUMERIC"}}, ""}, "cond": {[][]string{{"pattern_expr"}, {"rel_expr"}}, ""}, "pattern_expr": {[][]string{{"REGEX"}, {"pattern_expr", "+", "REGEX"}, {"pattern_expr", "+", "ID"}}, ""}, "decl": {[][]string{{"hide_spec", "type_spec", "declarator"}}, ""}, "hide_spec": {[][]string{{""}, {"hidden"}}, ""}, "declarator": {[][]string{{"declarator", "by_spec"}, {"declarator", "as_spec"}, {"ID"}, {"STRING"}}, ""}, "type_spec": 
{[][]string{{"counter"}, {"gauge"}, {"timer"}, {"text"}, {"histogram"}}, ""}, "by_spec": {[][]string{{"by", "by_expr_list"}}, ""}, "by_expr_list": {[][]string{{"ID"}, {"STRING"}, {"by_expr_list", ",", "ID"}, {"by_expr_list", ",", "STRING"}}, ""}, "as_spec": {[][]string{{"as", "STRING"}}, ""}, "def_spec": {[][]string{{"def", "ID", "{", "stmt_list", "}"}}, ""}, "deco_spec": {[][]string{{"deco", "{", "stmt_list", "}"}}, ""}, "BUILTIN": {[][]string{{"strptime"}, {"timestamp"}, {"len"}, {"tolower"}}, ""}, "CAPREF": {[][]string{}, "$1"}, "REGEX": {[][]string{}, "/foo/"}, "STRING": {[][]string{}, "\"bar\""}, "ID": {[][]string{}, "quux"}, "NUMERIC": {[][]string{}, "37"}, } func emitter(c chan string) { var l int for w := range c { if w == "\n" { fmt.Println() } if w == "" { continue } if l+len(w)+1 >= 80 { fmt.Println() fmt.Print(w) l = len(w) } else { if l != 0 { w = " " + w } l += len(w) fmt.Print(w) } } } func generateProgram() { rando := rand.New(rand.NewSource(*randSeed)) c := make(chan string, 1) go emitter(c) runs := *minIterations // Initial state states := []string{"start"} // While the state stack is not empty for len(states) > 0 && runs > 0 { // Pop the next state state := states[len(states)-1] states = states[:len(states)-1] // fmt.Println("state", state, "states", states) // Look for the state transition if n, ok := table[state]; ok { // If there are state transition alternatives // fmt.Println("n", n) if len(n.alts) > 0 { // Pick a state transition at random a := rando.Intn(len(n.alts)) // fmt.Println("a", a, n.alts[a], len(n.alts[a])) // Push the states picked onto the stack (in reverse order) for i := 0; i < len(n.alts[a]); i++ { // fmt.Println("i", i, n.alts[a][len(n.alts[a])-i-1]) states = append(states, n.alts[a][len(n.alts[a])-i-1]) } // fmt.Println("states", states) } else { // If there is a terminal, emit it // fmt.Println("(term)", state, n.term) c <- n.term } } else { // If the state doesn't exist in the table, treat it as a terminal, and emit it. 
// fmt.Println("(state)", state, state) c <- state } runs-- } c <- "\n" } func generateDictionary() { for _, k := range parser.Dictionary() { fmt.Printf("\"%s\"\n", k) } } func main() { flag.Parse() if *dictionary { generateDictionary() } else { generateProgram() } } mtail-3.0.0~rc54+git0ff5/cmd/mtail/000077500000000000000000000000001460063571700166365ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/cmd/mtail/main.go000066400000000000000000000260061460063571700201150ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package main import ( "context" "flag" "fmt" "os" "os/signal" "runtime" "strings" "sync" "syscall" "time" "github.com/golang/glog" "github.com/google/mtail/internal/exporter" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/waker" "go.opencensus.io/trace" ) type seqStringFlag []string func (f *seqStringFlag) String() string { return fmt.Sprint(*f) } func (f *seqStringFlag) Set(value string) error { for _, v := range strings.Split(value, ",") { *f = append(*f, v) } return nil } var logs seqStringFlag var ( port = flag.String("port", "3903", "HTTP port to listen on.") address = flag.String("address", "", "Host or IP address on which to bind HTTP listener") unixSocket = flag.String("unix_socket", "", "UNIX Socket to listen on") progs = flag.String("progs", "", "Name of the directory containing mtail programs") ignoreRegexPattern = flag.String("ignore_filename_regex_pattern", "", "") version = flag.Bool("version", false, "Print mtail version information.") // Compiler behaviour flags. oneShot = flag.Bool("one_shot", false, "Compile the programs, then read the contents of the provided logs from start until EOF, print the values of the metrics store in the given format and exit. 
This is a debugging flag only, not for production use.") oneShotFormat = flag.String("one_shot_format", "json", "Format to use with -one_shot. This is a debugging flag only, not for production use. Supported formats: json, prometheus.") compileOnly = flag.Bool("compile_only", false, "Compile programs only, do not load the virtual machine.") dumpAst = flag.Bool("dump_ast", false, "Dump AST of programs after parse (to INFO log).") dumpAstTypes = flag.Bool("dump_ast_types", false, "Dump AST of programs with type annotation after typecheck (to INFO log).") dumpBytecode = flag.Bool("dump_bytecode", false, "Dump bytecode of programs (to INFO log).") // VM Runtime behaviour flags. syslogUseCurrentYear = flag.Bool("syslog_use_current_year", true, "Patch yearless timestamps with the present year.") overrideTimezone = flag.String("override_timezone", "", "If set, use the provided timezone in timestamp conversion, instead of UTC.") emitProgLabel = flag.Bool("emit_prog_label", true, "Emit the 'prog' label in variable exports.") emitMetricTimestamp = flag.Bool("emit_metric_timestamp", false, "Emit the recorded timestamp of a metric. If disabled (the default) no explicit timestamp is sent to a collector.") logRuntimeErrors = flag.Bool("vm_logs_runtime_errors", true, "Enables logging of runtime errors to the standard log. Set to false to only have the errors printed to the HTTP console.") // Ops flags. pollInterval = flag.Duration("poll_interval", 250*time.Millisecond, "Set the interval to poll each log file for data; must be positive, or zero to disable polling. With polling mode, only the files found at mtail startup will be polled.") pollLogInterval = flag.Duration("poll_log_interval", 250*time.Millisecond, "Set the interval to find all matched log files for polling; must be positive, or zero to disable polling. 
With polling mode, only the files found at mtail startup will be polled.") expiredMetricGcTickInterval = flag.Duration("expired_metrics_gc_interval", time.Hour, "interval between expired metric garbage collection runs") staleLogGcTickInterval = flag.Duration("stale_log_gc_interval", time.Hour, "interval between stale log garbage collection runs") metricPushInterval = flag.Duration("metric_push_interval", time.Minute, "interval between metric pushes to passive collectors") maxRegexpLength = flag.Int("max_regexp_length", 1024, "The maximum length a mtail regexp expression can have. Excessively long patterns are likely to cause compilation and runtime performance problems.") maxRecursionDepth = flag.Int("max_recursion_depth", 100, "The maximum length a mtail statement can be, as measured by parsed tokens. Excessively long mtail expressions are likely to cause compilation and runtime performance problems.") // Debugging flags. blockProfileRate = flag.Int("block_profile_rate", 0, "Nanoseconds of block time before goroutine blocking events reported. 0 turns off. See https://golang.org/pkg/runtime/#SetBlockProfileRate") mutexProfileFraction = flag.Int("mutex_profile_fraction", 0, "Fraction of mutex contention events reported. 0 turns off. See http://golang.org/pkg/runtime/#SetMutexProfileFraction") httpDebugEndpoints = flag.Bool("http_debugging_endpoint", true, "Enable debugging endpoints (/debug/*).") httpInfoEndpoints = flag.Bool("http_info_endpoint", true, "Enable info endpoints (/progz,/varz).") // Tracing. jaegerEndpoint = flag.String("jaeger_endpoint", "", "If set, collector endpoint URL of jaeger thrift service") traceSamplePeriod = flag.Int("trace_sample_period", 0, "Sample period for traces. If non-zero, every nth trace will be sampled.") // Deprecated. 
_ = flag.Bool("disable_fsnotify", true, "DEPRECATED: this flag is no longer in use.") _ = flag.Int("metric_push_interval_seconds", 0, "DEPRECATED: use --metric_push_interval instead") ) func init() { flag.Var(&logs, "logs", "List of log files to monitor, separated by commas. This flag may be specified multiple times.") } var ( // Branch as well as Version and Revision identifies where in the git // history the build came from, as supplied by the linker when copmiled // with `make'. The defaults here indicate that the user did not use // `make' as instructed. Branch = "invalid:-use-make-to-build" Version = "invalid:-use-make-to-build" Revision = "invalid:-use-make-to-build" ) func main() { buildInfo := mtail.BuildInfo{ Branch: Branch, Version: Version, Revision: Revision, } flag.Usage = func() { fmt.Fprintf(os.Stderr, "%s\n", buildInfo.String()) fmt.Fprintf(os.Stderr, "\nUsage:\n") flag.PrintDefaults() } flag.Parse() if *version { fmt.Println(buildInfo.String()) os.Exit(0) } glog.Info(buildInfo.String()) glog.Infof("Commandline: %q", os.Args) if len(flag.Args()) > 0 { glog.Exitf("Too many extra arguments specified: %q\n(the logs flag can be repeated, or the filenames separated by commas.)", flag.Args()) } loc, err := time.LoadLocation(*overrideTimezone) if err != nil { fmt.Fprintf(os.Stderr, "Couldn't parse timezone %q: %s", *overrideTimezone, err) os.Exit(1) } if *blockProfileRate > 0 { glog.Infof("Setting block profile rate to %d", *blockProfileRate) runtime.SetBlockProfileRate(*blockProfileRate) } if *mutexProfileFraction > 0 { glog.Infof("Setting mutex profile fraction to %d", *mutexProfileFraction) runtime.SetMutexProfileFraction(*mutexProfileFraction) } if *progs == "" { glog.Exitf("mtail requires programs that instruct it how to extract metrics from logs; please use the flag -progs to specify the directory containing the programs.") } if !(*dumpBytecode || *dumpAst || *dumpAstTypes || *compileOnly) { if len(logs) == 0 { glog.Exitf("mtail requires the names of 
logs to follow in order to extract logs from them; please use the flag -logs one or more times to specify glob patterns describing these logs.") } } if *traceSamplePeriod > 0 { trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(1 / float64(*traceSamplePeriod))}) } if *pollInterval == 0 { glog.Infof("no poll log data interval specified; defaulting to 250ms poll") *pollInterval = time.Millisecond * 250 } if *pollLogInterval == 0 { glog.Infof("no poll log pattern interval specified; defaulting to 250ms poll") *pollLogInterval = time.Millisecond * 250 } ctx, cancel := context.WithCancel(context.Background()) defer cancel() sigint := make(chan os.Signal, 1) signal.Notify(sigint, os.Interrupt, syscall.SIGTERM) go func() { sig := <-sigint glog.Infof("Received %+v, exiting...", sig) cancel() }() opts := []mtail.Option{ mtail.ProgramPath(*progs), mtail.LogPathPatterns(logs...), mtail.IgnoreRegexPattern(*ignoreRegexPattern), mtail.SetBuildInfo(buildInfo), mtail.OverrideLocation(loc), mtail.MetricPushInterval(*metricPushInterval), mtail.MaxRegexpLength(*maxRegexpLength), mtail.MaxRecursionDepth(*maxRecursionDepth), } eOpts := []exporter.Option{} if *logRuntimeErrors { opts = append(opts, mtail.LogRuntimeErrors) } if *staleLogGcTickInterval > 0 { staleLogGcWaker := waker.NewTimed(ctx, *staleLogGcTickInterval) opts = append(opts, mtail.StaleLogGcWaker(staleLogGcWaker)) } if *pollInterval > 0 { logStreamPollWaker := waker.NewTimed(ctx, *pollInterval) logPatternPollWaker := waker.NewTimed(ctx, *pollLogInterval) opts = append(opts, mtail.LogPatternPollWaker(logPatternPollWaker), mtail.LogstreamPollWaker(logStreamPollWaker)) } if *unixSocket == "" { opts = append(opts, mtail.BindAddress(*address, *port)) } else { opts = append(opts, mtail.BindUnixSocket(*unixSocket)) } if *oneShot { opts = append(opts, mtail.OneShot) } if *compileOnly { opts = append(opts, mtail.CompileOnly) } if *dumpAst { opts = append(opts, mtail.DumpAst) } if *dumpAstTypes { opts = 
append(opts, mtail.DumpAstTypes) } if *dumpBytecode { opts = append(opts, mtail.DumpBytecode) } if *httpDebugEndpoints { opts = append(opts, mtail.HTTPDebugEndpoints) } if *httpInfoEndpoints { opts = append(opts, mtail.HTTPInfoEndpoints) } if *syslogUseCurrentYear { opts = append(opts, mtail.SyslogUseCurrentYear) } if !*emitProgLabel { opts = append(opts, mtail.OmitProgLabel) eOpts = append(eOpts, exporter.OmitProgLabel()) } if *emitMetricTimestamp { opts = append(opts, mtail.EmitMetricTimestamp) eOpts = append(eOpts, exporter.EmitTimestamp()) } if *jaegerEndpoint != "" { opts = append(opts, mtail.JaegerReporter(*jaegerEndpoint)) } store := metrics.NewStore() if *expiredMetricGcTickInterval > 0 { store.StartGcLoop(ctx, *expiredMetricGcTickInterval) } m, err := mtail.New(ctx, store, opts...) if err != nil { glog.Error(err) cancel() os.Exit(1) //nolint:gocritic // false positive } err = m.Run() if err != nil { glog.Error(err) cancel() os.Exit(1) //nolint:gocritic // false positive } if *oneShot { switch *oneShotFormat { case "prometheus": var wg sync.WaitGroup e, err := exporter.New(ctx, &wg, store, eOpts...) 
if err != nil { glog.Error(err) cancel() wg.Wait() os.Exit(1) //nolint:gocritic // false positive } err = e.Write(os.Stdout) if err != nil { glog.Error(err) cancel() wg.Wait() os.Exit(1) //nolint:gocritic // false positive } cancel() wg.Wait() os.Exit(0) //nolint:gocritic // false positive case "json": err = store.WriteMetrics(os.Stdout) if err != nil { glog.Error(err) os.Exit(1) //nolint:gocritic // false positive } cancel() os.Exit(0) //nolint:gocritic // false positive default: glog.Errorf("unsupported format: %q", *oneShotFormat) cancel() os.Exit(1) //nolint:gocritic // false positive } } } mtail-3.0.0~rc54+git0ff5/docs/000077500000000000000000000000001460063571700157155ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/docs/Building.md000066400000000000000000000051421460063571700177760ustar00rootroot00000000000000# Building mtail `mtail` is implemented in [Go](http://golang.org). You will need to install a recent Go. ## `go get`, quick and easy, no version information. Fetch, build, and install the binary directly with `go get` `go install github.com/google/mtail/cmd/mtail` NOTE: If you do it this way, you won't have a supported version of `mtail`. ## The "Right Way" [Clone](http://github.com/google/mtail) the source from GitHub into your `$GOPATH`. If you don't have a `$GOPATH`, see the next section. ``` git clone https://github.com/google/mtail cd mtail make test install ``` ### Building `mtail` uses a `Makefile` to build the source. This ensures the generated code is up to date and that the binary is tagged with release information. Having fetched the source, use `make` from the top of the source tree. This will install all the dependencies, and then build `mtail`. This assumes that your Go environment is already set up -- see above for hints on setting it up. The resulting binary will be in `$GOPATH/bin`. The unit tests can be run with `make test`, which invokes `go test`. The slower race-detector tests can be run with `make testrace`. 
### Cross-compilation `goreleaser` is used to build the binaries available for download on the Releases page. If yuo want to build your own locally, fetch goreleaser and update the config file locally if necessary. ## No Go You can still run `mtail` and its programmes with Docker. ``` docker build -t mtail . docker run -it --rm mtail --help ``` `mtail` is not much use without programme files or logs to parse, you will need to mount a path containing them into the container, as is done with the `-v` flag in the example below: ``` docker run -it --rm -v examples/linecount.mtail:/progs/linecount.mtail -v /var/log:/logs mtail -logtostderr -one_shot -progs /progs/linecount.mtail -logs /logs/messages.log ``` Or, via Docker Compose, e.g. this `docker-compose.yml` snippet example shows with the `volume:` section: ```yaml service: mtail: image: mtail command: - -logtostderr - -one_shot - -progs - /progs/linecount.mtail - -logs - /logs/messages.log volume: - type: bind source: /var/log target: /logs readonly: true - type: bind source: examples/linecount.mtail target: /progs/linecount.mtail ``` ## Contributing Please use `gofmt` to format your code before committing. Emacs' go-mode has a lovely [gofmt-before-save](http://golang.org/misc/emacs/go-mode.el) function. Please read the [test writing](Testing.md#test-writing) section for `mtail` test style guidelines. mtail-3.0.0~rc54+git0ff5/docs/Deploying.md000066400000000000000000000167311460063571700202010ustar00rootroot00000000000000# Deploying `mtail` ## Introduction mtail is intended to run one per machine, and serve as monitoring glue for multiple applications running on that machine. It runs one or more programs in a 1:1 mapping to those client applications. ## Configuration Overview mtail is configured with commandline flags. The `--help` flag will print a list of flags for configuring `mtail`. 
(Flags may be prefixed with either `-` or `--`) ## Quickstart Basic flags necessary to start `mtail`: * `--logs` is a comma separated list of filenames to extract from, but can also be used multiple times, and each filename can be a [glob pattern](http://godoc.org/path/filepath#Match). Named pipes can be read from when passed as a filename to this flag. * `--progs` is a directory path containing [mtail programs](Language.md). Programs must have the `.mtail` suffix. mtail runs an HTTP server on port 3903, which can be changed with the `--port` flag. # Details ## Launching mtail ``` mtail --progs /etc/mtail --logs /var/log/syslog --logs /var/log/ntp/peerstats ``` `mtail` will start to read the specified logs from their current end-of-file, and read new updates appended to these logs as they arrive. It will attempt to correctly handle log files that have been rotated by renaming or symlink changes. ### Getting the logs in Use `--logs` multiple times to pass in glob patterns that match the logs you want to tail. This includes named pipes. ### Polling the file system `mtail` polls matched log files every `--poll_log_interval`, or 250ms by default, the supplied `--logs` patterns for newly created or deleted log pathnames. Known and active logs are read until EOF every `--poll_interval`, or 250ms by default. Example: ``` mtail --progs /etc/mtail --logs /var/log/syslog --poll_interval 250ms --poll_log_interval 250ms ``` ### Setting garbage collection intervals `mtail` accumulates metrics and log files during its operation. By default, *every hour* both a garbage collection pass occurs looking for expired metrics, and stale log files. An expired metric is any metric that hasn't been updated in a time specified by a `del after` form in a program. A stale log file is any log being watched that hasn't been read from in 24 hours. 
The interval between garbage collection runs can be changed on the commandline with the `--expired_metrics_gc_interval` and `--stale_log_gc_interval` flags, which accept a time duration string compatible with the Go [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) function. ### Runtime error log rate If your programs deliberately fail to parse some log lines then you may end up generating lots of runtime errors which are normally logged at the standard INFO level, which can fill your disk. You can disable this with `--novm_logs_runtime_errors` or `--vm_logs_runtime_errors=false` on the commandline, and then you will only be able to see the most recent runtime error in the HTTP status console. ### Launching under Docker `mtail` can be run as a sidecar process if you expose an application container's logs with a volume. `docker run -d --name myapp -v /var/log/myapp myapp` for example exports a volume called `/var/log/myapp` (named the same as the hypothetical path where `myapp`s logs are written. Then launch the `mtail` docker image and pass in the volume: docker run -dP \ --name myapp-mtail \ --volumes-from myapp \ -v examples:/etc/mtail \ mtail --logs /var/log/myapp --progs /etc/mtail This example fetches the volumes from the `myapp` container, and mounts them in the mtail container (which we've called `myapp-mtail`). We also mount the `examples` directory as `/etc/mtail` in the container. We launch `mtail` with the `logs` and `progs` flags to point to our two mounted volumes. The `-P` flag ensures `mtail-myapp`'s port 3903 is exposed for collection, refer to `docker ps` to find out where it's mapped to on the host. ## Writing the programme Read the [Programming Guide](Programming-Guide.md) for instructions on how to write an `mtail` program. ### Reloading programmes `mtail` does not automatically reload programmes after it starts up. 
To ask `mtail` to scan for and reload programmes from the supplied `--progs` directory, send it a `SIGHUP` signal on UNIX-like systems. For example, if configs are being delivered by a configuration management tool like Puppet, then program Puppet to send a SIGHUP when it has copied a new config file over. ```puppet exec { 'reload_mtail_programmes': command => "killall -HUP mtail", refreshonly = True, } file { mtail_programme: source => mtail_programme, notify => Exec['reload_mtail_programmes'], } ``` Alternatively, if you're using `scp` or some similar method to copy the programme files without a receiver, then either follow it with a `ssh $host 'killall -HUP mtail'` or use a tool like [`inotifywait`](https://linux.die.net/man/1/inotifywait) in a side process next to mtail to watch for changes and send the reload signal. ```shell inotifywait -m /etc/mtail/progs | while read event; do killall -HUP mtail; done ``` ## Getting the Metrics Out ### Pull based collection Point your collection tool at `localhost:3903/json` for JSON format metrics. Prometheus can be directed to the /metrics endpoint for Prometheus text-based format. ### Changing the listen address The default port is `3903`, and can be changed with the `--port` commandline flag. The default address is `""` on the TCP protocol, which means it will bind to all IP addresses on the system. This can be changed with the `--address` commandline flag. ``` mtail --address=127.0.0.1 --port=8080` ``` Depending on your version of Go, the address "0.0.0.0" is treated by Go as dual-stack; see https://github.com/golang/go/issues/17615 and https://pkg.go.dev/net#Listen ### Push based collection Use the `collectd_socketpath` or `graphite_host_port` flags to enable pushing to a collectd or graphite instance. Configure collectd on the same machine to use the unixsock plugin, and set `collectd_socketpath` to that unix socket. 
``` mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --collectd_socketpath=/var/run/collectd-unixsock ``` Set `graphite_host_port` to be the host:port of the carbon server. ``` mtail --progs /etc/mtail --logs /var/log/syslog,/var/log/rsyncd.log --graphite_host_port=localhost:9999 ``` Likewise, set `statsd_hostport` to the host:port of the statsd server. Additionally, the flag `metric_push_interval_seconds` can be used to configure the push frequency. It defaults to 60, i.e. a push every minute. ## Setting a default timezone The `--override_timezone` flag sets the timezone that `mtail` uses for timestamp conversion. By default, `mtail` assumes timestamps are in UTC. To use the machine's local timezone, `--override_timezone=Local` can be used. ## Troubleshooting Lots of state is logged to the log file, by default in `/tmp/mtail.INFO`. See [Troubleshooting](Troubleshooting.md) for more information. N.B. Oneshot mode (the `one_shot` flag on the commandline) can be used to check that a program is correctly reading metrics from a log, but with the following caveats: * Unlike normal operations, oneshot mode will read the logs from the start of the file to the end, then close them -- it does not continuously tail the file * The metrics will be printed to standard out when the logs are finished being read from. * mtail will exit after the metrics are printed out. This mode is useful for debugging the behaviour of `mtail` programs and possibly for permissions checking. mtail-3.0.0~rc54+git0ff5/docs/Interoperability.md000066400000000000000000000161161460063571700215710ustar00rootroot00000000000000# Interoperability of `mtail` with other monitoring tools ## Introduction `mtail` is only part of a monitoring ecosystem -- it fills the gap between applications that export no metrics of their own in a [common protocol](Metrics.md) and the timeseries database. 
`mtail` is intended to complement other tools to build a complete system, and usually does not try to add functionality better provided by systems specifically designed for that function. # Metric export and collection mtail actively exports (i.e. pushes) to the following timeseries databases: * [collectd](http://collectd.org/) * [graphite](http://graphite.wikidot.com/start) * [statsd](https://github.com/etsy/statsd) mtail also is a passive exporter (i.e. pull, or scrape based) by: * [Prometheus](http://prometheus.io) * Google's Borgmon *Recommendation* Of the above, `mtail` recommends using Prometheus to extract the metrics from mtail as it is a rich monitoring tool and has a lot of interoperability itself. The `collectd`, `graphite`, and `statsd` options are less battle-tested and originate from an earlier time when the industry had not yet crystallised around a metric protocol. No configuration is required to enable Prometheus export from `mtail`. ## Prometheus Exporter Metrics Prometheus' [writing exporters documentation](https://prometheus.io/docs/instrumenting/writing_exporters/) describes useful metrics for a Prometheus exporter to export. `mtail` does not follow that guide, for these reasons. The exporter model described in that document is for active proxies between an application and Prometheus. The expectation is that when Prometheus scrapes the proxy (the exporter) that it then performs its own scrape of the target application, and translates the results back into the Prometheus exposition format. The time taken to query the target application is what is exported as `X_scrape_duration_seconds` and its availability as `X_up`. `mtail` doesn't work like that. It is reacting to the input log events, not scrapes, and so there is no concept of how long it takes to query the application or if it is available. There are things that, if you squint, look like applications in `mtail`, the virtual machine programs. 
They could be exporting their time to process a single line, and are `up` as long as they are not crashing on input. This doesn't translate well into the exporter metrics meanings though. TODO(jaq): Instead, mtail will export a histogram of the runtime per line of each VM program. `mtail` doesn't export `mtail_up` or `mtail scrape_duration_seconds` because they are exactly equivalent* to the [synthetic metrics](https://prometheus.io/docs/concepts/jobs_instances/) that Prometheus creates automatically. \* The difference between a scrape duration measured in mtail versus Prometheus would differ in the network round trip time, TCP setup time, and send/receive queue time. For practical purposes you can ignore them as the usefulness of a scrape duration metric is not in its absolute value, but how it changes over time. # Log Collection, Distribution, and Filtering {: #syslog} `mtail` is not intended to be used as a replacement for `syslogd`. `mtail` can read from named pipes and unix domain sockets on systems that support them, but the intent is that a proper `syslogd` can manage the collection of those logs, filter out interestnig ones if necessary, and forward them to `mtail` via a named pipe. Both `rsyslogd` and `syslog-ng` are possible choices here. It's probably not a good idea to have `mtail` listen directly to `/dev/log` or read from `/run/systemd/journal/syslog` unless you know what you're doing. `mtail` does not want to be in the business of API specialisation, but `syslog-ng` has done so with its [`system()` family of collector configuration options](https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.22/administration-guide/26#TOPIC-1209162). 
* rsyslog supports forwarding to a [named pipe](https://www.rsyslog.com/doc/master/configuration/modules/ompipe.html) and to a [unix domain socket](https://www.rsyslog.com/doc/master/configuration/modules/omuxsock.html) * syslog-ng supports output to [named pipe](https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.30/administration-guide/44#TOPIC-1595018) and [unix domain socket](https://www.syslog-ng.com/technical-documents/doc/syslog-ng-open-source-edition/3.30/administration-guide/54#TOPIC-1595060) Additionally, use a proper syslog to transmit and receive logs over the network. `mtail` does not provide any transport security, nor does TCP itself guarantee that no loss of data will occur: the [RELP spec](https://www.rsyslog.com/doc/v8-stable/tutorials/reliable_forwarding.html) exists for the latter. *Recommendation* Run `mtail` with a `--logs unix:///run/mtail.sock` flag to specify a single unix domain socket, or `mkfifo /run/mtail.pipe` to create a named pipe and `--logs /run/mtail.pipe` to share between `mtail` and the syslog daemon. Instruct the syslog daemon to forward syslog to the socket or pipe so named with one of the options described above (or as documented by your syslog daemon manual.) # Logs Analysis While `mtail` does a form of logs analysis, it does _not_ do any copying, indexing, or searching of log files for data mining applications. It is only intended for real- or near-time monitoring data for the purposes of performance measurement and alerting. Instead, see logs ingestion and analysis systems like * [Logstash](https://www.elastic.co/products/logstash) * [Graylog](https://www.graylog.org/) if that is what you need. *Recommendation* `mtail` provides no recommendations here as there is no direct interoperation between `mtail` and logs analysis. The interface to logs analysis will be from the syslog daemon or application logger directly. 
If a logs analysis collector is receiving application logs, then `mtail` is either running concurrently reading those application logs as well, or the logs analysis collector is teeing to `mtail` in a manner similar to syslog daemons above. # TLS/SSL {: #tls-ssl} Sometimes one may wish to expose `mtail` directly to the internet, but would like to protect it from unauthorized access. `mtail` doesn't support SSL or HTTP authentication, and should be used with a VPN tunnel or reverse proxy instead. Assuming a VPN tunnel is out of the question, then termination of SSL connections is possible with tools like [`nginx`]() and [`varnish`](). `mtail` can listen on either a TCP socket or a unix domain socket for HTTP requests; the latter is done with `--unix_socket` instead of the `--address` and `--port` flags. Forwarding to a unix domain socket instead of TCP is possible with both [`nginx`](http://nginx.org/en/docs/http/ngx_http_upstream_module.html) and [`varnish`](https://varnish-cache.org/docs/trunk/whats-new/upgrading-6.0.html#upd-6-0-uds-backend). *Recommendation* If no VPN tunnel is possible, then use a reverse proxy to terminate HTTPS and then forward to `mtail` over a unix domain socket, by setting the `--unix_socket /run/mtail.http.sock` and then configuring the reverse proxy to use the unix socket as a backend. mtail-3.0.0~rc54+git0ff5/docs/Language.md000066400000000000000000000445171460063571700177750ustar00rootroot00000000000000# `mtail` Language Reference ## Description As `mtail` is designed to tail log files and apply regular expressions to new log lines to extract data, the language naturally follows this pattern-action style. It resembles another, more famous pattern-action language, that of AWK. This page errs on the side of a language specification and reference. See the [Programming Guide](Programming-Guide.md) for a gentler introduction to writing `mtail` programs. 
## Program Execution `mtail` runs all programs on every line received by the log tailing subsystem. The rough model of this looks like: ``` for line in lines: for regex in regexes: if match: do something ``` Each program operates once on a single line of log data, and then terminates. ## Program Structure An `mtail` program consists of exported variable definitions, pattern-action statements, and optional decorator definitions. ``` exported variable pattern { action statements } def decorator { pattern and action statements } ``` ## Exported Variables `mtail`'s purpose is to extract information from logs and deliver them to a monitoring system. Thus, variables must be named for export. Variables, which have type `counter` or `gauge`, must be declared before their use. ``` counter lines_total gauge queue_length ``` They can be exported with a different name, with the `as` keyword, if one wants to use characters that would cause a parse error. This example causes the metric to be named `line-count` in the collecting monitoring system. ``` counter lines_total as "line-count" ``` Variables can be dimensioned with one or more axes, with the `by` keyword, creating multidimensional data. Dimensions can be used for creating histograms, as well. ``` counter bytes by operation, direction counter latency_ms by bucket ``` Putting the `hidden` keyword at the start of the declaration means it won't be exported, which can be useful for storing temporary information. This is the only way to share state between each line being processed. ``` hidden counter login_failures ``` ## Pattern/Action form. `mtail` programs look a lot like `awk` programs. They consist of a conditional expression followed by a brace-enclosed block of code: ``` COND { ACTION } ``` `COND` is a conditional expression. It can be a regular expression, which if matched enters the action block, or a relational expression as you might encounter in a C program's `if` statement (but without the `if`, it is implicit.) 
``` /foo/ { ACTION1 } variable > 0 { ACTION2 } /foo/ && variable > 0 { ACTION3 } ``` In the above program, ACTION1 is taken on each line input if that line matches the word `foo`, and ACTION2 is taken on each line if, when that line is read, the variable `variable` is greater than 0. ACTION3 occurs if both are true. The action statements must be wrapped in curly braces, i.e. `{}`. `mtail` programs have no single-line statement conditionals like C. ### Regular Expressions `mtail` supports RE2-style regular expression syntax, but is limited by what is supported by the Go implementation of [Go's regexp/syntax](https://godoc.org/regexp). #### Constant pattern fragments To re-use parts of regular expressions, you can assign them to a `const` identifier: ``` const PREFIX /^\w+\W+\d+ / PREFIX { ACTION1 } PREFIX + /foo/ { ACTION2 } ``` In this example, ACTION1 is done for every line that starts with the prefix regex, and ACTION2 is done for the subset of those lines that also contain 'foo'. Pattern fragments like this don't need to be prefixes, they can be anywhere in the expression. ``` counter maybe_ipv4 const IPv4 /(?P<ip>\d+\.\d+\.\d+\.\d+)/ /something with an / + IPv4 + / address/ { maybe_ipv4++ } ``` See [dhcpd.mtail](../examples/dhcpd.mtail) for more examples of this. See also the section on decorators below for improving readability of expressions that are only matched once. ### Conditionals More complex expressions can be built up from relational expressions and other pattern expressions. 
#### Operators The following relational operators are available in `mtail`: * `<` less than * `<=` less than or equal * `>` greater than * `>=` greater than or equal * `==` is equal * `!=` is not equal * `=~` pattern match * `!~` negated pattern match * `||` logical or * `&&` logical and * `!` unary logical negation The following arithmetic operators are available in `mtail`: * `|` bitwise or * `&` bitwise and * `^` bitwise xor * `+` addition * `-` subtraction * `*` multiplication * `/` division * `<<` bitwise shift left * `>>` bitwise shift right * `**` exponent The following arithmetic operators act on exported variables. * `=` assignment * `++` increment * `+=` increment by * `--` decrement #### `else` Clauses When a conditional expression does not match, action can be taken as well: ``` /foo/ { ACTION1 } else { ACTION2 } ``` Else clauses can be nested. There is no ambiguity with the dangling-else problem, as `mtail` programs must wrap all block statements in `{}`. #### `otherwise` clauses The `otherwise` keyword can be used as a conditional statement. It matches if no preceding conditional in the current scope has matched. This behaves similarly to the `default` clause in a C `switch` statement. ``` /foo/ { /foo1/ { ACTION1 } /foo2/ { ACTION2 } otherwise { ACTION3 } } ``` In this example, ACTION3 will be executed if neither `/foo1/` or `/foo2/` match on the input, but `/foo/` does. ### Actions #### Incrementing a Counter The simplest `mtail` program merely counts lines read: ``` /$/ { lines_total++ } ``` This program instructs `mtail` to increment the `lines_total` counter variable on every line received (specifically anytime an end-of-line is matched.) #### Capture Groups Regular expressions in patterns can contain capture groups -- subexpressions wrapped in parentheses. These can be referred to in the action block to extract data from the line being matched. 
For example, part of a program that can extract from `rsyncd` logs may want to break down transfers by operation and module. ``` counter transfers_total by operation, module /(?P<operation>\S+) (\S+) \[\S+\] (\S+) \(\S*\) \S+ (?P<bytes>\d+)/ { transfers_total[$operation][$3]++ } ``` Or, the value of the counter can be increased by the value of a capture group: ``` counter bytes_total by operation, module /(?P<operation>\S+) (\S+) \[\S+\] (\S+) \(\S*\) \S+ (?P<bytes>\d+)/ { bytes_total[$operation][$3] += $bytes } ``` Numeric capture groups address subexpressions in the match result as you might expect from regular expression groups in other languages, like awk and perl -- e.g. the expression `$3` refers to the third capture group in the regular expression. Named capture groups can be referred to by their name as indicated in the regular expression using the `?P<name>` notation, as popularised by the Python regular expression library -- e.g. `$bytes` refers to `(?P<bytes>\d+)` in the examples above. Capture groups can be used in the same expression that defines them, for example in this expression that matches and produces `$x`, then compares against that value. ``` /(?P<x>\d+)/ && $x > 1 { nonzero_positives++ } ``` #### Timestamps It is also useful to timestamp a metric with the time the application thought an event occurred. Logs typically prefix the log line with a timestamp string, which can be extracted and then parsed into a timestamp internally, with the `strptime` builtin function. A regular expression that extracts the timestamp in boring old syslog format looks like: ``` /^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ { strptime($date, "Jan 02 15:04:05") ... } ``` Buyer beware! The format string used by `mtail` is the same as the [Go time.Parse() format string](https://godoc.org/time#Parse), which is completely unlike that used by C's strptime. The format string must always be the 2nd of January 2006 at 3:04:05 PM. See the documentation for the **ANSIC** format in the above link for more details. 
**NOTE** that *unlike* Go's `time.Parse()` (and *like* C's) the format string is the *second* argument to this builtin function. > NOTE: without a `strptime()` call, `mtail` will default to using the current > system time for the timestamp of the event. This may be satisfactory for > near-real-time logging. #### Nested Actions It is of course possible to nest more pattern-actions within actions. This lets you factor out common parts of a match expression and deal with per-message actions separately. For example, parsing syslog timestamps is something you may only wish to do once, as it's expensive to match (and difficult to read!) ``` counter foo counter bar /^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ { strptime($date, "Jan 02 15:04:05") /foo/ { foo++ } /bar/ { bar++ } } ``` This will result in both foo and bar counters being timestamped with the current log line's parsed time, once they match a line. #### Decorated actions Decorated actions are an inversion of nested actions. They allow the program to define repetitive functions that perform the same extraction across many different actions. For example, most log file formats start with a timestamp prefix. To reduce duplication of work, decorators can be used to factor out the common work of extracting the timestamp. For example, to define a decorator, use the `def` keyword: ``` def syslog { /(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)/ { strptime($date, "Jan 2 15:04:05") next } } ``` The decorator definition starts and ends in a curly-braced block, and looks like a normal pattern/action as above. The new part is the `next` keyword, which indicates to `mtail` where to jump into the *decorated* block. To use a decorator: ``` @syslog { /some event/ { variable++ } } ``` The `@` notation, familiar to Python programmers, denotes that this block is "wrapped" by the `syslog` decorator. The syslog decorator will be called on each line first, which extracts the timestamp of the log line. 
Then, `next` causes the wrapped block to execute, so then `mtail` matches the line against the pattern `some event`, and if it does match, increments `variable`. #### Types `mtail` metrics have a *kind* and a *type*. The *kind* affects how the metric is recorded, and the *type* describes the data being recorded. Ordinarily `mtail` doesn't treat kinds specially, except when they are being exported. * `counter` assumes that the variable is a monotonically increasing measure, so that computations on sampled data like rates can be performed without loss. Use for counting events or summing up bytes transferred. * `gauge` assumes that the variable can be set to any value at any time, signalling that rate computations are risky. Use for measures like queue length at a point in time. * `histogram` is used to record frequency of events broken down by another dimension, for example by latency ranges. This kind does have special treatment within `mtail`. The second dimension is the internal representation of a value, which is used by `mtail` to attempt to generate efficient bytecode. * Integer * Float * Bool * String Some of these types can only be used in certain locations -- for example, you can't increment a counter by a string, but `mtail` will fall back to an attempt to do so, logging an error if a runtime type conversion fails. Likewise, the only type that a `histogram` can observe is a Float. These types are usually inferred from use, but can be influenced by the programmer with builtin functions. Read on. #### Builtin functions `mtail` contains some builtin functions for help with extracting information and manipulating state. There are "pure" builtin functions, in that they have no side effects on the program state. * `len(x)`, a function of one string argument, which returns the length of the string argument `x`. * `tolower(x)`, a function of one string argument, which returns the input `x` in all lowercase. 
* `subst(old, new, val)`, a function of three arguments which returns the input `val` with all substrings or patterns `old` replaced by `new`. When given a *string* for `old`, it is a direct proxy of the Go [strings.ReplaceAll](https://golang.org/pkg/strings/#ReplaceAll) function. `subst("old", "new", $val)` When given a *regular expression pattern* for `old`, it uses [regexp.ReplaceAllLiteralString](https://golang.org/pkg/regexp/#Regexp.ReplaceAllLiteralString). `subst(/old/, "new", $val)` Note the different quote characters in the first argument. There are type coercion functions, useful for overriding the type inference made by the compiler if it chooses badly. (If the choice is egregious, please file a bug!) * `int(x)`, a function of one argument performs type conversion to integer. If `x` is a type that can be converted to integer, it does so. If the type of `x` cannot be converted to an integer, a compile error is triggered. If the value of `x` cannot be converted to an integer, then a runtime error is triggered. * `float(x)`, a function of one argument that performs type conversion to floating point numbers. The same rules apply as for `int()` above. * `string(x)`, a function of one argument that performs conversion to string values. * `strtol(x, y)`, a function of two arguments, which converts a string `x` to an integer using base `y`. Useful for translating octal or hexadecimal values in log messages. A few builtin functions exist for manipulating the virtual machine state as side effects for the metric export. * `getfilename()`, a function of no arguments, which returns the filename from which the current log line input came. * `settime(x)`, a function of one integer argument, which sets the current timestamp register. * `strptime(x, y)`, a function of two string arguments, which parses the timestamp in the string `x` with the parse format string in `y`, and sets the current timestamp register. 
The parse format string must follow [Go's time.Parse() format string](http://golang.org/src/pkg/time/format.go) * `timestamp()`, a function of no arguments, which returns the current timestamp. This is undefined if neither `settime` or `strptime` have been called previously. The **current timestamp register** refers to `mtail`'s idea of the time associated with the current log line. This timestamp is used when the variables are exported to the upstream collector. The value defaults to the time that the log line arrives in `mtail`, and can be changed with the `settime()` or `strptime()` builtins. User defined functions are not supported, but read on to Decorated Actions for how to reuse common code. #### Numerical capture groups and Metric type information By limiting the pattern of a capturing group to only numeric characters, the programmer can provide hints to `mtail` about the type of an expression. For example, in the regular expression `/(\d+)/` the first capture group can only match digits, and so the compiler will infer that this is an integer match. `/(\d+\.\d+)/` looks like it matches floating point numbers, and so the compiler will infer that this is of type float. > NOTE: In the expression above, the dot is escaped. A regular expression > operator `.` matches every character and so the inference assumes that the > type of '.' is a string. The compiler performs type inference on the expressions that use the capture groups, and the metrics they are ultimately assigned to, and will assign a type (either integer or floating point) to the metrics exported. Thus in a program like: ``` gauge i gauge f /(\d+)/ { i = $1 } /(\d+\.\d+)/ { f = $1 } ``` the metric `i` will be of type Int and the metric `f` will be of type Float. The advantage of limiting pattern matches to specific values is that `mtail` can generate faster bytecode if it knows at compile-time the types to expect. 
If `mtail` can't infer the value types, they default to `String` and `mtail` will attempt a value conversion at runtime if necessary. Runtime conversion errors will be emitted to the standard INFO log, and terminate program execution for that log line. #### Variable Storage Management ##### `del` `mtail` performs no implicit garbage collection in the metric storage. The program can hint to the virtual machine that a specific datum in a dimensioned metric is no longer going to be used with the `del` keyword. ``` gauge duration by session hidden session_start by session /end/ { duration[$session] = timestamp() - session_start[$session] del session_start[$session] } ``` In this example, a hidden metric is used to record some internal state. It will grow unbounded as the number of sessions increases. If the programmer knows that the `/end/` pattern is the last time a session will be observed, then the datum at `$session` will be freed, which keeps `mtail` memory usage under control and will improve search time for finding dimensioned metrics. `del` can be modified with the `after` keyword, signalling that the metric should be deleted after some period of no activity. For example, the expression ``` del session_start[$session] after 24h ``` would mean that the datum indexed by `$session` will be removed 24 hours after the last update is recorded. The del-after form takes any time period supported by the Go [`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration) function. Expiry is only processed once every hour, so durations shorter than 1h won't take effect until the next hour has passed. This command only makes sense for dimensioned metrics. ##### `limit` A size limit can be specified on a metric with the modifier `limit`. ``` counter bytes_total by operation limit 500 ``` When the garbage collection run encounters a variable with a size limit that is over its size limit, it will remove the oldest values until the whole metric is below its limit again. 
Oldest values are chosen by the timestamp of the datum. This modifier only makes sense for dimensioned metrics. ### Stopping the program The program runs from start to finish once per line, but sometimes you may want to stop the program early. For example, if the log filename does not match a pattern, or some stateful metric indicates work shouldn't be done. For this purpose, the `stop` keyword terminates the program immediately. The simplest and most useless mtail program is thus: ``` stop ``` But for more useful situations, perhaps stopping if the log filename doesn't match a pattern: ``` getfilename() !~ /apache.access.log/ { stop } ``` mtail-3.0.0~rc54+git0ff5/docs/Metrics.md000066400000000000000000000054121460063571700176470ustar00rootroot00000000000000# Metrics in `mtail` ## Introduction A metric is a data type that describes a measurement. It has a **name**, and a **value**, and a **time** that the measurement was taken. It also has **units**, so that measurements can be compared and calculated with. It has a **class**, so that tools can automatically perform some aggregation operations on collections of measurements. It has a **type**, describing the sort of data it contains: floating point or integer values. Finally, it has some **labels**, so that additional information about the measurement can be added to assist queries later. Labels are key/value pairs, where the value may change for a specific measurement, but the keys remain constant across all measurements in a metric. ## Classes of Metrics The class of a Metric can be: * a monotonically increasing counter, that allows the calculation of rates of change * a variable gauge, that records instantaneous values Counters are very powerful as they are resistant to errors caused by sampling frequency. Typically used to accumulate events, they can show changes in behaviour through the calculation of rates, and rates of rates. They can be summed across a group and that sum also derived. 
Counter resets can indicate crashes or restarts. Gauges are less powerful as their ability to report is dependent on the sampling rate -- spikes in the timeseries can be missed. They record queue lengths, resource usage and quota, and other sized measurements. (N.B. Gauges can be simulated with two counters.) ## Types of data `mtail` records either integer or floating point values as the value of a metric. By default, all metrics are integer, unless the compiler can infer a floating point type. Inference is done through the type checking pass of the compiler. It uses knowledge of the expressions written in the program as well as heuristics on capturing groups in the regular expressions given. For example, in the program: ``` counter a /(\S+)/ { a = $1 } ``` the compiler will assume that `a` is of an integer type. With more information about the matched text: ``` counter a /(\d+\.\d+)/ { a = $1 } ``` the compiler can figure out that the capturing group reference `$1` contains digit and decimal point characters, and is likely then a floating point type. ## Labelling Labels are added as dimensions on a metric: ``` counter a by x, y, z ``` creates a three dimensional metric called `a`, with each dimension key `x`, `y`, `z`. Setting a measurement by label is done with an indexed expression: ``` a[1, 2, 3]++ ``` which has the effect of incrementing the metric a when x = 1, y = 2, and z = 3. Dimensions, aka *labels* in the metric name, can be used to export rich data to the metrics collector, for potential slicing and aggregation by each dimension. mtail-3.0.0~rc54+git0ff5/docs/Programming-Guide.md000066400000000000000000000354071460063571700215650ustar00rootroot00000000000000# `mtail` Programming Guide ## Introduction `mtail` is very simple and thus limits what is possible with metric manipulation, but is very good for getting values into the metrics. This page describes some common patterns for writing useful `mtail` programs. 
## Changing the exported variable name `mtail` only lets you use "C"-style identifier names in the program text, but you can rename the exported variable as it gets presented to the collection system if you don't like that. ``` counter connection_time_total as "connection-time_total" ``` ## Reusing pattern pieces If the same pattern gets used over and over, then define a constant and avoid having to check the spelling of every occurrence. ``` # Define some pattern constants for reuse in the patterns below. const IP /\d+(\.\d+){3}/ const MATCH_IP /(?P<ip>/ + IP + /)/ ... # Duplicate lease /uid lease / + MATCH_IP + / for client .* is duplicate on / { duplicate_lease++ } ``` ## Parse the log line timestamp `mtail` attributes a timestamp to each event. If no timestamp exists in the log and none explicitly parsed by the mtail program, then mtail will use the current system time as the time of the event. Many log files include the timestamp of the event as reported by the logging program. To parse the timestamp, use the `strptime` function with a [Go time.Parse layout string](https://golang.org/pkg/time/#Parse). ``` /^(?P<date>\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ { strptime($date, "Jan _2 15:04:05") ``` Don't try to disassemble timestamps into component parts (e.g. year, month, day) separately. Keep them in the same format as the log file presents them and change the strptime format string to match it. ``` /^/ + /(?P<date>\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}) / + /.*/ + /$/ { strptime($date, "2006/01/02 15:04:05") ``` N.B. If no timestamp parsing is done, then the reported timestamp of the event may add some latency to the measurement of when the event really occurred. Between your program logging the event, and mtail reading it, there are many moving parts: the log writer, some system calls perhaps, some disk IO, some more system calls, some more disk IO, and then mtail's virtual machine execution. 
While normally negligible, it is worth stating in case users notice offsets in time between what mtail reports and the event really occurring. For this reason, it's recommended to always use the log file's timestamp if one is available. ## Repeating common timestamp parsing The decorator syntax was designed with common timestamp parsing in mind. It allows the code for getting the timestamp out of the log line to be reused and make the rest of the program text more readable and thus maintainable. ``` # The `syslog' decorator defines a procedure. When a block of mtail code is # "decorated", it is called before entering the block. The block is entered # when the keyword `next' is reached. def syslog { /(?P<date>(?P<legacy_date>\w+\s+\d+\s+\d+:\d+:\d+)|(?P<rfc3339_date>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ + /\s+(?:\w+@)?(?P<hostname>[\w\.-]+)\s+(?P<application>[\w\.-]+)(?:\[(?P<pid>\d+)\])?:\s+(?P<message>.*)/ { # If the legacy_date regexp matched, try this format. len($legacy_date) > 0 { strptime($legacy_date, "Jan _2 15:04:05") } # If the RFC3339 style matched, parse it this way. len($rfc3339_date) > 0 { strptime($rfc3339_date, "2006-01-02T15:04:05-07:00") } # Call into the decorated block next } } ``` This can be used around any blocks later in the program. ``` @syslog { /foo/ { ... } /bar/ { } } # end @syslog decorator ``` Both the foo and bar pattern actions will have the syslog timestamp parsed from them before being called. ### Timestamps with strange characters in them Go's [time.Parse](https://golang.org/pkg/time/#Parse) does not like underscores in the format string, which may happen when one is attempting to parse a timestamp that does have underscores in the format. Go treats the underscore as placeholding an optional digit. To work around this, you can use `subst()` to rewrite the timestamp before parsing: ``` /(\d{4}-\d{2}-\d{2}_\d{2}:\d{2}:\d{2}) / { strptime(subst("_", " ", $1), "2006-01-02 15:04:05") } ``` Note the position of the underscore in the regular expression match. 
## Conditional structures The `/pattern/ { action }` idiom is the normal conditional control flow structure in `mtail` programs. If the pattern matches, then the actions in the block are executed. If the pattern does not match, the block is skipped. The `else` keyword allows the program to perform action if the pattern does not match. ``` /pattern/ { action } else { alternative } ``` The example above would execute the "alternative" block if the pattern did not match the current line. The `otherwise` keyword can be used to create control flow structure reminiscent of the C `switch` statement. In a containing block, the `otherwise` keyword indicates that this block should be executed only if no other pattern in the same scope has matched. ``` { /pattern1/ { _action1_ } /pattern2/ { _action2_ } otherwise { _action3_ } } ``` In this example, "action3" would execute if both pattern1 and pattern2 did not match the current line. ### Explicit matching The above `/pattern/ { _action_ }` form implicitly matches the current input log line. If one wants to match against another string variable, one can use the `=~` operator, or to negate the match the `!~`, like so: ```mtail $1 =~ /GET/ { ... } ``` ## Storing intermediate state Hidden metrics are metrics that can be used for internal state and are never exported outside of `mtail`. For example if the time between pairs of log lines needs to be computed, then a hidden metric can be used to record the timestamp of the start of the pair. **Note** that the `timestamp` builtin _requires_ that the program has set a log line timestamp with `strptime` or `settime` before it is called. ``` hidden gauge connection_time by pid ... # Connection starts /connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ { connections_total++ # Record the start time of the connection, using the log timestamp. connection_time[$pid] = timestamp() } ... 
# Connection summary when session closed /sent (?P<sent>\d+) bytes received (?P<received>\d+) bytes total size \d+/ { # Sum total bytes across all sessions for this process bytes_total["sent"] += $sent bytes_total["received"] += $received # Count total time spent with connections open, according to the log timestamp. connection_time_total += timestamp() - connection_time[$pid] # Delete the datum referenced in this dimensional metric. We assume that # this will never happen again, and hint to the VM that we can garbage # collect the memory used. del connection_time[$pid] } ``` In this example, the connection timestamp is recorded in the hidden variable `connection_time` keyed by the "pid" of the connection. Later when the connection end is logged, the delta between the current log timestamp and the start timestamp is computed and added to the total connection time. In this example, the average connection time can be computed in a collection system by taking the ratio of the number of connections (`connections_total`) over the time spent (`connection_time_total`). For example in [Prometheus](http://prometheus.io) one might write: ``` connection_time_10s_moving_avg = rate(connections_total[10s]) / on job rate(connection_time_total[10s]) ``` Note also that the `del` keyword is used to signal to `mtail` that the connection_time value is no longer needed. This will cause `mtail` to delete the datum referenced by that label from this metric, keeping `mtail`'s memory usage under control and speeding up labelset search time (by reducing the search space!) Alternatively, the statement `del connection_time[$pid] after 72h` would do the same, but only if `connection_time[$pid]` is not changed for 72 hours. This form is more convenient when the connection close event is lossy or difficult to determine. See [state](state.md) for more information. 
Many other products on the market already do complex mathematical functions on timeseries data, like [Prometheus](http://prometheus.io) and [Riemann](http://riemann.io), so `mtail` defers that responsibility to them. (Do One Thing, and Do It Pretty Good.) But say you still want to do a moving average in `mtail`. First note that `mtail` has no history available, only point in time data. You can update an average with a weighting to make it an exponential moving average (EMA). ``` gauge average /some (\d+) match/ { # Use a smoothing constant 2/(N + 1) to make the average over the last N observations average = 0.9 * $1 + 0.1 * average } ``` However this doesn't take into account the likely situation that the matches arrive irregularly (the time interval between them is not constant.) Unfortunately the formula for this requires the exp() function (`e^N`) as described here: http://stackoverflow.com/questions/1023860/exponential-moving-average-sampled-at-varying-times . I recommend you defer this computation to the collection system ## Histograms Histograms are preferred over averages in many monitoring howtos, blogs, talks, and rants, in order to give the operators better visibility into the behaviour of a system. `mtail` supports histograms as a first class metric kind, and should be created with a list of bucket boundaries: ``` histogram foo buckets 1, 2, 4, 8 ``` creates a new histogram `foo` with buckets for ranges [0-1), [1-2), [2-4), [4-8), and from 8 to positive infinity. > *NOTE: The 0-n and m-+Inf buckets are created automatically.* You can put labels on a histogram as well: ``` histogram apache_http_request_time_seconds buckets 0.005, 0.01, 0.025, 0.05 by server_port, handler, request_method, request_status, request_protocol ``` At the moment all bucket boundaries (excepting 0 and positive infinity) need to be explicitly named (there is no shorthand form to create geometric progressions). 
Assignment to the histogram records the observation: ``` ### # HTTP Requests with histogram buckets. # apache_http_request_time_seconds[$server_port][$handler][$request_method][$request_status][$request_protocol] = $time_us / 1000000 ``` In tools like [Prometheus](http://prometheus.io) these can be manipulated in aggregate for computing percentiles of response latency. ``` apache_http_request_time:rate10s = rate(apache_http_request_time_seconds_bucket[10s]) apache_http_request_time_count:rate10s = rate(apache_http_request_time_seconds_count[10s]) apache_http_request_time:percentiles = apache_http_request_time:rate10s / on (job, port, handler, request_method, request_status, request_protocol) apache_http_request_time_seconds_count:rate10s ``` This new timeseries can be plotted to see the percentile bands of each bucket, for example to visualise the distribution of requests moving between buckets as the performance of the server changes. Further, these timeseries can be used for [Service Level](https://landing.google.com/sre/book/chapters/service-level-objectives.html)-based alerting (a technique for declaring what a defensible service level is based on the relative costs of engineering more reliability versus incident response, maintenance costs, and other factors), as we can now see what percentage of responses fall within and without a predefined service level: ``` apache_http_request_time:latency_sli = apache_http_request_time:rate10s{le="200"} / on (job, port, handler, request_method, request_status, request_protocol) apache_http_request_time_seconds_count:rate10s ALERT LatencyTooHigh IF apache_http_request_time:latency_sli < 0.555555555 LABELS { severity="page" } ANNOTATIONS { summary = "Latency is missing the service level objective" description = "Latency service level indicator is {{ $value }}, which is below nine fives SLO." 
} ``` In this example, prometheus computes a service level indicator of the ratio of requests at or below the target of 200ms against the total count, and then fires an alert if the indicator drops below nine fives. ## Parsing number fields that are sometimes not numbers Some logs, for example Varnish and Apache access logs, use a hyphen rather than a zero. You may be tempted to use a programme like ``` counter total /^[a-z]+ ((?P\d+)|-)$/ { $response_size > 0 { total = $response_size } } ``` to parse a log like ``` a 99 b - ``` except that `mtail` will issue a runtime error on the second line like `Runtime error: strconv.ParseInt: parsing "": invalid syntax`. This is because in this programme the capture group is only matching on a set of digits, and is not defined when the alternate group matches (i.e. the hyphen). Instead one can test the value of the surrounding capture group and do nothing if the value matches a hyphen: ``` counter total /^[a-z]+ ((?P\d+)|-)$/ { $1 != "-" { total = $response_size } } ``` `mtail` does not presently have a way to test if a capture group is defined or not. ## Parsing numbers with extra characters Some logs contain human readable numbers, inserting thousands-separators (comma or full stop depending on your locale.) You can remove them with the `subst` function: ``` /sent (?P[\d,]+) bytes received (?P[\d,]+) bytes/ { # Sum total bytes across all sessions for this process bytes_total["sent"] += int(subst(",", "", $sent)) bytes_total["received"] += int(subst(",", "", $received)) } ``` As `subst` is of type String, the type inference will assign a Text type to bytes total, so here we must explicitly instruct `mtail` that we are expecting this to be an Int by using the `int` cast function. 
# Avoiding unnecessary work You can stop the program if it's fed data from a log file you know you want to ignore: ``` getfilename() !~ /apache.access.?log/ { stop } ``` This will check to see if the input filename looks like `/var/log/apache/accesslog` and not attempt any further pattern matching on the log line if it doesn't. # Canonicalising keys Some logs like webserver logs describe common elements with unique identifiers in them, which can result in lots of metric keys and no useful count if left alone. To rewrite these capture groups, use `subst()` with a pattern as the first argument: ```mtail hidden text route counter http_requests_total by method, route /(?P\S+)/ { route = subst(/\/d+/, "/:num", $url) http_requests_total[method][route]++ } ``` Here we replace any number part following a `/` in the `$url` capture group with the literal string `/:num`, so we end up counting only the static part of a URL route. mtail-3.0.0~rc54+git0ff5/docs/Testing.md000066400000000000000000000045151460063571700176610ustar00rootroot00000000000000# Testing `mtail` programmes ## Introduction By default any compile errors are logged to the standard log `/tmp/mtail.INFO` unless otherwise redirected. (You can emit to standard out with `--logtostderr` flag.) Program errors are also printed on the HTTP status handler, by default at porrt 3903. If you want more debugging information, `mtail` provides a few flags to assist with testing your program in standalone mode. # Details ## Compilation errors The `compile_only` flag will run the `mtail` compiler, print any error messages, and then exit. You can use this to check your programs are syntactically valid during the development process. ``` mtail --compile_only --progs ./progs ``` This could be added as a pre-commit hook to your source code repository. 
## Testing programs The `one_shot` flag will compile and run the `mtail` programs, then feed in any logs specified from the beginning of the file (instead of tailing them), then print to the log all metrics collected. You can use this to check that your programs are giving the expected output against some gold standard log file samples. ``` mtail --one_shot --progs ./progs --logs testdata/foo.log ``` ### Continuous Testing If you wish, send a PR containing your program, some sample input, and a golden output to be run as a test in http://github.com/google/mtail/blob/main/ex_test.go to ensure that mtail never breaks your program (or that your program gets any updates if the language changes.) To have a syntax-only compile test, merely send in a PR with the program in the examples directory. The `TestExamplePrograms` behaves like the `one_shot` flag, and `TestCompileExamplePrograms` tests that program syntax is correct. # Test writing Use the `testutil` module where possible. Do not use time.Sleep; poll for events. The `TestServer` provides a `PollWatched()` method for this purpose. Even integration tests which write to disk can be fast and not require sleeps to synchronise. Use the `if testing.Short()` signal in tests with disk access so that the `make smoke` command is fast. Do not comment out tests, prefer to use the t.Skip() method indicating why it's not working if a test needs to be disabled. This keeps them visible and compilable. # Troubleshooting For more information about debugging mtail programs, see the tips under [Troubleshooting](Troubleshooting.md) mtail-3.0.0~rc54+git0ff5/docs/Troubleshooting.md000066400000000000000000000125531460063571700214340ustar00rootroot00000000000000# Troubleshooting `mtail` installations This page gives an overview of some avenues to debug your `mtail` installation. Also, see the [FAQ](faq.md). 
## Reporting a problem Please when reporting a problem, include the `mtail` version: * the output of `mtail --version` * the first lines of the INFO log (`/tmp/mtail.INFO` by default) * the top of the status page (on HTTP port 3903 by default) ## `go get` or build problems ### `package github.com/google/mtail: no Go files` You're using go 1.11 or higher, which now starts to use go modules, and doesn't like source code layouts like `mtail` which doesn't have any Go files in the top directory. Either set `GO111MODULE=on` environment variable first, or `go get` the binary directly: `go get github.com/google/mtail/cmd/mtail` vs ``` GO111MODULE=on go get -u github.com/google/mtail cd $GOPATH/src/github.com/google/mtail make install ``` ## Compilation problems Compilation problems will be emitted to the standard INFO log * which is visible either on stderr if `mtail` is run with the `--logtostderr` flag * which is stored in the location provided by the `--log_dir` flag (usually, /tmp) (The behaviour of glog is documented in https://github.com/golang/glog) Errors for the most recent version of the program will also be displayed on the standard status page (served over HTTP at port 3903 by default) in the *Program Loader* section. If a program fails to compile, it will not be loaded. If an existing program has been loaded, and a new version is written to disk (by you, or a configuration management system) and that new version does not compile, `mtail` will log the errors and not interrupt or restart the existing, older program. The `--compile_only` flag will only attempt to compile the programs and not execute them. This can be used for pre-commit testing, for example. ### Syntax trees, type information, and virtual machine bytecode More detailed compiler debugging can be retrieved by using the `--dump_ast`, `--dump_ast_types`, and `--dump_bytecode`, all of which dump their state to the INFO log. 
For example, type errors logged such as `prog.mtail: Runtime error: conversion of "-0.000000912" to int failed: strconv.ParseInt: parsing "-0.000000912": invalid syntax` suggest an invalid type inference of `int` instead of `float` for some program symbol or expression. Use the `--dump_ast_types` flag to see the type annotated syntax tree of the program for more details. When reporting a problem, please include the AST type dump. ## Memory or performance issues `mtail` is a virtual machine emulator, and so strange performance issues can occur beyond the imagination of the author. The standard Go profiling tool can help. Start with a cpu profile: `go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/profile' or a memory profile: `go tool pprof /path/to/mtail http://localhost:3903/debug/pprof/heap' There are many good guides on using the profiling tool: * https://software.intel.com/en-us/blogs/2014/05/10/debugging-performance-issues-in-go-programs is one such guide. The goroutine stack dump can also help explain what is happening at the moment. http://localhost:3903/debug/pprof/goroutine?debug=2 shows the full goroutine stack dump. * `(*Watcher).readEvents` reads events from the filesystem * `(*Tailer).run` processes log change events; `.read` reads the latest log lines * `(*Loader).processEvents` handles filesystem event changes regarding new program text * `(*Loader).processLines` handles new lines coming from the log tailer * `(*MtailServer).WaitForShutdown` waits for the other components to terminate * `(*Exporter).StartMetricPush` exists if there are any push collectors (e.g. Graphite) to push to * `(*Exporter).HandlePrometheusMetrics` exists if an existing Prometheus pull collection is going on There is one `(*VM).Run` stack per program. These are opaque to the goroutine stack dump as they execute the bytecode. However, the second argument to `Run` on the stack is the first four letters of the program name, encoded as ASCII. 
You can transcode these back to their names by doing a conversion from the int32 value in hex provided in the stack, e.g.: 0x61706163 -> 'apac' (probably an apache log program); 0x7273796e -> 'rsyn' (probably an rsyncd log program) Obvious problems seen in the goroutine stack dump are long-waiting gorotines, usually on mutexes. (they show their block time in minutes, e.g. `goroutine 38 [semacquire, 1580 minutes]:`) which usually also manifest as a logjam (no pun intended) in the loader, tailer, and watcher goroutines (in state 'chan send'). ## Distributed Tracing `mtail` can export traces to the [Jaeger](https://www.jaegertracing.io/) trace collector. Specify the Jaeger endpoint with the `--jaeger_endpoint` flag ``` mtail --jaeger_endpoint http://localhost:14268/api/traces ``` The `--trace_sample_period` flag can be used to set how often a trace is sampled and sent to the collector. Set it to `100` to collect one in 100 traces. ## Deployment problems The INFO log at `/tmp/mtail.INFO` by default contains lots of information about any errors encountered. Adding the `-v=2` flag raises the verbosity. See the [glog](https://github.com/golang/glog) manual for more logging flag options. The `one_shot` and `logtostderr` flags may come in helpful for quickly launching mtail in non-daemon mode in order to flush out deployment issues like permissions problems. mtail-3.0.0~rc54+git0ff5/docs/debugging.md000066400000000000000000000106061460063571700201750ustar00rootroot00000000000000# Tips for debugging `mtail` ## Parser bugs Run a test with logtostderr and mtailDebug up to 3, and parser_test_debug enabled to see any AST results. ``` go test -run TestParserRoundTrip/decrement_counter --logtostderr --mtailDebug=3 --parser_test_debug ``` `mtailDebug` at 2 dumps the parser states being traversed, and 3 includes the lexer token stream as well. 
## Improving parser syntax error messages You can use this to improve error messages in the `%error` section of [`parser.y`](../internal/runtime/compiler/parser/parser.y), if you compare the "error recovery pops" messages with the state machine in the generated [`y.output`](../internal/runtime/compiler/parser/y.output). ``` go generate && go test -run TestParseInvalidPrograms/statement_with_no_effect --logtostderr --mtailDebug=3 --parser_test_debug ``` error log from test: ``` ... state-14 saw LSQUARE error recovery pops state 14 error recovery pops state 102 error recovery pops state 46 error recovery pops state 14 error recovery pops state 2 error recovery pops state 0 ``` This log says the lexer sent a LSQUARE token, and the parser was in state 14 when it saw it. The snippet below from `y.output` indicates state 14 is never expecting a LSQUARE, and the following lnies in the log above show the state stack being popped -- 0, 2, 14, 49, 102, 14. Walking backwards from state 0 (`$start`), we can get a list of nonterminal names to put in the state machine match expression used in the `%error` directive, and fill in the gaps with our knowledge of the intermediate states in our parose tree. `y.output`: ``` state 14 conditional_statement: logical_expr.compound_statement ELSE compound_statement conditional_statement: logical_expr.compound_statement logical_expr: logical_expr.logical_op opt_nl bitwise_expr AND shift 47 OR shift 48 MATCH shift 49 NOT_MATCH shift 50 LCURLY shift 46 . error compound_statement goto 44 logical_op goto 45 ``` State 14 to state 46 shifts a LCURLY operator, follow state 46 and we will find ourselves in `compound_statement`. 
Add to `parser.y` the names of the states that ended up at the unexpected token, followed by the error message: ``` %error stmt_list stmt conditional_statement logical_expr compound_statement conditional_statement logical_expr LSQUARE : "unexpected indexing of an expression" ``` and instead of "syntax error", the parser now emits "unexpected indexing of an expression". ## Fuzzer crashes Build the fuzzer locally with clang and libfuzzer: ``` make vm-fuzzer fuzz CXX=clang CXXFLAGS=-fsanitize=fuzzer,address LIB_FUZZING_ENGINE= ``` Then we can run the fuzzer with our example crash; make sure it has no weird characters because the upstream fuzz executor doesn't shell-escape arguments. ``` ./vm-fuzzer crash.mtail ``` If the crash is big, we can try to minimise it: ``` make fuzz-min CRASH=crash.mtail ``` Sometimes the minimiser will hit a local minima, but still look big; for example it doesn't know how to shrink variable names. We can reformat the crash with [`cmd/mfmt`](../cmd/mfmt/main.go): ``` make mfmt ./mfmt --prog crash.mtail --write ``` so it's easier to read -- it'll be bigger cos of the whitespace and the minimiser should shrink it back to original size if everything is working well. The formatted mtail program should help make it obvious what's happening and let you manually attempt to rename or remove parts of the program yourself -- perhaps a whole variable declaration and usage doesn't need to exist, but the minimiser will take a long time to figure that out. Once we have the smallest program we can add it to the crash corpus in [`internal/runtime/fuzz/`](../internal/runtime/fuzz/) and running `make fuzz` should run and fail on it straight away. Or, variants of the program can be added to the various `*Invalid` tests in parts of the `vm` module, e.g. [`parser_test.go`](../internal/runtime/compiler/parser/parser_test.go) or [`checker_test.go`](../internal/runtime/compiler/checker/checker_test.go) depending on where in the compiler the defect is occuring. 
If the crash is in `vm.go` then we can dump the program to see what AST and types, and bytecode it generates. ``` make mtail ./mtail --logtostderr --dump_ast_types --dump_bytecode --mtailDebug=3 --compile_only --progs crash.mtail ``` ### Fuzzer crashes, part 2 Run the fuzz-repro target with the CRASH variable set, it'll do all of the above: ``` make fuzz-repro CRASH=bug/20720.mtail ``` mtail-3.0.0~rc54+git0ff5/docs/designs/000077500000000000000000000000001460063571700173515ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/docs/designs/poll.md000066400000000000000000000051641460063571700206470ustar00rootroot00000000000000# Polling filesystem watcher Original date: 2018-08-13 Status: obsolete Last Updated: 2020-11-17 ## Overview Implement a hybrid polling and notify based filesystem watcher. ## Background Tracking issue #169 `mtail` has a filesystem watcher which is used to watch the state of programme files and log files; if they change, then programmes are reloaded and log files are read. `mtail` uses the [fsnotify](https://github.com/fsnotify/fsnotify) package to implement the filesystem watcher. fsnotify, which uses the `inotify(7)` system in Linux, lets `mtail` offload the work of polling the filesystem for changes to one where it is notified by the kernel instead, reducing the amount of work done. Some users want a polling option instead of fsnotify as their platforms don't support fsnotify, e.g. mipsel (bug in fsnotify) or no kernel support? (using on AIX). This design attempts to determine how to support a hybrid watcher. To the best of our ability, users should not have to configure poll- or fsnotify-based filesystem watching. From Linux's inotify(7): Inotify reports only events that a user-space program triggers through the filesystem API. As a result, it does not catch remote events that occur on network filesystems. (Applications must fall back to polling the filesystem to catch such events.) 
Furthermore, various pseudo-filesystems such as /proc, /sys, and /dev/pts are not monitorable with inotify. ## design ideas fsnotify watch add error, fallback to poll. How does fsnotify report errors about watches not being supported? E.g on NFS or with AIX? poll implemented similar to fsnotify poll loop? if that, will that be duplicated work? Do we care enough to avoid nested polling loops? should this be pushed upstream? how to let users override the choice? Argument listing poll-only filesystem path prefixes? Could poll be on by default for all files, with a timeout if no events have been received from inotify in some timeout? This could be tricky, we don't need to poll files that are inotified. But, again from inotify(7): Note that the event queue can overflow. In this case, events are lost. Robust applications should handle the possibility of lost events gracefully. For example, it may be necessary to rebuild part or all of the application cache. (One simple, but possibly expensive, approach is to close the inotify file descriptor, empty the cache, create a new inotify file descriptor, and then re-create watches and cache entries for the objects to be monitored.) ## references https://github.com/fsnotify/fsnotify inotify(7) mtail-3.0.0~rc54+git0ff5/docs/faq.md000066400000000000000000000110311460063571700170020ustar00rootroot00000000000000# FAQ "Frequently" is probably an overstatement, but here's a collection of questions and answers that pop up on the mailing list and issues. ## I don't like a particular label on the metrics. How do I remove it? All the labels are under your own control, except for the `prog` label which is used for namespace deconfliction -- i.e. multiple programs can be running in `mtail` and they should not be able to affect each other. It is best if you do some post processing in your collection system and configure it to filter out the `prog` label, so that strange aggregations don't occur. 
In Prometheus, this could be achieved like so: ``` metric_relabel_configs: - target_label: prog replacement: '' ``` (See [this comment](https://github.com/google/mtail/issues/59#issuecomment-303531070)). ## `mtail` isn't propagating the scraped timestamp to Prometheus `mtail` lets you use the `settimestamp()` function to extract a timestamp from a log file, and use that timestamp to carry to the monitoring system the closest thing that `mtail` knows to be the actual time of the event, and not the time at which `mtail` scraped the log. However, Prometheus needs to track the existence of a metric in the time series database in order to avoid showing very old data when querying the same metric for multiple instances at a specific timestamp. Exposing the timestamp can lead to triggering this staleness handling. `mtail`, being a metric creator, falls under bbrazil's comment on the prometheus-users list, in which he says ["It doesn't make sense to have timestamps for direct instrumentation, only for proxying metrics from another monitoring system with a custom collector."](https://groups.google.com/forum/#!msg/prometheus-users/qgxKH6_gYzM/LyO5wGO6BwAJ). The `mtail` timestamp handling is also broken for counters. The timestamp is set to 0 (UNIX epoch) at startup. If no matches are made, the initial zero count will never be ingested and the metric will only appear when first incremented. To avoid this, `mtail` disables exporting timestamps to Prometheus by default. You can turn this behaviour back on with the `--emit_metric_timestamp` commandline flag, and if you have slow moving counters, you should tune your Prometheus' `query.lookback-delta` parameter. See also [Staleness under Querying Basics](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness) in the Prometheus docs. If you are looking to expose the timestamp of an event, for example the start time of a process, you can create a timestamp metric. 
This is a metric that contains the timestamp as the value: ```mtail counter mtail_lines_read_count by filename gauge mtail_file_lastread_timestamp by filename /.*/ { mtail_lines_read_count[getfilename()]++ mtail_file_lastread_timestamp[getfilename()] = timestamp() } ``` ## Why doesn't `mtail` persist variables and metric values between restarts? `mtail` is intended to be stateless, deferring the problem of long term metric storage to a timeseries database and collector like [Prometheus](https://prometheus.io). Partially this reason is technical -- not having to save checkpoints and restore them makes the program much simpler. This means that mtail programs should prefer metrics that perform better in stateless systems, like counters rather than gauges. Prometheus for example is capable of handling counter resets in its rate and delta calculations, so mtail not remembering the value of a metric should not be cause for concern. Another reason is that failure is normal, and thus Prometheus handles these counter restarts because they are normal. If `mtail` checkpointed its state, filesystem and state file corruption will still occur, and in those edge cases a counter reset would still be observed, and thus need to be handled regardless. So, given that the monitoring system needs to handle missing and resetting data already in a distributed system, there is no compelling reason to implement metric checkpointing in `mtail` as well. It just adds complexity for little overall gain. ## Why doesn't `mtail` automatically reload programme files? `mtail` will reload programme files when it receives a `SIGHUP` signal. It's assumed that programmes do not change very often, so it relies on an external trigger rather than spend resourecs of its own polling for changes at all. `inotify` is not used either, as programme reloads would be the only use of that library, and the benefit does not seem worth the cost of including the extra dependency. 
See the [Deployment](Deployment.md) guide for suggestions for "automatic" programme reloads. mtail-3.0.0~rc54+git0ff5/docs/index.md000066400000000000000000000031761460063571700173550ustar00rootroot00000000000000mtail - extract internal monitoring data from application logs for collection into a timeseries database ======================================================================================================== `mtail` is a tool for extracting metrics from application logs to be exported into a timeseries database or timeseries calculator for alerting and dashboarding. It fills a monitoring niche by being the glue between applications that do not export their own internal state (other than via logs) and existing monitoring systems, such that system operators do not need to patch those applications to instrument them or writing custom extraction code for every such application. The extraction is controlled by [mtail programs](Programming-Guide.md) which define patterns and actions: # simple line counter counter lines_total /$/ { lines_total++ } Metrics are exported for scraping by a collector as JSON or Prometheus format over HTTP, or can be periodically sent to a collectd, StatsD, or Graphite collector socket. Read the [programming guide](Programming-Guide.md) if you want to learn how to write mtail programs. 
Ask general questions on the users mailing list: https://groups.google.com/g/mtail-users ## Table of Contents * [Building `mtail`](Building.md) * [Deploying `mtail`](Deploying.md) * [Interoperability](Interoperability.md) * [Troubleshooting](Troubleshooting.md) * [Programming Guide](Programming-Guide.md) * [Language](Language.md) * [Metrics](Metrics.md) * [Tracking State](state.md) * [Testing](Testing.md) * [Contributing to `mtail`](style.md) * [Debugging](debugging.md) * [FAQ](faq.md) mtail-3.0.0~rc54+git0ff5/docs/reading-y-output.md000066400000000000000000000142171460063571700214610ustar00rootroot00000000000000# Reading `y.output` A Yacc parser is a state machine that responds to an input stream of tokens, and has two actions: 1. **shift**, which pushes a new state on the stack 2. **reduce**, which pops a state off the stack and sets the lookahead token [`y.output`](../internal/runtime/compiler/parser/y.output) is a semi-human, semi-machine readable description of the parser state machine. `mtail` automatically generates this during the build process with the go:generate directive in [`driver.go`](../internal/runtime/compiler/parser/driver.go) ```y.output state 0 $accept: .start $end stmt_list: . (2) . reduce 2 (src line 96) stmt_list goto 2 start goto 1 ``` There are several parts to the state described here. The first section are the grammar rules. The first grammar rule says that an input is accepted if we can match a start token, and then the end of the input, and we're currently (the `.`) before the start token. The second rule has a number `(2)` (as it is the second grammar rule in the input `parser.y`, which looks like `stmt_list: stmt_list | stmt_list stmt`). The second rule says we can be in a state where we have parsed a valid `stmt_list`. The output always encloses grammar rules in parentheses, and state numbers are left unadorned. The second section has the actions, and in this case there is only one that says "match any token and reduce with rule 2". 
Rule 2 refers to the one in parentheses above, so it says we can match any token, pop the state off the stack, and set the lookahead token to `stmt_list`. For our convenience it also tells us where in the source file this reduce has come from -- if we look at line 96 we'll see the grammar for parsing a `stmt_list`. (You might wonder why the line number is in the action that uses the rule, rather than the definition of the rule in the previous section, and then you'll be in good company.) The last section indicates what happens when we enter this state from a reduce action, although the mechanics inside the machine are identical -- if the next lookahead token is a `stmt_list`, go to state 2, and if it's a `start`, go to state 1. For homework, look at state 1 and state 2 and describe what they mean. Here's another example: ```y.output state 14 conditional_statement: logical_expr.compound_statement ELSE compound_statement conditional_statement: logical_expr.compound_statement logical_expr: logical_expr.logical_op opt_nl bitwise_expr AND shift 48 OR shift 49 MATCH shift 50 NOT_MATCH shift 51 LCURLY shift 47 . error compound_statement goto 45 logical_op goto 46 ``` State 14 parses the conditional statement. If we get here, we've already parsed a `logical_expr`, and we're trying to figure out which way to go down the parse tree. We could find a `compound_statement`, or a `logical_op` next. If we see an `AND`, `OR`, `MATCH`, or `NOT_MATCH` next, we **shift** to the next state, which means pushing the next state onto the stack -- the stack represents the path down the tree to get to this token. Knowing the parser, these tokens mean we're going to parse a `logical_op`, and the difference between each is just because the parser executes a different action for each. ```y.output state 48 logical_op: AND. (26) . 
reduce 26 (src line 202) ``` In state 48 we have recognised an `AND`, and then reduction of rule 26 says we put a `logical_op` at the front of the token stream and pop the stack (back to state 14). The last couple of actions for state 14 say we can expect a `LCURLY` (token name for `{`, see [`lexer.go`](../internal/runtime/compiler/parser/lexer.go)) and then move to state 47. Or anything else (`.`) and we're now in an error state. Run a parser test with debugging flags enabled, and we can see how the parser and lexer see the input: ``` go test -run TestParseInvalidPrograms/pattern_without_block --logtostderr --mtailDebug=3 --parser_test_debug ``` `mtailDebug` at 2 dumps the parser states being traversed, and 3 includes the lexer token stream as well. The command above emits: ``` reduce 2 in: state-0 lex DIV(57376) reduce 112 in: state-2 reduce 113 in: state-60 lex REGEX(57365) lex DIV(57376) reduce 82 in: state-156 reduce 69 in: state-35 reduce 62 in: state-32 lex NL(57408) reduce 60 in: state-29 reduce 54 in: state-26 reduce 47 in: state-33 reduce 43 in: state-31 reduce 35 in: state-28 reduce 30 in: state-25 reduce 24 in: state-21 state-14 saw NL error recovery pops state 14 error recovery pops state 2 error recovery pops state 0 ``` We can see we start by reducing rule 2 in state 0, and then read a `DIV` token. The trace doesn't show the **shift** actions, but we reduce through states 2, then 60. Note that state 60 is just prior to the parser asking for the next token, indicated by the `lex REGEX` line -- this is emitted by the lexer when it returns the next token. So we can go look at state 60 to see why we've stopped to ask for more input. Alternatively, some grepping around for `"DIV shift"` (with two spaces) we can see we shift to state 60 from state 65 on a `DIV` token, which helps understand where the reduces start. Because `DIV` appears in both a regex and a division expression context, there are several matches to the grep. 
The error recovery trace is interesting here, as it is a good example of what happens during the `. error` rule. State 14 saw a `NL` (newline) unexpectedly, so the `.` matches. Error recovery doesn't do anything other than pop the stack until empty, so we can see the parse tree at the point of error. This knowledge can come in handy when improving the parser error messages, using the '%error' directive in `parser.y`. See [debugging](debugging.md) for how to use it. Note also that knowing the reduce movements within the state machine is useful as the reduce is when the parser action is executed. For example in the phrase `stmt_list: | stmt_list stmt` in `parser.y` the `stmt_list stmt` action is executed only once the leaves of the tree have already been accepted in order to be able to construct the tree. Thus the action on the empty option of the expression is the one that creates the `ast.StmtList`, while the other side's action appends those `stmt` children to that `ast.StmtList`. mtail-3.0.0~rc54+git0ff5/docs/references.md000066400000000000000000000023261460063571700203630ustar00rootroot00000000000000[ANSI C Grammar](http://www.lysator.liu.se/c/ANSI-C-grammar-y.html) [Awk Grammar](https://pubs.opengroup.org/onlinepubs/7908799/xcu/awk.html#tag_000_000_108_016) [Original Awk Grammar](https://github.com/onetrueawk/awk/blob/master/awkgram.y) [GAWK Grammar](http://git.savannah.gnu.org/cgit/gawk.git/tree/awkgram.y) [Smalltalk-80: The Language and Its Implementation](http://web.archive.org/web/20070927190722/http://users.ipa.net/~dwighth/smalltalk/bluebook/bluebook_imp_toc.html) Adele Goldberg and David Robson [The Java Virtual Machine Specification](https://docs.oracle.com/javase/specs/jvms/se7/html/index.html) Lindholm Yellin Bracha Buckley [Perl operators](https://perldoc.perl.org/perlop#Regexp-Quote-Like-Operators) ["Generating LR syntax error messages from examples", Jeffery, ACM TOPLAS Volume 24 Issue 5 Sep 2003.](https://dl.acm.org/doi/abs/10.1145/937563.937566) 
[Hindley Milner in Scala](http://dysphoria.net/2009/06/28/hindley-milner-type-inference-in-scala/) [The Hindley-Milner Algorithm](http://web.archive.org/web/20050911123640/http://www.cs.berkeley.edu/~nikitab/courses/cs263/hm.html) in perl by Nikita Borisov https://medium.com/@dhruvrajvanshi/type-inference-for-beginners-part-2-f39c33ca9513 mtail-3.0.0~rc54+git0ff5/docs/state.md000066400000000000000000000071741460063571700173700ustar00rootroot00000000000000# Keeping state in mtail programs The program is run on each log line from start to finish, with no loops. The only state emitted by the program is the content of the exported metrics. Metrics can be read by the program, though, so exported metrics are the place to keep state between lines of input. It's often the case that a log line is printed by an application at the start of some session-like interaction, and another at the end. Often these sessions have a session identifier, and every intermediate event in the same session is tagged with that identifier. Using map-valued exported metrics is the way to store session information keyed by session identifier. The example program [`rsyncd.mtail`](../examples/rsyncd.mtail) shows how to use a session tracking metric for measuring the total user session time. counter connection_time_total hidden gauge connection_time by pid /connect from \S+/ { connection_time[$pid] = timestamp() del connection_time[$pid] after 72h } /sent .* bytes received .* bytes total size \d+/ { connection_time_total += timestamp() - connection_time[$pid] del connection_time[$pid] } `rsyncd` uses a child process for each session so the `pid` field of the log format contains the session identifier in this example. ## hidden metrics A hidden metric is only visible to the mtail program, it is hidden from export. Internal state can be kept out of the metric collection system to avoid unnecessary memory and network costs. 
Hidden metrics are declared by prepending the word `hidden` to the declaration: hidden gauge connection_time by pid ## Removing session information at the end of the session The maps can grow unbounded with a key for every session identifier created as the logs are read. If you see `mtail` consuming a lot of memory, it is likely that there's one or more of these maps consuming memory. (You can remove the `hidden` keyword from the declaration, and let `mtail` reload the program without restarting and the contents of the session information metric will appear on the exported metrics page. Be warned, that if it's very big, even loading this page may take a long time and cause mtail to crash.) `mtail` can't know when a map value is ready to be garbage collected, so you need to tell it. One way is to defer deletion of the key and its value if it is not updated for some duration of time. The other way is to immediately delete it when the key and value are no longer needed. ``` del connection_time[$pid] after 72h ``` Upon creation of a connection time entry, the `rsyncd.mtail` program instructs mtail to remove it 72 hours after it's no longer updated. This means that the programmer expects, in this case, that sessions typically do not last longer than 72 hours because `mtail` does not track the timestamps for all accesses of metrics, only writes to them. ``` del connection_time[$pid] ``` The other form indicates that when the session is closed, the key and value can be removed. The caveat here is that logs can be lossy due to problems with the application restarting, mtail restarting, or the log delivery system (e.g. syslog) losing the messages too. Thus it is recommended to use both forms in programs. 1. `del ... after` form when the metric is created, giving it an expiration time longer than the expected lifespan of the session. 1. `del` form when the session is ended, explicitly removing it before the expiration time is up. 
It is not an error to delete a nonexistent key from a map. Expiry is only processed once ever hour, so durations shorter than 1h won't take effect until the next hour has passed. mtail-3.0.0~rc54+git0ff5/docs/style.md000066400000000000000000000015361460063571700174040ustar00rootroot00000000000000# Contribution style guide ## Table tests Use the `t.Run` subtest form. This assists debugging by printing the name of the table entry without additional parameters to t.Log and t.Error later on. It also means that the `-run` and `-bench` flags can be used to filter a specific test without excessive comment-and-rebuild cycles. Prefer to construct the subtest's name from the test parameters with `fmt.Sprintf`, otherwise use a `name` field. When comparing results, use `deep.Equal`. The parameter order should always be `expected`, then `observed`. This makes the diff output read like "the observed value is not equal to the expected value." If there is a non-nil diff result, emit it with `t.Error(diff)`. If multiple diffs are emitted in a single test, prefix the emission with a `t.Log` of the name of the result variable or function under test. mtail-3.0.0~rc54+git0ff5/examples/000077500000000000000000000000001460063571700166035ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/examples/apache_combined.mtail000066400000000000000000000022341460063571700227150ustar00rootroot00000000000000# Copyright 2015 Ben Kochie . All Rights Reserved. # This file is available under the Apache license. 
# Parser for the common apache "NCSA extended/combined" log format # LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" counter apache_http_requests_total by request_method, http_version, request_status counter apache_http_bytes_total by request_method, http_version, request_status /^/ + /(?P[0-9A-Za-z\.:-]+) / + # %h /(?P[0-9A-Za-z-]+) / + # %l /(?P[0-9A-Za-z-]+) / + # %u /\[(?P\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|-)\d{4})\] / + # %t /"(?P[A-Z]+) (?P\S+) (?PHTTP\/[0-9\.]+)" / + # \"%r\" /(?P\d{3}) / + # %>s /((?P\d+)|-) / + # %b /"(?P\S+)" / + # \"%{Referer}i\" /"(?P[[:print:]]+)"/ + # \"%{User-agent}i\" /$/ { strptime($timestamp, "02/Jan/2006:15:04:05 -0700") # for tests apache_http_requests_total[$request_method][$http_version][$request_status]++ $response_size > 0 { apache_http_bytes_total[$request_method][$http_version][$request_status] += $response_size } } mtail-3.0.0~rc54+git0ff5/examples/apache_common.mtail000066400000000000000000000030521460063571700224240ustar00rootroot00000000000000# Parser for the common apache log format as follow. 
# LogFormat "%h %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-agent}i\" # https://httpd.apache.org/docs/2.4/mod/mod_log_config.html counter apache_http_requests_total by request_method, http_version, status_code counter apache_http_bytes_total by request_method, http_version, status_code gauge apache_http_response_time by remote_host, request_method, request_uri, status_code, user_agent gauge apache_http_response_size by remote_host, request_method, request_uri, status_code, user_agent /^/ + /(?P[0-9A-Za-z\.:-]+) / + # %h /(?P[0-9A-Za-z-]+) / + # %l /(?P[0-9A-Za-z-]+) / + # %u /\[(?P\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|-)\d{4})\] / + # %t /"(?P[A-Z]+) (?P\S+) (?PHTTP\/[0-9\.]+)" / + # \"%r\" /(?P\d{3}) / + # %>s /((?P\d+)|-) / + # %b /(?P\d+) / + # %D /"(?P\S+)" / + # \"%{Referer}i\" /"(?P[[:print:]]+)"/ + # \"%{User-agent}i\" /$/ { strptime($timestamp, "02/Jan/2006:15:04:05 -0700") # for tests apache_http_requests_total[$request_method][$http_version][$status_code]++ $response_size > 0 { apache_http_bytes_total[$request_method][$http_version][$status_code] += $response_size apache_http_response_size[$remote_host][$request_method][$request_uri][$status_code][$user_agent] += $response_size } apache_http_response_time[$remote_host][$request_method][$request_uri][$status_code][$user_agent] = $response_time } mtail-3.0.0~rc54+git0ff5/examples/apache_metrics.mtail000066400000000000000000000047001460063571700226030ustar00rootroot00000000000000# Copyright 2015 Ben Kochie . All Rights Reserved. # This file is available under the Apache license. 
# Parser for a metrics-friendly apache log format # LogFormat "%v:%p %R %m %>s %H conn=%X %D %O %I %k" metrics counter http_connections_aborted_total by server_port, handler, method, code, protocol, connection_status counter http_connections_closed_total by server_port, handler, method, code, protocol, connection_status counter http_request_size_bytes_total by server_port, handler, method, code, protocol counter http_response_size_bytes_total by server_port, handler, method, code, protocol histogram http_request_duration_seconds by server_port, handler, method, code, protocol buckets 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 15 /^/ + /(?P\S+) / + # %v:%p - The canonical ServerName of the server serving the request. : The canonical port of the server serving the request. /(?P\S+) / + # %R - The handler generating the response (if any). /(?P[A-Z]+) / + # %m - The request method. /(?P\d{3}) / + # %>s - Status code. /(?P\S+) / + # %H - The request protocol. /(?Pconn=.) / + # %X - Connection status when response is completed /(?P\d+) / + # %D - The time taken to serve the request, in microseconds. /(?P\d+) / + # %O - Bytes sent, including headers. /(?P\d+) / + # %I - Bytes received, including request and headers. /(?P\d+)/ + # %k - Number of keepalive requests handled on this connection. /$/ { ### # HTTP Requests with histogram buckets. # http_request_duration_seconds[$server_port][$handler][$method][$code][$protocol] = $time_us / 1000000.0 ### # Sent/Received bytes. http_response_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $sent_bytes http_request_size_bytes_total[$server_port][$handler][$method][$code][$protocol] += $received_bytes ### Connection status when response is completed: # X = Connection aborted before the response completed. # + = Connection may be kept alive after the response is sent. # - = Connection will be closed after the response is sent. 
/ conn=X / { http_connections_aborted_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++ } # Will not include all closed connections. :-( / conn=- / { http_connections_closed_total[$server_port][$handler][$method][$code][$protocol][$connection_status]++ } } mtail-3.0.0~rc54+git0ff5/examples/dhcpd.mtail000066400000000000000000000110671460063571700207220ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # This file is available under the Apache license. # Define the exported metric names. The `by' keyword indicates the metric has # dimensions. For example, `request_total' counts the frequency of each # request's "command". The name `command' will be exported as the label name # for the metric. The command provided in the code below will be exported as # the label value. counter request_total by command counter config_file_errors counter peer_disconnects counter dhcpdiscovers by mac counter bind_xid_mismatch counter duplicate_lease counter bad_udp_checksum counter unknown_subnet counter dhcpdiscover_nofree by network counter unknown_lease by ip counter update_rejected counter failover_peer_timeout counter ip_already_in_use counter ip_abandoned by reason counter invalid_state_transition counter negative_poolreq by pool counter lease_conflicts # The `syslog' decorator defines a procedure. When a block of mtail code is # "decorated", it is called before entering the block. The block is entered # when the keyword `next' is reached. def syslog { /^(?P(?P\w+\s+\d+\s+\d+:\d+:\d+)|(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ + /\s+(?:\w+@)?(?P[\w\.-]+)\s+(?P[\w\.-]+)(?:\[(?P\d+)\])?:\s+(?P.*)/ { # If the legacy_date regexp matched, try this format. len($legacy_date) > 0 { strptime($2, "Jan _2 15:04:05") } # If the RFC3339 style matched, parse it this way. 
len($rfc3339_date) > 0 { strptime($rfc3339_date, "2006-01-02T03:04:05-0700") } # Call into the decorated block next } } # Define some pattern constants for reuse in the patterns below. const IP /\d+(\.\d+){3}/ const MATCH_IP /(?P/ + IP + /)/ const MATCH_NETWORK /(?P\d+(\.\d+){1,3}\/\d+)/ const MATCH_MAC /(?P([\da-f]{2}:){5}[\da-f]{2})/ @syslog { # Request $message =~ /^(balanced|balancing|BOOTREPLY|BOOTREQUEST|DHCPACK|DHCPDECLINE|DHCPDISCOVER|DHCPINFORM|DHCPNAK|DHCPOFFER|DHCPRELEASE|DHCPREQUEST)/ { # The lowercased name of the command matched in the regex is used to # count the frequency of each command. An external collector can use # this to compute the rate of each command independently. request_total[tolower($1)]++ # DHCP Discover $message =~ /^DHCPDISCOVER from / + MATCH_MAC { # Counts the discovery requests per mac address, which can help # identify bad clients on the network. dhcpdiscovers[$mac]++ /network / + MATCH_NETWORK + /: no free leases/ { # If the range is full, your clients may be having a bad time. dhcpdiscover_nofree[$network]++ } } } # Config file errors /Configuration file errors encountered -- exiting/ { # Counting config parse errors can he useful for detecting bad config # pushes that made it to production. 
config_file_errors++ } # Peer disconnects /peer ([^:]+): disconnected/ { peer_disconnects++ } # XID mismatches /bind update on / + IP + / got ack from (?P\w+): xid mismatch./ { bind_xid_mismatch++ } # Duplicate lease /uid lease / + MATCH_IP + / for client / + MATCH_MAC + / is duplicate on / + MATCH_NETWORK { duplicate_lease++ } # Bad UDP Checksum /(?P\d+) bad udp checksums in \d+ packets/ { bad_udp_checksum += $count } # Unknown subnet /DHCPDISCOVER from / + MATCH_MAC + / via / + IP + /: unknown network segment/ { unknown_subnet++ } # Unknown lease /DHCPREQUEST for / + IP + /\(/ + IP + /\) from / + MATCH_MAC + / via / + IP + /: unknown lease / + MATCH_IP { unknown_lease[$ip]++ } # Update rejected /bind update on \S+ from \S+ rejected: incoming update is less critical than the outgoing update/ { update_rejected++ } /timeout waiting for failover peer \S+/ { failover_peer_timeout++ } /ICMP Echo reply while lease / + IP + /valid/ { ip_already_in_use++ } /unexpected ICMP Echo reply from / + IP { ip_already_in_use++ } /Abandoning IP address / + IP + /: (?P.*)/ { ip_abandoned[$reason]++ } /bind update on \S+ from \S+ rejected: / + IP + /: invalid state transition/ { invalid_state_transition++ } /peer (?P[^:]+): Got POOLREQ, answering negatively!/ { negative_poolreq[$pool]++ } /Lease conflict at/ { lease_conflicts++ } } mtail-3.0.0~rc54+git0ff5/examples/histogram.mtail000066400000000000000000000024761460063571700216410ustar00rootroot00000000000000# use mtail to extract the values you want in your histogram, and any labels like 'httpcode' and it will create the buckets and histogram metrics for you. # this example might be something you put on a web server that logs latency. 
ex; # GET /foo/bar.html latency=1s httpcode=200 # GET /foo/baz.html latency=0s httpcode=200 # would produce this: # webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="1"} 1 # webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="2"} 1 # webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="4"} 1 # webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="8"} 1 # webserver_latency_by_code_bucket{httpcode="200",prog="software_errors.mtail",le="+Inf"} 1 # webserver_latency_by_code_sum{httpcode="200",prog="software_errors.mtail"} 1 # webserver_latency_by_code_count{httpcode="200",prog="software_errors.mtail"} 2 # histogram webserver_latency_by_code by code buckets 0, 1, 2, 4, 8 /latency=(?P\d+)s httpcode=(?P\d+)/ { webserver_latency_by_code [$httpcode] = $latency } # or if you don't need the http code label/dimension furthering the example, just use this histogram webserver_latency buckets 0, 1, 2, 4, 8 /latency=(?P\d+)/ { webserver_latency = $latency } mtail-3.0.0~rc54+git0ff5/examples/lighttpd.mtail000066400000000000000000000022461460063571700214560ustar00rootroot00000000000000# Copyright 2010 Google Inc. All Rights Reserved. # This file is available under the Apache license. # mtail module for a lighttpd server counter request by status counter time_taken by status counter bytes_out by subtotal, status counter bytes_in by status counter requests by proxy_cache const ACCESSLOG_RE // + /(?P\S+) (?P\S+) (?P\S+)/ + / \[(?P[^\]]+)\] "(?P\S+) (?P.+?) / + /(?P\S+)" (?P\d+) (?P\d+) (?P\d+)/ + / (?P\d+) (?P\d+) "(?P[^"]+)" / + /"(?P[^"]+)"/ # /var/log/lighttpd/access.log getfilename() =~ /lighttpd.access.log/ { // + ACCESSLOG_RE { # Parse an accesslog entry. 
$url == "/healthz" { # nothing } otherwise { strptime($access_time, "02/Jan/2006:15:04:05 -0700") request[$status]++ time_taken[$status] += $time_taken bytes_out["resp_body", $status] += $bytes_body bytes_out["resp_header", $status] += $bytes_out - $bytes_body bytes_in[$status] += $bytes_in $proxied_for != "-" { requests[$request_ip]++ } } } } mtail-3.0.0~rc54+git0ff5/examples/linecount.mtail000066400000000000000000000003331460063571700216320ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # This file is available under the Apache license. # The most basic of mtail programmes -- count the number of lines read. counter lines_total /$/ { lines_total++ } mtail-3.0.0~rc54+git0ff5/examples/mysql_slowqueries.mtail000066400000000000000000000050361460063571700234460ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # This file is available under the Apache license. # mysql-slowqueries -- mtail module tracking slow mysql queries hidden text user hidden text host hidden text query_type hidden text service hidden gauge tmp_query_time hidden gauge tmp_lock_time hidden gauge partial hidden gauge time counter query_time by type, server, service, user counter lock_time by type, server, service, user counter query_time_overall_sum counter query_time_total_count counter lock_time_overall_sum counter lock_time_total_count # Example lines from the file and regex to match them: # # User@Host: dbuser[dbuser] @ host [192.0.2.87] const USER_HOST /^# User@Host: ([a-zA-Z]+)\[[a-zA-Z]+\] @ ([^\. ]+)/ # # Query_time: 30 Lock_time: 0 Rows_sent: 0 Rows_examined: 0 const QUERY_TIME /^# Query_time: (\d+)\s*Lock_time: (\d+)/ # UPDATE ... 
# outbox; const FULL_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .* # (.*);$/ # Not all queries have helpful comments at the end const UNINSTRUMENTED_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .*;$/ # If the query gets split up, the service may end up on another line const PARTIAL_QUERY_LINE /^(INSERT|UPDATE|DELETE|SELECT) .*[^;]$/ # This one has the potential to catch too many things, so it can only be a last # resort match. const END_QUERY_LINE /.*;$/ /^# Time: (\d{6} .\d:\d\d:\d\d)/ { strptime($1, "060102 3:04:05") time = timestamp() } /^SET timestamp=(\d+);/ { time = $1 } settime(time) // + USER_HOST { user = $1 host = $2 } # break if no user set yet user == "" { stop } // + QUERY_TIME { tmp_query_time = $1 tmp_lock_time = $2 query_time_overall_sum += tmp_query_time query_time_total_count++ lock_time_overall_sum += tmp_lock_time lock_time_total_count++ } // + FULL_QUERY_LINE { # We should have everything we need now. query_type = tolower($1) service = $2 query_time[query_type, host, service, user] += tmp_query_time lock_time[query_type, host, service, user] += tmp_lock_time } // + UNINSTRUMENTED_QUERY_LINE { # We should have everything we need now. query_type = tolower($1) service = "n/a" query_time[query_type, host, service, user] += tmp_query_time lock_time[query_type, host, service, user] += tmp_lock_time } // + PARTIAL_QUERY_LINE { query_type = tolower($1) partial = 1 } // + END_QUERY_LINE && partial == 1 { partial = 0 /.*# (.*)$/ { service = $1 } otherwise { service = "n/a" } query_time[query_type, host, service, user] += tmp_query_time lock_time[query_type, host, service, user] += tmp_lock_time } mtail-3.0.0~rc54+git0ff5/examples/nocode.mtail000066400000000000000000000003611460063571700211020ustar00rootroot00000000000000# This is an example mtail programme for exporting no code instrumentation # # No code has no instrumentation, thus requires an external program to sift # and export metrics from other sources; in this case with mtail from any log # files. 
mtail-3.0.0~rc54+git0ff5/examples/ntpd.mtail000066400000000000000000000031711460063571700206020ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # This file is available under the Apache license. # Syslog decorator def syslog { /^(?P(?P\w+\s+\d+\s+\d+:\d+:\d+)|(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ + /\s+(?:\w+@)?(?P[\w\.-]+)\s+(?P[\w\.-]+)(?:\[(?P\d+)\])?:\s+(?P.*)/ { len($legacy_date) > 0 { strptime($2, "Jan _2 15:04:05") } len($rfc3339_date) > 0 { strptime($rfc3339_date, "2006-01-02T03:04:05-0700") } next } } @syslog { counter int_syscalls /select\(.*\) error: Interrupted system call/ { int_syscalls++ } counter recvbuf_overflows gauge last_recvbuf /too many recvbufs allocated \((\d+)\)/ { recvbuf_overflows++ last_recvbuf = $1 } counter exits /ntpd exiting on signal 15/ { exits++ } counter starts /x?ntpd .* \w+\s+\w+\s+\d+\s+\d+:\d+:\d+\s+\w+\s+\d+\s+\(\d\)/ { starts++ } gauge sync_status /kernel time sync (status (change)?|enabled|disabled) (?P\d+)/ { sync_status = $status } # PLL status change. 
# # Described here: http://obswww.unige.ch/~bartho/xntp_faq/faq3Care.htm#araee counter pll_changes gauge pll_status /kernel pll status change (?P\d+)/ { pll_changes++ pll_status = $status } counter peer_syncs /synchronized to (\d+\.\d+\.\d+\.\d+|LOCAL\(\d\)), stratum(=| )(\d+)/ { peer_syncs++ } counter driftfile_errors /can't open .*drift.*: No such file or directory/ { driftfile_errors++ } counter sync_lost_total /synchronisation lost/ { sync_lost_total++ } } # end syslog mtail-3.0.0~rc54+git0ff5/examples/ntpd_peerstats.mtail000066400000000000000000000020151460063571700226700ustar00rootroot00000000000000# Peerstats log handling gauge peer_status by peer gauge peer_select by peer gauge peer_count by peer gauge peer_code by peer gauge peer_offset by peer gauge peer_delay by peer gauge peer_dispersion by peer counter num_peerstats by peer # TODO(jaq) seconds is int, not float /^(?P\d+) (?P\d+)\.\d+ (?P\d+\.\d+\.\d+\.\d+) (?P[0-9a-f]+) (?P-?\d+\.\d+) (?P\d+\.\d+) (?P\d+\.\d+)/ { # Unix epoch in MJD is 40587. settime(($days - 40587) * 86400 + $seconds) peer_offset[$peer] = $offset peer_delay[$peer] = $delay peer_dispersion[$peer] = $dispersion # http://www.cis.udel.edu/~mills/ntp/html/decode.html#peer # bits 0-4 peer_status[$peer] = (strtol($status, 16) >> (16 - 5)) & ((2 ** 5) - 1) # bits 5-7 peer_select[$peer] = (strtol($status, 16) >> (16 - 8)) & ((2 ** 3) - 1) # bits 6-11 peer_count[$peer] = (strtol($status, 16) >> (16 - 12)) & ((2 ** 4) - 1) # bits 12-15 peer_code[$peer] = strtol($status, 16) & ((2 ** 4) - 1) num_peerstats[$peer]++ } mtail-3.0.0~rc54+git0ff5/examples/postfix.mtail000066400000000000000000000146161460063571700213370ustar00rootroot00000000000000# vim:ts=2:sw=2:et:ai:sts=2:cinoptions=(0 # Copyright 2017 Martina Ferrari . All Rights Reserved. # This file is available under the Apache license. 
# Syslog parser for Postfix, based on the parsing rules from: # https://github.com/kumina/postfix_exporter # Copyright 2017 Kumina, https://kumina.nl/ # Available under the Apache license. const DELIVERY_DELAY_LINE /.*, relay=(?P\S+), .*,/ + / delays=(?P[0-9\.]+)\/(?P[0-9\.]+)\/(?P[0-9\.]+)\/(?P[0-9\.]+),\s/ const SMTP_TLS_LINE /(\S+) TLS connection established to \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/ const SMTPD_TLS_LINE /(\S+) TLS connection established from \S+: (\S+) with cipher (\S+) \((\d+)\/(\d+) bits\)/ const QMGR_INSERT_LINE /:.*, size=(?P\d+), nrcpt=(?P\d+)/ const QMGR_REMOVE_LINE /: removed$/ /^(?P(?P\w+\s+\d+\s+\d+:\d+:\d+)|(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+[+-]\d{2}:\d{2}))/ + /\s+(?:\w+@)?(?P[\w\.-]+)\s+postfix\/(?P[\w\.\/-]+)(?:\[(?P\d+)\])?:\s+(?P.*)/ { len($legacy_date) > 0 { strptime($2, "Jan _2 15:04:05") } len($rfc3339_date) > 0 { strptime($rfc3339_date, "2006-01-02T03:04:05-0700") } # Total number of messages processed by cleanup. counter postfix_cleanup_messages_processed_total # Total number of messages rejected by cleanup. counter postfix_cleanup_messages_rejected_total $application == "cleanup" { /: message-id=. All Rights Reserved. # This file is available under the Apache license. # # Rails production log parsing counter rails_requests_started_total counter rails_requests_started by verb counter rails_requests_completed_total counter rails_requests_completed by status histogram rails_requests_completed_seconds by status buckets 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 15.0 /^Started (?P[A-Z]+) .*/ { ### # Started HTTP requests by verb (GET, POST, etc.) # rails_requests_started_total++ rails_requests_started[$verb]++ } /^Completed (?P\d{3}) .+ in (?P\d+)ms .*$/ { ### # Total numer of completed requests by status # rails_requests_completed_total++ rails_requests_completed[$status]++ ### # Completed requests by status with histogram buckets # # These statements "fall through", so the histogram is cumulative. 
The # collecting system can compute the percentile bands by taking the ratio of # each bucket value over the final bucket. rails_requests_completed_seconds[$status] = $request_seconds / 1000.0 } mtail-3.0.0~rc54+git0ff5/examples/rsyncd.mtail000066400000000000000000000032211460063571700211330ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # This file is available under the Apache license. counter bytes_total by operation # total connections, and total connection time can be used to compute the # average connection time. counter connections_total counter connection_time_total as "connection-time_total" # See which modules are popular. counter transfers_total by operation, module # Use this gauge to measure duration between start and end time per connection. # It is never used externally, so mark as `hidden'. hidden gauge connection_time by pid /^(?P\d+\/\d+\/\d+ \d+:\d+:\d+) \[(?P\d+)\] / { strptime($date, "2006/01/02 15:04:05") # Transfer log # %o %h [%a] %m (%u) %f %l /(?P\S+) (\S+) \[\S+\] (?P\S+) \(\S*\) \S+ (?P\d+)/ { transfers_total[$operation, $module]++ } # Connection starts /connect from \S+ \(\d+\.\d+\.\d+\.\d+\)/ { connections_total++ # Record the start time of the connection, using the log timestamp. connection_time[$pid] = timestamp() } # Connection summary when session closed /sent (?P\d+) bytes received (?P\d+) bytes total size \d+/ { # Sum total bytes across all sessions for this process bytes_total["sent"] += $sent bytes_total["received"] += $received # Count total time spent with connections open, according to the log timestamp. connection_time_total += timestamp() - connection_time[$pid] # Delete the datum referenced in this dimensional metric. We assume that # this will never happen again, and hint to the VM that we can garbage # collect the memory used. 
del connection_time[$pid] } } mtail-3.0.0~rc54+git0ff5/examples/sftp.mtail000066400000000000000000000022461460063571700206130ustar00rootroot00000000000000# Copyright 2008 Google Inc. All Rights Reserved. # This file is available under the Apache license. counter login_count by username counter logout_count by username counter bytes_read counter files_read counter bytes_written counter files_written counter user_bytes_read by username counter user_files_read by username counter user_bytes_written by username counter user_files_written by username /^(?P\w+\s+\d+\s+\d+:\d+:\d+)\s+[\w\.-]+\s+sftp-server/ { strptime($date, "Jan _2 15:04:05") /session opened for local user (?P\w+)/ { login_count[$username]++ } /session closed for local user (?P\w+)/ { logout_count[$username]++ } /close "[^"]+" bytes read (?P\d+) written (?P\d+)/ { $read != 0 { bytes_read += $read files_read++ } $written != 0 { bytes_written += $written files_written++ } /close "\/home\/(?P[^\/]+)\/[^"]+"/ { $read != 0 { user_bytes_read[$username] += $read user_files_read[$username]++ } $written != 0 { user_bytes_written[$username] += $written user_files_written[$username]++ } } } } mtail-3.0.0~rc54+git0ff5/examples/timer.mtail000066400000000000000000000003361460063571700207550ustar00rootroot00000000000000# `timer` is the same as gauge but has special meaning for statsd export. # Otherwise just use a gauge. timer request_time_ms by vhost /(?P\S+) (?P\d+)/ { request_time_ms[$vhost] = $latency_s / 1000 } mtail-3.0.0~rc54+git0ff5/examples/vsftpd.mtail000066400000000000000000000054101460063571700211410ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # This file is available under the Apache license. 
# A mtail module for monitoring vsftpd logs # # Configure your vsftpd to write the xferlog as well as vsftpd.log hidden text direction counter bytes_transferred by direction counter transfer_time by direction counter transfers by direction counter connects counter logins counter uploads counter commands by command counter responses by response hidden gauge sessions by client counter session_time def vsftpd_timestamp { # Mon Feb 21 15:21:32 2011 /^\w+\s(?P\w+\s+\d+\s\d+:\d+:\d+\s\d+)/ { strptime($date, "Jan _2 15:04:05 2006") next } } const XFERLOG_RE // + # e.g. 1 172.18.115.36 528 # time spent transferring /\s(?P\d+)/ + # remote host /\s\d+\.\d+\.\d+\.\d+/ + # bytes transferred /\s(?P\d+)/ + # filename /\s(?P\S+)/ + # e.g. b _ i a anonymous@ ftp 0 * c # transfertype /\s\S/ + # special action flag /\s\S/ + # direction /\s(?P\S)/ + # access mode /\s\S/ + # username /\s\S+/ + # service name /\s\S+/ + # authentication method /\s\d/ + # authenticated id /\s\S+/ + # completion status /\s(?P\S)/ const VSFTPD_LOG_RE // + / \[pid \d+\]/ + /( \[\w+\])?/ + / (?PCONNECT|OK LOGIN|OK UPLOAD|FTP (command|response)):/ + / Client "(?P\d+\.\d+\.\d+\.\d+)"/ + /(, (?P.*))?/ const PAYLOAD_RESPONSE_RE /^"(\d{3})[" -]/ const PAYLOAD_COMMAND_RE /^"(\w{4})[" -]/ @vsftpd_timestamp { getfilename() =~ /xferlog/ { // + XFERLOG_RE { # Handles log entries from the wuftpd format xferlog. 
$direction == "i" { direction = "incoming" } $direction == "o" { direction = "outgoing" } $completionstatus == "c" { transfers[direction]++ } transfer_time[direction] += $transfertime bytes_transferred[direction] += $bytestransferred } } getfilename() =~ /vsftpd.log/ { // + VSFTPD_LOG_RE { # Handle vsftpd.log log file.""" $command == "CONNECT" { sessions[$client] = timestamp() del sessions[$client] after 168h connects++ } $command == "OK LOGIN" { logins++ } $command == "OK UPLOAD" { uploads++ } $command == "FTP command" { $payload =~ // + PAYLOAD_COMMAND_RE { commands[$1]++ $1 == "QUIT" { session_time += timestamp() - sessions[$client] del sessions[$client] } } } $command == "FTP response" { $payload =~ // + PAYLOAD_RESPONSE_RE { responses[$1]++ } } } } } mtail-3.0.0~rc54+git0ff5/go.mod000066400000000000000000000017731460063571700161030ustar00rootroot00000000000000module github.com/google/mtail go 1.21.1 require ( contrib.go.opencensus.io/exporter/jaeger v0.2.1 github.com/golang/glog v1.2.0 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/google/go-cmp v0.6.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.0 github.com/prometheus/common v0.51.1 go.opencensus.io v0.24.0 golang.org/x/sys v0.18.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect golang.org/x/sync v0.3.0 // indirect google.golang.org/api v0.105.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.56.3 // indirect google.golang.org/protobuf v1.33.0 // indirect ) 
mtail-3.0.0~rc54+git0ff5/go.sum000066400000000000000000001033711460063571700161250ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod 
h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= 
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.105.0 h1:t6P9Jj+6XTn4U9I2wycQai6Q/Kz7iOT+QzjJ3G2V4x8= google.golang.org/api v0.105.0/go.mod h1:qh7eD5FJks5+BcE+cjBIm6Gz8vioK7EHvnlniqXBnqI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.33.2/go.mod 
h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= mtail-3.0.0~rc54+git0ff5/hooks/000077500000000000000000000000001460063571700161105ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/hooks/build000077500000000000000000000006601460063571700171370ustar00rootroot00000000000000#!/bin/bash # $IMAGE_NAME var is injected into the build so the tag is correct. echo "Build hook running" docker build \ --build-arg version=$(git describe --tags --always) \ --build-arg commit_hash=$(git rev-parse HEAD) \ --build-arg vcs_url=$(git config --get remote.origin.url) \ --build-arg vcs_branch=$(git rev-parse --abbrev-ref HEAD) \ --build-arg build_date=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ -t $IMAGE_NAME . mtail-3.0.0~rc54+git0ff5/hooks/post_checkout000077500000000000000000000001521460063571700207060ustar00rootroot00000000000000#!/bin/bash echo "Unshallowing to get correct tags to work." 
git fetch --tags --unshallow --quiet origin mtail-3.0.0~rc54+git0ff5/hooks/post_push000077500000000000000000000021051460063571700200600ustar00rootroot00000000000000#!/bin/bash # hooks/post_push # https://docs.docker.com/docker-cloud/builds/advanced/ # https://semver.org/ function add_tag() { echo "Adding tag ${1}" docker tag $IMAGE_NAME $DOCKER_REPO:$1 docker push $DOCKER_REPO:$1 } TAG=`git describe --tag --match "v*"` MAJOR=`echo ${TAG} | awk -F'-' '{print $1}' | awk -F'.' '{print $1}' | sed 's/v//'` MINOR=`echo ${TAG} | awk -F'-' '{print $1}' | awk -F'.' '{print $2}' | sed 's/v//'` PATCH=`echo ${TAG} | awk -F'-' '{print $1}' | awk -F'.' '{print $3}' | sed 's/v//'` PRLS=`echo ${TAG} | awk -F'-' '{print $2}'` num='^[0-9]+$' pre='^[0-9A-Za-z\.]+$' echo "Current Build: ${TAG}" if [ ! -z $MAJOR ] && [[ $MAJOR =~ $num ]]; then add_tag ${MAJOR} if [ ! -z $MINOR ] && [[ $MINOR =~ $num ]]; then add_tag ${MAJOR}.${MINOR} if [ ! -z $PATCH ] && [[ $PATCH =~ $num ]]; then add_tag ${MAJOR}.${MINOR}.${PATCH} if [ ! -z $PRLS ] && [[ ! $PRLS =~ $num ]] && [[ $PRLS =~ $pre ]]; then add_tag ${MAJOR}.${MINOR}.${PATCH}-${PRLS} fi fi fi fi exit $? mtail-3.0.0~rc54+git0ff5/internal/000077500000000000000000000000001460063571700166015ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/exporter/000077500000000000000000000000001460063571700204515ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/exporter/collectd.go000066400000000000000000000025151460063571700225740ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package exporter import ( "expvar" "flag" "fmt" "strings" "time" "github.com/google/mtail/internal/metrics" ) const ( // See https://collectd.org/wiki/index.php/Plain_text_protocol#PUTVAL collectdFormat = "PUTVAL \"%s/%smtail-%s/%s-%s\" interval=%d %s:%s\n" ) var ( collectdSocketPath = flag.String("collectd_socketpath", "", "Path to collectd unixsock to write metrics to.") collectdPrefix = flag.String("collectd_prefix", "", "Prefix to use for collectd metrics.") collectdExportTotal = expvar.NewInt("collectd_export_total") collectdExportSuccess = expvar.NewInt("collectd_export_success") ) // metricToCollectd encodes the metric data in the collectd text protocol format. The // metric lock is held before entering this function. func metricToCollectd(hostname string, m *metrics.Metric, l *metrics.LabelSet, interval time.Duration) string { return fmt.Sprintf(collectdFormat, hostname, *collectdPrefix, m.Program, kindToCollectdType(m.Kind), formatLabels(m.Name, l.Labels, "-", "-", "_"), int64(interval.Seconds()), l.Datum.TimeString(), l.Datum.ValueString()) } func kindToCollectdType(kind metrics.Kind) string { if kind != metrics.Timer { return strings.ToLower(kind.String()) } return "gauge" } mtail-3.0.0~rc54+git0ff5/internal/exporter/export.go000066400000000000000000000155261460063571700223320ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Package exporter provides the interface for getting metrics out of mtail, // into your monitoring system of choice. package exporter import ( "context" "expvar" "flag" "fmt" "io" "net" "os" "sort" "strings" "sync" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/pkg/errors" ) // Commandline Flags. 
var ( writeDeadline = flag.Duration("metric_push_write_deadline", 10*time.Second, "Time to wait for a push to succeed before exiting with an error.") ) // Exporter manages the export of metrics to passive and active collectors. type Exporter struct { ctx context.Context wg sync.WaitGroup store *metrics.Store pushInterval time.Duration hostname string omitProgLabel bool emitTimestamp bool exportDisabled bool pushTargets []pushOptions initDone chan struct{} } // Option configures a new Exporter. type Option func(*Exporter) error // Hostname specifies the mtail hostname to use in exported metrics. func Hostname(hostname string) Option { return func(e *Exporter) error { e.hostname = hostname return nil } } // OmitProgLabel sets the Exporter to not put program names in metric labels. func OmitProgLabel() Option { return func(e *Exporter) error { e.omitProgLabel = true return nil } } // EmitTimestamp instructs the exporter to send metric's timestamps to collectors. func EmitTimestamp() Option { return func(e *Exporter) error { e.emitTimestamp = true return nil } } func PushInterval(opt time.Duration) Option { return func(e *Exporter) error { e.pushInterval = opt return nil } } func DisableExport() Option { return func(e *Exporter) error { e.exportDisabled = true return nil } } var ( ErrNeedsStore = errors.New("exporter needs a Store") ErrNeedsWaitgroup = errors.New("exporter needs a WaitGroup") ) // New creates a new Exporter. 
func New(ctx context.Context, wg *sync.WaitGroup, store *metrics.Store, options ...Option) (*Exporter, error) { if store == nil { return nil, ErrNeedsStore } if wg == nil { return nil, ErrNeedsWaitgroup } e := &Exporter{ ctx: ctx, store: store, initDone: make(chan struct{}), } defer close(e.initDone) if err := e.SetOption(options...); err != nil { return nil, err } // defaults after options have been set if e.hostname == "" { var err error e.hostname, err = os.Hostname() if err != nil { return nil, errors.Wrap(err, "getting hostname") } } if *collectdSocketPath != "" { o := pushOptions{"unix", *collectdSocketPath, metricToCollectd, collectdExportTotal, collectdExportSuccess} e.RegisterPushExport(o) } if *graphiteHostPort != "" { o := pushOptions{"tcp", *graphiteHostPort, metricToGraphite, graphiteExportTotal, graphiteExportSuccess} e.RegisterPushExport(o) } if *statsdHostPort != "" { o := pushOptions{"udp", *statsdHostPort, metricToStatsd, statsdExportTotal, statsdExportSuccess} e.RegisterPushExport(o) } e.StartMetricPush() wg.Add(1) // This routine manages shutdown of the Exporter. go func() { defer wg.Done() <-e.initDone // Wait for the context to be completed before waiting for subroutines. if !e.exportDisabled { <-e.ctx.Done() } e.wg.Wait() }() return e, nil } // SetOption takes one or more option functions and applies them in order to Exporter. func (e *Exporter) SetOption(options ...Option) error { for _, option := range options { if err := option(e); err != nil { return err } } return nil } // formatLabels converts a metric name and key-value map of labels to a single // string for exporting to the correct output format for each export target. // ksep and sep mark what to use for key/val separator, and between label separators respoectively. // If not empty, rep is used to replace cases of ksep and sep in the original strings. 
func formatLabels(name string, m map[string]string, ksep, sep, rep string) string { r := name if len(m) > 0 { var keys []string for k := range m { keys = append(keys, k) } sort.Strings(keys) var s []string for _, k := range keys { k1 := strings.ReplaceAll(strings.ReplaceAll(k, ksep, rep), sep, rep) v1 := strings.ReplaceAll(strings.ReplaceAll(m[k], ksep, rep), sep, rep) s = append(s, fmt.Sprintf("%s%s%s", k1, ksep, v1)) } return r + sep + strings.Join(s, sep) } return r } // Format a LabelSet into a string to be written to one of the timeseries // sockets. type formatter func(string, *metrics.Metric, *metrics.LabelSet, time.Duration) string func (e *Exporter) writeSocketMetrics(c io.Writer, f formatter, exportTotal *expvar.Int, exportSuccess *expvar.Int) error { return e.store.Range(func(m *metrics.Metric) error { m.RLock() // Don't try to send text metrics to any push service. if m.Kind == metrics.Text { m.RUnlock() return nil } exportTotal.Add(1) lc := make(chan *metrics.LabelSet) go m.EmitLabelSets(lc) for l := range lc { line := f(e.hostname, m, l, e.pushInterval) n, err := fmt.Fprint(c, line) glog.V(2).Infof("Sent %d bytes\n", n) if err == nil { exportSuccess.Add(1) } else { return errors.Errorf("write error: %s", err) } } m.RUnlock() return nil }) } // PushMetrics sends metrics to each of the configured services. 
func (e *Exporter) PushMetrics() { for _, target := range e.pushTargets { glog.V(2).Infof("pushing to %s", target.addr) conn, err := net.DialTimeout(target.net, target.addr, *writeDeadline) if err != nil { glog.Infof("pusher dial error: %s", err) continue } err = conn.SetDeadline(time.Now().Add(*writeDeadline)) if err != nil { glog.Infof("Couldn't set deadline on connection: %s", err) } err = e.writeSocketMetrics(conn, target.f, target.total, target.success) if err != nil { glog.Infof("pusher write error: %s", err) } err = conn.Close() if err != nil { glog.Infof("connection close failed: %s", err) } } } // StartMetricPush pushes metrics to the configured services each interval. func (e *Exporter) StartMetricPush() { if e.exportDisabled { glog.Info("Export loop disabled.") return } if len(e.pushTargets) == 0 { return } if e.pushInterval <= 0 { return } e.wg.Add(1) go func() { defer e.wg.Done() <-e.initDone glog.Info("Started metric push.") ticker := time.NewTicker(e.pushInterval) defer ticker.Stop() for { select { case <-e.ctx.Done(): return case <-ticker.C: e.PushMetrics() } } }() } type pushOptions struct { net, addr string f formatter total, success *expvar.Int } // RegisterPushExport adds a push export connection to the Exporter. Items in // the list must describe a Dial()able connection and will have all the metrics // pushed to each pushInterval. func (e *Exporter) RegisterPushExport(p pushOptions) { e.pushTargets = append(e.pushTargets, p) } mtail-3.0.0~rc54+git0ff5/internal/exporter/export_test.go000066400000000000000000000163401460063571700233640ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package exporter import ( "context" "errors" "reflect" "sort" "strings" "sync" "testing" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) const prefix = "prefix" func TestCreateExporter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) var wg sync.WaitGroup store := metrics.NewStore() _, err := New(ctx, &wg, store) if err != nil { t.Errorf("New(ctx, wg, store) unexpected error: %v", err) } cancel() wg.Wait() ctx, cancel = context.WithCancel(context.Background()) failopt := func(*Exporter) error { return errors.New("busted") // nolint:goerr113 } _, err = New(ctx, &wg, store, failopt) if err == nil { t.Errorf("unexpected success") } cancel() wg.Wait() } func TestNewErrors(t *testing.T) { ctx := context.Background() store := metrics.NewStore() var wg sync.WaitGroup _, err := New(ctx, nil, store) if err == nil { t.Error("New(ctx, nil, store) expecting error, received nil") } _, err = New(ctx, &wg, nil) if err == nil { t.Error("New(ctx, wg, nil) expecting error, received nil") } } func FakeSocketWrite(f formatter, m *metrics.Metric) []string { ret := make([]string, 0) lc := make(chan *metrics.LabelSet) d := 60 * time.Second go m.EmitLabelSets(lc) for l := range lc { ret = append(ret, f("gunstar", m, l, d)) } sort.Strings(ret) return ret } func TestMetricToCollectd(t *testing.T) { *collectdPrefix = "" ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00") if terr != nil { t.Errorf("time parse error: %s", terr) } ms := metrics.NewStore() scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int) d, _ := scalarMetric.GetDatum() datum.SetInt(d, 37, ts) testutil.FatalIfErr(t, ms.Add(scalarMetric)) r := FakeSocketWrite(metricToCollectd, scalarMetric) expected := []string{"PUTVAL \"gunstar/mtail-prog/counter-foo\" interval=60 1343124840:37\n"} testutil.ExpectNoDiff(t, expected, r) dimensionedMetric := metrics.NewMetric("bar", 
"prog", metrics.Gauge, metrics.Int, "label") d, _ = dimensionedMetric.GetDatum("quux") datum.SetInt(d, 37, ts) d, _ = dimensionedMetric.GetDatum("snuh") datum.SetInt(d, 37, ts) ms.ClearMetrics() testutil.FatalIfErr(t, ms.Add(dimensionedMetric)) r = FakeSocketWrite(metricToCollectd, dimensionedMetric) expected = []string{ "PUTVAL \"gunstar/mtail-prog/gauge-bar-label-quux\" interval=60 1343124840:37\n", "PUTVAL \"gunstar/mtail-prog/gauge-bar-label-snuh\" interval=60 1343124840:37\n", } testutil.ExpectNoDiff(t, expected, r) timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int) d, _ = timingMetric.GetDatum() datum.SetInt(d, 123, ts) testutil.FatalIfErr(t, ms.Add(timingMetric)) r = FakeSocketWrite(metricToCollectd, timingMetric) expected = []string{"PUTVAL \"gunstar/mtail-prog/gauge-foo\" interval=60 1343124840:123\n"} testutil.ExpectNoDiff(t, expected, r) *collectdPrefix = prefix r = FakeSocketWrite(metricToCollectd, timingMetric) expected = []string{"PUTVAL \"gunstar/prefixmtail-prog/gauge-foo\" interval=60 1343124840:123\n"} testutil.ExpectNoDiff(t, expected, r) } func TestMetricToGraphite(t *testing.T) { *graphitePrefix = "" ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00") if terr != nil { t.Errorf("time parse error: %s", terr) } scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int) d, _ := scalarMetric.GetDatum() datum.SetInt(d, 37, ts) r := FakeSocketWrite(metricToGraphite, scalarMetric) expected := []string{"prog.foo 37 1343124840\n"} testutil.ExpectNoDiff(t, expected, r) dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "host") d, _ = dimensionedMetric.GetDatum("quux.com") datum.SetInt(d, 37, ts) d, _ = dimensionedMetric.GetDatum("snuh.teevee") datum.SetInt(d, 37, ts) r = FakeSocketWrite(metricToGraphite, dimensionedMetric) expected = []string{ "prog.bar.host.quux_com 37 1343124840\n", "prog.bar.host.snuh_teevee 37 1343124840\n", } testutil.ExpectNoDiff(t, 
expected, r) histogramMetric := metrics.NewMetric("hist", "prog", metrics.Histogram, metrics.Buckets, "xxx") lv := &metrics.LabelValue{Labels: []string{"bar"}, Value: datum.MakeBuckets([]datum.Range{{0, 10}, {10, 20}}, time.Unix(0, 0))} histogramMetric.AppendLabelValue(lv) d, _ = histogramMetric.GetDatum("bar") datum.SetFloat(d, 1, ts) datum.SetFloat(d, 5, ts) datum.SetFloat(d, 15, ts) datum.SetFloat(d, 12, ts) datum.SetFloat(d, 19, ts) datum.SetFloat(d, 1000, ts) r = FakeSocketWrite(metricToGraphite, histogramMetric) r = strings.Split(strings.TrimSuffix(r[0], "\n"), "\n") sort.Strings(r) expected = []string{ "prog.hist.xxx.bar 1052 1343124840", "prog.hist.xxx.bar.bin_10 2 1343124840", "prog.hist.xxx.bar.bin_20 3 1343124840", "prog.hist.xxx.bar.bin_inf 1 1343124840", "prog.hist.xxx.bar.count 6 1343124840", } testutil.ExpectNoDiff(t, expected, r) *graphitePrefix = prefix r = FakeSocketWrite(metricToGraphite, dimensionedMetric) expected = []string{ "prefixprog.bar.host.quux_com 37 1343124840\n", "prefixprog.bar.host.snuh_teevee 37 1343124840\n", } testutil.ExpectNoDiff(t, expected, r) } func TestMetricToStatsd(t *testing.T) { *statsdPrefix = "" ts, terr := time.Parse("2006/01/02 15:04:05", "2012/07/24 10:14:00") if terr != nil { t.Errorf("time parse error: %s", terr) } scalarMetric := metrics.NewMetric("foo", "prog", metrics.Counter, metrics.Int) d, _ := scalarMetric.GetDatum() datum.SetInt(d, 37, ts) r := FakeSocketWrite(metricToStatsd, scalarMetric) expected := []string{"prog.foo:37|c"} if !reflect.DeepEqual(expected, r) { t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r) } dimensionedMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "l") d, _ = dimensionedMetric.GetDatum("quux") datum.SetInt(d, 37, ts) d, _ = dimensionedMetric.GetDatum("snuh") datum.SetInt(d, 42, ts) r = FakeSocketWrite(metricToStatsd, dimensionedMetric) expected = []string{ "prog.bar.l.quux:37|g", "prog.bar.l.snuh:42|g", } if 
!reflect.DeepEqual(expected, r) { t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r) } multiLabelMetric := metrics.NewMetric("bar", "prog", metrics.Gauge, metrics.Int, "c", "a", "b") d, _ = multiLabelMetric.GetDatum("x", "z", "y") datum.SetInt(d, 37, ts) r = FakeSocketWrite(metricToStatsd, multiLabelMetric) expected = []string{"prog.bar.a.z.b.y.c.x:37|g"} if !reflect.DeepEqual(expected, r) { t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r) } timingMetric := metrics.NewMetric("foo", "prog", metrics.Timer, metrics.Int) d, _ = timingMetric.GetDatum() datum.SetInt(d, 37, ts) r = FakeSocketWrite(metricToStatsd, timingMetric) expected = []string{"prog.foo:37|ms"} if !reflect.DeepEqual(expected, r) { t.Errorf("String didn't match:\n\texpected: %v\n\treceived: %v", expected, r) } *statsdPrefix = prefix r = FakeSocketWrite(metricToStatsd, timingMetric) expected = []string{"prefixprog.foo:37|ms"} if !reflect.DeepEqual(expected, r) { t.Errorf("prefixed string didn't match:\n\texpected: %v\n\treceived: %v", expected, r) } } mtail-3.0.0~rc54+git0ff5/internal/exporter/graphite.go000066400000000000000000000044341460063571700226100ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package exporter import ( "expvar" "flag" "fmt" "math" "net/http" "strings" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" ) var ( graphiteHostPort = flag.String("graphite_host_port", "", "Host:port to graphite carbon server to write metrics to.") graphitePrefix = flag.String("graphite_prefix", "", "Prefix to use for graphite metrics.") graphiteExportTotal = expvar.NewInt("graphite_export_total") graphiteExportSuccess = expvar.NewInt("graphite_export_success") ) func (e *Exporter) HandleGraphite(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-type", "text/plain") err := e.store.Range(func(m *metrics.Metric) error { select { case <-r.Context().Done(): return r.Context().Err() default: } m.RLock() graphiteExportTotal.Add(1) lc := make(chan *metrics.LabelSet) go m.EmitLabelSets(lc) for l := range lc { line := metricToGraphite(e.hostname, m, l, 0) fmt.Fprint(w, line) } m.RUnlock() return nil }) if err != nil { http.Error(w, fmt.Sprintf("%s", err), http.StatusInternalServerError) } } // metricToGraphite encodes a metric in the graphite text protocol format. The // metric lock is held before entering this function. 
func metricToGraphite(_ string, m *metrics.Metric, l *metrics.LabelSet, _ time.Duration) string { var b strings.Builder if m.Kind == metrics.Histogram && m.Type == metrics.Buckets { d := m.LabelValues[0].Value buckets := datum.GetBuckets(d) for r, c := range buckets.GetBuckets() { var binName string if math.IsInf(r.Max, 1) { binName = "inf" } else { binName = fmt.Sprintf("%v", r.Max) } fmt.Fprintf(&b, "%s%s.%s.bin_%s %v %v\n", *graphitePrefix, m.Program, formatLabels(m.Name, l.Labels, ".", ".", "_"), binName, c, l.Datum.TimeString()) } fmt.Fprintf(&b, "%s%s.%s.count %v %v\n", *graphitePrefix, m.Program, formatLabels(m.Name, l.Labels, ".", ".", "_"), buckets.GetCount(), l.Datum.TimeString()) } fmt.Fprintf(&b, "%s%s.%s %v %v\n", *graphitePrefix, m.Program, formatLabels(m.Name, l.Labels, ".", ".", "_"), l.Datum.ValueString(), l.Datum.TimeString()) return b.String() } mtail-3.0.0~rc54+git0ff5/internal/exporter/graphite_test.go000066400000000000000000000031451460063571700236450ustar00rootroot00000000000000// Copyright 2021 Adam Romanek // This file is available under the Apache license. package exporter import ( "context" "io" "net/http" "net/http/httptest" "sync" "testing" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) var handleGraphiteTests = []struct { name string metrics []*metrics.Metric expected string }{ { "empty", []*metrics.Metric{}, "", }, { "single", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, "foobar.test.foo 1 0\n", }, } func TestHandleGraphite(t *testing.T) { *graphitePrefix = "foobar." 
for _, tc := range handleGraphiteTests { tc := tc t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) var wg sync.WaitGroup ms := metrics.NewStore() for _, metric := range tc.metrics { testutil.FatalIfErr(t, ms.Add(metric)) } e, err := New(ctx, &wg, ms, Hostname("gunstar")) testutil.FatalIfErr(t, err) response := httptest.NewRecorder() e.HandleGraphite(response, &http.Request{}) if response.Code != 200 { t.Errorf("response code not 200: %d", response.Code) } b, err := io.ReadAll(response.Body) if err != nil { t.Errorf("failed to read response %s", err) } testutil.ExpectNoDiff(t, tc.expected, string(b), testutil.IgnoreUnexported(sync.RWMutex{})) cancel() wg.Wait() }) } } mtail-3.0.0~rc54+git0ff5/internal/exporter/json.go000066400000000000000000000014361460063571700217550ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. package exporter import ( "encoding/json" "expvar" "net/http" "github.com/golang/glog" ) var exportJSONErrors = expvar.NewInt("exporter_json_errors") // HandleJSON exports the metrics in JSON format via HTTP. func (e *Exporter) HandleJSON(w http.ResponseWriter, _ *http.Request) { b, err := json.MarshalIndent(e.store, "", " ") if err != nil { exportJSONErrors.Add(1) glog.Info("error marshalling metrics into json:", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("content-type", "application/json") if _, err := w.Write(b); err != nil { glog.Error(err) http.Error(w, err.Error(), http.StatusInternalServerError) } } mtail-3.0.0~rc54+git0ff5/internal/exporter/json_test.go000066400000000000000000000060501460063571700230110ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package exporter import ( "context" "io" "math" "net/http" "net/http/httptest" "sync" "testing" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) var handleJSONTests = []struct { name string metrics []*metrics.Metric expected string }{ { "empty", []*metrics.Metric{}, "[]", }, { "single", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `[ { "Name": "foo", "Program": "test", "Kind": 1, "Type": 0, "LabelValues": [ { "Value": { "Value": 1, "Time": 0 } } ] } ]`, }, { "dimensioned", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, Keys: []string{"a", "b"}, LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `[ { "Name": "foo", "Program": "test", "Kind": 1, "Type": 0, "Keys": [ "a", "b" ], "LabelValues": [ { "Labels": [ "1", "2" ], "Value": { "Value": 1, "Time": 0 } } ] } ]`, }, { "histogram", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Histogram, Keys: []string{"a", "b"}, LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, Buckets: []datum.Range{{Min: 0, Max: math.Inf(1)}}, }, }, `[ { "Name": "foo", "Program": "test", "Kind": 5, "Type": 0, "Keys": [ "a", "b" ], "LabelValues": [ { "Labels": [ "1", "2" ], "Value": { "Value": 1, "Time": 0 } } ], "Buckets": [ { "Min": "0", "Max": "+Inf" } ] } ]`, }, } func TestHandleJSON(t *testing.T) { for _, tc := range handleJSONTests { tc := tc t.Run(tc.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) var wg sync.WaitGroup ms := metrics.NewStore() for _, metric := range tc.metrics { testutil.FatalIfErr(t, ms.Add(metric)) } e, err := New(ctx, &wg, ms, Hostname("gunstar")) testutil.FatalIfErr(t, err) response := 
httptest.NewRecorder() e.HandleJSON(response, &http.Request{}) if response.Code != 200 { t.Errorf("response code not 200: %d", response.Code) } b, err := io.ReadAll(response.Body) if err != nil { t.Errorf("failed to read response: %s", err) } testutil.ExpectNoDiff(t, tc.expected, string(b), testutil.IgnoreUnexported(sync.RWMutex{})) cancel() wg.Wait() }) } } mtail-3.0.0~rc54+git0ff5/internal/exporter/prometheus.go000066400000000000000000000066711460063571700232050ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. package exporter import ( "expvar" "fmt" "io" "strings" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/expfmt" ) var metricExportTotal = expvar.NewInt("metric_export_total") func noHyphens(s string) string { return strings.ReplaceAll(s, "-", "_") } // Describe implements the prometheus.Collector interface. func (e *Exporter) Describe(c chan<- *prometheus.Desc) { prometheus.DescribeByCollect(e, c) } // Collect implements the prometheus.Collector interface. func (e *Exporter) Collect(c chan<- prometheus.Metric) { lastMetric := "" lastSource := "" /* #nosec G104 always retursn nil */ e.store.Range(func(m *metrics.Metric) error { m.RLock() // We don't have a way of converting text metrics to prometheus format. 
if m.Kind == metrics.Text { m.RUnlock() return nil } metricExportTotal.Add(1) lsc := make(chan *metrics.LabelSet) go m.EmitLabelSets(lsc) for ls := range lsc { if lastMetric != m.Name { glog.V(2).Infof("setting source to %s", m.Source) lastSource = m.Source lastMetric = m.Name } var keys []string var vals []string if !e.omitProgLabel { keys = append(keys, "prog") vals = append(vals, m.Program) } for k, v := range ls.Labels { keys = append(keys, k) vals = append(vals, v) } var pM prometheus.Metric var err error if m.Kind == metrics.Histogram { pM, err = prometheus.NewConstHistogram( prometheus.NewDesc(noHyphens(m.Name), fmt.Sprintf("defined at %s", lastSource), keys, nil), datum.GetBucketsCount(ls.Datum), datum.GetBucketsSum(ls.Datum), datum.GetBucketsCumByMax(ls.Datum), vals...) } else { pM, err = prometheus.NewConstMetric( prometheus.NewDesc(noHyphens(m.Name), fmt.Sprintf("defined at %s", lastSource), keys, nil), promTypeForKind(m.Kind), promValueForDatum(ls.Datum), vals...) } if err != nil { glog.Warning(err) return nil } // By default no timestamp is emitted to Prometheus. Setting a // timestamp is not recommended. It can lead to unexpected results // if the timestamp is not updated or moved fowarded enough to avoid // triggering Promtheus staleness handling. // Read more in docs/faq.md if e.emitTimestamp { c <- prometheus.NewMetricWithTimestamp(ls.Datum.TimeUTC(), pM) } else { c <- pM } } m.RUnlock() return nil }) } // Write is used to write Prometheus metrics to an io.Writer. 
func (e *Exporter) Write(w io.Writer) error { reg := prometheus.NewRegistry() err := reg.Register(e) if err != nil { return err } mfs, err := reg.Gather() if err != nil { return err } enc := expfmt.NewEncoder(w, expfmt.NewFormat(expfmt.TypeTextPlain)) for _, mf := range mfs { err := enc.Encode(mf) if err != nil { return err } } return nil } func promTypeForKind(k metrics.Kind) prometheus.ValueType { switch k { case metrics.Counter: return prometheus.CounterValue case metrics.Gauge: return prometheus.GaugeValue case metrics.Timer: return prometheus.GaugeValue } return prometheus.UntypedValue } func promValueForDatum(d datum.Datum) float64 { switch n := d.(type) { case *datum.Int: return float64(n.Get()) case *datum.Float: return n.Get() } return 0. } mtail-3.0.0~rc54+git0ff5/internal/exporter/prometheus_test.go000066400000000000000000000170461460063571700242420ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package exporter import ( "bytes" "context" "math" "strings" "sync" "testing" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" promtest "github.com/prometheus/client_golang/prometheus/testutil" ) var handlePrometheusTests = []struct { name string progLabel bool metrics []*metrics.Metric expected string }{ { "empty", false, []*metrics.Metric{}, "", }, { "single", false, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `# HELP foo defined at # TYPE foo counter foo{} 1 `, }, { "with prog label", true, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `# HELP foo defined at # TYPE foo counter foo{prog="test"} 1 `, }, { "dimensioned", false, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, Keys: []string{"a", "b"}, LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `# HELP foo defined at # TYPE foo counter foo{a="1",b="2"} 1 `, }, { "gauge", false, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Gauge, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `# HELP foo defined at # TYPE foo gauge foo{} 1 `, }, { "timer", false, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Timer, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `# HELP foo defined at # TYPE foo gauge foo{} 1 `, }, { "text", false, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Text, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeString("hi", time.Unix(0, 0))}}, }, }, "", }, { "quotes", 
false, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, Keys: []string{"a"}, LabelValues: []*metrics.LabelValue{{Labels: []string{"str\"bang\"blah"}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `# HELP foo defined at # TYPE foo counter foo{a="str\"bang\"blah"} 1 `, }, { "help", false, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, Source: "location.mtail:37", }, }, `# HELP foo defined at location.mtail:37 # TYPE foo counter foo{} 1 `, }, { "2 help with label", true, []*metrics.Metric{ { Name: "foo", Program: "test2", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, Source: "location.mtail:37", }, { Name: "foo", Program: "test1", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, Source: "different.mtail:37", }, }, `# HELP foo defined at location.mtail:37 # TYPE foo counter foo{prog="test2"} 1 foo{prog="test1"} 1 `, }, { "histo", true, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Histogram, Keys: []string{"a"}, LabelValues: []*metrics.LabelValue{{Labels: []string{"bar"}, Value: datum.MakeBuckets([]datum.Range{{0, 1}, {1, 2}}, time.Unix(0, 0))}}, Source: "location.mtail:37", }, }, `# HELP foo defined at location.mtail:37 # TYPE foo histogram foo_bucket{a="bar",prog="test",le="1"} 0 foo_bucket{a="bar",prog="test",le="2"} 0 foo_bucket{a="bar",prog="test",le="+Inf"} 0 foo_sum{a="bar",prog="test"} 0 foo_count{a="bar",prog="test"} 0 `, }, { "histo-count-eq-inf", true, []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Histogram, Keys: []string{"a"}, LabelValues: []*metrics.LabelValue{ { Labels: []string{"bar"}, Value: &datum.Buckets{ Buckets: []datum.BucketCount{ { Range: datum.Range{Min: 0, Max: 1}, Count: 1, }, { Range: datum.Range{Min: 
1, Max: 2}, Count: 1, }, { Range: datum.Range{Min: 2, Max: math.Inf(+1)}, Count: 2, }, }, Count: 4, Sum: 5, }, }, }, Source: "location.mtail:37", }, }, `# HELP foo defined at location.mtail:37 # TYPE foo histogram foo_bucket{a="bar",prog="test",le="1"} 1 foo_bucket{a="bar",prog="test",le="2"} 2 foo_bucket{a="bar",prog="test",le="+Inf"} 4 foo_sum{a="bar",prog="test"} 5 foo_count{a="bar",prog="test"} 4 `, }, } func TestHandlePrometheus(t *testing.T) { for _, tc := range handlePrometheusTests { tc := tc t.Run(tc.name, func(t *testing.T) { var wg sync.WaitGroup ctx, cancel := context.WithCancel(context.Background()) ms := metrics.NewStore() for _, metric := range tc.metrics { testutil.FatalIfErr(t, ms.Add(metric)) } opts := []Option{ Hostname("gunstar"), } if !tc.progLabel { opts = append(opts, OmitProgLabel()) } e, err := New(ctx, &wg, ms, opts...) testutil.FatalIfErr(t, err) r := strings.NewReader(tc.expected) if err = promtest.CollectAndCompare(e, r); err != nil { t.Error(err) } cancel() wg.Wait() }) } } var writePrometheusTests = []struct { name string metrics []*metrics.Metric expected string }{ { "empty", []*metrics.Metric{}, "", }, { "single", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, }, `# HELP foo defined at # TYPE foo counter foo 1 `, }, { "multi", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(0, 0))}}, }, { Name: "bar", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(2, time.Unix(0, 0))}}, }, }, `# HELP bar defined at # TYPE bar counter bar 2 # HELP foo defined at # TYPE foo counter foo 1 `, }, } func TestWritePrometheus(t *testing.T) { for _, tc := range writePrometheusTests { tc := tc t.Run(tc.name, func(t *testing.T) { var wg 
sync.WaitGroup ctx, cancel := context.WithCancel(context.Background()) ms := metrics.NewStore() for _, metric := range tc.metrics { testutil.FatalIfErr(t, ms.Add(metric)) } opts := []Option{ Hostname("gunstar"), OmitProgLabel(), } e, err := New(ctx, &wg, ms, opts...) testutil.FatalIfErr(t, err) var buf bytes.Buffer err = e.Write(&buf) testutil.FatalIfErr(t, err) testutil.ExpectNoDiff(t, tc.expected, buf.String()) cancel() wg.Wait() }) } } mtail-3.0.0~rc54+git0ff5/internal/exporter/statsd.go000066400000000000000000000021021460063571700222750ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. package exporter import ( "expvar" "flag" "fmt" "time" "github.com/google/mtail/internal/metrics" ) var ( statsdHostPort = flag.String("statsd_hostport", "", "Host:port to statsd server to write metrics to.") statsdPrefix = flag.String("statsd_prefix", "", "Prefix to use for statsd metrics.") statsdExportTotal = expvar.NewInt("statsd_export_total") statsdExportSuccess = expvar.NewInt("statsd_export_success") ) // metricToStatsd encodes a metric in the statsd text protocol format. The // metric lock is held before entering this function. func metricToStatsd(_ string, m *metrics.Metric, l *metrics.LabelSet, _ time.Duration) string { var t string switch m.Kind { case metrics.Counter: t = "c" // StatsD Counter case metrics.Gauge: t = "g" // StatsD Gauge case metrics.Timer: t = "ms" // StatsD Timer } return fmt.Sprintf("%s%s.%s:%s|%s", *statsdPrefix, m.Program, formatLabels(m.Name, l.Labels, ".", ".", "_"), l.Datum.ValueString(), t) } mtail-3.0.0~rc54+git0ff5/internal/exporter/varz.go000066400000000000000000000026261460063571700217700ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package exporter import ( "expvar" "fmt" "net/http" "sort" "strings" "github.com/google/mtail/internal/metrics" ) var exportVarzTotal = expvar.NewInt("exporter_varz_total") const varzFormat = "%s{%s} %s\n" // HandleVarz exports the metrics in Varz format via HTTP. func (e *Exporter) HandleVarz(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-type", "text/plain") err := e.store.Range(func(m *metrics.Metric) error { select { case <-r.Context().Done(): return r.Context().Err() default: } m.RLock() exportVarzTotal.Add(1) lc := make(chan *metrics.LabelSet) go m.EmitLabelSets(lc) for l := range lc { line := metricToVarz(m, l, e.omitProgLabel, e.hostname) fmt.Fprint(w, line) } m.RUnlock() return nil }) if err != nil { http.Error(w, fmt.Sprintf("%s", err), http.StatusInternalServerError) } } func metricToVarz(m *metrics.Metric, l *metrics.LabelSet, omitProgLabel bool, hostname string) string { s := make([]string, 0, len(l.Labels)+2) for k, v := range l.Labels { s = append(s, fmt.Sprintf("%s=%s", k, v)) } sort.Strings(s) if !omitProgLabel { s = append(s, fmt.Sprintf("prog=%s", m.Program)) } s = append(s, fmt.Sprintf("instance=%s", hostname)) return fmt.Sprintf(varzFormat, m.Name, strings.Join(s, ","), l.Datum.ValueString()) } mtail-3.0.0~rc54+git0ff5/internal/exporter/varz_test.go000066400000000000000000000042431460063571700230240ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package exporter import ( "context" "io" "net/http" "net/http/httptest" "sync" "testing" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) var handleVarzTests = []struct { name string metrics []*metrics.Metric expected string }{ { "empty", []*metrics.Metric{}, "", }, { "single", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}}, }, }, `foo{prog=test,instance=gunstar} 1 `, }, { "dimensioned", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Counter, Keys: []string{"a", "b"}, LabelValues: []*metrics.LabelValue{{Labels: []string{"1", "2"}, Value: datum.MakeInt(1, time.Unix(1397586900, 0))}}, }, }, `foo{a=1,b=2,prog=test,instance=gunstar} 1 `, }, { "text", []*metrics.Metric{ { Name: "foo", Program: "test", Kind: metrics.Text, LabelValues: []*metrics.LabelValue{{Labels: []string{}, Value: datum.MakeString("hi", time.Unix(1397586900, 0))}}, }, }, `foo{prog=test,instance=gunstar} hi `, }, } func TestHandleVarz(t *testing.T) { for _, tc := range handleVarzTests { tc := tc t.Run(tc.name, func(t *testing.T) { var wg sync.WaitGroup ctx, cancel := context.WithCancel(context.Background()) ms := metrics.NewStore() for _, metric := range tc.metrics { testutil.FatalIfErr(t, ms.Add(metric)) } e, err := New(ctx, &wg, ms, Hostname("gunstar")) testutil.FatalIfErr(t, err) response := httptest.NewRecorder() e.HandleVarz(response, &http.Request{}) if response.Code != 200 { t.Errorf("response code not 200: %d", response.Code) } b, err := io.ReadAll(response.Body) if err != nil { t.Errorf("failed to read response: %s", err) } testutil.ExpectNoDiff(t, tc.expected, string(b)) cancel() wg.Wait() }) } } 
mtail-3.0.0~rc54+git0ff5/internal/logline/000077500000000000000000000000001460063571700202325ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/logline/logline.go000066400000000000000000000010461460063571700222130ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package logline import "context" // LogLine contains all the information about a line just read from a log. type LogLine struct { Context context.Context Filename string // The log filename that this line was read from Line string // The text of the log line itself up to the newline. } // New creates a new LogLine object. func New(ctx context.Context, filename string, line string) *LogLine { return &LogLine{ctx, filename, line} } mtail-3.0.0~rc54+git0ff5/internal/metrics/000077500000000000000000000000001460063571700202475ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/000077500000000000000000000000001460063571700213615ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/buckets.go000066400000000000000000000035311460063571700233520ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package datum import ( "encoding/json" "fmt" "strconv" "sync" "sync/atomic" "time" ) type Range struct { Min float64 Max float64 } type BucketCount struct { Range Range Count uint64 } func (r *Range) Contains(v float64) bool { return r.Min < v && v <= r.Max } // Buckets describes a floating point value at a given timestamp. 
type Buckets struct { BaseDatum sync.RWMutex Buckets []BucketCount Count uint64 Sum float64 } func (d *Buckets) ValueString() string { return fmt.Sprintf("%g", d.GetSum()) } func (d *Buckets) Observe(v float64, ts time.Time) { d.Lock() defer d.Unlock() for i, b := range d.Buckets { if v <= b.Range.Max { d.Buckets[i].Count++ break } } d.Count++ d.Sum += v d.stamp(ts) } func (d *Buckets) GetCount() uint64 { d.RLock() defer d.RUnlock() return d.Count } func (d *Buckets) GetSum() float64 { d.RLock() defer d.RUnlock() return d.Sum } func (d *Buckets) AddBucket(r Range) { d.Lock() defer d.Unlock() d.Buckets = append(d.Buckets, BucketCount{r, 0}) } func (d *Buckets) GetBuckets() map[Range]uint64 { d.RLock() defer d.RUnlock() b := make(map[Range]uint64) for _, bc := range d.Buckets { b[bc.Range] = bc.Count } return b } func (d *Buckets) MarshalJSON() ([]byte, error) { d.RLock() defer d.RUnlock() bs := make(map[string]uint64) for _, b := range d.Buckets { bs[strconv.FormatFloat(b.Range.Max, 'g', -1, 64)] = b.Count } j := struct { Buckets map[string]uint64 Count uint64 Sum float64 Time int64 }{bs, d.Count, d.Sum, atomic.LoadInt64(&d.Time)} return json.Marshal(j) } func (r *Range) MarshalJSON() ([]byte, error) { j := struct { Min string Max string }{fmt.Sprintf("%v", r.Min), fmt.Sprintf("%v", r.Max)} return json.Marshal(j) } mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/buckets_test.go000066400000000000000000000021661460063571700244140ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package datum_test import ( "math" "testing" "testing/quick" "time" "github.com/google/mtail/internal/metrics/datum" ) func TestBucketContains(t *testing.T) { if err := quick.Check(func(min, max, val float64) bool { r := &datum.Range{Min: min, Max: max} truth := val < max && val >= min return truth == r.Contains(val) }, nil); err != nil { t.Error(err) } } func TestMakeBucket(t *testing.T) { r := []datum.Range{ {0, 1}, {1, 2}, {2, 4}, } b := datum.MakeBuckets(r, time.Unix(37, 42)) ts := time.Unix(37, 31) datum.Observe(b, 2, ts) if r := datum.GetBucketsSum(b); r != 2 { t.Errorf("sum not 2, got %v", r) } if r := datum.GetBucketsCount(b); r != 1 { t.Errorf("count not 1, got %v", r) } bs := datum.GetBucketsCumByMax(b) if r := datum.GetBucketsCount(b); r != bs[math.Inf(+1)] { t.Errorf("Inf bucket des not equal total observation count: %v vs %v", bs[math.Inf(+1)], r) } if len(bs) != len(r)+1 { t.Errorf("missing buckets from BucketsByMax: expected %d, got %v", len(r)+1, len(bs)) } } mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/datum.go000066400000000000000000000143641460063571700230320ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package datum import ( "fmt" "math" "sort" "sync/atomic" "time" ) // Datum is an interface for metric datums, with a type, value and timestamp to be exported. type Datum interface { // // Type returns the Datum type. // Type() metrics.Type // ValueString returns the value of a Datum as a string. ValueString() string // TimeString returns the timestamp of a Datum as a string. TimeString() string // Time returns the timestamp of the Datum as time.Time in UTC TimeUTC() time.Time } // BaseDatum is a struct used to record timestamps across all Datum implementations. 
type BaseDatum struct { Time int64 // nanoseconds since unix epoch } var zeroTime time.Time func (d *BaseDatum) stamp(timestamp time.Time) { if timestamp.IsZero() { atomic.StoreInt64(&d.Time, time.Now().UTC().UnixNano()) } else { atomic.StoreInt64(&d.Time, timestamp.UnixNano()) } } // TimeString returns the timestamp of this Datum as a string. func (d *BaseDatum) TimeString() string { return fmt.Sprintf("%d", atomic.LoadInt64(&d.Time)/1e9) } func (d *BaseDatum) TimeUTC() time.Time { tNsec := atomic.LoadInt64(&d.Time) return time.Unix(tNsec/1e9, tNsec%1e9) } // NewInt creates a new zero integer datum. func NewInt() Datum { return MakeInt(0, zeroTime) } // NewFloat creates a new zero floating-point datum. func NewFloat() Datum { return MakeFloat(0., zeroTime) } // NewString creates a new zero string datum. func NewString() Datum { return MakeString("", zeroTime) } // NewBuckets creates a new zero buckets datum. func NewBuckets(buckets []Range) Datum { return MakeBuckets(buckets, zeroTime) } // MakeInt creates a new integer datum with the provided value and timestamp. func MakeInt(v int64, ts time.Time) Datum { d := &Int{} d.Set(v, ts) return d } // MakeFloat creates a new floating-point datum with the provided value and timestamp. func MakeFloat(v float64, ts time.Time) Datum { d := &Float{} d.Set(v, ts) return d } // MakeString creates a new string datum with the provided value and timestamp. func MakeString(v string, ts time.Time) Datum { d := &String{} d.Set(v, ts) return d } // MakeBuckets creates a new bucket datum with the provided list of ranges and // timestamp. If no +inf bucket is provided, one is created. 
func MakeBuckets(buckets []Range, _ time.Time) Datum { d := &Buckets{} seenInf := false highest := 0.0 for _, b := range buckets { d.AddBucket(b) if math.IsInf(b.Max, +1) { seenInf = true } else if b.Max > highest { highest = b.Max } } if !seenInf { d.AddBucket(Range{highest, math.Inf(+1)}) } return d } // GetInt returns the integer value of a datum, or error. func GetInt(d Datum) int64 { switch d := d.(type) { case *Int: return d.Get() default: panic(fmt.Sprintf("datum %v is not an Int", d)) } } // GetFloat returns the floating-point value of a datum, or error. func GetFloat(d Datum) float64 { switch d := d.(type) { case *Float: return d.Get() default: panic(fmt.Sprintf("datum %v is not a Float", d)) } } // GetString returns the string of a datum, or error. func GetString(d Datum) string { switch d := d.(type) { case *String: return d.Get() default: panic(fmt.Sprintf("datum %v is not a String", d)) } } // SetInt sets an integer datum to the provided value and timestamp, or panics if the Datum is not an IntDatum. func SetInt(d Datum, v int64, ts time.Time) { switch d := d.(type) { case *Int: d.Set(v, ts) case *Buckets: d.Observe(float64(v), ts) default: panic(fmt.Sprintf("datum %v is not an Int", d)) } } // SetFloat sets a floating-point Datum to the provided value and timestamp, or panics if the Datum is not a FloatDatum. func SetFloat(d Datum, v float64, ts time.Time) { switch d := d.(type) { case *Float: d.Set(v, ts) case *Buckets: d.Observe(v, ts) default: panic(fmt.Sprintf("datum %v is not a Float", d)) } } // SetString sets a string Datum to the provided value and timestamp, or panics if the Datym is not a String Datum. func SetString(d Datum, v string, ts time.Time) { switch d := d.(type) { case *String: d.Set(v, ts) default: panic(fmt.Sprintf("datum %v is not a String", d)) } } // IncIntBy increments an integer Datum by the provided value, at time ts, or panics if the Datum is not an IntDatum. 
func IncIntBy(d Datum, v int64, ts time.Time) { switch d := d.(type) { case *Int: d.IncBy(v, ts) default: panic(fmt.Sprintf("datum %v is not an Int", d)) } } // DecIntBy increments an integer Datum by the provided value, at time ts, or panics if the Datum is not an IntDatum. func DecIntBy(d Datum, v int64, ts time.Time) { switch d := d.(type) { case *Int: d.DecBy(v, ts) default: panic(fmt.Sprintf("datum %v is not an Int", d)) } } func GetBuckets(d Datum) *Buckets { switch d := d.(type) { case *Buckets: return d default: panic(fmt.Sprintf("datum %v is not a Buckets", d)) } } // Observe records an observation v at time ts in d, or panics if d is not a BucketsDatum. func Observe(d Datum, v float64, ts time.Time) { switch d := d.(type) { case *Buckets: d.Observe(v, ts) default: panic(fmt.Sprintf("datum %v is not a Buckets", d)) } } // GetBucketCount returns the total count of observations in d, or panics if d is not a BucketsDatum. func GetBucketsCount(d Datum) uint64 { switch d := d.(type) { case *Buckets: return d.GetCount() default: panic(fmt.Sprintf("datum %v is not a Buckets", d)) } } // GetBucketsSum returns the sum of observations in d, or panics if d is not a BucketsDatum. func GetBucketsSum(d Datum) float64 { switch d := d.(type) { case *Buckets: return d.GetSum() default: panic(fmt.Sprintf("datum %v is not a Buckets", d)) } } // GetBucketsCumByMax returns a map of cumulative bucket observations by their // upper bonds, or panics if d is not a BucketsDatum. 
func GetBucketsCumByMax(d Datum) map[float64]uint64 { switch d := d.(type) { case *Buckets: buckets := make(map[float64]uint64) maxes := make([]float64, 0) for r, c := range d.GetBuckets() { maxes = append(maxes, r.Max) buckets[r.Max] = c } sort.Float64s(maxes) cum := uint64(0) for _, m := range maxes { cum += buckets[m] buckets[m] = cum } return buckets default: panic(fmt.Sprintf("datum %v is not a Buckets", d)) } } mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/datum_test.go000066400000000000000000000025541460063571700240670ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package datum import ( "encoding/json" "testing" "time" "github.com/google/mtail/internal/testutil" ) func TestDatumSetAndValue(t *testing.T) { d := MakeInt(12, time.Unix(37, 42)) if r := GetInt(d); r != 12 { t.Errorf("d ditn't return 12, got %v", r) } if r := d.ValueString(); r != "12" { t.Errorf("d value is not 12, got %v", r) } if r := d.TimeString(); r != "37" { t.Errorf("d Time not correct, got %v", r) } d = MakeFloat(1.2, time.Unix(37, 42)) if r := GetFloat(d); r != 1.2 { t.Errorf("d ditn't return 12, got %v", r) } if r := d.ValueString(); r != "1.2" { t.Errorf("d value is not 12, got %v", r) } if r := d.TimeString(); r != "37" { t.Errorf("d Time not correct, got %v", r) } } var datumJSONTests = []struct { datum Datum expected string }{ { MakeInt(37, time.Unix(42, 12)), `{"Value":37,"Time":42000000012}`, }, { MakeFloat(37.1, time.Unix(42, 12)), `{"Value":37.1,"Time":42000000012}`, }, } func TestMarshalJSON(t *testing.T) { // This is not a round trip test because only the LabelValue knows how to unmarshal a Datum. 
for i, tc := range datumJSONTests { b, err := json.Marshal(tc.datum) if err != nil { t.Errorf("%d: Marshal failed: %v", i, err) } testutil.ExpectNoDiff(t, tc.expected, string(b)) } } mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/float.go000066400000000000000000000016751460063571700230260ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package datum import ( "encoding/json" "fmt" "math" "sync/atomic" "time" ) // Float describes a floating point value at a given timestamp. type Float struct { BaseDatum Valuebits uint64 } // ValueString returns the value of the Float as a string. func (d *Float) ValueString() string { return fmt.Sprintf("%g", d.Get()) } // Set sets value of the Float at the timestamp ts. func (d *Float) Set(v float64, ts time.Time) { atomic.StoreUint64(&d.Valuebits, math.Float64bits(v)) d.stamp(ts) } // Get returns the floating-point value. func (d *Float) Get() float64 { return math.Float64frombits(atomic.LoadUint64(&d.Valuebits)) } // MarshalJSON returns a JSON encoding of the Float. func (d *Float) MarshalJSON() ([]byte, error) { j := struct { Value float64 Time int64 }{d.Get(), atomic.LoadInt64(&d.Time)} return json.Marshal(j) } mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/int.go000066400000000000000000000024041460063571700225020ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package datum import ( "encoding/json" "fmt" "sync/atomic" "time" ) // Int describes an integer value at a given timestamp. type Int struct { BaseDatum Value int64 } // Set sets the value of the Int to the value at timestamp. func (d *Int) Set(value int64, timestamp time.Time) { atomic.StoreInt64(&d.Value, value) d.stamp(timestamp) } // IncBy increments the Int's value by the value provided, at timestamp. 
func (d *Int) IncBy(delta int64, timestamp time.Time) { atomic.AddInt64(&d.Value, delta) d.stamp(timestamp) } // DecBy increments the Int's value by the value provided, at timestamp. func (d *Int) DecBy(delta int64, timestamp time.Time) { atomic.AddInt64(&d.Value, -delta) d.stamp(timestamp) } // Get returns the value of the Int. func (d *Int) Get() int64 { return atomic.LoadInt64(&d.Value) } // ValueString returns the value of the Int as a string. func (d *Int) ValueString() string { return fmt.Sprintf("%d", atomic.LoadInt64(&d.Value)) } // MarshalJSON returns a JSON encoding of the Int. func (d *Int) MarshalJSON() ([]byte, error) { j := struct { Value int64 Time int64 }{d.Get(), atomic.LoadInt64(&d.Time)} return json.Marshal(j) } mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/int_test.go000066400000000000000000000012361460063571700235430ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package datum import ( "testing" "time" ) func BenchmarkIncrementScalarInt(b *testing.B) { d := &Int{} ts := time.Now().UTC() for i := 0; i < b.N; i++ { d.IncBy(1, ts) } } func BenchmarkDecrementScalarInt(b *testing.B) { d := &Int{} ts := time.Now().UTC() for i := 0; i < b.N; i++ { d.DecBy(1, ts) } } func TestDecrementScalarInt(t *testing.T) { d := &Int{} ts := time.Now().UTC() d.IncBy(1, ts) r := d.Get() if r != 1 { t.Errorf("expected 1, got %d", r) } d.DecBy(1, ts) r = d.Get() if r != 0 { t.Errorf("expected 0, got %d", r) } } mtail-3.0.0~rc54+git0ff5/internal/metrics/datum/string.go000066400000000000000000000016701460063571700232220ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // This file is available under the Apache license. package datum import ( "encoding/json" "sync" "sync/atomic" "time" ) // String describes a string value at a given timestamp. 
type String struct { BaseDatum mu sync.RWMutex Value string } // Set sets the value of the String to the value at timestamp. func (d *String) Set(value string, timestamp time.Time) { d.mu.Lock() d.Value = value d.stamp(timestamp) d.mu.Unlock() } // Get returns the value of the String. func (d *String) Get() string { d.mu.RLock() defer d.mu.RUnlock() return d.Value } // ValueString returns the value of the String as a string. func (d *String) ValueString() string { return d.Get() } // MarshalJSON returns a JSON encoding of the String. func (d *String) MarshalJSON() ([]byte, error) { j := struct { Value string Time int64 }{d.Get(), atomic.LoadInt64(&d.Time)} return json.Marshal(j) } mtail-3.0.0~rc54+git0ff5/internal/metrics/metric.go000066400000000000000000000203241460063571700220620ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Package metrics provides storage for metrics being recorded by mtail // programs. package metrics import ( "encoding/json" "fmt" "math/rand" "reflect" "strings" "sync" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics/datum" "github.com/pkg/errors" ) // Kind enumerates the types of metrics supported. type Kind int const ( _ Kind = iota // Counter is a monotonically nondecreasing metric. Counter // Gauge is a Kind that can take on any value, and may be set // discontinuously from its previous value. Gauge // Timer is a specialisation of Gauge that can be used to store time // intervals, such as latency and durations. It enables certain behaviour // in exporters that handle time intervals such as StatsD. Timer // Text is a special metric type for free text, usually for operating as a 'hidden' metric, as often these values cannot be exported. Text // Histogram is a Kind that observes a value and stores the value // in a bucket. 
Histogram endKind // end of enumeration for testing ) func (m Kind) String() string { switch m { case Counter: return "Counter" case Gauge: return "Gauge" case Timer: return "Timer" case Text: return "Text" case Histogram: return "Histogram" } return "Unknown" } // Generate implements the quick.Generator interface for Kind. func (Kind) Generate(rand *rand.Rand, _ int) reflect.Value { return reflect.ValueOf(Kind(rand.Intn(int(endKind)))) } // LabelValue is an object that names a Datum value with a list of label // strings. type LabelValue struct { Labels []string `json:",omitempty"` Value datum.Datum // After this time of inactivity, the LabelValue is removed from the metric. Expiry time.Duration `json:",omitempty"` } // Metric is an object that describes a metric, with its name, the creator and // owner program name, its Kind, a sequence of Keys that may be used to // add dimension to the metric, and a list of LabelValues that contain data for // labels in each dimension of the Keys. type Metric struct { sync.RWMutex Name string // Name Program string // Instantiating program Kind Kind Type Type Hidden bool `json:",omitempty"` Keys []string `json:",omitempty"` LabelValues []*LabelValue `json:",omitempty"` labelValuesMap map[string]*LabelValue Source string `json:",omitempty"` Buckets []datum.Range `json:",omitempty"` Limit int `json:",omitempty"` } // NewMetric returns a new empty metric of dimension len(keys). func NewMetric(name string, prog string, kind Kind, typ Type, keys ...string) *Metric { m := newMetric(len(keys)) m.Name = name m.Program = prog m.Kind = kind m.Type = typ copy(m.Keys, keys) return m } // newMetric returns a new empty Metric. func newMetric(keyLen int) *Metric { return &Metric{ Keys: make([]string, keyLen), LabelValues: make([]*LabelValue, 0), labelValuesMap: make(map[string]*LabelValue), } } // buildLabelValueKey returns a unique key for the given labels. 
func buildLabelValueKey(labels []string) string { var buf strings.Builder for i := 0; i < len(labels); i++ { rs := strings.ReplaceAll(labels[i], "-", "\\-") buf.WriteString(rs) buf.WriteString("-") } return buf.String() } func (m *Metric) AppendLabelValue(lv *LabelValue) error { if len(lv.Labels) != len(m.Keys) { return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", lv.Labels, m) } m.LabelValues = append(m.LabelValues, lv) k := buildLabelValueKey(lv.Labels) m.labelValuesMap[k] = lv return nil } func (m *Metric) FindLabelValueOrNil(labelvalues []string) *LabelValue { k := buildLabelValueKey(labelvalues) lv, ok := m.labelValuesMap[k] if ok { return lv } return nil } // GetDatum returns the datum named by a sequence of string label values from a // Metric. If the sequence of label values does not yet exist, it is created. func (m *Metric) GetDatum(labelvalues ...string) (d datum.Datum, err error) { if len(labelvalues) != len(m.Keys) { return nil, errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m) } m.Lock() defer m.Unlock() if lv := m.FindLabelValueOrNil(labelvalues); lv != nil { d = lv.Value } else { // TODO Check m.Limit and expire old data switch m.Type { case Int: d = datum.NewInt() case Float: d = datum.NewFloat() case String: d = datum.NewString() case Buckets: buckets := m.Buckets if buckets == nil { buckets = make([]datum.Range, 0) } d = datum.NewBuckets(buckets) } lv := &LabelValue{Labels: labelvalues, Value: d} if err := m.AppendLabelValue(lv); err != nil { return nil, err } } return d, nil } // RemoveOldestDatum scans the Metric's LabelValues for the Datum with the oldest timestamp, and removes it. 
func (m *Metric) RemoveOldestDatum() { var oldestLV *LabelValue for _, lv := range m.LabelValues { if oldestLV == nil || lv.Value.TimeUTC().Before(oldestLV.Value.TimeUTC()) { oldestLV = lv } } if oldestLV != nil { glog.V(1).Infof("removeOldest: removing oldest LV: %v", oldestLV) err := m.RemoveDatum(oldestLV.Labels...) if err != nil { glog.Warning(err) } } } // RemoveDatum removes the Datum described by labelvalues from the Metric m. func (m *Metric) RemoveDatum(labelvalues ...string) error { if len(labelvalues) != len(m.Keys) { return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m) } m.Lock() defer m.Unlock() k := buildLabelValueKey(labelvalues) olv, ok := m.labelValuesMap[k] if ok { for i := 0; i < len(m.LabelValues); i++ { lv := m.LabelValues[i] if lv == olv { // remove from the slice m.LabelValues = append(m.LabelValues[:i], m.LabelValues[i+1:]...) delete(m.labelValuesMap, k) break } } } return nil } func (m *Metric) ExpireDatum(expiry time.Duration, labelvalues ...string) error { if len(labelvalues) != len(m.Keys) { return errors.Errorf("Label values requested (%q) not same length as keys for metric %v", labelvalues, m) } m.Lock() defer m.Unlock() if lv := m.FindLabelValueOrNil(labelvalues); lv != nil { lv.Expiry = expiry return nil } return errors.Errorf("No datum for given labelvalues %q", labelvalues) } // LabelSet is an object that maps the keys of a Metric to the labels naming a // Datum, for use when enumerating Datums from a Metric. type LabelSet struct { Labels map[string]string Datum datum.Datum } func zip(keys []string, values []string) map[string]string { r := make(map[string]string) for i, v := range values { r[keys[i]] = v } return r } // EmitLabelSets enumerates the LabelSets corresponding to the LabelValues of a // Metric. It emits them onto the provided channel, then closes the channel to // signal completion. 
func (m *Metric) EmitLabelSets(c chan *LabelSet) { for _, lv := range m.LabelValues { ls := &LabelSet{zip(m.Keys, lv.Labels), lv.Value} c <- ls } close(c) } // UnmarshalJSON converts a JSON byte string into a LabelValue. func (lv *LabelValue) UnmarshalJSON(b []byte) error { var obj map[string]*json.RawMessage err := json.Unmarshal(b, &obj) if err != nil { return err } labels := make([]string, 0) if _, ok := obj["Labels"]; ok { err = json.Unmarshal(*obj["Labels"], &labels) if err != nil { return err } } lv.Labels = labels var valObj map[string]*json.RawMessage err = json.Unmarshal(*obj["Value"], &valObj) if err != nil { return err } var t int64 err = json.Unmarshal(*valObj["Time"], &t) if err != nil { return err } var i int64 err = json.Unmarshal(*valObj["Value"], &i) if err != nil { return err } lv.Value = datum.MakeInt(i, time.Unix(t/1e9, t%1e9)) return nil } func (m *Metric) String() string { m.RLock() defer m.RUnlock() return fmt.Sprintf("Metric: name=%s program=%s kind=%v type=%s hidden=%v keys=%v labelvalues=%v source=%s buckets=%v", m.Name, m.Program, m.Kind, m.Type, m.Hidden, m.Keys, m.LabelValues, m.Source, m.Buckets) } // SetSource sets the source of a metric, describing where in user programmes it was defined. func (m *Metric) SetSource(source string) { m.Lock() defer m.Unlock() m.Source = source } mtail-3.0.0~rc54+git0ff5/internal/metrics/metric_test.go000066400000000000000000000152731460063571700231300ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package metrics import ( "encoding/json" "fmt" "math/rand" "reflect" "sync" "testing" "testing/quick" "time" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) func TestKindType(t *testing.T) { v := Kind(0) if s := v.String(); s != "Unknown" { t.Errorf("Kind.String() returned %q not Unknown", s) } v = Counter if s := v.String(); s != "Counter" { t.Errorf("Kind.String() returned %q not Counter", s) } v = Gauge if s := v.String(); s != "Gauge" { t.Errorf("Kind.String() returned %q not Gauge", s) } v = Timer if s := v.String(); s != "Timer" { t.Errorf("Kind.String() returned %q not Timer", s) } } func TestScalarMetric(t *testing.T) { v := NewMetric("test", "prog", Counter, Int) d, err := v.GetDatum() if err != nil { t.Errorf("no datum: %s", err) } datum.IncIntBy(d, 1, time.Now().UTC()) lv := v.FindLabelValueOrNil([]string{}) if lv == nil { t.Fatal("couldn't find labelvalue") } newD := lv.Value if newD == nil { t.Error("new_d is nil") } if newD.ValueString() != "1" { t.Error("value not 1") } d2, err := v.GetDatum("a", "b") if err == nil { t.Errorf("datum with keys sohuld have returned no value, got %v", d2) } } func TestDimensionedMetric(t *testing.T) { v := NewMetric("test", "prog", Counter, Int, "foo") d, _ := v.GetDatum("a") datum.IncIntBy(d, 1, time.Now().UTC()) if v.FindLabelValueOrNil([]string{"a"}).Value.ValueString() != "1" { t.Errorf("fail") } v = NewMetric("test", "prog", Counter, Int, "foo", "bar") d, _ = v.GetDatum("a", "b") datum.IncIntBy(d, 1, time.Now().UTC()) if v.FindLabelValueOrNil([]string{"a", "b"}).Value.ValueString() != "1" { t.Errorf("fail") } v = NewMetric("test", "prog", Counter, Int, "foo", "bar", "quux") d, _ = v.GetDatum("a", "b", "c") datum.IncIntBy(d, 1, time.Now().UTC()) if v.FindLabelValueOrNil([]string{"a", "b", "c"}).Value.ValueString() != "1" { t.Errorf("fail") } } var labelSetTests = []struct { values []string expectedLabels map[string]string }{ { []string{"a", "b", "c"}, 
map[string]string{"foo": "a", "bar": "b", "quux": "c"}, }, { []string{"a", "b", "d"}, map[string]string{"foo": "a", "bar": "b", "quux": "d"}, }, } func TestEmitLabelSet(t *testing.T) { ts := time.Now().UTC() for _, tc := range labelSetTests { tc := tc t.Run(fmt.Sprintf("%v", tc.values), func(t *testing.T) { m := NewMetric("test", "prog", Gauge, Int, "foo", "bar", "quux") d, _ := m.GetDatum(tc.values...) datum.SetInt(d, 37, ts) c := make(chan *LabelSet) go m.EmitLabelSets(c) ls := <-c testutil.ExpectNoDiff(t, tc.expectedLabels, ls.Labels) }) } } func TestFindLabelValueOrNil(t *testing.T) { m0 := NewMetric("foo", "prog", Counter, Int) if r0 := m0.FindLabelValueOrNil([]string{}); r0 != nil { t.Errorf("m0 should be nil: %v", r0) } d, err := m0.GetDatum() if err != nil { t.Errorf("Bad datum %v: %v\n", d, err) } if r1 := m0.FindLabelValueOrNil([]string{}); r1 == nil { t.Errorf("m0 should not be nil: %v", r1) } m1 := NewMetric("bar", "prog", Counter, Int, "a") d1, err1 := m1.GetDatum("1") if err1 != nil { t.Errorf("err1 %v: %v\n", d1, err1) } if r2 := m1.FindLabelValueOrNil([]string{"0"}); r2 != nil { t.Errorf("r2 should be nil") } if r3 := m1.FindLabelValueOrNil([]string{"1"}); r3 == nil { t.Errorf("r3 should be non nil") } } func TestAppendLabelValue(t *testing.T) { m := NewMetric("foo", "prog", Counter, Int, "bar") l := []string{"test"} d0 := datum.MakeInt(66, time.Unix(0, 0)) lv := &LabelValue{Labels: l, Value: d0} err := m.AppendLabelValue(lv) if err != nil { t.Errorf("Bad append %v: %v\n", d0, err) } d1, err := m.GetDatum(l...) 
if err != nil { t.Errorf("Bad datum %v: %v\n", d1, err) } testutil.ExpectNoDiff(t, d0, d1) } func timeGenerator(rand *rand.Rand) time.Time { months := []time.Month{ time.January, time.February, time.March, time.April, time.May, time.June, time.July, time.August, time.September, time.October, time.November, time.December, } return time.Date( rand.Intn(9999), months[rand.Intn(len(months))], rand.Intn(31), rand.Intn(24), rand.Intn(60), rand.Intn(60), int(rand.Int31()), time.UTC, ) } func TestMetricJSONRoundTrip(t *testing.T) { rand := rand.New(rand.NewSource(0)) f := func(name, prog string, kind Kind, keys []string, val, _, _ int64) bool { m := NewMetric(name, prog, kind, Int, keys...) labels := make([]string, 0) for range keys { if l, ok := quick.Value(reflect.TypeOf(name), rand); ok { labels = append(labels, l.String()) } else { t.Errorf("failed to create value for labels") break } } d, _ := m.GetDatum(labels...) datum.SetInt(d, val, timeGenerator(rand)) j, e := json.Marshal(m) if e != nil { t.Errorf("json.Marshal failed: %s\n", e) return false } r := newMetric(0) e = json.Unmarshal(j, &r) if e != nil { t.Errorf("json.Unmarshal failed: %s\n", e) return false } return testutil.ExpectNoDiff(t, m, r, testutil.IgnoreUnexported(sync.RWMutex{}, Metric{})) } if err := quick.Check(f, nil); err != nil { t.Error(err) } } func TestTimer(t *testing.T) { m := NewMetric("test", "prog", Timer, Int) n := NewMetric("test", "prog", Timer, Int) testutil.ExpectNoDiff(t, m, n, testutil.IgnoreUnexported(sync.RWMutex{}, Metric{})) d, _ := m.GetDatum() datum.IncIntBy(d, 1, time.Now().UTC()) lv := m.FindLabelValueOrNil([]string{}) if lv == nil { t.Fatal("couldn't find labelvalue") } newD := lv.Value if newD == nil { t.Errorf("new_d is nil") } if newD.ValueString() != "1" { t.Errorf("value not 1") } } func TestRemoveMetricLabelValue(t *testing.T) { m := NewMetric("test", "prog", Counter, Int, "a", "b", "c") _, e := m.GetDatum("a", "a", "a") if e != nil { t.Errorf("Getdatum failed: %s", e) } 
lv := m.FindLabelValueOrNil([]string{"a", "a", "a"}) if lv == nil { t.Errorf("coidln't find labelvalue") } e = m.RemoveDatum("a", "a", "a") if e != nil { t.Errorf("couldn't remove datum: %s", e) } lv = m.FindLabelValueOrNil([]string{"a", "a", "a"}) if lv != nil { t.Errorf("label value still exists") } } func TestMetricLabelValueRemovePastLimit(t *testing.T) { m := NewMetric("test", "prog", Counter, Int, "foo") m.Limit = 1 _, err := m.GetDatum("a") testutil.FatalIfErr(t, err) m.RemoveOldestDatum() _, err = m.GetDatum("b") testutil.FatalIfErr(t, err) m.RemoveOldestDatum() _, err = m.GetDatum("c") testutil.FatalIfErr(t, err) m.RemoveOldestDatum() if len(m.LabelValues) > 2 { t.Errorf("Expected 2 labelvalues got %#v", m.LabelValues) } if x := m.FindLabelValueOrNil([]string{"a"}); x != nil { t.Errorf("found label a which is unexpected: %#v", x) } } mtail-3.0.0~rc54+git0ff5/internal/metrics/store.go000066400000000000000000000125471460063571700217430ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package metrics import ( "context" "encoding/json" "io" "reflect" "sync" "time" "github.com/golang/glog" "github.com/pkg/errors" ) // Store contains Metrics. type Store struct { searchMu sync.RWMutex // read for iterate and insert, write for delete insertMu sync.Mutex // locked for insert and delete, unlocked for iterate Metrics map[string][]*Metric } // NewStore returns a new metric Store. func NewStore() (s *Store) { s = &Store{} s.ClearMetrics() return } // Add is used to add one metric to the Store. 
func (s *Store) Add(m *Metric) error { s.insertMu.Lock() defer s.insertMu.Unlock() s.searchMu.RLock() glog.V(1).Infof("Adding a new metric %v", m) dupeIndex := -1 if len(s.Metrics[m.Name]) > 0 { t := s.Metrics[m.Name][0].Kind if m.Kind != t { s.searchMu.RUnlock() return errors.Errorf("metric %s has different kind %v to existing %v", m.Name, m.Kind, t) } // To avoid duplicate metrics: // - copy old LabelValues into new metric; // - discard old metric. for i, v := range s.Metrics[m.Name] { if v.Program != m.Program { continue } if v.Type != m.Type { continue } if v.Source != m.Source { continue } dupeIndex = i glog.V(2).Infof("v keys: %v m.keys: %v", v.Keys, m.Keys) // If a set of label keys has changed, discard // old metric completely, w/o even copying old // data, as they are now incompatible. if len(v.Keys) != len(m.Keys) || !reflect.DeepEqual(v.Keys, m.Keys) { break } glog.V(2).Infof("v buckets: %v m.buckets: %v", v.Buckets, m.Buckets) // Otherwise, copy everything into the new metric glog.V(2).Infof("Found duped metric: %d", dupeIndex) for j, oldLabel := range v.LabelValues { glog.V(2).Infof("Labels: %d %s", j, oldLabel.Labels) d, err := v.GetDatum(oldLabel.Labels...) if err != nil { return err } if err = m.RemoveDatum(oldLabel.Labels...); err != nil { return err } lv := &LabelValue{Labels: oldLabel.Labels, Value: d} if err := m.AppendLabelValue(lv); err != nil { return err } } } } s.searchMu.RUnlock() // We're in modify mode now so lock out search s.searchMu.Lock() s.Metrics[m.Name] = append(s.Metrics[m.Name], m) if dupeIndex >= 0 { glog.V(2).Infof("removing original, keeping its clone") s.Metrics[m.Name] = append(s.Metrics[m.Name][0:dupeIndex], s.Metrics[m.Name][dupeIndex+1:]...) } s.searchMu.Unlock() return nil } // FindMetricOrNil returns a metric in a store, or returns nil if not found. 
func (s *Store) FindMetricOrNil(name, prog string) *Metric { s.searchMu.RLock() defer s.searchMu.RUnlock() ml, ok := s.Metrics[name] if !ok { return nil } for _, m := range ml { if m.Program != prog { continue } return m } return nil } // ClearMetrics empties the store of all metrics. func (s *Store) ClearMetrics() { s.insertMu.Lock() defer s.insertMu.Unlock() s.searchMu.Lock() defer s.searchMu.Unlock() s.Metrics = make(map[string][]*Metric) } // MarshalJSON returns a JSON byte string representing the Store. func (s *Store) MarshalJSON() (b []byte, err error) { s.searchMu.RLock() defer s.searchMu.RUnlock() ms := make([]*Metric, 0) for _, ml := range s.Metrics { ms = append(ms, ml...) } return json.Marshal(ms) } // Range calls f sequentially for each Metric present in the store. // The Metric is not locked when f is called. // If f returns non nil error, Range stops the iteration. // This looks a lot like sync.Map, ay. func (s *Store) Range(f func(*Metric) error) error { s.searchMu.RLock() defer s.searchMu.RUnlock() for _, ml := range s.Metrics { for _, m := range ml { if err := f(m); err != nil { return err } } } return nil } // Gc iterates through the Store looking for metrics that can be tidied up, // if they are passed their expiry or sized greater than their limit. func (s *Store) Gc() error { glog.Info("Running Store.Expire()") now := time.Now() return s.Range(func(m *Metric) error { if m.Limit > 0 && len(m.LabelValues) >= m.Limit { for i := len(m.LabelValues); i > m.Limit; i-- { m.RemoveOldestDatum() } } for i := 0; i < len(m.LabelValues); i++ { lv := m.LabelValues[i] if lv.Expiry <= 0 { continue } if now.Sub(lv.Value.TimeUTC()) > lv.Expiry { err := m.RemoveDatum(lv.Labels...) if err != nil { return err } i-- } } return nil }) } // StartGcLoop runs a permanent goroutine to expire metrics every duration. 
func (s *Store) StartGcLoop(ctx context.Context, duration time.Duration) { if duration <= 0 { glog.Infof("Metric store expiration disabled") return } go func() { glog.Infof("Starting metric store expiry loop every %s", duration.String()) ticker := time.NewTicker(duration) defer ticker.Stop() for { select { case <-ticker.C: if err := s.Gc(); err != nil { glog.Info(err) } case <-ctx.Done(): return } } }() } // WriteMetrics dumps the current state of the metrics store in JSON format to // the io.Writer. func (s *Store) WriteMetrics(w io.Writer) error { s.searchMu.RLock() b, err := json.MarshalIndent(s.Metrics, "", " ") s.searchMu.RUnlock() if err != nil { return errors.Wrap(err, "failed to marshal metrics into json") } _, err = w.Write(b) if err != nil { return errors.Wrap(err, "failed to write metrics") } return nil } mtail-3.0.0~rc54+git0ff5/internal/metrics/store_bench_test.go000066400000000000000000000115061460063571700241330ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package metrics import ( "fmt" "math" "math/rand" "reflect" "testing" "testing/quick" ) const ( maxItemsLog2 = 10 maxLabelsLog2 = 13 ) // newRandMetric makes a new, randomly filled Metric. 
func newRandMetric(tb testing.TB, rand *rand.Rand, i int) *Metric { tb.Helper() nameVal, ok := quick.Value(reflect.TypeOf(""), rand) if !ok { tb.Fatalf("%d: can't make a name", i) } progVal, ok := quick.Value(reflect.TypeOf(""), rand) if !ok { tb.Fatalf("%d: can't make a prog", i) } kindVal, ok := quick.Value(reflect.TypeOf(Counter), rand) if !ok { tb.Fatalf("%d: can't make a kind", i) } typeVal, ok := quick.Value(reflect.TypeOf(Int), rand) if !ok { tb.Fatalf("%d: can't make a type", i) } keysVal, ok := quick.Value(reflect.TypeOf([]string{}), rand) if !ok { tb.Fatalf("%d: can't make a key list", i) } return NewMetric(nameVal.Interface().(string), progVal.Interface().(string), kindVal.Interface().(Kind), typeVal.Interface().(Type), keysVal.Interface().([]string)...) } type bench struct { name string setup func(b *testing.B, rand *rand.Rand, items int, m *[]*Metric, s *Store) b func(b *testing.B, items int, m []*Metric, s *Store) } func fillMetric(b *testing.B, rand *rand.Rand, items int, m *[]*Metric, _ *Store) { b.Helper() for i := 0; i < items; i++ { (*m)[i] = newRandMetric(b, rand, i) } } func addToStore(b *testing.B, items int, m []*Metric, s *Store) { b.Helper() for j := 0; j < items; j++ { s.Add(m[j]) } } func BenchmarkStore(b *testing.B) { benches := []bench{ { name: "Add", setup: fillMetric, b: addToStore, }, { name: "Iterate", setup: func(b *testing.B, rand *rand.Rand, items int, m *[]*Metric, s *Store) { b.Helper() fillMetric(b, rand, items, m, s) addToStore(b, items, *m, s) }, b: func(b *testing.B, _ int, _ []*Metric, s *Store) { b.Helper() s.Range(func(*Metric) error { return nil }) }, }, } rand := rand.New(rand.NewSource(99)) for _, bench := range benches { bench := bench for _, gc := range []bool{false, true} { gc := gc gcStr := "" if gc { gcStr = "WithGc" } for _, parallel := range []bool{false, true} { parallel := parallel parallelStr := "" if parallel { parallelStr = "Parallel" } for i := 0.; i <= maxItemsLog2; i++ { items := int(math.Pow(2, i)) 
b.Run(fmt.Sprintf("%s%s%s-%d", bench.name, gcStr, parallelStr, items), func(b *testing.B) { s := NewStore() m := make([]*Metric, items) if bench.setup != nil { bench.setup(b, rand, items, &m, s) } b.ResetTimer() if parallel { b.RunParallel(func(pb *testing.PB) { for pb.Next() { bench.b(b, items, m, s) } }) } else { for n := 0; n < b.N; n++ { bench.b(b, items, m, s) if gc { s.Gc() } } } }) } } } } } func newRandLabels(tb testing.TB, rand *rand.Rand, i int) []string { tb.Helper() lv := make([]string, i) for j := 0; j < i; j++ { val, ok := quick.Value(reflect.TypeOf(""), rand) if !ok { tb.Fatalf("%d-%d: can't make a label", i, j) } lv[j] = val.Interface().(string) } return lv } func fillLabel(b *testing.B, rand *rand.Rand, items, keys int, lvs *[][]string, _ *Metric) { b.Helper() for i := 0; i < items; i++ { (*lvs)[i] = newRandLabels(b, rand, keys) } } func getDatum(b *testing.B, items int, lvs *[][]string, m *Metric) { b.Helper() for j := 0; j < items; j++ { lv := (*lvs)[j] m.GetDatum(lv...) } } type metricBench struct { name string setup func(b *testing.B, rand *rand.Rand, items, keys int, lvs *[][]string, m *Metric) b func(b *testing.B, items int, lv *[][]string, m *Metric) } func BenchmarkMetric(b *testing.B) { maxKeys := 4 benches := []metricBench{ { name: "GetDatum", setup: fillLabel, b: getDatum, }, } rand := rand.New(rand.NewSource(99)) for _, bench := range benches { bench := bench for _, parallel := range []bool{false, true} { parallel := parallel parallelStr := "" if parallel { parallelStr = "Parallel" } for i := 1; i <= maxLabelsLog2; i++ { items := int(math.Pow(2, float64(i))) lv := newRandLabels(b, rand, maxKeys) b.Run(fmt.Sprintf("%s%s-%d", bench.name, parallelStr, items), func(b *testing.B) { m := NewMetric("test", "prog", Counter, Int, lv...) 
lvs := make([][]string, items) if bench.setup != nil { bench.setup(b, rand, items, maxKeys, &lvs, m) } b.ResetTimer() if parallel { b.RunParallel(func(pb *testing.PB) { for pb.Next() { bench.b(b, items, &lvs, m) } }) } else { for n := 0; n < b.N; n++ { bench.b(b, items, &lvs, m) } } }) } } } } mtail-3.0.0~rc54+git0ff5/internal/metrics/store_test.go000066400000000000000000000117771460063571700230060ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package metrics import ( "strconv" "testing" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) func TestMatchingKind(t *testing.T) { s := NewStore() m1 := NewMetric("foo", "prog", Counter, Int) err := s.Add(m1) testutil.FatalIfErr(t, err) m2 := NewMetric("foo", "prog1", Gauge, Int) err = s.Add(m2) if err == nil { t.Fatal("should be err") } } func TestDuplicateMetric(t *testing.T) { expectedMetrics := 0 s := NewStore() _ = s.Add(NewMetric("foo", "prog", Counter, Int, "user", "host")) _ = s.Add(NewMetric("foo", "prog", Counter, Int)) expectedMetrics++ if len(s.Metrics["foo"]) != expectedMetrics { t.Fatalf("should not add duplicate metric. Store: %v", s) } _ = s.Add(NewMetric("foo", "prog", Counter, Float)) glog.Infof("Store: %v", s) expectedMetrics++ if len(s.Metrics["foo"]) != expectedMetrics { t.Fatalf("should add metric of a different type: %v", s) } _ = s.Add(NewMetric("foo", "prog", Counter, Int, "user", "host", "zone", "domain")) glog.Infof("Store: %v", s) if len(s.Metrics["foo"]) != expectedMetrics { t.Fatalf("should not add duplicate metric, but replace the old one. 
Store: %v", s) } _ = s.Add(NewMetric("foo", "prog1", Counter, Int)) glog.Infof("Store: %v", s) expectedMetrics++ if len(s.Metrics["foo"]) != expectedMetrics { t.Fatalf("should add metric with a different prog: %v", s) } _ = s.Add(NewMetric("foo", "prog1", Counter, Float)) glog.Infof("Store: %v", s) expectedMetrics++ if len(s.Metrics["foo"]) != expectedMetrics { t.Fatalf("should add metric of a different type: %v", s) } } // A program can add a metric with the same name and of different type. // Prometheus behavior in this case is undefined. @see // https://github.com/google/mtail/issues/130 func TestAddMetricDifferentType(t *testing.T) { expected := 2 s := NewStore() err := s.Add(NewMetric("foo", "prog", Counter, Int)) testutil.FatalIfErr(t, err) // Duplicate metric of different type from *the same program err = s.Add(NewMetric("foo", "prog", Counter, Float)) testutil.FatalIfErr(t, err) if len(s.Metrics["foo"]) != expected { t.Fatalf("should have %d metrics of different Type: %v", expected, s.Metrics) } // Duplicate metric of different type from a different program err = s.Add(NewMetric("foo", "prog1", Counter, Float)) expected++ testutil.FatalIfErr(t, err) if len(s.Metrics["foo"]) != expected { t.Fatalf("should have %d metrics of different Type: %v", expected, s.Metrics) } } func TestExpireOldDatum(t *testing.T) { s := NewStore() m := NewMetric("foo", "prog", Counter, Int, "a", "b", "c") testutil.FatalIfErr(t, s.Add(m)) d, err := m.GetDatum("1", "2", "3") if err != nil { t.Error(err) } datum.SetInt(d, 1, time.Now().Add(-time.Hour)) lv := m.FindLabelValueOrNil([]string{"1", "2", "3"}) if lv == nil { t.Fatal("couldn't find lv") } lv.Expiry = time.Minute d, err = m.GetDatum("4", "5", "6") if err != nil { t.Error(err) } datum.SetInt(d, 1, time.Now().Add(-time.Hour)) lv = m.FindLabelValueOrNil([]string{"4", "5", "6"}) if lv == nil { t.Errorf("couldn't find lv") } testutil.FatalIfErr(t, s.Gc()) lv = m.FindLabelValueOrNil([]string{"1", "2", "3"}) if lv != nil { 
t.Errorf("lv not expired: %#v", lv) t.Logf("Store: %#v", s) } lv = m.FindLabelValueOrNil([]string{"4", "5", "6"}) if lv == nil { t.Errorf("lv expired") t.Logf("Store: %#v", s) } } func TestExpireOversizeDatum(t *testing.T) { s := NewStore() m := NewMetric("foo", "prog", Counter, Int, "foo") m.Limit = 1 testutil.FatalIfErr(t, s.Add(m)) _, err := m.GetDatum("a") testutil.FatalIfErr(t, err) testutil.FatalIfErr(t, s.Gc()) _, err = m.GetDatum("b") testutil.FatalIfErr(t, err) testutil.FatalIfErr(t, s.Gc()) _, err = m.GetDatum("c") testutil.FatalIfErr(t, err) testutil.FatalIfErr(t, s.Gc()) if len(m.LabelValues) > 2 { t.Errorf("Expected 2 labelvalues got %#v", m.LabelValues) } if x := m.FindLabelValueOrNil([]string{"a"}); x != nil { t.Errorf("found label a which is unexpected: %#v", x) } } func TestExpireManyMetrics(t *testing.T) { s := NewStore() m := NewMetric("foo", "prog", Counter, Int, "id") testutil.FatalIfErr(t, s.Add(m)) d, err := m.GetDatum("0") if err != nil { t.Error(err) } datum.SetInt(d, 1, time.Now().Add(-time.Hour)) lv := m.FindLabelValueOrNil([]string{"0"}) if lv == nil { t.Fatal("couldn't find lv") } for i := 1; i < 10; i++ { d, err := m.GetDatum(strconv.Itoa(i)) if err != nil { t.Error(err) } datum.SetInt(d, 1, time.Now().Add(-time.Hour)) lv = m.FindLabelValueOrNil([]string{strconv.Itoa(i)}) if lv == nil { t.Fatal("couldn't find lv") } lv.Expiry = time.Minute } testutil.FatalIfErr(t, s.Gc()) lv = m.FindLabelValueOrNil([]string{"8"}) if lv != nil { t.Errorf("lv not expired: %#v", lv) t.Logf("Store: %#v", s) } lv = m.FindLabelValueOrNil([]string{"0"}) if lv == nil { t.Errorf("lv expired") t.Logf("Store: %#v", s) } } mtail-3.0.0~rc54+git0ff5/internal/metrics/testing.go000066400000000000000000000025251460063571700222570ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package metrics type MetricSlice []*Metric func (s MetricSlice) Len() int { return len(s) } func (s MetricSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s MetricSlice) Less(i, j int) bool { return Less(s[i], s[j]) } func Less(m1, m2 *Metric) bool { if m1.Name < m2.Name { return true } if m1.Name > m2.Name { return false } if m1.Program < m2.Program { return true } if m1.Program > m2.Program { return false } if m1.Kind < m2.Kind { return true } if m1.Kind > m2.Kind { return false } if m1.Type < m2.Type { return true } if m1.Type > m2.Type { return false } if len(m1.Keys) < len(m2.Keys) { return true } if len(m1.Keys) > len(m2.Keys) { return false } for x, k := range m1.Keys { if k < m2.Keys[x] { return true } if k > m2.Keys[x] { return false } } for x, lv := range m1.LabelValues { if len(lv.Labels) < len(m2.LabelValues[x].Labels) { return true } if len(lv.Labels) > len(m2.LabelValues[x].Labels) { return false } for y, k := range lv.Labels { if k < m2.LabelValues[x].Labels[y] { return true } if k > m2.LabelValues[x].Labels[y] { return false } } // if lv.Value < m2.LabelValues[x].Value { // return true // } } return false } mtail-3.0.0~rc54+git0ff5/internal/metrics/type.go000066400000000000000000000016531460063571700215640ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package metrics import ( "math/rand" "reflect" ) // Type describes the type of value stored in a Datum. type Type int const ( // Int indicates this metric is an integer metric type. Int Type = iota // Float indicates this metric is a floating-point metric type. Float // String indicates this metric contains printable string values. String // Buckets indicates this metric is a histogram metric type. Buckets endType // end of enumeration for testing ) func (t Type) String() string { switch t { case Int: return "Int" case Float: return "Float" case String: return "String" case Buckets: return "Buckets" } return "?" 
} // Generate implements the quick.Generator interface for Type. func (Type) Generate(rand *rand.Rand, _ int) reflect.Value { return reflect.ValueOf(Type(rand.Intn(int(endType)))) } mtail-3.0.0~rc54+git0ff5/internal/mtail/000077500000000000000000000000001460063571700177075ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/mtail/basic_tail_integration_test.go000066400000000000000000000033361460063571700257770ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. package mtail_test import ( "fmt" "os" "path/filepath" "sync" "testing" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) func TestBasicTail(t *testing.T) { testutil.SkipIfShort(t) if testing.Verbose() { testutil.SetFlag(t, "vmodule", "tail=2,log_watcher=2") } logDir := testutil.TestTempDir(t) m, stopM := mtail.TestStartServer(t, 1, mtail.LogPathPatterns(logDir+"/*"), mtail.ProgramPath("../../examples/linecount.mtail")) defer stopM() logFile := filepath.Join(logDir, "log") lineCountCheck := m.ExpectMapExpvarDeltaWithDeadline("log_lines_total", logFile, 3) logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1) f := testutil.TestOpenFile(t, logFile) defer f.Close() m.PollWatched(1) // Force sync to EOF for i := 1; i <= 3; i++ { testutil.WriteString(t, f, fmt.Sprintf("%d\n", i)) } m.PollWatched(1) // Expect to read 3 lines here. 
var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() lineCountCheck() }() go func() { defer wg.Done() logCountCheck() }() wg.Wait() } func TestNewLogDoesNotMatchIsIgnored(t *testing.T) { testutil.SkipIfShort(t) workdir := testutil.TestTempDir(t) // Start mtail logFilepath := filepath.Join(workdir, "log") m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(logFilepath)) defer stopM() logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 0) // touch log file newLogFilepath := filepath.Join(workdir, "log1") logFile, err := os.Create(newLogFilepath) testutil.FatalIfErr(t, err) defer logFile.Close() m.PollWatched(0) // No streams so don't wait for any. logCountCheck() } mtail-3.0.0~rc54+git0ff5/internal/mtail/buildinfo.go000066400000000000000000000010301460063571700222030ustar00rootroot00000000000000// Copyright 2020 Google Inc. All Rights Reserved. // This file is available under the Apache license. package mtail import ( "fmt" "runtime" ) // BuildInfo records the compile-time information for use when reporting the mtail version. type BuildInfo struct { Branch string Version string Revision string } func (b BuildInfo) String() string { return fmt.Sprintf( "mtail version %s git revision %s go version %s go arch %s go os %s", b.Version, b.Revision, runtime.Version(), runtime.GOARCH, runtime.GOOS, ) } mtail-3.0.0~rc54+git0ff5/internal/mtail/compile_only_integration_test.go000066400000000000000000000016301460063571700263710ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package mtail_test import ( "context" "os" "path/filepath" "strings" "testing" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) func TestBadProgramFailsCompilation(t *testing.T) { testutil.SkipIfShort(t) progDir := testutil.TestTempDir(t) err := os.WriteFile(filepath.Join(progDir, "bad.mtail"), []byte("asdfasdf\n"), 0o666) testutil.FatalIfErr(t, err) ctx := context.Background() // Compile-only fails program compilation at server start, not after it's running. _, err = mtail.New(ctx, metrics.NewStore(), mtail.ProgramPath(progDir), mtail.CompileOnly) if err == nil { t.Error("expected error from mtail") } if !strings.Contains(err.Error(), "compile failed") { t.Error("compile failed not reported") } } mtail-3.0.0~rc54+git0ff5/internal/mtail/examples_integration_test.go000066400000000000000000000121341460063571700255170ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package mtail_test import ( "context" "fmt" "io" "os" "path/filepath" "sync" "testing" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/mtail/golden" "github.com/google/mtail/internal/testutil" "github.com/google/mtail/internal/waker" ) const exampleTimeout = 10 * time.Second var exampleProgramTests = []struct { programfile string // Example program file. logfile string // Sample log input. goldenfile string // Expected metrics after processing. 
}{ { "examples/rsyncd.mtail", "testdata/rsyncd.log", "testdata/rsyncd.golden", }, { "examples/sftp.mtail", "testdata/sftp_chroot.log", "testdata/sftp_chroot.golden", }, { "examples/dhcpd.mtail", "testdata/anonymised_dhcpd_log", "testdata/anonymised_dhcpd_log.golden", }, { "examples/ntpd.mtail", "testdata/ntp4", "testdata/ntp4.golden", }, { "examples/ntpd_peerstats.mtail", "testdata/xntp3_peerstats", "testdata/xntp3_peerstats.golden", }, { "examples/apache_combined.mtail", "testdata/apache-combined.log", "testdata/apache-combined.golden", }, { "examples/apache_common.mtail", "testdata/apache-common.log", "testdata/apache-common.golden", }, { "examples/vsftpd.mtail", "testdata/vsftpd_log", "testdata/vsftpd_log.golden", }, { "examples/vsftpd.mtail", "testdata/vsftpd_xferlog", "testdata/vsftpd_xferlog.golden", }, { "examples/lighttpd.mtail", "testdata/lighttpd_access.log", "testdata/lighttpd_accesslog.golden", }, { "examples/mysql_slowqueries.mtail", "testdata/mysql_slowqueries.log", "testdata/mysql_slowqueries.golden", }, } func TestExamplePrograms(t *testing.T) { testutil.SkipIfShort(t) for _, tc := range exampleProgramTests { tc := tc t.Run(fmt.Sprintf("%s on %s", tc.programfile, tc.logfile), testutil.TimeoutTest(exampleTimeout, func(t *testing.T) { //nolint:thelper ctx, cancel := context.WithCancel(context.Background()) waker, _ := waker.NewTest(ctx, 0) // oneshot means we should never need to wake the stream store := metrics.NewStore() programFile := filepath.Join("../..", tc.programfile) mtail, err := mtail.New(ctx, store, mtail.ProgramPath(programFile), mtail.LogPathPatterns(tc.logfile), mtail.OneShot, mtail.OmitMetricSource, mtail.DumpAstTypes, mtail.DumpBytecode, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker)) testutil.FatalIfErr(t, err) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() testutil.FatalIfErr(t, mtail.Run()) }() // Oneshot mode means we can wait for shutdown before cancelling. 
wg.Wait() cancel() g, err := os.Open(tc.goldenfile) testutil.FatalIfErr(t, err) defer g.Close() goldenStore := golden.ReadTestData(g, tc.programfile) var storeList metrics.MetricSlice store.Range(func(m *metrics.Metric) error { storeList = append(storeList, m) return nil }) testutil.ExpectNoDiff(t, goldenStore, storeList, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{})) })) } } // This test only compiles examples, but has coverage over all examples // provided. This ensures we ship at least syntactically correct examples. func TestCompileExamplePrograms(t *testing.T) { testutil.SkipIfShort(t) matches, err := filepath.Glob("../../examples/*.mtail") testutil.FatalIfErr(t, err) for _, tc := range matches { tc := tc name := filepath.Base(tc) t.Run(name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) s := metrics.NewStore() mtail, err := mtail.New(ctx, s, mtail.ProgramPath(tc), mtail.CompileOnly, mtail.OmitMetricSource, mtail.DumpAstTypes, mtail.DumpBytecode) testutil.FatalIfErr(t, err) // Ensure that run shuts down for CompileOnly testutil.FatalIfErr(t, mtail.Run()) cancel() }) } } func BenchmarkProgram(b *testing.B) { for _, bm := range exampleProgramTests { bm := bm b.Run(fmt.Sprintf("%s on %s", bm.programfile, bm.logfile), func(b *testing.B) { b.ReportAllocs() logDir := testutil.TestTempDir(b) logFile := filepath.Join(logDir, "test.log") log := testutil.TestOpenFile(b, logFile) ctx, cancel := context.WithCancel(context.Background()) waker, awaken := waker.NewTest(ctx, 1) store := metrics.NewStore() programFile := filepath.Join("../..", bm.programfile) mtail, err := mtail.New(ctx, store, mtail.ProgramPath(programFile), mtail.LogPathPatterns(log.Name()), mtail.LogstreamPollWaker(waker)) testutil.FatalIfErr(b, err) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() testutil.FatalIfErr(b, mtail.Run()) }() var total int64 b.ResetTimer() for i := 0; i < b.N; i++ { l, err 
:= os.Open(bm.logfile) testutil.FatalIfErr(b, err) count, err := io.Copy(log, l) testutil.FatalIfErr(b, err) total += count awaken(1) } cancel() wg.Wait() b.StopTimer() b.SetBytes(total) }) } } mtail-3.0.0~rc54+git0ff5/internal/mtail/examples_integration_unix_test.go000066400000000000000000000153171460063571700265700ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. //go:build unix // +build unix package mtail_test import ( "context" "errors" "fmt" "io" "net" "os" "path/filepath" "sync" "testing" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" "github.com/google/mtail/internal/waker" "golang.org/x/sys/unix" ) // TestFilePipeStreamComparison is a unix-specific test since unix.Mkfifo is not defined on Windows. func TestFilePipeStreamComparison(t *testing.T) { testutil.SkipIfShort(t) for _, tc := range exampleProgramTests { tc := tc t.Run(fmt.Sprintf("%s on %s", tc.programfile, tc.logfile), testutil.TimeoutTest(exampleTimeout, func(t *testing.T) { //nolint:thelper ctx, cancel := context.WithCancel(context.Background()) waker := waker.NewTestAlways() fileStore, pipeStore := metrics.NewStore(), metrics.NewStore() programFile := filepath.Join("../..", tc.programfile) // Set up the pipe tmpDir := testutil.TestTempDir(t) pipeName := filepath.Join(tmpDir, filepath.Base(tc.logfile)) testutil.FatalIfErr(t, unix.Mkfifo(pipeName, 0o600)) var wg sync.WaitGroup wg.Add(3) // This goroutine copies bytes from the source file into the // fifo, once the fifo has been opened for read. 
go func() { defer wg.Done() source, err := os.OpenFile(tc.logfile, os.O_RDONLY, 0) testutil.FatalIfErr(t, err) // not NONBLOCK to wait for pipeMtail to start reading the pipe pipe, err := os.OpenFile(pipeName, os.O_WRONLY, os.ModeNamedPipe) testutil.FatalIfErr(t, err) n, err := io.Copy(pipe, source) testutil.FatalIfErr(t, err) glog.Infof("Copied %d bytes into pipe", n) source.Close() pipe.Close() }() // Two mtails both alike in dignity. go func() { defer wg.Done() fileMtail, err := mtail.New(ctx, fileStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(tc.logfile), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker)) if err != nil { t.Error(err) } if err := fileMtail.Run(); err != nil { t.Error(err) } }() go func() { defer wg.Done() pipeMtail, err := mtail.New(ctx, pipeStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(pipeName), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker)) testutil.FatalIfErr(t, err) if err := pipeMtail.Run(); err != nil { t.Error(err) } }() // Oneshot mode means we can wait for shutdown before cancelling. wg.Wait() cancel() var pipeMetrics, fileMetrics metrics.MetricSlice pipeStore.Range(func(m *metrics.Metric) error { pipeMetrics = append(pipeMetrics, m) return nil }) fileStore.Range(func(m *metrics.Metric) error { fileMetrics = append(fileMetrics, m) return nil }) // Ignore the datum.Time field as well, as the results will be unstable otherwise. testutil.ExpectNoDiff(t, fileMetrics, pipeMetrics, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{}), testutil.IgnoreFields(datum.BaseDatum{}, "Time")) })) } } // TestFileSocketStreamComparison is a unix-specific test currently because on Windows, the constructed URL will // be of the form unix://C:\\path, and this will be interpreted as protocol unix on host C and port \\path. 
func TestFileSocketStreamComparison(t *testing.T) { testutil.SkipIfShort(t) for _, scheme := range []string{"unixgram", "unix"} { scheme := scheme for _, tc := range exampleProgramTests { tc := tc t.Run(fmt.Sprintf("%s on %s://%s", tc.programfile, scheme, tc.logfile), testutil.TimeoutTest(exampleTimeout, func(t *testing.T) { //nolint:thelper ctx, cancel := context.WithCancel(context.Background()) waker := waker.NewTestAlways() fileStore, sockStore := metrics.NewStore(), metrics.NewStore() programFile := filepath.Join("../..", tc.programfile) // Set up the socket tmpDir := testutil.TestTempDir(t) sockName := filepath.Join(tmpDir, filepath.Base(tc.logfile)) var wg sync.WaitGroup wg.Add(3) go func() { defer wg.Done() fileMtail, err := mtail.New(ctx, fileStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(tc.logfile), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker)) if err != nil { t.Error(err) } if err := fileMtail.Run(); err != nil { t.Error(err) } }() sockMtail, err := mtail.New(ctx, sockStore, mtail.ProgramPath(programFile), mtail.LogPathPatterns(scheme+"://"+sockName), mtail.OneShot, mtail.OmitMetricSource, mtail.LogPatternPollWaker(waker), mtail.LogstreamPollWaker(waker)) testutil.FatalIfErr(t, err) go func() { defer wg.Done() if err := sockMtail.Run(); err != nil { t.Error(err) } }() go func() { defer wg.Done() source, err := os.OpenFile(tc.logfile, os.O_RDONLY, 0) testutil.FatalIfErr(t, err) s, err := net.DialUnix(scheme, nil, &net.UnixAddr{sockName, scheme}) testutil.FatalIfErr(t, err) n, err := io.Copy(s, source) testutil.FatalIfErr(t, err) glog.Infof("Copied %d bytes into socket", n) if scheme == "unixgram" { // Write zero bytes after Stop is called to signal that this is the "end of the stream". 
for { _, err = s.Write([]byte{}) if err == nil { glog.Infof("Zero bytes written to socket to signal EOF") break } var netErr net.Error if errors.As(err, &netErr) && netErr.Timeout() { glog.Infof("Write timeout") time.Sleep(1 * time.Second) } else { testutil.FatalIfErr(t, err) } } } source.Close() s.Close() }() // Oneshot mode means we can wait for shutdown before cancelling. wg.Wait() cancel() var sockMetrics, fileMetrics metrics.MetricSlice sockStore.Range(func(m *metrics.Metric) error { sockMetrics = append(sockMetrics, m) return nil }) fileStore.Range(func(m *metrics.Metric) error { fileMetrics = append(fileMetrics, m) return nil }) // Ignore the datum.Time field as well, as the results will be unstable otherwise. testutil.ExpectNoDiff(t, fileMetrics, sockMetrics, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{}), testutil.IgnoreFields(datum.BaseDatum{}, "Time")) })) } } } mtail-3.0.0~rc54+git0ff5/internal/mtail/exec_integration_test.go000066400000000000000000000016741460063571700246340ustar00rootroot00000000000000// Copyright 2024 Google Inc. ll Rights Reserved. // This file is available under the Apache license. package mtail_test import ( "context" "errors" "os/exec" "path/filepath" "testing" "time" "github.com/golang/glog" ) var mtailPath string func init() { path, err := exec.LookPath(filepath.Join("..", "..", "mtail")) if errors.Is(err, exec.ErrDot) { err = nil } if err != nil { glog.Infof("exec_integration_test init(): %v", err) } mtailPath = path } func TestExecMtail(t *testing.T) { if mtailPath == "" { t.Log("mtail binary not found, skipping") t.Skip() } cs := []string{ "-progs", "../../examples", "-logs", "testdata/rsyncd.log", "-one_shot", "-one_shot_format=prometheus", } ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() cmd := exec.CommandContext(ctx, mtailPath, cs...) 
if out, err := cmd.CombinedOutput(); err != nil { t.Logf("%s", out) t.Error(err) } } mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/000077500000000000000000000000001460063571700211575ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/reader.go000066400000000000000000000101021460063571700227420ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. package golden import ( "bufio" "io" "path/filepath" "regexp" "strconv" "strings" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" ) var varRe = regexp.MustCompile(`^(counter|gauge|timer|text|histogram) ([^ ]+)(?: {([^}]+)})?(?: (\S+))?(?: (.+))?`) // ReadTestData loads a "golden" test data file from a programfile and returns as a slice of Metrics. func ReadTestData(file io.Reader, programfile string) metrics.MetricSlice { store := metrics.NewStore() prog := filepath.Base(programfile) scanner := bufio.NewScanner(file) for scanner.Scan() { glog.V(2).Infof("'%s'\n", scanner.Text()) match := varRe.FindStringSubmatch(scanner.Text()) glog.V(2).Infof("len match: %d\n", len(match)) if len(match) == 0 { continue } keys := make([]string, 0) vals := make([]string, 0) if match[3] != "" { for _, pair := range strings.Split(match[3], ",") { glog.V(2).Infof("pair: %s\n", pair) kv := strings.Split(pair, "=") keys = append(keys, kv[0]) if kv[1] != "" { if kv[1] == `""` { vals = append(vals, "") } else { vals = append(vals, kv[1]) } } } } var kind metrics.Kind switch match[1] { case "counter": kind = metrics.Counter case "gauge": kind = metrics.Gauge case "timer": kind = metrics.Timer case "text": kind = metrics.Text case "histogram": kind = metrics.Histogram } glog.V(2).Infof("match[4]: %q", match[4]) typ := metrics.Int var ( ival int64 fval float64 sval string err error ) if match[4] != "" { ival, err = strconv.ParseInt(match[4], 10, 64) if err != nil { fval, err 
= strconv.ParseFloat(match[4], 64) typ = metrics.Float if err != nil || fval == 0.0 { sval = match[4] typ = metrics.String } } glog.V(2).Infof("type is %q", typ) } var timestamp time.Time glog.V(2).Infof("match 5: %q\n", match[5]) if match[5] != "" { timestamp, err = time.Parse(time.RFC3339, match[5]) if err != nil { j, err := strconv.ParseInt(match[5], 10, 64) if err == nil { timestamp = time.Unix(j/1000000000, j%1000000000) } else { glog.V(2).Info(err) } } } glog.V(2).Infof("timestamp is %s which is %v in unix", timestamp.Format(time.RFC3339), timestamp.Unix()) // Now we have enough information to get or create a metric. m := store.FindMetricOrNil(match[2], prog) if m != nil { if m.Type != typ { glog.V(2).Infof("The type of the fetched metric is not %s: %s", typ, m) continue } } else { m = metrics.NewMetric(match[2], prog, kind, typ, keys...) if kind == metrics.Counter && len(keys) == 0 { d, err := m.GetDatum() if err != nil { glog.Fatal(err) } // Initialize to zero at the zero time. switch typ { case metrics.Int: datum.SetInt(d, 0, time.Unix(0, 0)) case metrics.Float: datum.SetFloat(d, 0, time.Unix(0, 0)) } } glog.V(2).Infof("making a new %v\n", m) if err := store.Add(m); err != nil { glog.Infof("Failed to add metric %v to store: %s", m, err) } } if match[4] != "" { d, err := m.GetDatum(vals...) if err != nil { glog.V(2).Infof("Failed to get datum: %s", err) continue } glog.V(2).Infof("got datum %v", d) switch typ { case metrics.Int: glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, ival, timestamp) datum.SetInt(d, ival, timestamp) case metrics.Float: glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, fval, timestamp) datum.SetFloat(d, fval, timestamp) case metrics.String: glog.V(2).Infof("setting %v with vals %v to %v at %v\n", d, vals, sval, timestamp) datum.SetString(d, sval, timestamp) } } glog.V(2).Infof("Metric is now %s", m) } storeList := make([]*metrics.Metric, 0) /* #nosec G104 -- Always returns nil. 
nolint:errcheck */ store.Range(func(m *metrics.Metric) error { storeList = append(storeList, m) return nil }) return storeList } mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/reader_test.go000066400000000000000000000055161460063571700240160ustar00rootroot00000000000000package golden import ( "os" "sync" "testing" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) var expectedMetrics = metrics.MetricSlice{ { Name: "bytes_total", Program: "reader_test", Kind: metrics.Counter, Keys: []string{"operation"}, LabelValues: []*metrics.LabelValue{ { Labels: []string{"sent"}, Value: datum.MakeInt(62793673, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC)), }, { Labels: []string{"received"}, Value: datum.MakeInt(975017, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC)), }, }, }, { Name: "connections_total", Program: "reader_test", Kind: metrics.Counter, Keys: []string{}, LabelValues: []*metrics.LabelValue{ { Value: datum.MakeInt(52, time.Date(2011, 2, 22, 21, 54, 13, 0, time.UTC)), }, }, }, { Name: "connection-time_total", Program: "reader_test", Kind: metrics.Counter, Keys: []string{}, LabelValues: []*metrics.LabelValue{ { Value: datum.MakeInt(1181011, time.Date(2011, 2, 23, 5, 54, 10, 0, time.UTC)), }, }, }, { Name: "transfers_total", Program: "reader_test", Kind: metrics.Counter, Keys: []string{"operation", "module"}, LabelValues: []*metrics.LabelValue{ { Labels: []string{"send", "module"}, Value: datum.MakeInt(2, time.Date(2011, 2, 23, 5, 50, 32, 0, time.UTC)), }, { Labels: []string{"send", "repo"}, Value: datum.MakeInt(25, time.Date(2011, 2, 23, 5, 51, 14, 0, time.UTC)), }, }, }, { Name: "foo", Program: "reader_test", Kind: metrics.Gauge, Keys: []string{"label"}, LabelValues: []*metrics.LabelValue{}, }, { Name: "bar", Program: "reader_test", Kind: metrics.Counter, Keys: []string{}, LabelValues: []*metrics.LabelValue{ { Value: datum.MakeInt(0, time.Unix(0, 0)), }, }, }, { Name: 
"floaty", Program: "reader_test", Kind: metrics.Gauge, Type: metrics.Float, Keys: []string{}, LabelValues: []*metrics.LabelValue{ { Labels: []string{}, Value: datum.MakeFloat(37.1, time.Date(2017, 6, 15, 18, 9, 37, 0, time.UTC)), }, }, }, { Name: "stringy", Program: "reader_test", Kind: metrics.Text, Type: metrics.String, Keys: []string{}, LabelValues: []*metrics.LabelValue{ { Labels: []string{}, Value: datum.MakeString("hi", time.Date(2018, 6, 16, 18, 4, 0, 0, time.UTC)), }, }, }, } func TestReadTestData(t *testing.T) { f, err := os.Open("reader_test.golden") testutil.FatalIfErr(t, err) defer f.Close() readMetrics := ReadTestData(f, "reader_test") testutil.ExpectNoDiff(t, expectedMetrics, readMetrics, testutil.SortSlices(metrics.Less), testutil.IgnoreUnexported(metrics.Metric{}, sync.RWMutex{}, datum.String{})) } mtail-3.0.0~rc54+git0ff5/internal/mtail/golden/reader_test.golden000066400000000000000000000007731460063571700246610ustar00rootroot00000000000000counter bytes_total {operation=sent} 62793673 2011-02-23T05:54:10Z counter bytes_total {operation=received} 975017 2011-02-23T05:54:10Z counter connections_total 52 2011-02-22T21:54:13Z counter connection-time_total 1181011 2011-02-23T05:54:10Z counter transfers_total {operation=send,module=module} 2 2011-02-23T05:50:32Z counter transfers_total {operation=send,module=repo} 25 2011-02-23T05:51:14Z gauge foo {label=} counter bar gauge floaty 37.1 2017-06-15T18:09:37Z text stringy hi 2018-06-16T18:04:00Z mtail-3.0.0~rc54+git0ff5/internal/mtail/httpstatus.go000066400000000000000000000046531460063571700224710ustar00rootroot00000000000000// Copyright 2020 Google Inc. All Rights Reserved. // This file is available under the Apache license. package mtail import ( "html/template" "net/http" "github.com/golang/glog" ) const statusTemplate = ` mtail on {{.BindAddress}}

mtail on {{.BindAddress}}

Build: {{.BuildInfo}}

Info: {{ if .HTTPInfoEndpoints }}varz, progz tracez

{{ else }} disabled {{ end }}

Debug: {{ if .HTTPDebugEndpoints }}debug/pprof, debug/vars{{ else }} disabled {{ end }}

` const statusTemplateEnd = ` ` // ServeHTTP satisfies the http.Handler interface, and is used to serve the // root page of mtail for online status reporting. func (m *Server) ServeHTTP(w http.ResponseWriter, _ *http.Request) { t, err := template.New("status").Parse(statusTemplate) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } te, err := template.New("statusend").Parse(statusTemplateEnd) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } data := struct { BindAddress string BuildInfo string HTTPDebugEndpoints bool HTTPInfoEndpoints bool }{ m.listener.Addr().String(), m.buildInfo.String(), m.httpDebugEndpoints, m.httpInfoEndpoints, } w.Header().Add("Content-type", "text/html") w.WriteHeader(http.StatusOK) if err = t.Execute(w, data); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } if m.httpInfoEndpoints { err = m.r.WriteStatusHTML(w) if err != nil { glog.Warningf("Error while writing loader status: %s", err) } err = m.t.WriteStatusHTML(w) if err != nil { glog.Warningf("Error while writing tailer status: %s", err) } } if err = te.Execute(w, data); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } } // FaviconHandler is used to serve up the favicon.ico for mtail's http server. func FaviconHandler(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "image/x-icon") w.Header().Set("Cache-Control", "public, max-age=7776000") if _, err := w.Write(logoFavicon); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } } mtail-3.0.0~rc54+git0ff5/internal/mtail/log_deletion_integration_unix_test.go000066400000000000000000000021761460063571700274150ustar00rootroot00000000000000// Copyright 2020 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
//go:build unix // +build unix package mtail_test import ( "os" "path/filepath" "testing" "github.com/golang/glog" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) // TestLogDeletion is a unix-only test because on Windows files with open read handles cannot be deleted. func TestLogDeletion(t *testing.T) { testutil.SkipIfShort(t) workdir := testutil.TestTempDir(t) // touch log file logFilepath := filepath.Join(workdir, "log") logFile := testutil.TestOpenFile(t, logFilepath) defer logFile.Close() m, stopM := mtail.TestStartServer(t, 1, mtail.LogPathPatterns(logFilepath)) defer stopM() logCloseCheck := m.ExpectMapExpvarDeltaWithDeadline("log_closes_total", logFilepath, 1) logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", -1) m.PollWatched(1) // Force sync to EOF glog.Info("remove") testutil.FatalIfErr(t, os.Remove(logFilepath)) m.PollWatched(0) // one pass to stop logCloseCheck() m.PollWatched(0) // one pass to remove completed stream logCountCheck() } mtail-3.0.0~rc54+git0ff5/internal/mtail/log_glob_integration_test.go000066400000000000000000000117751460063571700254770ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package mtail_test import ( "expvar" "os" "path/filepath" "testing" "github.com/golang/glog" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) func TestGlobBeforeStart(t *testing.T) { testutil.SkipIfShort(t) workdir := testutil.TestTempDir(t) globTests := []struct { name string expected bool }{ { filepath.Join(workdir, "log1"), true, }, { filepath.Join(workdir, "log2"), true, }, { filepath.Join(workdir, "1log"), false, }, } var count int64 for _, tt := range globTests { log := testutil.TestOpenFile(t, tt.name) if tt.expected { count++ } testutil.WriteString(t, log, "\n") log.Close() } m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*"))) stopM() if r := m.GetExpvar("log_count"); r.(*expvar.Int).Value() != count { t.Errorf("Expecting log count of %d, received %d", count, r) } } func TestGlobAfterStart(t *testing.T) { testutil.SkipIfShort(t) workdir := testutil.TestTempDir(t) globTests := []struct { name string expected bool }{ { filepath.Join(workdir, "log1"), true, }, { filepath.Join(workdir, "log2"), true, }, { filepath.Join(workdir, "1log"), false, }, } m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*"))) defer stopM() m.PollWatched(0) // Force sync to EOF var count int64 for _, tt := range globTests { if tt.expected { count++ } } logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", count) for _, tt := range globTests { log := testutil.TestOpenFile(t, tt.name) defer log.Close() m.PollWatched(0) // Force sync to EOF } // m.PollWatched(2) logCountCheck() } func TestGlobIgnoreFolder(t *testing.T) { testutil.SkipIfShort(t) workdir := testutil.TestTempDir(t) globTests := []struct { name string isFolder bool expected bool }{ { filepath.Join(workdir, "log1"), false, true, }, { filepath.Join(workdir, "logarchive"), true, false, }, { filepath.Join(workdir, "log2.gz"), false, false, }, } var count int64 for _, tt := range globTests { var err 
error var log *os.File if tt.isFolder { err = os.Mkdir(tt.name, 0o700) testutil.FatalIfErr(t, err) continue } log, err = os.Create(tt.name) if !tt.isFolder && tt.expected { count++ } defer log.Close() testutil.FatalIfErr(t, err) testutil.WriteString(t, log, "\n") } m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*")), mtail.IgnoreRegexPattern("\\.gz")) stopM() if r := m.GetExpvar("log_count"); r.(*expvar.Int).Value() != count { t.Errorf("Expecting log count of %d, received %v", count, r) } } func TestFilenameRegexIgnore(t *testing.T) { testutil.SkipIfShort(t) workdir := testutil.TestTempDir(t) globTests := []struct { name string expected bool }{ { filepath.Join(workdir, "log1"), true, }, { filepath.Join(workdir, "log1.gz"), false, }, { filepath.Join(workdir, "log2gz"), true, }, } var count int64 for _, tt := range globTests { log, err := os.Create(tt.name) testutil.FatalIfErr(t, err) defer log.Close() if tt.expected { count++ } testutil.WriteString(t, log, "\n") } m, stopM := mtail.TestStartServer(t, 0, mtail.LogPathPatterns(filepath.Join(workdir, "log*")), mtail.IgnoreRegexPattern("\\.gz")) stopM() if r := m.GetExpvar("log_count"); r.(*expvar.Int).Value() != count { t.Errorf("Log count not matching, expected: %d received: %v", count, r) } } func TestGlobRelativeAfterStart(t *testing.T) { testutil.SkipIfShort(t) tmpDir := testutil.TestTempDir(t) logDir := filepath.Join(tmpDir, "logs") progDir := filepath.Join(tmpDir, "progs") err := os.Mkdir(logDir, 0o700) testutil.FatalIfErr(t, err) err = os.Mkdir(progDir, 0o700) testutil.FatalIfErr(t, err) // Move to logdir to make relative paths testutil.Chdir(t, logDir) m, stopM := mtail.TestStartServer(t, 1, mtail.ProgramPath(progDir), mtail.LogPathPatterns("log.*")) defer stopM() { logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1) logFile := filepath.Join(logDir, "log.1.txt") f := testutil.TestOpenFile(t, logFile) defer f.Close() m.PollWatched(1) // Force sync to EOF 
testutil.WriteString(t, f, "line 1\n") m.PollWatched(1) logCountCheck() } { logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1) logFile := filepath.Join(logDir, "log.2.txt") f := testutil.TestOpenFile(t, logFile) defer f.Close() m.PollWatched(2) testutil.WriteString(t, f, "line 1\n") m.PollWatched(2) logCountCheck() } { logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 0) logFile := filepath.Join(logDir, "log.2.txt") f := testutil.TestOpenFile(t, logFile) defer f.Close() m.PollWatched(2) testutil.WriteString(t, f, "line 2\n") m.PollWatched(2) logCountCheck() } glog.Infof("end") } mtail-3.0.0~rc54+git0ff5/internal/mtail/log_rotation_integration_test.go000066400000000000000000000045151460063571700264050ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. package mtail_test import ( "fmt" "os" "path/filepath" "sync" "testing" "github.com/golang/glog" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) func TestLogSoftLinkChange(t *testing.T) { testutil.SkipIfShort(t) for _, tc := range []bool{false, true} { tc := tc name := "disabled" if tc { name = "enabled" } t.Run(fmt.Sprintf("race simulation %s", name), func(t *testing.T) { workdir := testutil.TestTempDir(t) logFilepath := filepath.Join(workdir, "log") m, stopM := mtail.TestStartServer(t, 1, mtail.LogPathPatterns(logFilepath)) defer stopM() logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1) logOpensTotalCheck := m.ExpectMapExpvarDeltaWithDeadline("log_opens_total", logFilepath, 2) trueLog1 := testutil.TestOpenFile(t, logFilepath+".true1") defer trueLog1.Close() testutil.FatalIfErr(t, os.Symlink(logFilepath+".true1", logFilepath)) glog.Info("symlinked") m.PollWatched(1) inputLines := []string{"hi1", "hi2", "hi3"} for _, x := range inputLines { testutil.WriteString(t, trueLog1, x+"\n") } m.PollWatched(1) trueLog2 := testutil.TestOpenFile(t, logFilepath+".true2") 
defer trueLog2.Close() m.PollWatched(1) logClosedCheck := m.ExpectMapExpvarDeltaWithDeadline("log_closes_total", logFilepath, 1) logCompletedCheck := m.ExpectExpvarDeltaWithDeadline("log_count", -1) testutil.FatalIfErr(t, os.Remove(logFilepath)) if tc { m.PollWatched(0) // simulate race condition with this poll. logClosedCheck() // sync when filestream closes fd m.PollWatched(0) // invoke the GC logCompletedCheck() // sync to when the logstream is removed from tailer } testutil.FatalIfErr(t, os.Symlink(logFilepath+".true2", logFilepath)) m.PollWatched(1) for _, x := range inputLines { testutil.WriteString(t, trueLog2, x+"\n") } m.PollWatched(1) var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() logCountCheck() }() go func() { defer wg.Done() logOpensTotalCheck() }() wg.Wait() _, err := os.Stat(logFilepath + ".true1") testutil.FatalIfErr(t, err) _, err = os.Stat(logFilepath + ".true2") testutil.FatalIfErr(t, err) }) } } mtail-3.0.0~rc54+git0ff5/internal/mtail/log_rotation_integration_unix_test.go000066400000000000000000000047521460063571700274530ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. //go:build unix // +build unix package mtail_test import ( "fmt" "os" "path/filepath" "sync" "testing" "github.com/golang/glog" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) // TestLogRotation is a unix-specific test because on Windows, files cannot be removed // or renamed while there is an open read handle on them. Instead, log rotation would // have to be implemented by copying and then truncating the original file. That test // case is already covered by TestLogTruncation. 
func TestLogRotation(t *testing.T) { testutil.SkipIfShort(t) for _, tc := range []bool{false, true} { tc := tc name := "disabled" if tc { name = "enabled" } t.Run(fmt.Sprintf("race simulation %s", name), func(t *testing.T) { tmpDir := testutil.TestTempDir(t) logDir := filepath.Join(tmpDir, "logs") progDir := filepath.Join(tmpDir, "progs") err := os.Mkdir(logDir, 0o700) testutil.FatalIfErr(t, err) err = os.Mkdir(progDir, 0o700) testutil.FatalIfErr(t, err) logFile := filepath.Join(logDir, "log") f := testutil.TestOpenFile(t, logFile) defer f.Close() m, stopM := mtail.TestStartServer(t, 1, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+"/log")) defer stopM() logOpensTotalCheck := m.ExpectMapExpvarDeltaWithDeadline("log_opens_total", logFile, 1) logLinesTotalCheck := m.ExpectMapExpvarDeltaWithDeadline("log_lines_total", logFile, 3) testutil.WriteString(t, f, "line 1\n") m.PollWatched(1) testutil.WriteString(t, f, "line 2\n") m.PollWatched(1) logClosedCheck := m.ExpectMapExpvarDeltaWithDeadline("log_closes_total", logFile, 1) logCompletedCheck := m.ExpectExpvarDeltaWithDeadline("log_count", -1) glog.Info("rename") err = os.Rename(logFile, logFile+".1") testutil.FatalIfErr(t, err) if tc { m.PollWatched(0) // simulate race condition with this poll. logClosedCheck() // sync when filestream closes fd m.PollWatched(0) // invoke the GC logCompletedCheck() // sync to when the logstream is removed from tailer } glog.Info("create") f = testutil.TestOpenFile(t, logFile) m.PollWatched(1) testutil.WriteString(t, f, "line 1\n") m.PollWatched(1) var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() logLinesTotalCheck() }() go func() { defer wg.Done() logOpensTotalCheck() }() wg.Wait() }) } } mtail-3.0.0~rc54+git0ff5/internal/mtail/log_truncation_integration_test.go000066400000000000000000000030201460063571700267220ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package mtail_test import ( "os" "path/filepath" "testing" "github.com/golang/glog" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) func TestLogTruncation(t *testing.T) { testutil.SkipIfShort(t) tmpDir := testutil.TestTempDir(t) logDir := filepath.Join(tmpDir, "logs") progDir := filepath.Join(tmpDir, "progs") testutil.FatalIfErr(t, os.Mkdir(logDir, 0o700)) testutil.FatalIfErr(t, os.Mkdir(progDir, 0o700)) m, stopM := mtail.TestStartServer(t, 1, mtail.ProgramPath(progDir), mtail.LogPathPatterns(logDir+"/log")) defer stopM() logCountCheck := m.ExpectExpvarDeltaWithDeadline("log_count", 1) linesCountCheck := m.ExpectExpvarDeltaWithDeadline("lines_total", 2) logFile := filepath.Join(logDir, "log") f := testutil.TestOpenFile(t, logFile) defer f.Close() m.PollWatched(1) testutil.WriteString(t, f, "line 1\n") m.PollWatched(1) // After the last barrier, the filestream may not race ahead of the test // here, so we need to ensure that a whole filestream loop occurs and that // the file offset advances for this test to succeed, hence the second // barrier here. 
m.PollWatched(1) err := f.Close() testutil.FatalIfErr(t, err) glog.Info("truncate") f, err = os.OpenFile(logFile, os.O_TRUNC|os.O_WRONLY, 0o600) testutil.FatalIfErr(t, err) defer f.Close() m.PollWatched(1) testutil.WriteString(t, f, "2\n") m.PollWatched(1) linesCountCheck() logCountCheck() } mtail-3.0.0~rc54+git0ff5/internal/mtail/logo.ico000066400000000000000000000764461460063571700213640ustar00rootroot00000000000000@@ (BF00 %nB  h hx(@ @  ۘ4ۘ4 ۘ4ڗ3ڗ3ה0ۘ4ۘ4%ۘ4ۘ4ڗ3ܙ5ۘ4ڗ3ۘ4ۘ4%ۘ4ۘ4ڗ3ڗ3ڗ3ڗ3ڗ3ۘ4ۘ4%ۘ4ۘ4ۘ4ڗ3ڗ3ڗ3ۘ4ۘ4%ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ۘ4ܙ5ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ۘ4֓/ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ؕ1ۘ1ڙ/ۘ3ۘ4ۘ4!ۘ4Aۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ۘ4ڗ3ڗ3۠ܗ;ۙ2ۗ36ۘ3lۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ژ3ڗ3ۘ3ۗ3ۙ3*ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3 ڗ3ۚ2ݜ0ۘ3Bۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3 ژ3&ږ4ܙ3ۘ4ۘ4ۗ4ۗ4&ږ4ܙ3ۘ4ۘ4ۗ4ۗ4ږ5ܚ2ۗ49ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ڗ3ڙ3ۙ3ۙ2ۘ3gۘ4ۘ4ۘ4iܙ2ۙ3ۙ3ۙ2ۘ3gۘ4ۘ4ۘ4iܙ2ۙ3$ژ4ܗ2ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۙ4Iܘ4*ۘ4Aۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3٘2ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3ۘ3ڗ2ג*ۘ3`ۘ4ۘ4ۘ4ۘ4ۘ4ژ3#؜,,ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ژ3٘2ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۙ1ۚ2ۚ2ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ4ؘ1ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ3ۘ4ۘ3?ۘ4ۘ4ۘ4ۘ4ܘ3A٘5-ۘ4ۘ4%ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ܘ3ט7ۘ4iۘ4ۘ4ۘ4ۘ4ݗ2 ݗ2ۘ4ۘ4%ۘ4ۘ4ۘ4ژ3ژ3ژ3ܘ5ܘ5ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۗ4ۖ3 ۘ4ۘ4ۘ4ۘ4ژ3|ٗ1ژ3ۘ4ۘ4%ۘ4ۘ4ۘ4ژ3ژ3ژ3ژ3ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ڗ3ڗ3ۗ4ۘ4ۘ4ۘ4ۘ3Rܗ7ۘ2ۘ4ۘ4&ۘ4ۘ4ژ3ۘ4٘2ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ژ3ژ3ۘ4ۘ4ۘ4ۘ4ۘ4;ۙ4ۖ4ژ3ژ3ژ3rژ3ژ3ژ3ژ3ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4ۘ4ۘ4'ۘ4ۖ4ژ3ژ3ژ3 
ژ3ژ3ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ٛ.ۘ4ۘ3)ۘ4ۘ4ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۘ2ۘ5ۘ3=ۘ4ۘ4ۘ4ۘ4ۘ3 ۘ3ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4$ۘ4ۘ4ۘ4ۘ4ۘ4%ۘ4ۗ4؛6ۘ4Pۘ4ۘ4ۘ4ۘ4ۘ3*ۘ5؞'ٜ,ۗ5ۘ3)ۘ4ۘ4ۘ4ۘ4ۘ3*ۘ5؞'ٜ,ۗ5ۘ3)ۘ4ۘ4ۘ4ۘ4ۘ4"ۘ4ܘ4ܘ3ۘ4{ۘ4ۘ4ۘ4ۘ4ۘ3Lݘ8ژ1ژ1ޙ8ۘ3Lۘ4ۘ4ۘ4ۘ4ۘ3Lݘ8ژ1ژ1ޙ8ۘ3Lۘ4ۘ4ۘ4ۘ4ڗ4ۗ4ۘ2ۘ2ۘ3ۘ4ۘ4ۘ4ۘ4ۗ4ۗ4ۗ4ۗ4ۗ4ۘ4ۘ4ۘ4ۘ4ۘ4ۗ4ۗ4ۗ4ۗ4ۗ4ۘ4ۘ4ۘ4ۘ4ۘ4ܘ2 ܘ29ژ3ۗ4?ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3Jޙ4ܘ4ܗ46ۘ3Jۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3Jޙ4ܘ4ܗ46ۘ3Jۘ4ۘ4ۘ4ۘ4ژ3Xە9ڜ-ژ3ڗ3 ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3Jۖ4ۘ4ۖ4ۖ4ژ3ۗ4ۘ3Iۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3Jۖ4ۘ4ۖ4ۖ4ژ3ۗ4ۘ3Iۘ4ۘ4ۘ4ۘ4ۘ3ڙ2ۙ2ڗ3ٖ2ڗ3_ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۗ4ۘ3Lۘ3)ۘ3*ژ3Lۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۗ4ۘ3Lۘ3)ۘ3*ژ3Lۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3]ڛ0ۙ3ۙ2ܘ3ޘ2ۘ3Yۘ4ۘ4ۘ4ۘ4ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ܘ4 ڗ41ۘ3ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3Sژ2ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ4ۘ4!ۘ4ۘ4ۘ4ۘ4ۘ4rܚ2 ۘ3ۘ4 
ۘ4Yۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3^ܙ4 ܘ4ܙ4ܙ4 ۘ3^ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3]ܙ4 ܘ4ܘ3ܘ3 ۘ3mۘ4ۘ4ۘ4fښ4 ڙ4ۙ4ޞ1ݟ0ۙ3"ۘ4`ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4_ۙ4#ۚ3ۙ3ۙ4ۙ4ޝ1ޟ0ۙ3"ۘ4`ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3]ۙ4"ښ3ڙ3ٙ6ܘ1ۘ3ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4$ۘ4$ۘ4ۘ4ܘ5ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4$ۘ4!ۘ4ۘ3 ٝ-ڛ0/`?_? `P(0` $  ۘ4ۘ4 ۘ4~ڗ3ڗ3ڗ3ڗ3ۘ4ۘ4#ۘ4ۘ4ڗ3ڗ3ڗ3ڗ3ۘ4ۘ4#ۘ4ۘ4ۘ4ڗ3ڗ3ڗ3ڗ3ۘ4ۘ4#ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ۘ2ۘ1ۘ3ۘ4ۘ4?ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ۙ2ۙ1ۘ3:ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ڗ3ڗ3ۛ0ۗ4ۙ3ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3Kژ3*ږ5ۘ3 ۘ4ۗ4 ۗ4*ږ5ۘ3 ۘ4ۗ4 ۗ4ܒ;ۘ4ۗ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ3ۘ3 ۘ4ۘ4ۘ4ۘ2 ۘ3ۘ3ۘ3 ۘ4ۘ4ۘ4ۘ2 ۘ3ܖ1ݒ)ۘ3jۘ4ۘ4ۘ4ۘ4ۘ4_ۙ4/ۘ4?ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ4ۘ4!ۘ4ۘ4ۘ4ۘ3!ۘ3ۘ4ۘ4!ۘ4ۘ4ۘ4ۘ3!ۘ3ۚ0ۘ3ۘ3ۘ4ۘ4ۘ4ۘ4ژ3ۖ6ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ3ۘ4ۘ4Xۘ4ۘ4ۘ4ܘ3"ۘ3ۘ4ۘ4#ۘ4ۘ4ۘ4ۘ4ژ3ژ3ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۗ4ܗ3ۘ4ۘ4ۘ4ۘ4Tޗ1ۘ4ۘ4#ۘ4ۘ4ۘ4ژ3ژ3ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ڗ3ڗ3ۘ4ۘ4ۘ4ۘ3Sۘ4ۘ4ۘ4$ۘ4ۘ4ژ3ژ3ژ3ژ3ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ژ3ژ3ۘ4ۘ4ۘ4ۘ45ۘ4ۘ4ۘ4ژ3oژ3ژ3ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4!ۘ4ۘ4ۘ4ۘ4%ۘ4ژ3ژ3ژ3ژ3٘2ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ڙ0ۘ4ۘ3)ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ۘ3ۘ4ۘ4:ۘ4ۘ4ۘ4ۘ3"ۘ3ؠ&ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4#ۘ4ؠ&ۘ4ۘ4"ۘ4ۘ4ۘ4ۘ4"ۘ4ܙ3ۘ4ۘ4\ۘ4ۘ4ۘ4ۘ3;ۘ4ژ0ۘ4ۘ3;ۘ4ۘ4ۘ4ۘ3;ۘ4ژ0ۘ4ۘ3;ۘ4ۘ4ۘ4ژ4ۘ4ۗ3ڗ2ۘ4ۘ4ۘ4ۘ4ۘ4ۘ2ۘ3ژ3ژ2ۘ4ۘ4ۘ4ۘ4ۘ4ۘ2ۘ3ژ3ژ2ۘ4ۘ4ۘ4ۘ4ܘ2 
ۘ2ۘ4ۗ4+ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4Pۘ2ۘ3ڙ2ژ1ۘ4Oۘ4ۘ4ۘ4ۘ4ۘ4ۘ4Pۘ2ۘ3ڙ2ژ1ۘ4Oۘ4ۘ4ۘ4ۘ3\ۗ4ٛ.ڗ3ڗ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3;ۘ4$ۘ3;ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3;ۘ4$ۘ3;ۘ4ۘ4ۘ4ۘ4ۘ4ۘ3ۘ3ܘ3ܘ3 ۘ4ۘ4ۘ4ۘ4ۘ3ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4gۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ46ٗ4ޚ3ۘ4ۘ4 ۘ4ۘ4ۘ4ۘ4ۘ2ۘ44ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ46Ԙ.ۘ46ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ4ۘ46Ҙ,٘2ۘ4ۘ4ۘ4ۘ4ۘ4ڙ4ژ4ۜ3ۙ3ۘ4aۘ4ۘ4ۘ4ۘ4ۘ4ۘ4aۙ3֓;ߜ1Ǐ@ۙ3ۘ4aۘ4ۘ4ۘ4ۘ4ۘ4ۘ4aۙ3Ԓ:ޜ1֚9ۘ3 ۘ3ۙ3 ڕ4ۘ4ۘ4ۘ4 ۘ4ۘ4"ۘ4ۘ4 ܘ5ܘ5ۘ4ۘ4ۘ4 ۘ4ۘ4!ۘ4ۘ3 ݒ<ښ1_??? ??<?" Mon Feb 21 17:44:31 2011 [pid 1] [ftp] OK LOGIN: Client "192.0.2.143", anon password "?" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "230 Login successful." Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP command: Client "192.0.2.143", "STAT" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "211-FTP server status:" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Connected to " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "192.0.2.143" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Logged in as " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "ftp" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " TYPE: " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "ASCII??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " No session bandwidth limit??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Session timeout in seconds is " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "300" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Control connection is plain text??" 
Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Data connections will be plain text??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " At session startup, client count was " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "1" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " vsFTPd 2.2.2 - secure, fast, stable??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "211 End of status" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP command: Client "192.0.2.143", "PASV" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "227 Entering Passive Mode (192,0,2,208,141,0)." Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP command: Client "192.0.2.143", "STAT" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "211-FTP server status:" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Connected to " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "192.0.2.143" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Logged in as " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "ftp" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " TYPE: " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "ASCII??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " No session bandwidth limit??" 
Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Session timeout in seconds is " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "300" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Control connection is plain text??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " Data connections will be plain text??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " At session startup, client count was " Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "1" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", " vsFTPd 2.2.2 - secure, fast, stable??" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "211 End of status" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP command: Client "192.0.2.143", "QUIT" Mon Feb 21 17:44:32 2011 [pid 3] [ftp] FTP response: Client "192.0.2.143", "221 Goodbye." Mon Feb 21 18:31:52 2011 [pid 2] CONNECT: Client "198.51.100.36" Mon Feb 21 18:31:52 2011 [pid 2] FTP response: Client "198.51.100.36", "220 (vsFTPd 2.2.2)" Mon Feb 21 18:31:52 2011 [pid 2] FTP command: Client "198.51.100.36", "USER anonymous" Mon Feb 21 18:31:52 2011 [pid 2] [anonymous] FTP response: Client "198.51.100.36", "331 Please specify the password." Mon Feb 21 18:31:52 2011 [pid 2] [anonymous] FTP command: Client "198.51.100.36", "PASS " Mon Feb 21 18:31:52 2011 [pid 1] [ftp] OK LOGIN: Client "198.51.100.36", anon password "anonymous@" Mon Feb 21 18:31:53 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "230 Login successful." 
Mon Feb 21 18:31:53 2011 [pid 3] [ftp] FTP command: Client "198.51.100.36", "TYPE I" Mon Feb 21 18:31:53 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "200 Switching to Binary mode." Mon Feb 21 18:31:54 2011 [pid 3] [ftp] FTP command: Client "198.51.100.36", "PASV" Mon Feb 21 18:31:54 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "227 Entering Passive Mode (192,0,2,208,180,139)." Mon Feb 21 18:31:54 2011 [pid 3] [ftp] FTP command: Client "198.51.100.36", "STOR /incoming/example_324592677_all.deb" Mon Feb 21 18:31:54 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "150 Ok to send data." Mon Feb 21 18:31:54 2011 [pid 3] [ftp] OK UPLOAD: Client "198.51.100.36", "/incoming/example_324592677_all.deb", 530 bytes, 1.56Kbyte/sec Mon Feb 21 18:31:54 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "226 Transfer complete." Mon Feb 21 18:31:54 2011 [pid 3] [ftp] FTP command: Client "198.51.100.36", "TYPE I" Mon Feb 21 18:31:54 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "200 Switching to Binary mode." Mon Feb 21 18:31:55 2011 [pid 3] [ftp] FTP command: Client "198.51.100.36", "PASV" Mon Feb 21 18:31:55 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "227 Entering Passive Mode (192,0,2,208,182,218)." Mon Feb 21 18:31:55 2011 [pid 3] [ftp] FTP command: Client "198.51.100.36", "STOR /incoming/example_324592677_all.changes" Mon Feb 21 18:31:55 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "150 Ok to send data." Mon Feb 21 18:31:55 2011 [pid 3] [ftp] OK UPLOAD: Client "198.51.100.36", "/incoming/example_324592677_all.changes", 752 bytes, 2.25Kbyte/sec Mon Feb 21 18:31:55 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "226 Transfer complete." Mon Feb 21 18:31:55 2011 [pid 3] [ftp] FTP command: Client "198.51.100.36", "QUIT" Mon Feb 21 18:31:55 2011 [pid 3] [ftp] FTP response: Client "198.51.100.36", "221 Goodbye." 
mtail-3.0.0~rc54+git0ff5/internal/mtail/testdata/vsftpd_log.golden000066400000000000000000000023041460063571700250600ustar00rootroot00000000000000counter transfers {direction=} counter transfer_time {direction=} counter bytes_transferred {direction=} counter connects 2 2011-02-21T18:31:52Z counter logins 2 2011-02-21T18:31:52Z counter uploads 2 2011-02-21T18:31:55Z counter session_time 1298310264 2011-02-21T18:31:55Z counter commands {command=STAT} 4 2011-02-21T17:44:32Z counter commands {command=PASV} 4 2011-02-21T18:31:55Z counter commands {command=QUIT} 3 2011-02-21T18:31:55Z counter commands {command=USER} 2 2011-02-21T18:31:52Z counter commands {command=PASS} 2 2011-02-21T18:31:52Z counter commands {command=TYPE} 2 2011-02-21T18:31:54Z counter commands {command=STOR} 2 2011-02-21T18:31:55Z counter responses {response=230} 3 2011-02-21T18:31:53Z counter responses {response=211} 8 2011-02-21T17:44:32Z counter responses {response=300} 4 2011-02-21T17:44:32Z counter responses {response=227} 4 2011-02-21T18:31:55Z counter responses {response=221} 3 2011-02-21T18:31:55Z counter responses {response=220} 2 2011-02-21T18:31:52Z counter responses {response=331} 2 2011-02-21T18:31:52Z counter responses {response=200} 2 2011-02-21T18:31:54Z counter responses {response=150} 2 2011-02-21T18:31:55Z counter responses {response=226} 2 2011-02-21T18:31:55Z mtail-3.0.0~rc54+git0ff5/internal/mtail/testdata/vsftpd_xferlog000066400000000000000000000021401460063571700244740ustar00rootroot00000000000000Mon Feb 21 15:21:32 2011 1 198.51.100.36 528 /incoming/example_324589822_all.deb b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:21:33 2011 1 198.51.100.36 752 /incoming/example_324589822_all.changes b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:26:16 2011 1 198.51.100.36 530 /incoming/example_324589893_all.deb b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:26:17 2011 1 198.51.100.36 752 /incoming/example_324589893_all.changes b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:31:21 2011 1 198.51.100.36 
530 /incoming/example_324589969_all.deb b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:31:22 2011 1 198.51.100.36 752 /incoming/example_324589969_all.changes b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:36:11 2011 1 198.51.100.36 528 /incoming/example_324590042_all.deb b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:36:12 2011 1 198.51.100.36 752 /incoming/example_324590042_all.changes b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:41:14 2011 1 198.51.100.36 528 /incoming/example_324590117_all.deb b _ i a anonymous@ ftp 0 * c Mon Feb 21 15:41:15 2011 1 198.51.100.36 752 /incoming/example_324590117_all.changes b _ i a anonymous@ ftp 0 * c mtail-3.0.0~rc54+git0ff5/internal/mtail/testdata/vsftpd_xferlog.golden000066400000000000000000000005121460063571700257440ustar00rootroot00000000000000counter transfers {direction=incoming} 10 2011-02-21T15:41:15Z counter transfer_time {direction=incoming} 10 2011-02-21T15:41:15Z counter bytes_transferred {direction=incoming} 6404 2011-02-21T15:41:15Z counter connects counter logins counter uploads counter session_time counter commands {command=} counter responses {response=} mtail-3.0.0~rc54+git0ff5/internal/mtail/testdata/xntp3_peerstats000066400000000000000000000000711460063571700246070ustar00rootroot0000000000000054695 7690.466 64.113.32.5 93b4 0.002345 0.01001 0.00090 mtail-3.0.0~rc54+git0ff5/internal/mtail/testdata/xntp3_peerstats.golden000066400000000000000000000007711460063571700260650ustar00rootroot00000000000000gauge peer_status {peer=64.113.32.5} 18 2008-08-17T02:08:10Z gauge peer_select {peer=64.113.32.5} 3 2008-08-17T02:08:10Z gauge peer_count {peer=64.113.32.5} 11 2008-08-17T02:08:10Z gauge peer_code {peer=64.113.32.5} 4 2008-08-17T02:08:10Z gauge peer_offset {peer=64.113.32.5} 0.002345 2008-08-17T02:08:10Z gauge peer_delay {peer=64.113.32.5} 0.01001 2008-08-17T02:08:10Z gauge peer_dispersion {peer=64.113.32.5} 0.00090 2008-08-17T02:08:10Z counter num_peerstats {peer=64.113.32.5} 1 2008-08-17T02:08:10Z 
mtail-3.0.0~rc54+git0ff5/internal/mtail/testing.go000066400000000000000000000130561460063571700217200ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. package mtail import ( "context" "expvar" "fmt" "os" "runtime" "testing" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" "github.com/google/mtail/internal/waker" ) const defaultDoOrTimeoutDeadline = 10 * time.Second type TestServer struct { *Server waker waker.Waker // for idle logstreams; others are polled explicitly in PollWatched awaken func(int) tb testing.TB cancel context.CancelFunc // Set this to change the poll deadline when using DoOrTimeout within this TestServer. DoOrTimeoutDeadline time.Duration } // TestMakeServer makes a new TestServer for use in tests, but does not start // the server. If an error occurs during creation, a testing.Fatal is issued. func TestMakeServer(tb testing.TB, wakers int, options ...Option) *TestServer { tb.Helper() // Reset counters when running multiple tests. Tests that use expvar // helpers cannot be made parallel. glog.Info("resetting counters") expvar.Get("lines_total").(*expvar.Int).Set(0) expvar.Get("log_count").(*expvar.Int).Set(0) expvar.Get("log_lines_total").(*expvar.Map).Init() expvar.Get("log_opens_total").(*expvar.Map).Init() expvar.Get("log_closes_total").(*expvar.Map).Init() expvar.Get("file_truncates_total").(*expvar.Map).Init() expvar.Get("prog_loads_total").(*expvar.Map).Init() ctx, cancel := context.WithCancel(context.Background()) waker, awaken := waker.NewTest(ctx, wakers) options = append(options, LogstreamPollWaker(waker), ) m, err := New(ctx, metrics.NewStore(), options...) testutil.FatalIfErr(tb, err) return &TestServer{Server: m, waker: waker, awaken: awaken, tb: tb, cancel: cancel} } // TestStartServer creates a new TestServer and starts it running. 
It // returns the server, and a stop function. func TestStartServer(tb testing.TB, wakers int, options ...Option) (*TestServer, func()) { tb.Helper() ts := TestMakeServer(tb, wakers, options...) return ts, ts.Start() } // Start starts the TestServer and returns a stop function. func (ts *TestServer) Start() func() { ts.tb.Helper() errc := make(chan error, 1) go func() { err := ts.Run() errc <- err }() return func() { ts.cancel() select { case err := <-errc: testutil.FatalIfErr(ts.tb, err) case <-time.After(6 * time.Second): buf := make([]byte, 1<<16) n := runtime.Stack(buf, true) fmt.Fprintf(os.Stderr, "%s", buf[0:n]) ts.tb.Fatal("timeout waiting for shutdown") } } } // Poll all watched objects for updates. The parameter n indicates how many logstreams to wait on before waking them. func (ts *TestServer) PollWatched(n int) { glog.Info("Testserver starting poll") glog.Infof("TestServer polling filesystem patterns") if err := ts.t.Poll(); err != nil { glog.Info(err) } glog.Infof("TestServer reloading programs") if err := ts.r.LoadAllPrograms(); err != nil { glog.Info(err) } glog.Infof("TestServer tailer gcing") if err := ts.t.ExpireStaleLogstreams(); err != nil { glog.Info(err) } glog.Info("TestServer waking idle routines") ts.awaken(n) glog.Info("Testserver finishing poll") } // GetExpvar is a helper function on TestServer that acts like TestGetExpvar. func (ts *TestServer) GetExpvar(name string) expvar.Var { ts.tb.Helper() return testutil.TestGetExpvar(ts.tb, name) } // ExpectExpvarDeltaWithDeadline returns a deferrable function which tests if the expvar metric with name has changed by delta within the given deadline, once the function begins. Before returning, it fetches the original value for comparison. 
func (ts *TestServer) ExpectExpvarDeltaWithDeadline(name string, want int64) func() { ts.tb.Helper() return testutil.ExpectExpvarDeltaWithDeadline(ts.tb, name, want) } // ExpectMapExpvarMetricDeltaWithDeadline returns a deferrable function which tests if the expvar map metric with name and key has changed by delta within the given deadline, once the function begins. Before returning, it fetches the original value for comparison. func (ts *TestServer) ExpectMapExpvarDeltaWithDeadline(name, key string, want int64) func() { ts.tb.Helper() return testutil.ExpectMapExpvarDeltaWithDeadline(ts.tb, name, key, want) } // GetProgramMetric fetches the datum of the program metric name. func (ts *TestServer) GetProgramMetric(name, prog string) datum.Datum { ts.tb.Helper() m := ts.store.FindMetricOrNil(name, prog) if m == nil { ts.tb.Fatalf("Unexpected metric store content, got nil instead of %s %s", name, prog) return nil } d, derr := m.GetDatum() testutil.FatalIfErr(ts.tb, derr) return d } // ExpectProgMetricDeltaWithDeadline tests that a given program metric increases by want within the deadline. It assumes that the named metric is an Int type datum.Datum. 
func (ts *TestServer) ExpectProgMetricDeltaWithDeadline(name, prog string, want int64) func() { ts.tb.Helper() deadline := ts.DoOrTimeoutDeadline if deadline == 0 { deadline = defaultDoOrTimeoutDeadline } start := datum.GetInt(ts.GetProgramMetric(name, prog)) check := func() (bool, error) { ts.tb.Helper() now := datum.GetInt(ts.GetProgramMetric(name, prog)) return now-start == want, nil } return func() { ts.tb.Helper() ok, err := testutil.DoOrTimeout(check, deadline, 10*time.Millisecond) if err != nil { ts.tb.Fatal(err) } if !ok { now := datum.GetInt(ts.GetProgramMetric(name, prog)) delta := now - start ts.tb.Errorf("Did not see %s have delta by deadline: got %v - %v = %d, want %d", name, now, start, delta, want) } } } mtail-3.0.0~rc54+git0ff5/internal/mtail/unix_socket_export_integration_test.go000066400000000000000000000015071460063571700276370ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. package mtail_test import ( "net" "path/filepath" "testing" "github.com/golang/glog" "github.com/google/mtail/internal/mtail" "github.com/google/mtail/internal/testutil" ) func TestBasicUNIXSockets(t *testing.T) { testutil.SkipIfShort(t) tmpDir := testutil.TestTempDir(t) sockListenAddr := filepath.Join(tmpDir, "mtail_test.sock") _, stopM := mtail.TestStartServer(t, 1, mtail.LogPathPatterns(tmpDir+"/*"), mtail.ProgramPath("../../examples/linecount.mtail"), mtail.BindUnixSocket(sockListenAddr)) defer stopM() glog.Infof("check that server is listening") addr, err := net.ResolveUnixAddr("unix", sockListenAddr) testutil.FatalIfErr(t, err) _, err = net.DialUnix("unix", nil, addr) testutil.FatalIfErr(t, err) } 
mtail-3.0.0~rc54+git0ff5/internal/runtime/000077500000000000000000000000001460063571700202645ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/code/000077500000000000000000000000001460063571700211765ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/code/instr.go000066400000000000000000000007741460063571700226740ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Package code contains the bytecode instructions for the mtail virtual machine. package code import "fmt" type Instr struct { Opcode Opcode Operand interface{} SourceLine int // Line number of the original source file, zero-based numbering. } // debug print for instructions. func (i Instr) String() string { return fmt.Sprintf("{%s %v %d}", opNames[i.Opcode], i.Operand, i.SourceLine) } mtail-3.0.0~rc54+git0ff5/internal/runtime/code/instr_test.go000066400000000000000000000006151460063571700237250ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // This file is available under the Apache license. package code_test import ( "testing" "github.com/google/mtail/internal/runtime/code" "github.com/google/mtail/internal/testutil" ) func TestInstrString(t *testing.T) { expected := "{match 0 0}" testutil.ExpectNoDiff(t, code.Instr{Opcode: code.Match, Operand: 0}.String(), expected) } mtail-3.0.0~rc54+git0ff5/internal/runtime/code/object.go000066400000000000000000000007711460063571700230000ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package code import ( "regexp" "github.com/google/mtail/internal/metrics" ) // Object is the data and bytecode resulting from compiled program source. type Object struct { Program []Instr // The program bytecode. Strings []string // Static strings. Regexps []*regexp.Regexp // Static regular expressions. 
Metrics []*metrics.Metric // Metrics accessible to this program. } mtail-3.0.0~rc54+git0ff5/internal/runtime/code/opcodes.go000066400000000000000000000123771460063571700231730ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Package code contains the bytecode instructions for the mtail virtual machine. package code type Opcode int const ( Bad Opcode = iota // Invalid instruction, indicates a bug in the generator. Stop // Stop the program, ending processing of this input. Match // Match a regular expression against input, and set the match register. Smatch // Match a regular expression against top of stack, and set the match register. Cmp // Compare two values on the stack and set the match register. Jnm // Jump if no match. Jm // Jump if match. Jmp // Unconditional jump Inc // Increment a variable value Dec // Decrement a variable value Strptime // Parse into the timestamp register Timestamp // Return value of timestamp register onto TOS. Settime // Set timestamp register to value at TOS. Push // Push operand onto stack Capref // Push capture group reference at operand onto stack Str // Push string constant at operand onto stack Sset // Set a string variable value. Iset // Set a variable value Iadd // Add top values on stack and push to stack Isub // Subtract top value from second top value on stack, and push to stack. Imul // Multiply top values on stack and push to stack Idiv // Divide top value into second top on stack, and push Imod // Integer divide top value into second top on stack, and push remainder Ipow // Put second TOS to power of TOS, and push. 
And // Bitwise AND the 2 at top of stack, and push result Or // Bitwise OR the 2 at top of stack, and push result Xor // Bitwise XOR the 2 at top of stack, and push result Neg // Bitwise NOT the top of stack, and push result Not // Boolean NOT the top of stack, and push result Shl // Shift TOS left, push result Shr // Shift TOS right, push result Mload // Load metric at operand onto top of stack Dload // Pop `operand` keys and metric off stack, and push datum at metric[key,...] onto stack. Iget // Pop a datum off the stack, and push its integer value back on the stack. Fget // Pop a datum off the stack, and push its float value back on the stack. Sget // Pop a datum off the stack, and push its string value back on the stack. Tolower // Convert the string at the top of the stack to lowercase. Length // Compute the length of a string. Cat // string concatenation Setmatched // Set "matched" flag Otherwise // Only match if "matched" flag is false. Del // Pop `operand` keys and metric off stack, and remove the datum at metric[key,...] from memory Expire // Set the expiry duration of a datum, perfoming the same as del but after the expiry time passes. // Floating point ops. Fadd Fsub Fmul Fdiv Fmod Fpow Fset // Floating point assignment Getfilename // Push input.Filename onto the stack. // Conversions. I2f // int to float S2i // string to int S2f // string to float I2s // int to string F2s // float to string // Typed comparisons, behave the same as cmp but do no conversion. Icmp // integer compare Fcmp // floating point compare Scmp // string compare // String opcodes. 
Subst Rsubst lastOpcode ) var opNames = map[Opcode]string{ Stop: "stop", Match: "match", Smatch: "smatch", Cmp: "cmp", Jnm: "jnm", Jm: "jm", Jmp: "jmp", Inc: "inc", Strptime: "strptime", Timestamp: "timestamp", Settime: "settime", Push: "push", Capref: "capref", Str: "str", Sset: "sset", Iset: "iset", Iadd: "iadd", Isub: "isub", Imul: "imul", Idiv: "idiv", Imod: "imod", Ipow: "ipow", Shl: "shl", Shr: "shr", And: "and", Or: "or", Xor: "xor", Not: "not", Neg: "neg", Mload: "mload", Dload: "dload", Iget: "iget", Fget: "fget", Sget: "sget", Tolower: "tolower", Length: "length", Cat: "cat", Setmatched: "setmatched", Otherwise: "otherwise", Del: "del", Fadd: "fadd", Fsub: "fsub", Fmul: "fmul", Fdiv: "fdiv", Fmod: "fmod", Fpow: "fpow", Fset: "fset", Getfilename: "getfilename", I2f: "i2f", S2i: "s2i", S2f: "s2f", I2s: "i2s", F2s: "f2s", Icmp: "icmp", Fcmp: "fcmp", Scmp: "scmp", Subst: "subst", Rsubst: "rsubst", } func (o Opcode) String() string { return opNames[o] } mtail-3.0.0~rc54+git0ff5/internal/runtime/code/opcodes_test.go000066400000000000000000000005311460063571700242170ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // This file is available under the Apache license. package code import "testing" func TestOpcodeHasString(t *testing.T) { for o := Bad; o < lastOpcode; o++ { if o.String() != opNames[o] { t.Errorf("opcode string not match. Expected %s, received %s", opNames[o], o.String()) } } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/000077500000000000000000000000001460063571700220765ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/ast/000077500000000000000000000000001460063571700226655ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/ast/ast.go000066400000000000000000000202051460063571700240020ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package ast import ( "sync" "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/google/mtail/internal/runtime/compiler/symbol" "github.com/google/mtail/internal/runtime/compiler/types" ) type Node interface { Pos() *position.Position // Returns the position of the node from the original source Type() types.Type // Returns the type of the expression in this node } type StmtList struct { Scope *symbol.Scope // Pointer to the local scope for this enclosing block Children []Node } func (n *StmtList) Pos() *position.Position { return mergepositionlist(n.Children) } func (n *StmtList) Type() types.Type { return types.None } type ExprList struct { Children []Node typMu sync.RWMutex typ types.Type } func (n *ExprList) Pos() *position.Position { return mergepositionlist(n.Children) } func (n *ExprList) Type() types.Type { n.typMu.RLock() defer n.typMu.RUnlock() return n.typ } func (n *ExprList) SetType(t types.Type) { n.typMu.Lock() defer n.typMu.Unlock() n.typ = t } type CondStmt struct { Cond Node Truth Node Else Node Scope *symbol.Scope // a conditional expression can cause new variables to be defined } func (n *CondStmt) Pos() *position.Position { return mergepositionlist([]Node{n.Cond, n.Truth, n.Else}) } func (n *CondStmt) Type() types.Type { return types.None } type IDTerm struct { P position.Position Name string Symbol *symbol.Symbol Lvalue bool // If set, then this node appears on the left side of an // assignment and needs to have its address taken only. 
} func (n *IDTerm) Pos() *position.Position { return &n.P } func (n *IDTerm) Type() types.Type { if n.Symbol != nil { return n.Symbol.Type } return types.Error // id not defined } type CaprefTerm struct { P position.Position Name string IsNamed bool // true if the capref is a named reference, not positional Symbol *symbol.Symbol } func (n *CaprefTerm) Pos() *position.Position { return &n.P } func (n *CaprefTerm) Type() types.Type { if n.Symbol != nil { return n.Symbol.Type } return types.Error // sym not defined due to undefined capref error } type BuiltinExpr struct { P position.Position Name string Args Node typMu sync.RWMutex typ types.Type } func (n *BuiltinExpr) Pos() *position.Position { return &n.P } func (n *BuiltinExpr) Type() types.Type { n.typMu.RLock() defer n.typMu.RUnlock() return n.typ } func (n *BuiltinExpr) SetType(t types.Type) { n.typMu.Lock() defer n.typMu.Unlock() n.typ = t } type BinaryExpr struct { LHS, RHS Node Op int typMu sync.RWMutex typ types.Type } func (n *BinaryExpr) Pos() *position.Position { return position.Merge(n.LHS.Pos(), n.RHS.Pos()) } func (n *BinaryExpr) Type() types.Type { n.typMu.RLock() defer n.typMu.RUnlock() return n.typ } func (n *BinaryExpr) SetType(t types.Type) { n.typMu.Lock() defer n.typMu.Unlock() n.typ = t } type UnaryExpr struct { P position.Position // pos is the position of the op Expr Node Op int typMu sync.RWMutex typ types.Type } func (n *UnaryExpr) Pos() *position.Position { return position.Merge(&n.P, n.Expr.Pos()) } func (n *UnaryExpr) Type() types.Type { n.typMu.RLock() defer n.typMu.RUnlock() return n.typ } func (n *UnaryExpr) SetType(t types.Type) { n.typMu.Lock() defer n.typMu.Unlock() n.typ = t } type IndexedExpr struct { LHS, Index Node typMu sync.RWMutex typ types.Type } func (n *IndexedExpr) Pos() *position.Position { return position.Merge(n.LHS.Pos(), n.Index.Pos()) } func (n *IndexedExpr) Type() types.Type { n.typMu.RLock() defer n.typMu.RUnlock() return n.typ } func (n *IndexedExpr) SetType(t 
types.Type) { n.typMu.Lock() defer n.typMu.Unlock() n.typ = t } type VarDecl struct { P position.Position Name string Hidden bool Keys []string Limit int64 Buckets []float64 Kind metrics.Kind ExportedName string Symbol *symbol.Symbol } func (n *VarDecl) Pos() *position.Position { return &n.P } func (n *VarDecl) Type() types.Type { if n.Kind == metrics.Histogram { return types.Buckets } else if n.Symbol != nil { return n.Symbol.Type } return types.Error } type StringLit struct { P position.Position Text string } func (n *StringLit) Pos() *position.Position { return &n.P } func (n *StringLit) Type() types.Type { return types.String } type IntLit struct { P position.Position I int64 } func (n *IntLit) Pos() *position.Position { return &n.P } func (n *IntLit) Type() types.Type { return types.Int } type FloatLit struct { P position.Position F float64 } func (n *FloatLit) Pos() *position.Position { return &n.P } func (n *FloatLit) Type() types.Type { return types.Float } // PatternExpr is the top of a pattern expression. type PatternExpr struct { Expr Node Pattern string // if not empty, the fully defined pattern after typecheck Index int // reference to the compiled object offset after codegen } func (n *PatternExpr) Pos() *position.Position { return n.Expr.Pos() } func (n *PatternExpr) Type() types.Type { return types.Pattern } // PatternLit holds inline constant pattern fragments. type PatternLit struct { P position.Position Pattern string } func (n *PatternLit) Pos() *position.Position { return &n.P } func (n *PatternLit) Type() types.Type { return types.Pattern } // PatternFragment holds a named pattern part. 
type PatternFragment struct { ID Node Expr Node Symbol *symbol.Symbol // Optional Symbol for a named pattern Pattern string // If not empty, contains the complete evaluated pattern of the expr } func (n *PatternFragment) Pos() *position.Position { return n.ID.Pos() } func (n *PatternFragment) Type() types.Type { return types.Pattern } type DecoDecl struct { P position.Position Name string Block Node Symbol *symbol.Symbol Scope *symbol.Scope // The declaration creates its own scope, as a zygote to be instantiated later. } func (n *DecoDecl) Pos() *position.Position { return position.Merge(&n.P, n.Block.Pos()) } func (n *DecoDecl) Type() types.Type { if n.Symbol != nil { return n.Symbol.Type } return types.Int } type DecoStmt struct { P position.Position Name string Block Node Decl *DecoDecl // Pointer to the declaration of the decorator this statement invokes. Scope *symbol.Scope // Instantiated with a copy of the Def's Scope. } func (n *DecoStmt) Pos() *position.Position { return position.Merge(&n.P, n.Block.Pos()) } func (n *DecoStmt) Type() types.Type { return types.None } type NextStmt struct { P position.Position } func (n *NextStmt) Pos() *position.Position { return &n.P } func (n *NextStmt) Type() types.Type { return types.None } type OtherwiseStmt struct { P position.Position } func (n *OtherwiseStmt) Pos() *position.Position { return &n.P } func (n *OtherwiseStmt) Type() types.Type { return types.None } type DelStmt struct { P position.Position N Node Expiry time.Duration } func (n *DelStmt) Pos() *position.Position { return &n.P } func (n *DelStmt) Type() types.Type { return types.None } type ConvExpr struct { N Node mu sync.RWMutex typ types.Type } func (n *ConvExpr) Pos() *position.Position { return n.N.Pos() } func (n *ConvExpr) Type() types.Type { n.mu.RLock() defer n.mu.RUnlock() return n.typ } func (n *ConvExpr) SetType(t types.Type) { n.mu.Lock() defer n.mu.Unlock() n.typ = t } type Error struct { P position.Position Spelling string } func (n 
*Error) Pos() *position.Position { return &n.P } func (n *Error) Type() types.Type { return types.Error } type StopStmt struct { P position.Position } func (n *StopStmt) Pos() *position.Position { return &n.P } func (n *StopStmt) Type() types.Type { return types.None } // mergepositionlist is a helper that merges the positions of all the nodes in a list. func mergepositionlist(l []Node) *position.Position { if len(l) == 0 { return nil } if len(l) == 1 { if l[0] != nil { return l[0].Pos() } return nil } return position.Merge(l[0].Pos(), mergepositionlist(l[1:])) } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/ast/walk.go000066400000000000000000000042251460063571700241550ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package ast import ( "fmt" "github.com/golang/glog" ) // Visitor VisitBefore method is invoked for each node encountered by Walk. // If the result Visitor v is not nil, Walk visits each of the children of that // node with v. VisitAfter is called on n at the end. type Visitor interface { VisitBefore(n Node) (Visitor, Node) VisitAfter(n Node) Node } // convenience function. func walknodelist(v Visitor, list []Node) []Node { r := make([]Node, 0, len(list)) for _, x := range list { r = append(r, Walk(v, x)) } return r } // Walk traverses (walks) an AST node with the provided Visitor v. func Walk(v Visitor, node Node) Node { glog.V(2).Infof("About to VisitBefore node at %s", node.Pos()) // Returning nil from VisitBefore signals to Walk that the Visitor has // handled the children of this node. VisitAfter will not be called. 
if v, node = v.VisitBefore(node); v == nil { return node } switch n := node.(type) { case *StmtList: n.Children = walknodelist(v, n.Children) case *ExprList: n.Children = walknodelist(v, n.Children) case *CondStmt: if n.Cond != nil { n.Cond = Walk(v, n.Cond) } n.Truth = Walk(v, n.Truth) if n.Else != nil { n.Else = Walk(v, n.Else) } case *BuiltinExpr: if n.Args != nil { n.Args = Walk(v, n.Args) } case *BinaryExpr: n.LHS = Walk(v, n.LHS) n.RHS = Walk(v, n.RHS) case *UnaryExpr: n.Expr = Walk(v, n.Expr) case *IndexedExpr: n.Index = Walk(v, n.Index) n.LHS = Walk(v, n.LHS) case *DecoDecl: n.Block = Walk(v, n.Block) case *DecoStmt: n.Block = Walk(v, n.Block) case *ConvExpr: n.N = Walk(v, n.N) case *PatternExpr: n.Expr = Walk(v, n.Expr) case *PatternFragment: n.Expr = Walk(v, n.Expr) case *IDTerm, *CaprefTerm, *VarDecl, *StringLit, *IntLit, *FloatLit, *PatternLit, *NextStmt, *OtherwiseStmt, *DelStmt, *StopStmt: // These nodes are terminals, thus have no children to walk. default: panic(fmt.Sprintf("Walk: unexpected node type %T: %v", n, n)) } glog.V(2).Infof("About to VisitAfter node at %s", node.Pos()) node = v.VisitAfter(node) return node } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/ast/walk_test.go000066400000000000000000000035461460063571700252210ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package ast_test import ( "testing" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/parser" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/google/mtail/internal/runtime/compiler/types" "github.com/google/mtail/internal/testutil" ) type testNode struct{} func (t testNode) Pos() *position.Position { return &position.Position{} } func (t testNode) Type() types.Type { return types.None } type testVisitor struct{} func (v testVisitor) VisitBefore(n ast.Node) (ast.Visitor, ast.Node) { return v, n } func (v testVisitor) VisitAfter(n ast.Node) ast.Node { return n } func TestWalkPanicsOnUnknown(t *testing.T) { defer func() { s := recover() if s == nil { t.Errorf("No panic received") } }() ast.Walk(testVisitor{}, testNode{}) } type testWalker struct{} func (t *testWalker) VisitBefore(n ast.Node) (ast.Visitor, ast.Node) { if v, ok := n.(*ast.BinaryExpr); ok { if v.Op == parser.DIV { n = &ast.IntLit{I: 4} } } return t, n } func (t *testWalker) VisitAfter(n ast.Node) ast.Node { if v, ok := n.(*ast.BinaryExpr); ok { if v.Op == parser.MINUS { n = &ast.IntLit{I: 5} } } return n } func TestAstReplacement(t *testing.T) { var a ast.Node = &ast.BinaryExpr{ LHS: &ast.BinaryExpr{LHS: &ast.IntLit{I: 0}, RHS: &ast.IntLit{I: 1}, Op: parser.DIV}, RHS: &ast.BinaryExpr{LHS: &ast.IntLit{I: 2}, RHS: &ast.IntLit{I: 3}, Op: parser.MINUS}, Op: parser.PLUS, } tw := &testWalker{} a = ast.Walk(tw, a) expected := &ast.BinaryExpr{ LHS: &ast.IntLit{I: 4}, RHS: &ast.IntLit{I: 5}, Op: parser.PLUS, } if !testutil.ExpectNoDiff(t, expected, a, testutil.IgnoreUnexported(ast.BinaryExpr{})) { s := parser.Sexp{} t.Log("AST:\n" + s.Dump(a)) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/checker/000077500000000000000000000000001460063571700235025ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/checker/checker.go000066400000000000000000000737261460063571700254540ustar00rootroot00000000000000// 
Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. package checker import ( goerrors "errors" "fmt" "strings" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/errors" "github.com/google/mtail/internal/runtime/compiler/parser" "github.com/google/mtail/internal/runtime/compiler/symbol" "github.com/google/mtail/internal/runtime/compiler/types" ) const ( defaultMaxRegexpLength = 1024 defaultMaxRecursionDepth = 100 ) // checker holds data for a semantic checker. type checker struct { scope *symbol.Scope // the current scope decoScopes []*symbol.Scope // A stack of scopes used for resolving symbols in decorated nodes errors errors.ErrorList depth int tooDeep bool maxRecursionDepth int maxRegexLength int } // Check performs a semantic check of the astNode, and returns a potentially // modified astNode and either a list of errors found, or nil if the program is // semantically valid. At the completion of Check, the symbol table and type // annotation are also complete. func Check(node ast.Node, maxRegexpLength int, maxRecursionDepth int) (ast.Node, error) { // set defaults if maxRegexpLength == 0 { maxRegexpLength = defaultMaxRegexpLength } if maxRecursionDepth == 0 { maxRecursionDepth = defaultMaxRecursionDepth } c := &checker{maxRegexLength: maxRegexpLength, maxRecursionDepth: maxRecursionDepth} node = ast.Walk(c, node) if len(c.errors) > 0 { return node, c.errors } return node, nil } // VisitBefore performs most of the symbol table construction, so that symbols // are guaranteed to exist before their use. 
func (c *checker) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) {
	// Track walk depth to guard against pathological parse trees.  The
	// error is reported only once (tooDeep) to avoid flooding the user.
	c.depth++
	if c.depth > c.maxRecursionDepth {
		if !c.tooDeep {
			c.errors.Add(node.Pos(), fmt.Sprintf("Expression exceeded maximum recursion depth of %d", c.maxRecursionDepth))
			c.tooDeep = true
		}
		// Returning a nil Visitor means Walk never calls VisitAfter for this
		// node, so the depth counter must be unwound here by hand.  The same
		// pattern recurs on every `return nil, n` below.
		c.depth--
		return nil, node
	}
	switch n := node.(type) {
	case *ast.StmtList:
		// Statement lists open a new lexical scope.
		n.Scope = symbol.NewScope(c.scope)
		c.scope = n.Scope
		glog.V(2).Infof("Created new scope %v in stmtlist", n.Scope)
		return c, n

	case *ast.CondStmt:
		// Conditionals also open a new scope, so caprefs defined by the
		// condition's pattern are visible only inside the block.
		n.Scope = symbol.NewScope(c.scope)
		c.scope = n.Scope
		glog.V(2).Infof("Created new scope %v in condstmt", n.Scope)
		return c, n

	case *ast.CaprefTerm:
		// Resolve `$name'/`$n' against capture groups declared by regexes
		// already checked in an enclosing scope (see checkRegex).
		if n.Symbol == nil {
			sym := c.scope.Lookup(n.Name, symbol.CaprefSymbol)
			if sym == nil {
				msg := fmt.Sprintf("Capture group `$%s' was not defined by a regular expression visible to this scope.", n.Name)
				if n.IsNamed {
					msg = fmt.Sprintf("%s\n\tTry using `(?P<%s>...)' to name the capture group.", msg, n.Name)
				} else {
					msg = fmt.Sprintf("%s\n\tCheck that there are at least %s pairs of parentheses.", msg, n.Name)
				}
				c.errors.Add(n.Pos(), msg)
				c.depth--
				return nil, n
			}
			glog.V(2).Infof("Found %q as %v in scope %v", n.Name, sym, c.scope)
			sym.Used = true
			n.Symbol = sym
		}
		return c, n

	case *ast.VarDecl:
		// Declare the metric in the current scope; duplicate names are errors.
		n.Symbol = symbol.NewSymbol(n.Name, symbol.VarSymbol, n.Pos())
		if alt := c.scope.Insert(n.Symbol); alt != nil {
			c.errors.Add(n.Pos(), fmt.Sprintf("Redeclaration of metric `%s' previously declared at %s", n.Name, alt.Pos))
			c.depth--
			return nil, n
		}
		var rType types.Type
		switch n.Kind {
		case metrics.Counter, metrics.Gauge, metrics.Timer, metrics.Histogram:
			// TODO(jaq): This should be a numeric type, unless we want to
			// enforce more specific rules like "Counter can only be Int."
			rType = types.NewVariable()
		case metrics.Text:
			rType = types.String
		default:
			c.errors.Add(n.Pos(), fmt.Sprintf("internal compiler error: unrecognised Kind %v for declNode %v", n.Kind, n))
			c.depth--
			return nil, n
		}
		if len(n.Buckets) > 0 && n.Kind != metrics.Histogram {
			c.errors.Add(n.Pos(), fmt.Sprintf("Can't specify buckets for non-histogram metric `%s'.", n.Name))
			c.depth--
			return nil, n
		}
		// A keyed metric gets a Dimension type: one fresh type variable per
		// key plus the value type; an unkeyed metric is just the value type.
		if len(n.Keys) > 0 {
			// One type per key
			keyTypes := make([]types.Type, 0, len(n.Keys))
			for i := 0; i < len(n.Keys); i++ {
				keyTypes = append(keyTypes, types.NewVariable())
			}
			// and one for the value.
			keyTypes = append(keyTypes, rType)
			n.Symbol.Type = types.Dimension(keyTypes...)
		} else {
			n.Symbol.Type = rType
		}
		return c, n

	case *ast.IDTerm:
		// Resolve an identifier: metrics first, then pattern constants.
		if n.Symbol == nil {
			if sym := c.scope.Lookup(n.Name, symbol.VarSymbol); sym != nil {
				glog.V(2).Infof("found varsymbol sym %v", sym)
				sym.Used = true
				n.Symbol = sym
			} else if sym := c.scope.Lookup(n.Name, symbol.PatternSymbol); sym != nil {
				glog.V(2).Infof("Found patternsymbol Sym %v", sym)
				sym.Used = true
				n.Symbol = sym
			} else {
				// Apply a terribly bad heuristic to choose a suggestion.
				sug := fmt.Sprintf("Try adding `counter %s' to the top of the program.", n.Name)
				if n.Name == strings.ToUpper(n.Name) {
					// If the string is all uppercase, pretend it was a const
					// pattern because that's what the docs do.
					sug = fmt.Sprintf("Try adding `const %s /.../' earlier in the program.", n.Name)
				}
				c.errors.Add(n.Pos(), fmt.Sprintf("Identifier `%s' not declared.\n\t%s", n.Name, sug))
				c.depth--
				return nil, n
			}
		}
		return c, n

	case *ast.DecoDecl:
		// Declare the decorator and bind the symbol back to this node so
		// DecoStmt can find the definition.
		n.Symbol = symbol.NewSymbol(n.Name, symbol.DecoSymbol, n.Pos())
		n.Symbol.Binding = n
		if alt := c.scope.Insert(n.Symbol); alt != nil {
			c.errors.Add(n.Pos(), fmt.Sprintf("Redeclaration of decorator `@%s' previously declared at %s", n.Name, alt.Pos))
			c.depth--
			return nil, n
		}
		// Append a scope placeholder for the recursion into the block.  It has no parent, it'll be cloned when the decorator is instantiated.
		c.decoScopes = append(c.decoScopes, symbol.NewScope(nil))
		return c, n

	case *ast.DecoStmt:
		// Instantiation site of a decorator: look up the declaration and
		// clone its scope zygote for this use.
		if sym := c.scope.Lookup(n.Name, symbol.DecoSymbol); sym != nil {
			if sym.Binding == nil {
				c.errors.Add(n.Pos(), fmt.Sprintf("Internal error: Decorator %q not bound to its definition.", n.Name))
				c.depth--
				return nil, n
			}
			sym.Used = true
			n.Decl = sym.Binding.(*ast.DecoDecl)
		} else {
			c.errors.Add(n.Pos(), fmt.Sprintf("Decorator `@%s' is not defined.\n\tTry adding a definition `def %s {}' earlier in the program.", n.Name, n.Name))
			c.depth--
			return nil, n
		}
		// Create a new scope for the decorator instantiation.
		n.Scope = symbol.NewScope(c.scope)
		if n.Decl == nil {
			glog.V(2).Infof("No DecoDecl on DecoStmt: %v", n)
			c.errors.Add(n.Pos(), fmt.Sprintf("Internal error: no declaration for decorator: %#v", n))
			c.depth--
			return nil, n
		}
		if n.Decl.Scope == nil {
			glog.V(2).Infof("No Scope on DecoDecl: %#v", n.Decl)
			c.errors.Add(n.Pos(), fmt.Sprintf("Decorator `@%s' is not completely defined yet.\n\tTry removing @%s from here.", n.Name, n.Name))
			c.depth--
			return nil, n
		}
		// Clone the DecoDecl scope zygote into this scope.
		n.Scope.CopyFrom(n.Decl.Scope)
		c.scope = n.Scope
		return c, n

	case *ast.PatternFragment:
		// `const NAME /pattern/': declare a pattern constant.
		id, ok := n.ID.(*ast.IDTerm)
		if !ok {
			c.errors.Add(n.Pos(), fmt.Sprintf("Internal error: no identifier attached to pattern fragment %#v", n))
			c.depth--
			return nil, n
		}
		n.Symbol = symbol.NewSymbol(id.Name, symbol.PatternSymbol, id.Pos())
		if alt := c.scope.Insert(n.Symbol); alt != nil {
			c.errors.Add(n.Pos(), fmt.Sprintf("Redefinition of pattern constant `%s' previously defined at %s", id.Name, alt.Pos))
			c.depth--
			return nil, n
		}
		n.Symbol.Binding = n
		n.Symbol.Type = types.Pattern
		return c, n

	case *ast.DelStmt:
		// NOTE(review): the child is walked explicitly here even though Walk
		// treats DelStmt as a terminal — confirm this double-walk is intended.
		n.N = ast.Walk(c, n.N)
		return c, n
	}
	return c, node
}

// checkSymbolTable emits errors if any eligible symbols in the current scope
// are not marked as used or have an invalid type.
func (c *checker) checkSymbolTable() {
	for _, sym := range c.scope.Symbols {
		if !sym.Used {
			// Users don't have control over the patterns given from decorators
			// so this should never be an error; but it can be useful to know
			// if a program is doing unnecessary work.
			if sym.Kind == symbol.CaprefSymbol {
				if sym.Addr == 0 {
					// Don't warn about the zeroth capture group; it's not user-defined.
					continue
				}
				glog.Infof("capture group reference `%s' at %s appears to be unused", sym.Name, sym.Pos)
				continue
			}
			c.errors.Add(sym.Pos, fmt.Sprintf("Declaration of %s `%s' here is never used.", sym.Kind, sym.Name))
		}
	}
}

// VisitAfter performs the type annotation and checking, once the child nodes
// of expressions have been annotated and checked.  Within this function,
// gotType refers to the types inferred in the AST, and wantType is the type
// expected for this expression.  After unification, uType is the concrete type
// of the expression, and the visitor should set any node Types as appropriate.
//
// The notation for type inference used comes from the 2010 lecture notes for
// Stanford's CS413 class.
// https://web.stanford.edu/class/cs143/lectures/lecture09.pdf
func (c *checker) VisitAfter(node ast.Node) ast.Node {
	// Once the depth limit has tripped, VisitBefore stops descending; skip
	// all post-order work too so only the single depth error is reported.
	if c.tooDeep {
		return node
	}
	// Balance the increment done in VisitBefore for every node that got here.
	defer func() {
		c.depth--
	}()
	switch n := node.(type) {
	case *ast.StmtList:
		c.checkSymbolTable()
		// Pop the scope
		c.scope = n.Scope.Parent
		return n
	case *ast.CondStmt:
		switch n.Cond.(type) {
		case *ast.BinaryExpr, *ast.OtherwiseStmt, *ast.UnaryExpr:
			// OK as conditions
		case *ast.PatternExpr:
			// If the parser saw an IDTerm with type Pattern, then we know it's really a pattern constant and need to wrap it in an unary match in this context.
			cond := &ast.UnaryExpr{Expr: n.Cond, Op: parser.MATCH}
			cond.SetType(types.Bool)
			n.Cond = cond
		default:
			c.errors.Add(n.Cond.Pos(), fmt.Sprintf("Can't interpret %s as a boolean expression here.\n\tTry using comparison operators to make the condition explicit.", n.Cond.Type()))
		}
		c.checkSymbolTable()
		// Pop the scope.
		c.scope = n.Scope.Parent
		return n
	case *ast.DecoStmt:
		// Don't check symbol usage here because the decorator is only partially defined.
		// Pop the scope.
		c.scope = n.Scope.Parent
		return n
	case *ast.NextStmt:
		// The last element in this list will be the empty stack created by the
		// DecoDecl on the way in.  If there's no last element, then we can't
		// have entered a DecoDecl yet.
		last := len(c.decoScopes) - 1
		if last < 0 {
			c.errors.Add(n.Pos(), "Can't use `next' outside of a decorator.")
			return n
		}
		decoScope := c.decoScopes[last]
		if len(decoScope.Symbols) > 0 {
			c.errors.Add(n.Pos(), "Can't use `next' statement twice in a decorator.")
			return n
		}
		// Merge the current scope into it.
		decoScope.CopyFrom(c.scope)
		return n
	case *ast.DecoDecl:
		// Pop the scope off the list, and insert it into this node.
		last := len(c.decoScopes) - 1
		decoScope := c.decoScopes[last]
		if len(decoScope.Symbols) == 0 {
			c.errors.Add(n.Pos(), fmt.Sprintf("No symbols found in decorator `@%s'.\n\tTry adding a `next' statement inside the `{}' block.", n.Name))
		}
		// Store the zygote from the scope stack on this declaration.
		n.Scope = decoScope
		c.decoScopes = c.decoScopes[:last]
		return n
	case *ast.BinaryExpr:
		// Propagate type errors upward without re-reporting them.
		lT := n.LHS.Type()
		if types.IsTypeError(lT) {
			n.SetType(lT)
			return n
		}
		rT := n.RHS.Type()
		if types.IsTypeError(rT) {
			n.SetType(rT)
			return n
		}
		var rType types.Type
		switch n.Op {
		case parser.DIV, parser.MOD, parser.MUL, parser.MINUS, parser.PLUS, parser.POW:
			// Arithmetic: e1 OP e2
			// O ⊢ e1 : Tl, O ⊢ e2 : Tr
			// Tl <= Tr , Tr <= Tl
			// ⇒ O ⊢ e : lub(Tl, Tr)
			// First handle the Tl <= Tr and vice versa.
			rType = types.LeastUpperBound(lT, rT)
			var err *types.TypeError
			if types.AsTypeError(rType, &err) {
				// Change the type mismatch error to make more sense in this context.
				if goerrors.Is(err, types.ErrTypeMismatch) {
					c.errors.Add(n.Pos(), fmt.Sprintf("type mismatch: can't apply %s to LHS of type %q with RHS of type %q.", parser.Kind(n.Op), lT, rT))
				} else {
					c.errors.Add(n.Pos(), err.Error())
				}
				n.SetType(err)
				return n
			}
			gotType := types.Function(lT, rT, rType)
			t := types.NewVariable()
			wantType := types.Function(t, t, t)
			uType := types.Unify(wantType, gotType)
			if types.AsTypeError(uType, &err) {
				c.errors.Add(n.Pos(), err.Error())
				n.SetType(err)
				return n
			}
			// Implicit type conversion for non-comparisons, promoting each
			// half to the return type of the op.
			if !types.Equals(rType, lT) {
				conv := &ast.ConvExpr{N: n.LHS}
				conv.SetType(rType)
				n.LHS = conv
			}
			if !types.Equals(rType, rT) {
				conv := &ast.ConvExpr{N: n.RHS}
				conv.SetType(rType)
				n.RHS = conv
			}
			// Constant-fold check: division or modulo by a literal zero is a
			// compile-time error.
			if n.Op == parser.DIV || n.Op == parser.MOD {
				if i, ok := n.RHS.(*ast.IntLit); ok {
					if i.I == 0 {
						c.errors.Add(n.Pos(), "Can't divide by zero.")
						n.SetType(types.Error)
						return n
					}
				}
			}
		case parser.SHL, parser.SHR, parser.BITAND, parser.BITOR, parser.XOR, parser.NOT:
			// bitwise: e1 OP e2
			// O ⊢ e1 : Int, O ⊢ e2 : Int
			// ⇒ O ⊢ e : Int
			rType = types.Int
			wantType := types.Function(rType, rType, rType)
			gotType := types.Function(lT, rT, types.NewVariable())
			uType := types.Unify(wantType, gotType)
			var err *types.TypeError
			if types.AsTypeError(uType, &err) {
				if goerrors.Is(err, types.ErrTypeMismatch) {
					c.errors.Add(n.Pos(), fmt.Sprintf("Integer types expected for bitwise %s, got %s and %s", parser.Kind(n.Op), lT, rT))
				} else {
					c.errors.Add(n.Pos(), err.Error())
				}
				n.SetType(err)
				return n
			}
		case parser.AND, parser.OR:
			// If the parser saw an IDTerm with type Pattern, then we know it's really a pattern constant and need to wrap it in an unary match in this context.
			if v, ok := n.LHS.(*ast.PatternExpr); ok {
				match := &ast.UnaryExpr{Expr: v, Op: parser.MATCH}
				match.SetType(types.Bool)
				n.LHS = match
			}
			// Likewise for the RHS
			if v, ok := n.RHS.(*ast.PatternExpr); ok {
				match := &ast.UnaryExpr{Expr: v, Op: parser.MATCH}
				match.SetType(types.Bool)
				n.RHS = match
			}
			// logical: e1 OP e2
			// O ⊢ e1 : Bool, O ⊢ e2 : Bool
			// ⇒ O ⊢ e : Bool
			// NOTE(review): lT and rT were captured before the wrapping above,
			// so unification sees the pre-wrap operand types — confirm intended.
			rType = types.Bool
			wantType := types.Function(rType, rType, rType)
			gotType := types.Function(lT, rT, types.NewVariable())
			uType := types.Unify(wantType, gotType)
			var err *types.TypeError
			if types.AsTypeError(uType, &err) {
				if goerrors.Is(err, types.ErrTypeMismatch) {
					c.errors.Add(n.Pos(), fmt.Sprintf("Boolean types expected for logical %s, got %s and %s", parser.Kind(n.Op), lT, rT))
				} else {
					c.errors.Add(n.Pos(), err.Error())
				}
				n.SetType(err)
				return n
			}
		case parser.LT, parser.GT, parser.LE, parser.GE, parser.EQ, parser.NE:
			// comparable, logical: e2 OP e2
			// O ⊢ e1 : Tl, O ⊢ e2 : Tr
			// Tl <= Tr , Tr <= Tl
			// ⇒ O ⊢ e : Bool
			// First handle the Tl <= Tr and vice versa.
			t := types.LeastUpperBound(lT, rT)
			var err *types.TypeError
			if types.AsTypeError(t, &err) {
				if goerrors.Is(err, types.ErrTypeMismatch) {
					c.errors.Add(n.Pos(), fmt.Sprintf("type mismatch: can't apply %s to LHS of type %q with RHS of type %q.", parser.Kind(n.Op), lT, rT))
				} else {
					c.errors.Add(n.Pos(), err.Error())
				}
				n.SetType(err)
				return n
			}
			rType = types.Bool
			gotType := types.Function(lT, rT, rType)
			wantType := types.Function(t, t, types.Bool)
			uType := types.Unify(wantType, gotType)
			if types.AsTypeError(uType, &err) {
				c.errors.Add(n.Pos(), err.Error())
				n.SetType(err)
				return n
			}
			// Implicit type conversion: Promote types if the ast types are not
			// the same as the expression type.
			if !types.Equals(t, lT) {
				conv := &ast.ConvExpr{N: n.LHS}
				conv.SetType(t)
				n.LHS = conv
				glog.V(2).Infof("Emitting convnode %#v on %#v", conv, n)
			}
			if !types.Equals(t, rT) {
				conv := &ast.ConvExpr{N: n.RHS}
				conv.SetType(t)
				n.RHS = conv
				glog.V(2).Infof("Emitting convnode %+v", conv)
			}
		case parser.ASSIGN, parser.ADD_ASSIGN:
			// e1 = e2; e1 += e2
			// O ⊢ e1 : Tl, O ⊢ e2 : Tr
			// Tr <= Tl
			// ⇒ O ⊢ e : Tl
			rType = lT
			// TODO(jaq): the rT <= lT relationship is not correctly encoded here.
			t := types.LeastUpperBound(lT, rT)
			uType := types.Unify(rType, t)
			var err *types.TypeError
			if types.AsTypeError(uType, &err) {
				c.errors.Add(n.Pos(), err.Error())
				n.SetType(err)
				return n
			}
			// If the LHS is assignable, mark it as an lvalue, otherwise error.
			switch v := n.LHS.(type) {
			case *ast.IDTerm:
				v.Lvalue = true
			case *ast.IndexedExpr:
				v.LHS.(*ast.IDTerm).Lvalue = true
			default:
				glog.V(2).Infof("The lhs is a %T %v", n.LHS, n.LHS)
				c.errors.Add(n.LHS.Pos(), "Can't assign to expression on left; expecting a variable here.")
				n.SetType(types.Error)
				return n
			}
		case parser.MATCH, parser.NOT_MATCH:
			// e1 =~ e2, e1 !~ e2
			// O ⊢ e1 : String , O ⊢ e2 : Pattern
			// ⇒ O ⊢ e : Bool
			// TODO(jaq): We're not correctly encoding this.
			rType = types.Bool
			wantType := types.Function(types.NewVariable(), types.Pattern, rType)
			gotType := types.Function(lT, rT, types.NewVariable())
			uType := types.Unify(wantType, gotType)
			var err *types.TypeError
			if types.AsTypeError(uType, &err) {
				if goerrors.Is(err, types.ErrTypeMismatch) {
					c.errors.Add(n.Pos(), fmt.Sprintf("Parameter to %s has a %s.", parser.Kind(n.Op), err))
				} else {
					c.errors.Add(n.Pos(), err.Error())
				}
				n.SetType(err)
				return n
			}
			// Implicit conversion of the RHS to a PatternExpr if not already.
			if !types.Equals(rT, types.Pattern) {
				n.RHS = ast.Walk(c, &ast.PatternExpr{Expr: n.RHS})
			}
		default:
			c.errors.Add(n.Pos(), fmt.Sprintf("Unexpected operator %s (%v) in node %#v", parser.Kind(n.Op), n.Op, n))
			n.SetType(types.InternalError)
			return n
		}
		n.SetType(rType)
		return n
	case *ast.UnaryExpr:
		if types.IsTypeError(n.Expr.Type()) {
			n.SetType(n.Expr.Type())
			return n
		}
		var rType types.Type
		switch n.Op {
		case parser.NOT:
			// !e1
			// O ⊢ e1 : Int
			// ⇒ O ⊢ e : Bool
			rType = types.Bool
			wantType := types.Function(types.Int, rType)
			gotType := types.Function(n.Expr.Type(), types.NewVariable())
			uType := types.Unify(wantType, gotType)
			var err *types.TypeError
			if types.AsTypeError(uType, &err) {
				c.errors.Add(n.Expr.Pos(), fmt.Sprintf("%s for `~' operator.", err))
				n.SetType(err)
				return n
			}
		case parser.INC, parser.DEC:
			// e1++ , e1--
			// O ⊢ e1 : Int
			// ⇒ O ⊢ e : Int
			// TODO we do this backwards versus ADD_ASSIGN above, why
			// If the expr is assignable, mark it as an lvalue, otherwise error.
			switch v := n.Expr.(type) {
			case *ast.IDTerm:
				v.Lvalue = true
			case *ast.IndexedExpr:
				v.LHS.(*ast.IDTerm).Lvalue = true
			default:
				glog.V(2).Infof("the expr is a %T %v", n.Expr, n.Expr)
				c.errors.Add(n.Expr.Pos(), "Can't assign to expression; expecting a variable here.")
				n.SetType(types.Error)
				return n
			}
			rType = types.NewVariable()
			wantType := types.Function(types.Int, types.Int)
			gotType := types.Function(n.Expr.Type(), rType)
			uType := types.Unify(wantType, gotType)
			var err *types.TypeError
			if types.AsTypeError(uType, &err) {
				c.errors.Add(n.Pos(), err.Error())
				n.SetType(err)
				return n
			}
			uTypeOperator, ok := uType.(*types.Operator)
			if !ok {
				c.errors.Add(n.Pos(), fmt.Sprintf("internal error: unexpected type for Expr %v", uType))
				n.SetType(types.InternalError)
				return n
			}
			// After unification, the expr still has to be of Int type.
			if !types.OccursIn(types.Int, []types.Type{uTypeOperator.Args[0]}) {
				c.errors.Add(n.Expr.Pos(), fmt.Sprintf("type mismatch: expecting an Int for %s, not %v.", parser.Kind(n.Op), n.Expr.Type()))
				n.SetType(types.Error)
				return n
			}
		case parser.MATCH:
			// Implicit match expressions, an expression of type Pattern returning Bool
			// /e1/
			// O ⊢ e1 : Pattern
			// ⇒ O ⊢ e : Bool
			rType = types.Bool
			wantType := types.Function(types.Pattern, rType)
			gotType := types.Function(n.Expr.Type(), types.NewVariable())
			uType := types.Unify(wantType, gotType)
			var err *types.TypeError
			if types.AsTypeError(uType, &err) {
				if goerrors.Is(err, types.ErrTypeMismatch) {
					c.errors.Add(n.Pos(), fmt.Sprintf("type mismatch: Unary MATCH expects Pattern, received %s", n.Expr.Type()))
				} else {
					c.errors.Add(n.Pos(), err.Error())
				}
				n.SetType(err)
				return n
			}
		default:
			c.errors.Add(n.Pos(), fmt.Sprintf("unknown unary op %s in expr %#v", parser.Kind(n.Op), n))
			n.SetType(types.InternalError)
			return n
		}
		n.SetType(rType)
		return n
	case *ast.ExprList:
		// (e1, e2, ...)
		// ⇒ O ⊢ e: e1⨯e1⨯...
		argTypes := []types.Type{}
		for _, arg := range n.Children {
			if types.IsTypeError(arg.Type()) {
				n.SetType(arg.Type())
				return n
			}
			argTypes = append(argTypes, arg.Type())
		}
		n.SetType(types.Dimension(argTypes...))
		return n
	case *ast.IndexedExpr:
		// e1[e2, e3, ..., en]
		// O ⊢ e1 : T1⨯T2⨯...Tn⨯Tr
		// O ⊢ e2,e3,...,en : T1,T2,...,Tn
		// ⇒ O ⊢ e : Tr
		// prune this node to n.LHS if Index is nil.  Leave 0 length exprlist as that's a type error.
		exprList, ok := n.Index.(*ast.ExprList)
		if !ok {
			return n.LHS
		}
		argTypes := []types.Type{}
		for _, arg := range exprList.Children {
			if types.IsTypeError(arg.Type()) {
				n.SetType(arg.Type())
				return n
			}
			argTypes = append(argTypes, arg.Type())
		}
		switch v := n.LHS.(type) {
		case *ast.IDTerm:
			if v.Symbol == nil {
				// undefined, already caught (where?)
				glog.V(2).Infof("undefined ID %v", v)
				n.SetType(types.Error)
				return n
			}
			if types.Equals(types.Pattern, v.Type()) {
				// We now have enough information to tell that something the
				// parser thought was an IDTerm is really a pattern constant,
				// so we can rewrite the AST here.  We can't yet wrap the
				// pattern expression with Unary Match because we don't know
				// the context yet, but see CondExpr and BinaryExpr's
				// logical-op.
				return ast.Walk(c, &ast.PatternExpr{Expr: v})
			}
			if !types.IsDimension(v.Type()) {
				if len(argTypes) > 0 {
					c.errors.Add(n.Pos(), "Index taken on unindexable expression")
					n.SetType(types.Error)
				} else {
					n.SetType(v.Type())
				}
				return n
			}
			// it's a Dimension, continue after switch
		default:
			c.errors.Add(n.Pos(), "Index taken on unindexable expression")
			n.SetType(types.Error)
			return n
		}
		rType := types.NewVariable()
		argTypes = append(argTypes, rType) // T1,T2,...,Tn
		gotType := types.Dimension(argTypes...) // Tr
		wantType, ok := n.LHS.Type().(*types.Operator)
		if !ok {
			c.errors.Add(n.Pos(), fmt.Sprintf("internal error: unexpected type on LHS %v", n.LHS.Type()))
			n.SetType(types.InternalError)
			return n
		}
		uType := types.Unify(wantType, gotType)
		var err *types.TypeError
		if types.AsTypeError(uType, &err) {
			// Distinguish wrong-key-count from other unification failures for
			// a friendlier message (the -1 excludes the value type).
			switch {
			case len(wantType.Args) > len(gotType.Args):
				c.errors.Add(n.Pos(), fmt.Sprintf("Not enough keys for indexed expression: expecting %d, received %d", len(wantType.Args)-1, len(gotType.Args)-1))
				n.SetType(types.Error)
				return n
			case len(wantType.Args) < len(gotType.Args):
				c.errors.Add(n.Pos(), fmt.Sprintf("Too many keys for indexed expression: expecting %d, received %d.", len(wantType.Args)-1, len(gotType.Args)-1))
			default:
				c.errors.Add(n.Pos(), err.Error())
			}
			n.SetType(types.Error)
			return n
		}
		// Having typechecked the expression against the expected types, and
		// have detected mismatched keylengths, we have a well-formed
		// expression, so can now fold to just IDTerm if there's no ExprList.
		if len(exprList.Children) == 0 {
			return n.LHS
		}
		n.SetType(rType)
		return n
	case *ast.BuiltinExpr:
		// f(e1, e2, ..., en)
		// O ⊢ f : T1⨯T2⨯...Tn⨯Tr
		// O ⊢ e1,e2,...,en : T1,T2,...,Tn
		// ⇒ O ⊢ e : Tr
		// TODO: recall the syntax for subst a fresh type above
		argTypes := []types.Type{}
		if args, ok := n.Args.(*ast.ExprList); ok {
			for _, arg := range args.Children {
				argTypes = append(argTypes, arg.Type())
			}
		}
		rType := types.NewVariable()
		argTypes = append(argTypes, rType)
		gotType := types.Function(argTypes...)
		wantType := types.FreshType(types.Builtins[n.Name])
		uType := types.Unify(wantType, gotType)
		var err *types.TypeError
		if types.AsTypeError(uType, &err) {
			if goerrors.Is(err, types.ErrTypeMismatch) {
				c.errors.Add(n.Pos(), fmt.Sprintf("call to `%s': %s", n.Name, err))
			} else {
				c.errors.Add(n.Pos(), err.Error())
			}
			n.SetType(err)
			return n
		}
		n.SetType(rType)
		// Extra per-builtin validation beyond what unification can express.
		switch n.Name {
		case "strptime":
			if !types.Equals(gotType.Args[1], types.String) {
				c.errors.Add(n.Args.(*ast.ExprList).Children[1].Pos(), fmt.Sprintf("Expecting a format string for argument 2 of strptime(), not %v.", gotType.Args[1]))
				n.SetType(types.Error)
				return n
			}
			// Second argument to strptime is the format string.  If it is
			// defined at compile time, we can verify it can be use as a format
			// string by parsing itself.
			if f, ok := n.Args.(*ast.ExprList).Children[1].(*ast.StringLit); ok {
				// Layout strings can contain an underscore to indicate a digit
				// field if the layout field can contain two digits; but they
				// won't parse themselves.  Zulu Timezones in the layout need
				// to be converted to offset in the parsed time.
				timeStr := strings.ReplaceAll(strings.ReplaceAll(f.Text, "_", ""), "Z", "+")
				glog.V(2).Infof("time_str is %q", timeStr)
				_, err := time.Parse(f.Text, timeStr)
				if err != nil {
					glog.Infof("time.Parse(%q, %q) failed: %s", f.Text, timeStr, err)
					c.errors.Add(f.Pos(), fmt.Sprintf("invalid time format string %q\n\tRefer to the documentation at https://golang.org/pkg/time/#pkg-constants for advice.", f.Text))
					n.SetType(types.Error)
					return n
				}
			} else {
				c.errors.Add(n.Pos(), "Internal error: exprlist child is not string literal.")
				return n
			}
		case "tolower":
			if !types.Equals(gotType.Args[0], types.String) {
				c.errors.Add(n.Args.(*ast.ExprList).Children[0].Pos(), fmt.Sprintf("Expecting a String for argument 1 of tolower(), not %v.", gotType.Args[0]))
				n.SetType(types.Error)
				return n
			}
		}
		return n
	case *ast.PatternExpr:
		// Evaluate the expression.
		pe := &patternEvaluator{scope: c.scope, errors: &c.errors}
		n = ast.Walk(pe, n).(*ast.PatternExpr)
		if pe.pattern.String() == "" {
			return n
		}
		n.Pattern = pe.pattern.String()
		c.checkRegex(n.Pattern, n)
		return n
	case *ast.PatternFragment:
		// Evaluate the expression.
		pe := &patternEvaluator{scope: c.scope, errors: &c.errors}
		n.Expr = ast.Walk(pe, n.Expr)
		if pe.pattern.String() == "" {
			return n
		}
		n.Pattern = pe.pattern.String()
		return n
	case *ast.DelStmt:
		// `del' is only valid on a keyed (dimensioned) metric index.
		if ix, ok := n.N.(*ast.IndexedExpr); ok {
			if len(ix.Index.(*ast.ExprList).Children) == 0 {
				c.errors.Add(n.N.Pos(), "Cannot delete this.\n\tTry deleting an index from this dimensioned metric.")
				return n
			}
			ix.LHS.(*ast.IDTerm).Lvalue = true
			return n
		}
		c.errors.Add(n.N.Pos(), "Cannot delete this.\n\tTry deleting from a dimensioned metric with this as an index.")
	}
	return node
}

// checkRegex is a helper method to compile and check a regular expression, and
// to generate its capture groups as symbols.
func (c *checker) checkRegex(pattern string, n ast.Node) { plen := len(pattern) if plen > c.maxRegexLength { c.errors.Add(n.Pos(), fmt.Sprintf("Exceeded maximum regular expression pattern length of %d bytes with %d.\n\tExcessively long patterns are likely to cause compilation and runtime performance problems.", c.maxRegexLength, plen)) return } if reAst, err := types.ParseRegexp(pattern); err == nil { // We reserve the names of the capturing groups as declarations // of those symbols, so that future CAPREF tokens parsed can // retrieve their value. By recording them in the symbol table, we // can warn the user about unknown capture group references. for i, capref := range reAst.CapNames() { sym := symbol.NewSymbol(fmt.Sprintf("%d", i), symbol.CaprefSymbol, n.Pos()) sym.Type = types.InferCaprefType(reAst, i) sym.Binding = n sym.Addr = i if alt := c.scope.Insert(sym); alt != nil { c.errors.Add(n.Pos(), fmt.Sprintf("Redeclaration of capture group `%s' previously declared at %s", sym.Name, alt.Pos)) // No return, let this loop collect all errors } if capref != "" { sym.Name = capref if alt := c.scope.InsertAlias(sym, capref); alt != nil { c.errors.Add(n.Pos(), fmt.Sprintf("Redeclaration of capture group `%s' previously declared at %s", sym.Name, alt.Pos)) // No return, let this loop collect all errors } } glog.V(2).Infof("Added capref %v to scope %v", sym, c.scope) } } else { c.errors.Add(n.Pos(), err.Error()) return } } // patternEvaluator is a helper that performs concatenation of pattern // fragments so that they can be compiled as whole regular expression patterns. 
type patternEvaluator struct { scope *symbol.Scope errors *errors.ErrorList pattern strings.Builder } func (p *patternEvaluator) VisitBefore(n ast.Node) (ast.Visitor, ast.Node) { switch v := n.(type) { case *ast.BinaryExpr: if v.Op != parser.PLUS { p.errors.Add(v.Pos(), fmt.Sprintf("internal error: Invalid operator in concatenation: %v", v)) return nil, n } return p, v case *ast.PatternLit: p.pattern.WriteString(v.Pattern) return p, v case *ast.IDTerm: // Already looked up sym, if still nil then undefined. if v.Symbol == nil { return nil, n } pf, ok := v.Symbol.Binding.(*ast.PatternFragment) if !ok { p.errors.Add(v.Pos(), fmt.Sprintf("Can't append %s `%s' to this pattern.\n\tTry using a `const'-defined pattern fragment.", v.Symbol.Kind, v.Symbol.Name)) return nil, n } if pf.Pattern == "" { p.errors.Add(v.Pos(), fmt.Sprintf("Can't evaluate pattern fragment `%s' here.\n\tTry defining it earlier in the program.", pf.Symbol.Name)) } p.pattern.WriteString(pf.Pattern) return p, v case *ast.IntLit: p.pattern.WriteString(fmt.Sprintf("%d", v.I)) return p, v case *ast.FloatLit: p.pattern.WriteString(fmt.Sprintf("%g", v.F)) return p, v case *ast.StringLit: p.pattern.WriteString(v.Text) return p, v } return p, n } func (p *patternEvaluator) VisitAfter(n ast.Node) ast.Node { return n } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/checker/checker_test.go000066400000000000000000000323661460063571700265060ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package checker_test import ( "flag" "strings" "testing" "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/checker" "github.com/google/mtail/internal/runtime/compiler/parser" "github.com/google/mtail/internal/runtime/compiler/symbol" "github.com/google/mtail/internal/runtime/compiler/types" "github.com/google/mtail/internal/testutil" ) var checkerTestDebug = flag.Bool("checker_test_debug", false, "Turn on to log AST in tests") var checkerInvalidPrograms = []struct { name string program string errors []string }{ { "undefined named capture group", "/blurgh/ { $undef++\n }\n", []string{"undefined named capture group:1:12-17: Capture group `$undef' was not defined by a regular expression visible to this scope.", "\tTry using `(?P...)' to name the capture group."}, }, { "out of bounds capref", "/(blyurg)/ { $2++ \n}\n", []string{"out of bounds capref:1:14-15: Capture group `$2' was not defined by a regular expression " + "visible to this scope.", "\tCheck that there are at least 2 pairs of parentheses."}, }, { "undefined decorator", "@foo {}\n", []string{"undefined decorator:1:1-4: Decorator `@foo' is not defined.", "\tTry adding a definition `def foo {}' earlier in the program."}, }, { "undefined identifier", "// { x++ \n}\n", []string{"undefined identifier:1:6: Identifier `x' not declared.", "\tTry adding `counter x' to the top of the program."}, }, { "invalid regex 1", "/foo(/ {}\n", []string{"invalid regex 1:1:1-6: error parsing regexp: missing closing ): `foo(`"}, }, { "invalid regex 2", "/blurg(?P[[:alph:]])/ {}\n", []string{"invalid regex 3:1:1-24: error parsing regexp: invalid character class range: `[:alph:]`"}, }, { "duplicate declaration", "counter foo\ncounter foo\n", []string{ "duplicate declaration:2:9-11: Redeclaration of metric `foo' previously declared at duplicate declaration:1:9-11", "duplicate declaration:1:9-11: Declaration of variable `foo' here is never 
used.", }, }, { "indexedExpr parameter count", `counter n counter foo by a, b counter bar by a, b counter quux by a /(\d+)/ { n[$1]++ foo[$1]++ bar[$1][0]++ quux[$1][0]++ } `, []string{ // n[$1] is syntactically valid, but n is not indexable "indexedExpr parameter count:6:7-10: Index taken on unindexable expression", // foo[$1] is short one key "indexedExpr parameter count:7:7-12: Not enough keys for indexed expression: expecting 2, received 1", // bar[$1][0] is ok // quux[$1][0] has too many keys "indexedExpr parameter count:9:7-16: Too many keys for indexed expression: expecting 1, received 2.", }, }, { "indexedExpr binary expression", `counter foo by a, b counter bar by a, b /(\d+)/ { foo[$1]+=$1 } /(.*)/ { foo = bar[$1] + 1 } `, []string{ "indexedExpr binary expression:4:3-8: Not enough keys for indexed expression: expecting 2, received 1", "indexedExpr binary expression:7:3-5: Not enough keys for indexed expression: expecting 2, received 0", "indexedExpr binary expression:7:9-14: Not enough keys for indexed expression: expecting 2, received 1", }, }, { "builtin parameter mismatch", `/\d+/ { strptime() } /\d+/ { timestamp() } `, []string{"builtin parameter mismatch:2:4-13: call to `strptime': type mismatch; expected String→String→None received incomplete type"}, }, { "bad strptime format", `strptime("2017-10-16 06:50:25", "2017-10-16 06:50:25") `, []string{ "bad strptime format:1:33-53: invalid time format string \"2017-10-16 06:50:25\"", "\tRefer to the documentation at https://golang.org/pkg/time/#pkg-constants for advice.", }, }, { "undefined const regex", "/foo / + X + / bar/ {}\n", []string{"undefined const regex:1:10: Identifier `X' not declared.", "\tTry adding `const X /.../' earlier in the program."}, }, { "unused symbols", `counter foo const ID /bar/ /asdf/ { } `, []string{ "unused symbols:1:9-11: Declaration of variable `foo' here is never used.", "unused symbols:2:7-8: Declaration of named pattern constant `ID' here is never used.", }, }, { "invalid 
del index count", `gauge t by x, y /.*/ { del t["x"] t["x"]["y"] } `, []string{"invalid del index count:3:7-11: Not enough keys for indexed expression: expecting 2, received 1"}, }, // TODO(jaq): is it an error to make a counter of type string? // {"counter as string", // `counter foo // /(?P.*)/ { // foo = $v // } // `, // []string{"counter as string:4:4-11: Can't assign rhs of type String to lhs of type Int"}}, { "def without usage", `def x{next}`, []string{"def without usage:1:1-10: Declaration of decorator `x' here is never used."}, }, { "def without next", `def x{} @x { }`, []string{"def without next:1:1-3: No symbols found in decorator `@x'.", "\tTry adding a `next' statement inside the `{}' block."}, }, { "def with two nexts", `def x{ /a/ { next } /b/ { next } } @x { }`, []string{"def with two nexts:6:5-8: Can't use `next' statement twice in a decorator."}, }, { "counter with buckets", `counter foo buckets 1, 2, 3 /(\d)/ { foo = $1 }`, []string{"counter with buckets:1:9-11: Can't specify buckets for non-histogram metric `foo'."}, }, { "next outside of decorator", `def x{ next } @x { next } `, []string{"next outside of decorator:5:1-4: Can't use `next' outside of a decorator."}, }, { "use decorator in decorator", `def x { @x {} }`, []string{"use decorator in decorator:2:1-2: Decorator `@x' is not completely defined yet.", "\tTry removing @x from here.", "use decorator in decorator:2:1-2: No symbols found in decorator `@x'.", "\tTry adding a `next' statement inside the `{}' block."}, }, { "delete incorrect object", `/(.*)/ { del $0 }`, []string{"delete incorrect object:2:5-6: Cannot delete this.", "\tTry deleting from a dimensioned metric with this as an index."}, }, { "pattern fragment plus anything", `gauge e // + e { } `, []string{"pattern fragment plus anything:2:6: Can't append variable `e' to this pattern.", "\tTry using a `const'-defined pattern fragment."}, }, { "recursive pattern fragment", `const P//+P`, []string{"recursive pattern fragment:1:11: 
Can't evaluate pattern fragment `P' here.", "\tTry defining it earlier in the program."}, }, { "delete a histogram", `histogram# m del# m`, []string{"delete a histogram:3:7: Cannot delete this.", "\tTry deleting an index from this dimensioned metric."}, }, { "int as bool", `1 {}`, []string{"int as bool:1:1: Can't interpret Int as a boolean expression here.", "\tTry using comparison operators to make the condition explicit."}, }, { "regexp too long", "/" + strings.Repeat("c", 1025) + "/ {}", []string{"regexp too long:1:1-1027: Exceeded maximum regular expression pattern length of 1024 bytes with 1025.", "\tExcessively long patterns are likely to cause compilation and runtime performance problems."}, }, { "strptime invalid args", `strptime("",8) `, []string{"strptime invalid args:1:13: Expecting a format string for argument 2 of strptime(), not Int."}, }, { "inc invalid args", `text l l++ `, []string{"inc invalid args:2:1: type mismatch: expecting an Int for INC, not String."}, }, { "mod by zero", `2=9%0 `, []string{"mod by zero:1:3-5: Can't divide by zero."}, }, { "assign to rvalue", `gauge l l++=l `, []string{"assign to rvalue:2:1-3: Can't assign to expression on left; expecting a variable here."}, }, { "tolower non string", `tolower(2) `, []string{"tolower non string:1:9: Expecting a String for argument 1 of tolower(), not Int."}, }, { "dec non var", `strptime("", "")-- `, []string{"dec non var:1:1-16: Can't assign to expression; expecting a variable here."}, }, // TODO(jaq): This is an instance of bug #190, the capref is ambiguous. 
// {"regexp with no zero capref", // `//||/;0/ {$0||// {}} // `, []string{"regexp with no zero capref:1:5-6: Nonexistent capref =."}}, { "cmp to None", `strptime("","")<5{} `, []string{"cmp to None:1:1-17: type mismatch: can't apply LT to LHS of type \"None\" with RHS of type \"Int\"."}, }, { "negate None", `~strptime("", "") {} `, []string{"negate None:1:2-17: type mismatch; expected Int received None for `~' operator."}, }, // {"match against gauge", // `gauge t // t = 6 =~ t // `, // []string{"match against gauge:2:5-10: Parameter to MATCH has a type mismatch; expected Pattern received Numeric."}, // }, } func TestCheckInvalidPrograms(t *testing.T) { for _, tc := range checkerInvalidPrograms { tc := tc t.Run(tc.name, func(t *testing.T) { ast, err := parser.Parse(tc.name, strings.NewReader(tc.program)) testutil.FatalIfErr(t, err) ast, err = checker.Check(ast, 0, 0) if err == nil { s := parser.Sexp{} s.EmitTypes = true t.Log(s.Dump(ast)) t.Fatal("check didn't fail") } if !testutil.ExpectNoDiff(t, tc.errors, // want strings.Split(err.Error(), "\n"), // got cmpopts.SortSlices(func(x, y string) bool { return x < y })) { t.Logf("Got: %s", err.Error()) s := parser.Sexp{} s.EmitTypes = true t.Log(s.Dump(ast)) } }) } } var checkerValidPrograms = []struct { name string program string }{ { "capture group", `counter foo /(.*)/ { foo += $1 } `, }, { "shadowed positionals", `counter foo /(.*)/ { foo += $1 /bar(\d+)/ { foo += $1 } } `, }, { "sibling positionals", `counter foo /(.*)/ { foo += $1 } /bar(\d+)/ { foo += $1 } `, }, { "index expression", `counter foo by a, b /(\d)/ { foo[1,$1] = 3 }`, }, { "odd indexes", `counter foo by a,b,c /(\d) (\d)/ { foo[$1,$2][0]++ } `, }, { "implicit int", `counter foo /$/ { foo++ }`, }, { "function return value", `len("foo") > 0 {}`, }, { "conversions", `counter i counter f /(.*)/ { i = int($1) f = float($1) } `, }, { "logical operators", `0 || 1 { } 1 && 0 { } `, }, { "nested binary conditional", `1 != 0 && 0 == 1 { } `, }, {"paren expr", 
` (0) || (1 && 3) { }`}, {"strptime format", ` strptime("2006-01-02 15:04:05", "2006-01-02 15:04:05") `}, {"string concat", ` counter f by s /(.*), (.*)/ { f[$1 + $2]++ } `}, {"namespace", ` counter test /(?P.*)/ { test++ } `}, {"match expr 1", ` /(?P.*)/ { $foo =~ /bar/ { } }`}, {"capref used in def", ` /(?P\d+)/ && $x > 0 { }`}, {"binop compare type conversion", ` gauge var /(?P\d+) (\d+\.\d+)/ { var = $x + $2 }`}, {"binop arith type conversion", ` gauge var /(?P\d+) (\d+\.\d+)/ { var = $x + $2 }`}, {"concat expr 1", ` const X /foo/ /bar/ + X { }`}, {"concat expr 2", ` const X /foo/ X { }`}, {"match expression 3", ` const X /foo/ "a" =~ X { } `}, {"match expr 4", ` /(?P.{6}) (?P.*)/ { $foo =~ $bar { } }`}, {"decorator scopes", ` counter a def decorator { /(.).*/ { next } } @decorator { $1 == "A" { a++ } } `}, {"concat with add_assign", ` text foo /(?P.*)/ { foo += $v } `}, {"decrement", ` counter i /.*/ { i-- }`}, {"stop", ` stop // { stop }`}, {"declare histogram", ` histogram foo buckets 1, 2, 3 /(\d+)/ { foo = $1 }`}, {"match a pattern in cond", ` const N /n/ N { }`}, {"match a pattern in a binary expr in cond", ` const N /n/ N && 1 { }`}, {"negative numbers in capture groups", ` gauge foo /(?P-?\d+)/ { foo += $value_ms / 1000.0 }`}, {"substitution", ` gauge foo /(\d,\d)/ { foo = subst(",", "", $1) }`}, {"regexp subst", ` subst(/\d+/, "d", "1234") `}, } func TestCheckValidPrograms(t *testing.T) { for _, tc := range checkerValidPrograms { tc := tc t.Run(tc.name, func(t *testing.T) { ast, err := parser.Parse(tc.name, strings.NewReader(tc.program)) testutil.FatalIfErr(t, err) ast, err = checker.Check(ast, 0, 0) if *checkerTestDebug { s := parser.Sexp{} s.EmitTypes = true t.Log("Typed AST:\n" + s.Dump(ast)) } if err != nil { t.Errorf("check failed: %s", err) } }) } } var checkerTypeExpressionTests = []struct { name string expr ast.Node expected types.Type }{ { "Int + Int -> Int", &ast.BinaryExpr{ LHS: &ast.IntLit{I: 1}, RHS: &ast.IntLit{I: 1}, Op: parser.PLUS, }, 
types.Int, }, { "Int + Float -> Float", &ast.BinaryExpr{ LHS: &ast.IntLit{I: 1}, RHS: &ast.FloatLit{F: 1.0}, Op: parser.PLUS, }, types.Float, }, { "⍺ + Float -> Float", &ast.BinaryExpr{ LHS: &ast.IDTerm{Symbol: &symbol.Symbol{Name: "i", Kind: symbol.VarSymbol, Type: types.NewVariable()}}, RHS: &ast.CaprefTerm{Symbol: &symbol.Symbol{Kind: symbol.CaprefSymbol, Type: types.Float}}, Op: parser.PLUS, }, types.Float, }, } func TestCheckTypeExpressions(t *testing.T) { for _, tc := range checkerTypeExpressionTests { tc := tc t.Run(tc.name, func(t *testing.T) { ast, err := checker.Check(tc.expr, 0, 0) testutil.FatalIfErr(t, err) if !testutil.ExpectNoDiff(t, tc.expected, ast.Type().Root()) { s := parser.Sexp{} s.EmitTypes = true t.Log("Typed AST:\n" + s.Dump(ast)) } }) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/codegen/000077500000000000000000000000001460063571700235025ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/codegen/codegen.go000066400000000000000000000405431460063571700254430ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. package codegen import ( "fmt" "math" "regexp" "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/runtime/code" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/errors" "github.com/google/mtail/internal/runtime/compiler/parser" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/google/mtail/internal/runtime/compiler/symbol" "github.com/google/mtail/internal/runtime/compiler/types" ) // codegen represents a code generator. type codegen struct { name string // Name of the program. errors errors.ErrorList // Any compile errors detected are accumulated here. obj code.Object // The object to return, if successful. 
l []int // Label table for recording jump destinations. decos []*ast.DecoStmt // Decorator stack to unwind when entering decorated blocks. } // CodeGen is the function that compiles the program to bytecode and data. func CodeGen(name string, n ast.Node) (*code.Object, error) { c := &codegen{name: name} _ = ast.Walk(c, n) c.writeJumps() if len(c.errors) > 0 { return nil, c.errors } return &c.obj, nil } func (c *codegen) errorf(pos *position.Position, format string, args ...interface{}) { e := "Internal compiler error, aborting compilation: " + fmt.Sprintf(format, args...) c.errors.Add(pos, e) } func (c *codegen) emit(n ast.Node, opcode code.Opcode, operand interface{}) { glog.V(2).Infof("emitting `%s %v' from line %d node %#v\n", opcode, operand, n.Pos().Line, n) c.obj.Program = append(c.obj.Program, code.Instr{opcode, operand, n.Pos().Line}) } // newLabel creates a new label to jump to. func (c *codegen) newLabel() (l int) { l = len(c.l) c.l = append(c.l, -1) return } // setLabel points a label to the next instruction. func (c *codegen) setLabel(l int) { c.l[l] = c.pc() + 1 } // pc returns the program offset of the last instruction. func (c *codegen) pc() int { return len(c.obj.Program) - 1 } func (c *codegen) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) { switch n := node.(type) { case *ast.VarDecl: var name string if n.ExportedName != "" { name = n.ExportedName } else { name = n.Name } // If the Type is not in the map, then default to metrics.Int. This is // a hack for metrics that no type can be inferred, retaining // historical behaviour. 
t := n.Type() if types.IsDimension(t) { t = t.(*types.Operator).Args[len(t.(*types.Operator).Args)-1] } var dtyp metrics.Type switch { case types.Equals(types.Float, t): dtyp = metrics.Float case types.Equals(types.String, t): dtyp = metrics.String case types.Equals(types.Buckets, t): dtyp = metrics.Buckets default: if !types.IsComplete(t) { glog.Infof("Incomplete type %v for %#v", t, n) } dtyp = metrics.Int } m := metrics.NewMetric(name, c.name, n.Kind, dtyp, n.Keys...) m.SetSource(n.Pos().String()) // Scalar counters can be initialized to zero. Dimensioned counters we // don't know the values of the labels yet. Gauges and Timers we can't // assume start at zero. if len(n.Keys) == 0 && n.Kind == metrics.Counter { // Calling GetDatum here causes the storage to be allocated. d, err := m.GetDatum() if err != nil { c.errorf(n.Pos(), "%s", err) return nil, n } // Initialize to zero at the zero time. switch dtyp { case metrics.Int: datum.SetInt(d, 0, time.Unix(0, 0)) case metrics.Float: datum.SetFloat(d, 0, time.Unix(0, 0)) default: c.errorf(n.Pos(), "Can't initialize to zero a %#v", n) return nil, n } } if n.Kind == metrics.Histogram { if len(n.Buckets) < 2 { c.errorf(n.Pos(), "a histogram need at least two boundaries") return nil, n } if n.Buckets[0] > 0 { m.Buckets = append(m.Buckets, datum.Range{0, n.Buckets[0]}) } min := n.Buckets[0] for _, max := range n.Buckets[1:] { if max <= min { c.errorf(n.Pos(), "buckets boundaries must be sorted") return nil, n } m.Buckets = append(m.Buckets, datum.Range{min, max}) min = max } m.Buckets = append(m.Buckets, datum.Range{min, math.Inf(+1)}) if len(n.Keys) == 0 { // Calling GetDatum here causes the storage to be allocated. _, err := m.GetDatum() if err != nil { c.errorf(n.Pos(), "%s", err) return nil, n } } } m.Hidden = n.Hidden // int is int64 only on 64bit platforms. To be fair MaxInt is a // ridiculously excessive size for this anyway, you're going to use 2GiB // x sizeof(datum) in a single metric. 
if n.Limit > math.MaxInt { c.errorf(n.Pos(), "limit %d too large; max %d", n.Limit, math.MaxInt) return nil, n } m.Limit = int(n.Limit) n.Symbol.Binding = m n.Symbol.Addr = len(c.obj.Metrics) c.obj.Metrics = append(c.obj.Metrics, m) return nil, n case *ast.CondStmt: lElse := c.newLabel() lEnd := c.newLabel() if n.Cond != nil { n.Cond = ast.Walk(c, n.Cond) c.emit(n, code.Jnm, lElse) } // Set matched flag false for children. c.emit(n, code.Setmatched, false) n.Truth = ast.Walk(c, n.Truth) // Re-set matched flag to true for rest of current block. c.emit(n, code.Setmatched, true) if n.Else != nil { c.emit(n, code.Jmp, lEnd) } c.setLabel(lElse) if n.Else != nil { n.Else = ast.Walk(c, n.Else) } c.setLabel(lEnd) return nil, n case *ast.PatternExpr: re, err := regexp.Compile(n.Pattern) if err != nil { c.errorf(n.Pos(), "%s", err) return nil, n } c.obj.Regexps = append(c.obj.Regexps, re) // Store the location of this regular expression in the PatternExpr n.Index = len(c.obj.Regexps) - 1 return nil, n case *ast.PatternFragment: // Skip, const pattern fragments are concatenated into PatternExpr storage, not executable. 
return nil, n case *ast.StringLit: c.obj.Strings = append(c.obj.Strings, n.Text) c.emit(n, code.Str, len(c.obj.Strings)-1) case *ast.IntLit: c.emit(n, code.Push, n.I) case *ast.FloatLit: c.emit(n, code.Push, n.F) case *ast.StopStmt: c.emit(n, code.Stop, nil) case *ast.IDTerm: if n.Symbol == nil || n.Symbol.Kind != symbol.VarSymbol { break } if n.Symbol.Binding == nil { c.errorf(n.Pos(), "No metric bound to identifier %q", n.Name) return nil, n } c.emit(n, code.Mload, n.Symbol.Addr) m := n.Symbol.Binding.(*metrics.Metric) c.emit(n, code.Dload, len(m.Keys)) if !n.Lvalue { t := n.Type() if types.IsDimension(t) { l := len(t.(*types.Operator).Args) t = t.(*types.Operator).Args[l-1] } switch { case types.Equals(t, types.Float): c.emit(n, code.Fget, nil) case types.Equals(t, types.Int): c.emit(n, code.Iget, nil) case types.Equals(t, types.String): c.emit(n, code.Sget, nil) default: c.errorf(n.Pos(), "invalid type for get %q in %#v", n.Type(), n) return nil, n } } case *ast.CaprefTerm: if n.Symbol == nil || n.Symbol.Binding == nil { c.errorf(n.Pos(), "No regular expression bound to capref %q", n.Name) return nil, n } rn := n.Symbol.Binding.(*ast.PatternExpr) // rn.index contains the index of the compiled regular expression object // in the re slice of the object code c.emit(n, code.Push, rn.Index) // n.Symbol.Addr is the capture group offset c.emit(n, code.Capref, n.Symbol.Addr) if types.Equals(n.Type(), types.Float) { c.emit(n, code.S2f, nil) } else if types.Equals(n.Type(), types.Int) { c.emit(n, code.S2i, nil) } case *ast.IndexedExpr: if args, ok := n.Index.(*ast.ExprList); ok { for _, arg := range args.Children { _ = ast.Walk(c, arg) if types.Equals(arg.Type(), types.Float) { c.emit(n, code.F2s, nil) } else if types.Equals(arg.Type(), types.Int) { c.emit(n, code.I2s, nil) } } } ast.Walk(c, n.LHS) return nil, n case *ast.DecoDecl: // Do nothing, defs are inlined. 
return nil, n case *ast.DecoStmt: // Put the current block on the stack decoLen := len(c.decos) c.decos = append(c.decos, n) if n.Decl == nil { c.errorf(n.Pos(), "No definition found for decorator %q", n.Name) return nil, n } // then iterate over the decorator's nodes ast.Walk(c, n.Decl.Block) if len(c.decos) > decoLen { glog.V(1).Info("Too many blocks on stack, was there no `next' in the last one?") } return nil, n case *ast.NextStmt: // Visit the 'next' block on the decorated block stack top := len(c.decos) - 1 deco := c.decos[top] c.decos = c.decos[:top] ast.Walk(c, deco.Block) return nil, n case *ast.OtherwiseStmt: c.emit(n, code.Otherwise, nil) case *ast.DelStmt: if n.Expiry > 0 { c.emit(n, code.Push, n.Expiry) } ast.Walk(c, n.N) // overwrite the dload instruction pc := c.pc() c.obj.Program[pc].Opcode = code.Del if n.Expiry > 0 { c.obj.Program[pc].Opcode = code.Expire } case *ast.BinaryExpr: switch n.Op { case parser.AND: lFalse := c.newLabel() lEnd := c.newLabel() ast.Walk(c, n.LHS) c.emit(n, code.Jnm, lFalse) ast.Walk(c, n.RHS) c.emit(n, code.Jnm, lFalse) c.emit(n, code.Push, true) c.emit(n, code.Jmp, lEnd) c.setLabel(lFalse) c.emit(n, code.Push, false) c.setLabel(lEnd) return nil, n case parser.OR: lTrue := c.newLabel() lEnd := c.newLabel() ast.Walk(c, n.LHS) c.emit(n, code.Jm, lTrue) ast.Walk(c, n.RHS) c.emit(n, code.Jm, lTrue) c.emit(n, code.Push, false) c.emit(n, code.Jmp, lEnd) c.setLabel(lTrue) c.emit(n, code.Push, true) c.setLabel(lEnd) return nil, n case parser.ADD_ASSIGN: if !types.Equals(n.Type(), types.Int) { // Double-emit the lhs so that it can be assigned to ast.Walk(c, n.LHS) } default: // Didn't handle it, let normal walk proceed return c, n } } return c, node } var typedOperators = map[int]map[types.Type]code.Opcode{ parser.PLUS: { types.Int: code.Iadd, types.Float: code.Fadd, types.String: code.Cat, types.Pattern: code.Cat, }, parser.MINUS: { types.Int: code.Isub, types.Float: code.Fsub, }, parser.MUL: { types.Int: code.Imul, types.Float: 
code.Fmul, }, parser.DIV: { types.Int: code.Idiv, types.Float: code.Fdiv, }, parser.MOD: { types.Int: code.Imod, types.Float: code.Fmod, }, parser.POW: { types.Int: code.Ipow, types.Float: code.Fpow, }, parser.ASSIGN: { types.Int: code.Iset, types.Float: code.Fset, types.String: code.Sset, }, } func getOpcodeForType(op int, opT types.Type) (code.Opcode, error) { opmap, ok := typedOperators[op] if !ok { return -1, errors.Errorf("no typed operator for type %v", op) } for t, opcode := range opmap { if types.Equals(t, opT) { return opcode, nil } } return -1, errors.Errorf("no opcode for type %s in op %v", opT, op) } var builtin = map[string]code.Opcode{ "getfilename": code.Getfilename, "len": code.Length, "settime": code.Settime, "strptime": code.Strptime, "strtol": code.S2i, "subst": code.Subst, "timestamp": code.Timestamp, "tolower": code.Tolower, } func (c *codegen) VisitAfter(node ast.Node) ast.Node { switch n := node.(type) { case *ast.BuiltinExpr: arglen := 0 if n.Args != nil { arglen = len(n.Args.(*ast.ExprList).Children) } switch n.Name { case "bool": // TODO(jaq): Nothing, no support in VM yet. 
case "int", "float", "string": // len args should be 1 if arglen > 1 { c.errorf(n.Pos(), "too many arguments to builtin %q: %#v", n.Name, n) return n } if err := c.emitConversion(n, n.Args.(*ast.ExprList).Children[0].Type(), n.Type()); err != nil { c.errorf(n.Pos(), "%s on node %v", err.Error(), n) return n } case "subst": if types.Equals(n.Args.(*ast.ExprList).Children[0].Type(), types.Pattern) { index := n.Args.(*ast.ExprList).Children[0].(*ast.PatternExpr).Index c.emit(n, code.Push, index) c.emit(n, code.Rsubst, arglen) } else { c.emit(n, code.Subst, arglen) } default: c.emit(n, builtin[n.Name], arglen) } case *ast.UnaryExpr: switch n.Op { case parser.INC: c.emit(n, code.Inc, nil) case parser.DEC: c.emit(n, code.Dec, nil) case parser.NOT: c.emit(n, code.Neg, nil) case parser.MATCH: index := n.Expr.(*ast.PatternExpr).Index c.emit(n, code.Match, index) } case *ast.BinaryExpr: switch n.Op { case parser.LT, parser.GT, parser.LE, parser.GE, parser.EQ, parser.NE: lFail := c.newLabel() lEnd := c.newLabel() var cmpArg int var jumpOp code.Opcode switch n.Op { case parser.LT: cmpArg = -1 jumpOp = code.Jnm case parser.GT: cmpArg = 1 jumpOp = code.Jnm case parser.LE: cmpArg = 1 jumpOp = code.Jm case parser.GE: cmpArg = -1 jumpOp = code.Jm case parser.EQ: cmpArg = 0 jumpOp = code.Jnm case parser.NE: cmpArg = 0 jumpOp = code.Jm } cmpOp := code.Cmp if types.Equals(n.LHS.Type(), n.RHS.Type()) { switch n.LHS.Type() { case types.Float: cmpOp = code.Fcmp case types.Int: cmpOp = code.Icmp case types.String: cmpOp = code.Scmp default: cmpOp = code.Cmp } } c.emit(n, cmpOp, cmpArg) c.emit(n, jumpOp, lFail) c.emit(n, code.Push, true) c.emit(n, code.Jmp, lEnd) c.setLabel(lFail) c.emit(n, code.Push, false) c.setLabel(lEnd) case parser.ADD_ASSIGN: // When operand is not nil, inc pops the delta from the stack. 
switch { case types.Equals(n.Type(), types.Int): c.emit(n, code.Inc, 0) case types.Equals(n.Type(), types.Float), types.Equals(n.Type(), types.String): // Already walked the lhs and rhs of this expression opcode, err := getOpcodeForType(parser.PLUS, n.Type()) if err != nil { c.errorf(n.Pos(), "%s", err) return n } c.emit(n, opcode, nil) // And a second lhs opcode, err = getOpcodeForType(parser.ASSIGN, n.Type()) if err != nil { c.errorf(n.Pos(), "%s", err) return n } c.emit(n, opcode, nil) default: c.errorf(n.Pos(), "invalid type for add-assignment: %v", n.Type()) return n } case parser.PLUS, parser.MINUS, parser.MUL, parser.DIV, parser.MOD, parser.POW, parser.ASSIGN: opcode, err := getOpcodeForType(n.Op, n.Type()) if err != nil { c.errorf(n.Pos(), "%s", err) return n } c.emit(n, opcode, nil) case parser.BITAND: c.emit(n, code.And, nil) case parser.BITOR: c.emit(n, code.Or, nil) case parser.XOR: c.emit(n, code.Xor, nil) case parser.SHL: c.emit(n, code.Shl, nil) case parser.SHR: c.emit(n, code.Shr, nil) case parser.MATCH, parser.NOT_MATCH: switch v := n.RHS.(type) { case *ast.PatternExpr: index := v.Index c.emit(n, code.Smatch, index) default: c.errorf(n.Pos(), "unexpected rhs expression for match %#v", n.RHS) return n } if n.Op == parser.NOT_MATCH { c.emit(n, code.Not, nil) } default: c.errorf(n.Pos(), "unexpected op %v", n.Op) } case *ast.ConvExpr: if err := c.emitConversion(n, n.N.Type(), n.Type()); err != nil { c.errorf(n.Pos(), "internal error: %s on node %v", err.Error(), n) return n } } return node } func (c *codegen) emitConversion(n ast.Node, inType, outType types.Type) error { glog.V(2).Infof("Conversion: %q to %q", inType, outType) switch { case types.Equals(types.Int, inType) && types.Equals(types.Float, outType): c.emit(n, code.I2f, nil) case types.Equals(types.String, inType) && types.Equals(types.Float, outType): c.emit(n, code.S2f, nil) case types.Equals(types.String, inType) && types.Equals(types.Int, outType): c.emit(n, code.S2i, nil) case 
types.Equals(types.Float, inType) && types.Equals(types.String, outType): c.emit(n, code.F2s, nil) case types.Equals(types.Int, inType) && types.Equals(types.String, outType): c.emit(n, code.I2s, nil) case types.Equals(types.Pattern, inType) && types.Equals(types.Bool, outType): // nothing, pattern is implicit bool case types.Equals(inType, outType): // Nothing; no-op. default: return errors.Errorf("can't convert %q to %q", inType, outType) } return nil } func (c *codegen) writeJumps() { for j, i := range c.obj.Program { switch i.Opcode { case code.Jmp, code.Jm, code.Jnm: index := i.Operand.(int) if index > len(c.l) { c.errorf(nil, "no jump at label %v, table is %v", i.Operand, c.l) continue } offset := c.l[index] if offset < 0 { c.errorf(nil, "offset for label %v is negative, table is %v", i.Operand, c.l) continue } c.obj.Program[j].Operand = c.l[index] } } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/codegen/codegen_test.go000066400000000000000000000522071460063571700265020ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package codegen_test import ( "flag" "strings" "testing" "time" "github.com/google/mtail/internal/runtime/code" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/checker" "github.com/google/mtail/internal/runtime/compiler/codegen" "github.com/google/mtail/internal/runtime/compiler/parser" "github.com/google/mtail/internal/testutil" ) var codegenTestDebug = flag.Bool("codegen_test_debug", false, "Log ASTs and debugging information ") var testCodeGenPrograms = []struct { name string source string prog []code.Instr // expected bytecode }{ // Composite literals require too many explicit conversions. 
{ "simple line counter", "counter lines_total\n/$/ { lines_total++\n }\n", []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 7, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 1}, {code.Dload, 0, 1}, {code.Inc, nil, 1}, {code.Setmatched, true, 1}, }, }, { "count a", "counter a_count\n/a$/ { a_count++\n }\n", []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 7, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 1}, {code.Dload, 0, 1}, {code.Inc, nil, 1}, {code.Setmatched, true, 1}, }, }, { "strptime and capref", "counter foo\n" + "/(.*)/ { strptime($1, \"2006-01-02T15:04:05\")\n" + "foo++\n}\n", []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 11, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 1}, {code.Capref, 1, 1}, {code.Str, 0, 1}, {code.Strptime, 2, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "strptime and named capref", "counter foo\n" + "/(?P.*)/ { strptime($date, \"2006-01-02T15:04:05\")\n" + "foo++\n }\n", []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 11, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 1}, {code.Capref, 1, 1}, {code.Str, 0, 1}, {code.Strptime, 2, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "inc by and set", "counter foo\ncounter bar\n" + "/([0-9]+)/ {\n" + "foo += $1\n" + "bar = $1\n" + "}\n", []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 16, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.S2i, nil, 3}, {code.Inc, 0, 3}, {code.Mload, 1, 4}, {code.Dload, 0, 4}, {code.Push, 0, 4}, {code.Capref, 1, 4}, {code.S2i, nil, 4}, {code.Iset, nil, 4}, {code.Setmatched, true, 2}, }, }, { "cond expr gt", "counter foo\n" + "1 > 0 {\n" + " foo++\n" + "}\n", []code.Instr{ {code.Push, int64(1), 1}, {code.Push, int64(0), 1}, {code.Icmp, 1, 1}, {code.Jnm, 6, 1}, {code.Push, true, 1}, {code.Jmp, 7, 1}, {code.Push, false, 1}, {code.Jnm, 13, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, 
{code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "cond expr lt", "counter foo\n" + "1 < 0 {\n" + " foo++\n" + "}\n", []code.Instr{ {code.Push, int64(1), 1}, {code.Push, int64(0), 1}, {code.Icmp, -1, 1}, {code.Jnm, 6, 1}, {code.Push, true, 1}, {code.Jmp, 7, 1}, {code.Push, false, 1}, {code.Jnm, 13, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "cond expr eq", "counter foo\n" + "1 == 0 {\n" + " foo++\n" + "}\n", []code.Instr{ {code.Push, int64(1), 1}, {code.Push, int64(0), 1}, {code.Icmp, 0, 1}, {code.Jnm, 6, 1}, {code.Push, true, 1}, {code.Jmp, 7, 1}, {code.Push, false, 1}, {code.Jnm, 13, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "cond expr le", "counter foo\n" + "1 <= 0 {\n" + " foo++\n" + "}\n", []code.Instr{ {code.Push, int64(1), 1}, {code.Push, int64(0), 1}, {code.Icmp, 1, 1}, {code.Jm, 6, 1}, {code.Push, true, 1}, {code.Jmp, 7, 1}, {code.Push, false, 1}, {code.Jnm, 13, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "cond expr ge", "counter foo\n" + "1 >= 0 {\n" + " foo++\n" + "}\n", []code.Instr{ {code.Push, int64(1), 1}, {code.Push, int64(0), 1}, {code.Icmp, -1, 1}, {code.Jm, 6, 1}, {code.Push, true, 1}, {code.Jmp, 7, 1}, {code.Push, false, 1}, {code.Jnm, 13, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "cond expr ne", "counter foo\n" + "1 != 0 {\n" + " foo++\n" + "}\n", []code.Instr{ {code.Push, int64(1), 1}, {code.Push, int64(0), 1}, {code.Icmp, 0, 1}, {code.Jm, 6, 1}, {code.Push, true, 1}, {code.Jmp, 7, 1}, {code.Push, false, 1}, {code.Jnm, 13, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "nested cond", "counter foo\n" 
+ "/(\\d+)/ {\n" + " $1 <= 1 {\n" + " foo++\n" + " }\n" + "}\n", []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 19, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2i, nil, 2}, {code.Push, int64(1), 2}, {code.Icmp, 1, 2}, {code.Jm, 11, 2}, {code.Push, true, 2}, {code.Jmp, 12, 2}, {code.Push, false, 2}, {code.Jnm, 18, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Inc, nil, 3}, {code.Setmatched, true, 2}, {code.Setmatched, true, 1}, }, }, { "deco", "counter foo\n" + "counter bar\n" + "def fooWrap {\n" + " /.*/ {\n" + " foo++\n" + " next\n" + " }\n" + "}\n" + "" + "@fooWrap { bar++\n }\n", []code.Instr{ {code.Match, 0, 3}, {code.Jnm, 10, 3}, {code.Setmatched, false, 3}, {code.Mload, 0, 4}, {code.Dload, 0, 4}, {code.Inc, nil, 4}, {code.Mload, 1, 8}, {code.Dload, 0, 8}, {code.Inc, nil, 8}, {code.Setmatched, true, 3}, }, }, { "length", "len(\"foo\") > 0 {\n" + "}\n", []code.Instr{ {code.Str, 0, 0}, {code.Length, 1, 0}, {code.Push, int64(0), 0}, {code.Cmp, 1, 0}, {code.Jnm, 7, 0}, {code.Push, true, 0}, {code.Jmp, 8, 0}, {code.Push, false, 0}, {code.Jnm, 11, 0}, {code.Setmatched, false, 0}, {code.Setmatched, true, 0}, }, }, { "bitwise", ` gauge a a = 1 & 7 ^ 15 | 8 a = ~ 16 << 2 a = 1 >> 20 `, []code.Instr{ {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, int64(1), 3}, {code.Push, int64(7), 3}, {code.And, nil, 3}, {code.Push, int64(15), 3}, {code.Xor, nil, 3}, {code.Push, int64(8), 3}, {code.Or, nil, 3}, {code.Iset, nil, 3}, {code.Mload, 0, 4}, {code.Dload, 0, 4}, {code.Push, int64(16), 4}, {code.Neg, nil, 4}, {code.Push, int64(2), 4}, {code.Shl, nil, 4}, {code.Iset, nil, 4}, {code.Mload, 0, 5}, {code.Dload, 0, 5}, {code.Push, int64(1), 5}, {code.Push, int64(20), 5}, {code.Shr, nil, 5}, {code.Iset, nil, 5}, }, }, { "pow", ` gauge a /(\d+) (\d+)/ { a = $1 ** $2 } `, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 14, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, 0, 3}, 
{code.Capref, 1, 3}, {code.S2i, nil, 3}, {code.Push, 0, 3}, {code.Capref, 2, 3}, {code.S2i, nil, 3}, {code.Ipow, nil, 3}, {code.Iset, nil, 3}, {code.Setmatched, true, 2}, }, }, { "indexed expr", ` counter a by b a["string"]++ `, []code.Instr{ {code.Str, 0, 2}, {code.Mload, 0, 2}, {code.Dload, 1, 2}, {code.Inc, nil, 2}, }, }, { "strtol", ` strtol("deadbeef", 16) `, []code.Instr{ {code.Str, 0, 1}, {code.Push, int64(16), 1}, {code.S2i, 2, 1}, }, }, { "float", ` 20.0 `, []code.Instr{ {code.Push, 20.0, 1}, }, }, { "otherwise", ` counter a otherwise { a++ } `, []code.Instr{ {code.Otherwise, nil, 2}, {code.Jnm, 7, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Inc, nil, 3}, {code.Setmatched, true, 2}, }, }, { "cond else", `counter foo counter bar 1 > 0 { foo++ } else { bar++ }`, []code.Instr{ {code.Push, int64(1), 2}, {code.Push, int64(0), 2}, {code.Icmp, 1, 2}, {code.Jnm, 6, 2}, {code.Push, true, 2}, {code.Jmp, 7, 2}, {code.Push, false, 2}, {code.Jnm, 14, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Inc, nil, 3}, {code.Setmatched, true, 2}, {code.Jmp, 17, 2}, {code.Mload, 1, 5}, {code.Dload, 0, 5}, {code.Inc, nil, 5}, }, }, { "mod", ` gauge a a = 3 % 1 `, []code.Instr{ {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Push, int64(3), 2}, {code.Push, int64(1), 2}, {code.Imod, nil, 2}, {code.Iset, nil, 2}, }, }, { "del", ` counter a by b del a["string"] `, []code.Instr{ {code.Str, 0, 2}, {code.Mload, 0, 2}, {code.Del, 1, 2}, }, }, { "del after", ` counter a by b del a["string"] after 1h `, []code.Instr{ {code.Push, time.Hour, 2}, {code.Str, 0, 2}, {code.Mload, 0, 2}, {code.Expire, 1, 2}, }, }, { "types", ` gauge i gauge f /(\d+)/ { i = $1 } /(\d+\.\d+)/ { f = $1 } `, []code.Instr{ {code.Match, 0, 3}, {code.Jnm, 10, 3}, {code.Setmatched, false, 3}, {code.Mload, 0, 4}, {code.Dload, 0, 4}, {code.Push, 0, 4}, {code.Capref, 1, 4}, {code.S2i, nil, 4}, {code.Iset, nil, 4}, {code.Setmatched, true, 3}, {code.Match, 1, 
6}, {code.Jnm, 20, 6}, {code.Setmatched, false, 6}, {code.Mload, 1, 7}, {code.Dload, 0, 7}, {code.Push, 1, 7}, {code.Capref, 1, 7}, {code.S2f, nil, 7}, {code.Fset, nil, 7}, {code.Setmatched, true, 6}, }, }, { "getfilename", ` getfilename() `, []code.Instr{ {code.Getfilename, 0, 1}, }, }, { "dimensioned counter", `counter c by a,b,c /(\d) (\d) (\d)/ { c[$1,$2][$3]++ } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 13, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.Push, 0, 2}, {code.Capref, 2, 2}, {code.Push, 0, 2}, {code.Capref, 3, 2}, {code.Mload, 0, 2}, {code.Dload, 3, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "string to int", `counter c /(.*)/ { c = int($1) } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 10, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2i, nil, 2}, {code.Iset, nil, 2}, {code.Setmatched, true, 1}, }, }, { "int to float", `counter c /(\d)/ { c = float($1) } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 10, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2f, nil, 2}, {code.Fset, nil, 2}, {code.Setmatched, true, 1}, }, }, { "string to float", `counter c /(.*)/ { c = float($1) } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 10, 1}, {code.Setmatched, false, 1}, {code.Mload, 0, 2}, {code.Dload, 0, 2}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2f, nil, 2}, {code.Fset, nil, 2}, {code.Setmatched, true, 1}, }, }, { "float to string", `counter c by a /(\d+\.\d+)/ { c[string($1)] ++ } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 11, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2f, nil, 2}, {code.F2s, nil, 2}, {code.Mload, 0, 2}, {code.Dload, 1, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "int to string", `counter c by a /(\d+)/ { c[string($1)] ++ } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 11, 
1}, {code.Setmatched, false, 1}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2i, nil, 2}, {code.I2s, nil, 2}, {code.Mload, 0, 2}, {code.Dload, 1, 2}, {code.Inc, nil, 2}, {code.Setmatched, true, 1}, }, }, { "nested comparisons", `counter foo /(.*)/ { $1 == "foo" || $1 == "bar" { foo++ } } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 31, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.Str, 0, 2}, {code.Scmp, 0, 2}, {code.Jnm, 10, 2}, {code.Push, true, 2}, {code.Jmp, 11, 2}, {code.Push, false, 2}, {code.Jm, 23, 2}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.Str, 1, 2}, {code.Scmp, 0, 2}, {code.Jnm, 19, 2}, {code.Push, true, 2}, {code.Jmp, 20, 2}, {code.Push, false, 2}, {code.Jm, 23, 2}, {code.Push, false, 2}, {code.Jmp, 24, 2}, {code.Push, true, 2}, {code.Jnm, 30, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Inc, nil, 3}, {code.Setmatched, true, 2}, {code.Setmatched, true, 1}, }, }, { "string concat", ` counter f by s /(.*), (.*)/ { f[$1 + $2]++ } `, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 12, 2}, {code.Setmatched, false, 2}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.Push, 0, 3}, {code.Capref, 2, 3}, {code.Cat, nil, 3}, {code.Mload, 0, 3}, {code.Dload, 1, 3}, {code.Inc, nil, 3}, {code.Setmatched, true, 2}, }, }, { "add assign float", ` gauge foo /(\d+\.\d+)/ { foo += $1 } `, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 13, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.S2f, nil, 3}, {code.Fadd, nil, 3}, {code.Fset, nil, 3}, {code.Setmatched, true, 2}, }, }, { "match expression", ` counter foo /(.*)/ { $1 =~ /asdf/ { foo++ } }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 13, 2}, {code.Setmatched, false, 2}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.Smatch, 1, 3}, {code.Jnm, 12, 3}, {code.Setmatched, false, 3}, {code.Mload, 0, 4}, {code.Dload, 0, 4}, {code.Inc, nil, 4}, 
{code.Setmatched, true, 3}, {code.Setmatched, true, 2}, }, }, { "negative match expression", ` counter foo /(.*)/ { $1 !~ /asdf/ { foo++ } }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 14, 2}, {code.Setmatched, false, 2}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.Smatch, 1, 3}, {code.Not, nil, 3}, {code.Jnm, 13, 3}, {code.Setmatched, false, 3}, {code.Mload, 0, 4}, {code.Dload, 0, 4}, {code.Inc, nil, 4}, {code.Setmatched, true, 3}, {code.Setmatched, true, 2}, }, }, { "capref used in def", ` /(?P\d+)/ && $x > 5 { }`, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 14, 1}, {code.Push, 0, 1}, {code.Capref, 1, 1}, {code.S2i, nil, 1}, {code.Push, int64(5), 1}, {code.Icmp, 1, 1}, {code.Jnm, 10, 1}, {code.Push, true, 1}, {code.Jmp, 11, 1}, {code.Push, false, 1}, {code.Jnm, 14, 1}, {code.Push, true, 1}, {code.Jmp, 15, 1}, {code.Push, false, 1}, {code.Jnm, 18, 1}, {code.Setmatched, false, 1}, {code.Setmatched, true, 1}, }, }, { "binop arith type conversion", ` gauge var /(?P\d+) (\d+\.\d+)/ { var = $x + $2 }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 15, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.S2i, nil, 3}, {code.I2f, nil, 3}, {code.Push, 0, 3}, {code.Capref, 2, 3}, {code.S2f, nil, 3}, {code.Fadd, nil, 3}, {code.Fset, nil, 3}, {code.Setmatched, true, 2}, }, }, { "binop compare type conversion", ` counter var /(?P\d+) (\d+\.\d+)/ { $x > $2 { var++ } }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 22, 2}, {code.Setmatched, false, 2}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.S2i, nil, 3}, {code.I2f, nil, 3}, {code.Push, 0, 3}, {code.Capref, 2, 3}, {code.S2f, nil, 3}, {code.Fcmp, 1, 3}, {code.Jnm, 14, 3}, {code.Push, true, 3}, {code.Jmp, 15, 3}, {code.Push, false, 3}, {code.Jnm, 21, 3}, {code.Setmatched, false, 3}, {code.Mload, 0, 4}, {code.Dload, 0, 4}, {code.Inc, nil, 4}, {code.Setmatched, true, 3}, {code.Setmatched, true, 2}, }, }, {"set string", ` text foo /(.*)/ { foo = $1 } `, 
[]code.Instr{ {code.Match, 0, 2}, {code.Jnm, 9, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.Sset, nil, 3}, {code.Setmatched, true, 2}, }}, { "concat to text", ` text foo /(?P.*)/ { foo += $v }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 12, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.Cat, nil, 3}, {code.Sset, nil, 3}, {code.Setmatched, true, 2}, }, }, {"decrement", ` counter i // { i-- }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 7, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Dec, nil, 3}, {code.Setmatched, true, 2}, }}, {"capref and settime", ` /(\d+)/ { settime($1) }`, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 8, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2i, nil, 2}, {code.Settime, 1, 2}, {code.Setmatched, true, 1}, }}, {"cast to self", ` /(\d+)/ { settime(int($1)) }`, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 8, 1}, {code.Setmatched, false, 1}, {code.Push, 0, 2}, {code.Capref, 1, 2}, {code.S2i, nil, 2}, {code.Settime, 1, 2}, {code.Setmatched, true, 1}, }}, {"stop", ` stop `, []code.Instr{ {code.Stop, nil, 1}, }}, {"stop inside", ` // { stop } `, []code.Instr{ {code.Match, 0, 1}, {code.Jnm, 5, 1}, {code.Setmatched, false, 1}, {code.Stop, nil, 2}, {code.Setmatched, true, 1}, }}, { "nested decorators", `def b { def b { next } @b { next } } @b { }`, nil, }, {"negative numbers in capture groups", ` gauge foo /(?P-?\d+)/ { foo += $value_ms / 1000.0 }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 16, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.S2i, nil, 3}, {code.I2f, nil, 3}, {code.Push, 1000.0, 3}, {code.Fdiv, nil, 3}, {code.Fadd, nil, 3}, {code.Fset, nil, 3}, {code.Setmatched, 
true, 2}, }}, {"substitution", ` gauge foo /(\d+,\d)/ { foo = int(subst(",", "", $1)) }`, []code.Instr{ {code.Match, 0, 2}, {code.Jnm, 13, 2}, {code.Setmatched, false, 2}, {code.Mload, 0, 3}, {code.Dload, 0, 3}, {code.Str, 0, 3}, {code.Str, 1, 3}, {code.Push, 0, 3}, {code.Capref, 1, 3}, {code.Subst, 3, 3}, {code.S2i, nil, 3}, {code.Iset, nil, 3}, {code.Setmatched, true, 2}, }}, {"const term as pattern", ` const A /n/ A && 1 { } `, []code.Instr{ {code.Match, 0, 0}, {code.Jnm, 6, 0}, {code.Push, int64(1), 2}, {code.Jnm, 6, 0}, {code.Push, true, 0}, {code.Jmp, 7, 0}, {code.Push, false, 0}, {code.Jnm, 10, 0}, {code.Setmatched, false, 0}, {code.Setmatched, true, 0}, }}, } func TestCodeGenFromSource(t *testing.T) { for _, tc := range testCodeGenPrograms { tc := tc t.Run(tc.name, func(t *testing.T) { ast, err := parser.Parse(tc.name, strings.NewReader(tc.source)) testutil.FatalIfErr(t, err) ast, err = checker.Check(ast, 0, 0) if *codegenTestDebug { s := parser.Sexp{} s.EmitTypes = true t.Log("Typed AST:\n" + s.Dump(ast)) } testutil.FatalIfErr(t, err) obj, err := codegen.CodeGen(tc.name, ast) testutil.FatalIfErr(t, err) testutil.ExpectNoDiff(t, tc.prog, obj.Program, testutil.AllowUnexported(code.Instr{})) }) } } var testCodeGenASTs = []struct { name string ast ast.Node // partial AST to be converted to bytecode prog []code.Instr // expected bytecode }{ { name: "subst", ast: &ast.BuiltinExpr{ Name: "subst", Args: &ast.ExprList{ Children: []ast.Node{ &ast.StringLit{ Text: "old", }, &ast.StringLit{ Text: "new", }, &ast.StringLit{ Text: "value", }, }, }, }, prog: []code.Instr{ {code.Str, 0, 0}, {code.Str, 1, 0}, {code.Str, 2, 0}, {code.Subst, 3, 0}, }, }, { name: "regexp subst", ast: &ast.BuiltinExpr{ Name: "subst", Args: &ast.ExprList{ Children: []ast.Node{ &ast.PatternExpr{ Pattern: "a+", Expr: &ast.PatternLit{ Pattern: "a+", }, }, &ast.StringLit{ Text: "b", }, &ast.StringLit{ Text: "aaaaaa", }, }, }, }, prog: []code.Instr{ {code.Str, 0, 0}, {code.Str, 1, 0}, {code.Push, 0, 
0}, {code.Rsubst, 3, 0}, }, }, } func TestCodeGenFromAST(t *testing.T) { for _, tc := range testCodeGenASTs { tc := tc t.Run(tc.name, func(t *testing.T) { obj, err := codegen.CodeGen(tc.name, tc.ast) testutil.FatalIfErr(t, err) testutil.ExpectNoDiff(t, tc.prog, obj.Program, testutil.AllowUnexported(code.Instr{})) }) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/compiler.go000066400000000000000000000061361460063571700242450ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package compiler import ( "io" "path/filepath" "github.com/golang/glog" "github.com/google/mtail/internal/runtime/code" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/checker" "github.com/google/mtail/internal/runtime/compiler/codegen" "github.com/google/mtail/internal/runtime/compiler/opt" "github.com/google/mtail/internal/runtime/compiler/parser" ) type Compiler struct { emitAst bool emitAstTypes bool maxRegexpLength int maxRecursionDepth int disableOptimisation bool } func New(options ...Option) (*Compiler, error) { c := &Compiler{} if err := c.SetOption(options...); err != nil { return nil, err } return c, nil } func (c *Compiler) SetOption(options ...Option) error { for _, option := range options { if err := option(c); err != nil { return err } } return nil } // Option configures a new Compiler. type Option func(*Compiler) error // EmitAst emits the AST after the parse phase. func EmitAst() Option { return func(c *Compiler) error { c.emitAst = true return nil } } // EmitAstTypes emits the AST with types after the type checking phase. func EmitAstTypes() Option { return func(c *Compiler) error { c.emitAstTypes = true return nil } } // MaxRegexpLength sets the maximum allowable length of a regular expression. 
func MaxRegexpLength(maxRegexpLength int) Option { return func(c *Compiler) error { c.maxRegexpLength = maxRegexpLength return nil } } // MaxRecursionDepth sets the maximum allowable depth of the AST. func MaxRecursionDepth(maxRecursionDepth int) Option { return func(c *Compiler) error { c.maxRecursionDepth = maxRecursionDepth return nil } } // DisableOptimisation disables the optimisation phase. func DisableOptimisation() Option { return func(c *Compiler) error { c.disableOptimisation = true return nil } } // Compile compiles a program from the input into bytecode and data stored in an Object, or a list // of compile errors. func (c *Compiler) Compile(name string, input io.Reader) (obj *code.Object, err error) { name = filepath.Base(name) var ast ast.Node ast, err = parser.Parse(name, input) if err != nil { return } if c.emitAst { s := parser.Sexp{} glog.Infof("%s AST:\n%s", name, s.Dump(ast)) } if !c.disableOptimisation { ast, err = opt.Optimise(ast) if err != nil { return } if c.emitAstTypes { s := parser.Sexp{} glog.Infof("Post optimisation %s AST:\n%s", name, s.Dump(ast)) } } ast, err = checker.Check(ast, c.maxRegexpLength, c.maxRecursionDepth) if err != nil { return } if c.emitAstTypes { s := parser.Sexp{} s.EmitTypes = true glog.Infof("%s AST with Type Annotation:\n%s", name, s.Dump(ast)) } if !c.disableOptimisation { ast, err = opt.Optimise(ast) if err != nil { return } if c.emitAstTypes { s := parser.Sexp{} s.EmitTypes = true glog.Infof("Post optimisation %s AST with Type Annotation:\n%s", name, s.Dump(ast)) } } obj, err = codegen.CodeGen(name, ast) return } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/compiler_test.go000066400000000000000000000017641460063571700253060ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package compiler_test import ( "strings" "testing" "github.com/google/mtail/internal/runtime/compiler" "github.com/google/mtail/internal/testutil" ) func makeCompiler(t *testing.T) *compiler.Compiler { t.Helper() c, err := compiler.New(compiler.EmitAst(), compiler.EmitAstTypes()) testutil.FatalIfErr(t, err) return c } func TestCompileParserError(t *testing.T) { c := makeCompiler(t) r := strings.NewReader("bad program") _, err := c.Compile("test", r) if err == nil { t.Errorf("expected error, got nil") } } func TestCompileCheckerError(t *testing.T) { c := makeCompiler(t) r := strings.NewReader(`// { i++ }`) _, err := c.Compile("test", r) if err == nil { t.Error("expected error, got nil") } } func TestCompileCodegen(t *testing.T) { c := makeCompiler(t) r := strings.NewReader(`counter i // { i++ }`) _, err := c.Compile("test", r) if err != nil { t.Error(err) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/errors/000077500000000000000000000000001460063571700234125ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/errors/errors.go000066400000000000000000000022631460063571700252600ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. package errors import ( "fmt" "strings" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/pkg/errors" ) type compileError struct { pos position.Position msg string } func (e compileError) Error() string { return e.pos.String() + ": " + e.msg } // ErrorList contains a list of compile errors. type ErrorList []*compileError // Add appends an error at a position to the list of errors. func (p *ErrorList) Add(pos *position.Position, msg string) { if pos == nil { pos = &position.Position{"", -1, -1, -1} } *p = append(*p, &compileError{*pos, msg}) } // Append puts an ErrorList on the end of this ErrorList. func (p *ErrorList) Append(l ErrorList) { *p = append(*p, l...) } // ErrorList implements the error interface. 
func (p ErrorList) Error() string { switch len(p) { case 0: return "no errors" case 1: return p[0].Error() } var r strings.Builder for _, e := range p { r.WriteString(fmt.Sprintf("%s\n", e)) } return r.String()[:r.Len()-1] } func Errorf(format string, args ...interface{}) error { return errors.Errorf(format, args...) } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/errors/errors_test.go000066400000000000000000000006301460063571700263130ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. package errors_test import ( "testing" "github.com/google/mtail/internal/runtime/compiler/errors" ) func TestNilErrorPosition(t *testing.T) { e := errors.ErrorList{} e.Add(nil, "error") r := e.Error() expected := ":0:0: error" if r != expected { t.Errorf("want %q, got %q", expected, r) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/fuzz/000077500000000000000000000000001460063571700230745ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/fuzz/const-as-cond.mtail000066400000000000000000000000221460063571700265660ustar00rootroot00000000000000const A /n/ A { } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/opt/000077500000000000000000000000001460063571700227005ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/opt/opt.go000066400000000000000000000076711460063571700240440ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // This file is available under the Apache license. // package opt has a compiler pass for making optimisations on the AST. 
package opt import ( "math" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/errors" "github.com/google/mtail/internal/runtime/compiler/parser" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/google/mtail/internal/runtime/compiler/types" ) func Optimise(n ast.Node) (ast.Node, error) { o := &optimiser{} r := ast.Walk(o, n) if len(o.errors) > 0 { return r, o.errors } return r, nil } type optimiser struct { errors errors.ErrorList } func (o *optimiser) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) { return o, node } func (o *optimiser) VisitAfter(node ast.Node) ast.Node { switch n := node.(type) { case *ast.BinaryExpr: switch lhs := n.LHS.(type) { case *ast.IntLit: switch rhs := n.RHS.(type) { case *ast.IntLit: r := &ast.IntLit{P: *position.Merge(&(lhs.P), &(rhs.P))} switch n.Op { case parser.PLUS: r.I = lhs.I + rhs.I case parser.MINUS: r.I = lhs.I - rhs.I case parser.MUL: r.I = lhs.I * rhs.I case parser.DIV: if rhs.I == 0 { o.errors.Add(n.Pos(), "divide by zero") n.SetType(types.Error) return n } r.I = lhs.I / rhs.I case parser.MOD: if rhs.I == 0 { o.errors.Add(n.Pos(), "mod by zero") n.SetType(types.Error) return n } r.I = lhs.I % rhs.I case parser.POW: r.I = int64(math.Pow(float64(lhs.I), float64(rhs.I))) default: return node } return r case *ast.FloatLit: r := &ast.FloatLit{P: *position.Merge(&(lhs.P), &(rhs.P))} switch n.Op { case parser.PLUS: r.F = float64(lhs.I) + rhs.F case parser.MINUS: r.F = float64(lhs.I) - rhs.F case parser.MUL: r.F = float64(lhs.I) * rhs.F case parser.DIV: if rhs.F == 0 { o.errors.Add(n.Pos(), "divide by zero") n.SetType(types.Error) return n } r.F = float64(lhs.I) / rhs.F case parser.MOD: if rhs.F == 0 { o.errors.Add(n.Pos(), "mod by zero") n.SetType(types.Error) return n } rhs.F = math.Mod(float64(lhs.I), rhs.F) case parser.POW: r.F = math.Pow(float64(lhs.I), rhs.F) default: return node } return r default: return node } case *ast.FloatLit: switch rhs 
:= n.RHS.(type) { case *ast.IntLit: r := &ast.FloatLit{P: *position.Merge(&(lhs.P), &(rhs.P))} switch n.Op { case parser.PLUS: r.F = lhs.F + float64(rhs.I) case parser.MINUS: r.F = lhs.F - float64(rhs.I) case parser.MUL: r.F = lhs.F * float64(rhs.I) case parser.DIV: if rhs.I == 0 { o.errors.Add(n.Pos(), "divide by zero") n.SetType(types.Error) return n } r.F = lhs.F / float64(rhs.I) case parser.MOD: if rhs.I == 0 { o.errors.Add(n.Pos(), "mod by zero") n.SetType(types.Error) return n } r.F = math.Mod(lhs.F, float64(rhs.I)) case parser.POW: r.F = math.Pow(lhs.F, float64(rhs.I)) default: return node } return r case *ast.FloatLit: r := &ast.FloatLit{P: *position.Merge(&(lhs.P), &(rhs.P))} switch n.Op { case parser.PLUS: r.F = lhs.F + rhs.F case parser.MINUS: r.F = lhs.F - rhs.F case parser.MUL: r.F = lhs.F * rhs.F case parser.DIV: if rhs.F == 0 { o.errors.Add(n.Pos(), "divide by zero") n.SetType(types.Error) return n } r.F = lhs.F / rhs.F case parser.MOD: if rhs.F == 0 { o.errors.Add(n.Pos(), "mod by zero") n.SetType(types.Error) return n } r.F = math.Mod(lhs.F, rhs.F) case parser.POW: r.F = math.Pow(lhs.F, rhs.F) default: return node } return r default: return node } default: return node } default: return node } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/opt/opt_test.go000066400000000000000000000247511460063571700251010ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package opt_test import ( "math" "math/rand" "reflect" "strings" "testing" "testing/quick" "github.com/google/go-cmp/cmp" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/opt" "github.com/google/mtail/internal/runtime/compiler/parser" "github.com/google/mtail/internal/testutil" ) var optimiserTests = []struct { name string ast ast.Node want ast.Node }{ { "int add", &ast.BinaryExpr{ LHS: &ast.IntLit{I: 1}, RHS: &ast.IntLit{I: 2}, Op: parser.PLUS, }, &ast.IntLit{I: 3}, }, { "float mul", &ast.BinaryExpr{ LHS: &ast.FloatLit{F: 2}, RHS: &ast.FloatLit{F: 3}, Op: parser.MUL, }, &ast.FloatLit{F: 6}, }, { "int float pow", &ast.BinaryExpr{ LHS: &ast.IntLit{I: 2}, RHS: &ast.FloatLit{F: 3}, Op: parser.POW, }, &ast.FloatLit{F: 8}, }, { "float int mod", &ast.BinaryExpr{ LHS: &ast.FloatLit{F: 3}, RHS: &ast.IntLit{I: 2}, Op: parser.MOD, }, &ast.FloatLit{F: 1}, }, { "nested ops", &ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.IntLit{I: 2}, RHS: &ast.IntLit{I: 4}, Op: parser.POW, }, RHS: &ast.IntLit{I: 1}, Op: parser.MINUS, }, &ast.IntLit{I: 15}, }, } func TestOptimiser(t *testing.T) { for _, tc := range optimiserTests { tc := tc t.Run(tc.name, func(t *testing.T) { got, err := opt.Optimise(tc.ast) testutil.FatalIfErr(t, err) testutil.ExpectNoDiff(t, tc.want, got) }) } } var optimiserErrorTests = []struct { name string ast ast.Node want []string }{ { "integer divide by zero", &ast.BinaryExpr{ LHS: &ast.IntLit{I: 4}, RHS: &ast.IntLit{I: 0}, Op: parser.DIV, }, []string{":1:1: divide by zero"}, }, { "float divide by zero", &ast.BinaryExpr{ LHS: &ast.FloatLit{F: 4}, RHS: &ast.FloatLit{F: 0}, Op: parser.DIV, }, []string{":1:1: divide by zero"}, }, { "integer mod by zero", &ast.BinaryExpr{ LHS: &ast.IntLit{I: 4}, RHS: &ast.IntLit{I: 0}, Op: parser.MOD, }, []string{":1:1: mod by zero"}, }, { "float mod by zero", &ast.BinaryExpr{ LHS: &ast.FloatLit{F: 4}, RHS: &ast.FloatLit{F: 0}, Op: parser.MOD, }, []string{":1:1: mod by zero"}, }, 
} func TestOptimiserErrors(t *testing.T) { for _, tc := range optimiserErrorTests { tc := tc t.Run(tc.name, func(t *testing.T) { _, err := opt.Optimise(tc.ast) testutil.ExpectNoDiff(t, tc.want, strings.Split(err.Error(), "\n")) }) } } var commOps = map[int]string{parser.PLUS: "add", parser.MUL: "mul"} func TestConstFoldQuickIntComm(t *testing.T) { for op, name := range commOps { op := op t.Run(name, func(t *testing.T) { if err := quick.Check(func(x, y int32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(x)}, RHS: &ast.IntLit{I: int64(y)}, Op: op, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(y)}, RHS: &ast.IntLit{I: int64(x)}, Op: op, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, nil); err != nil { t.Error(err) } }) } } func TestConstFoldQuickFloatComm(t *testing.T) { for op, name := range commOps { op := op t.Run(name, func(t *testing.T) { if err := quick.Check(func(x, y float32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.FloatLit{F: float64(y)}, Op: op, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(y)}, RHS: &ast.FloatLit{F: float64(x)}, Op: op, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, nil); err != nil { t.Error(err) } }) } } func TestConstFoldQuickMixedComm(t *testing.T) { for op, name := range commOps { op := op t.Run(name, func(t *testing.T) { if err := quick.Check(func(x int32, y float32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(x)}, RHS: &ast.FloatLit{F: float64(y)}, Op: op, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(y)}, RHS: &ast.IntLit{I: int64(x)}, Op: op, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, nil); err != nil { t.Error(err) } }) } } func TestConstFoldQuickIntAddSub(t *testing.T) { if err := 
quick.Check(func(x, y int32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(x)}, RHS: &ast.IntLit{I: int64(y)}, Op: parser.MINUS, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.IntLit{I: 0}, RHS: &ast.IntLit{I: int64(y)}, Op: parser.MINUS, }, RHS: &ast.IntLit{I: int64(x)}, Op: parser.PLUS, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, nil); err != nil { t.Error(err) } } func TestConstFoldQuickFloatAddSub(t *testing.T) { if err := quick.Check(func(x, y float32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.FloatLit{F: float64(y)}, Op: parser.MINUS, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: 0}, RHS: &ast.FloatLit{F: float64(y)}, Op: parser.MINUS, }, RHS: &ast.FloatLit{F: float64(x)}, Op: parser.PLUS, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, nil); err != nil { t.Error(err) } } func TestConstFoldQuickMixedAddSub(t *testing.T) { if err := quick.Check(func(x int32, y float32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(x)}, RHS: &ast.FloatLit{F: float64(y)}, Op: parser.MINUS, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: 0}, RHS: &ast.FloatLit{F: float64(y)}, Op: parser.MINUS, }, RHS: &ast.IntLit{I: int64(x)}, Op: parser.PLUS, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, nil); err != nil { t.Error(err) } } var cmpFloat = cmp.Comparer(func(x, y float64) bool { delta := math.Abs(x - y) mean := math.Abs(x+y) / 2.0 return delta/mean < 0.00001 }) func TestConstFoldQuickFloatMulDiv(t *testing.T) { if err := quick.Check(func(x, y float32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.FloatLit{F: float64(y)}, Op: parser.DIV, }) if aErr != nil { t.Fatal(aErr) } b, 
bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: 1}, RHS: &ast.FloatLit{F: float64(y)}, Op: parser.DIV, }, RHS: &ast.FloatLit{F: float64(x)}, Op: parser.MUL, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b, cmpFloat) }, nil); err != nil { t.Error(err) } } func positiveInt(r *rand.Rand) int32 { v := r.Int31() if v == 0 { return 1 } return v } func TestConstFoldQuickIntModAddition(t *testing.T) { values := func(args []reflect.Value, r *rand.Rand) { args[0] = reflect.ValueOf(positiveInt(r)) args[1] = reflect.ValueOf(positiveInt(r)) args[2] = reflect.ValueOf(positiveInt(r)) } cfg := &quick.Config{ Values: values, } if err := quick.Check(func(x, y, z int32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(x)}, RHS: &ast.IntLit{I: int64(y)}, Op: parser.PLUS, }, RHS: &ast.IntLit{I: int64(z)}, Op: parser.MOD, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(x)}, RHS: &ast.IntLit{I: int64(z)}, Op: parser.MOD, }, RHS: &ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(y)}, RHS: &ast.IntLit{I: int64(z)}, Op: parser.MOD, }, Op: parser.PLUS, }, RHS: &ast.IntLit{I: int64(z)}, Op: parser.MOD, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, cfg); err != nil { t.Error(err) } } func positiveFloat(r *rand.Rand) float32 { v := r.Float32() if v == 0.0 { return 1.0 } return v } func TestConstFoldQuickFloatModAddition(t *testing.T) { values := func(args []reflect.Value, r *rand.Rand) { args[0] = reflect.ValueOf(positiveFloat(r)) args[1] = reflect.ValueOf(positiveFloat(r)) args[2] = reflect.ValueOf(positiveFloat(r)) } cfg := &quick.Config{ Values: values, } if err := quick.Check(func(x, y, z float32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.FloatLit{F: float64(y)}, Op: parser.PLUS, }, RHS: &ast.FloatLit{F: float64(z)}, Op: 
parser.MOD, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.FloatLit{F: float64(z)}, Op: parser.MOD, }, RHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(y)}, RHS: &ast.FloatLit{F: float64(z)}, Op: parser.MOD, }, Op: parser.PLUS, }, RHS: &ast.FloatLit{F: float64(z)}, Op: parser.MOD, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, cfg); err != nil { t.Error(err) } } func TestConstFoldQuickMixedPowProduct(t *testing.T) { values := func(args []reflect.Value, r *rand.Rand) { args[0] = reflect.ValueOf(positiveFloat(r)) args[1] = reflect.ValueOf(positiveInt(r)) args[2] = reflect.ValueOf(positiveInt(r)) } cfg := &quick.Config{ Values: values, } if err := quick.Check(func(x float32, y, z int32) bool { a, aErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.BinaryExpr{ LHS: &ast.IntLit{I: int64(y)}, RHS: &ast.IntLit{I: int64(z)}, Op: parser.PLUS, }, Op: parser.POW, }) if aErr != nil { t.Fatal(aErr) } b, bErr := opt.Optimise(&ast.BinaryExpr{ LHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.IntLit{I: int64(y)}, Op: parser.POW, }, RHS: &ast.BinaryExpr{ LHS: &ast.FloatLit{F: float64(x)}, RHS: &ast.IntLit{I: int64(z)}, Op: parser.POW, }, Op: parser.MUL, }) if bErr != nil { t.Fatal(bErr) } return cmp.Equal(a, b) }, cfg); err != nil { t.Error(err) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/000077500000000000000000000000001460063571700233725ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/driver.go000066400000000000000000000070031460063571700252140ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Build the parser: //go:generate goyacc -v y.output -o parser.go -p mtail parser.y // Package parser implements the parse phase of the mtail program compilation. 
// The parser itself is defined in parser.y, and goyacc generates the program // code and token definitions. The parser fetches tokens from the lexer, which // scans the input converting the program source into a token stream. The // driver code wraps the generated parser and marshals the ast and errors back // to the caller. // // Two pretty-printers are used for debugging: the unparser, which converts an // ast back into program text, and an approximation of an s-expression printer, // which tries to model in indented text the structure of the ast. package parser import ( "flag" "fmt" "io" "strconv" "time" "github.com/golang/glog" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/errors" "github.com/google/mtail/internal/runtime/compiler/position" ) // Parse reads the program named name from the input, and if successful returns // an ast.Node for the root of the AST, otherwise parser errors. func Parse(name string, input io.Reader) (ast.Node, error) { p := newParser(name, input) r := mtailParse(p) if r != 0 || p.errors != nil { return nil, p.errors } return p.root, nil } // EOF is a marker for end of file. It has the same value as the goyacc internal Kind `$end`. const EOF = 0 // parser defines the data structure for parsing an mtail program. type parser struct { name string root ast.Node errors errors.ErrorList l *Lexer t Token // Most recently lexed token. pos position.Position // Optionally contains the position of the start of a production } func newParser(name string, input io.Reader) *parser { return &parser{name: name, l: NewLexer(name, input)} } func (p *parser) ErrorP(s string, pos *position.Position) { p.errors.Add(pos, s) } func (p *parser) Error(s string) { p.errors.Add(&p.t.Pos, s) } // Lex reads the next token from the Lexer, turning it into a form useful for the goyacc generated parser. // The variable lval is modified to carry token information, and the token type is returned. 
func (p *parser) Lex(lval *mtailSymType) int { p.t = p.l.NextToken() switch p.t.Kind { case INVALID: p.Error(p.t.Spelling) lval.text = p.t.Spelling return INVALID case INTLITERAL: var err error lval.intVal, err = strconv.ParseInt(p.t.Spelling, 10, 64) if err != nil { p.Error(fmt.Sprintf("bad number '%s': %s", p.t.Spelling, err)) return INVALID } case FLOATLITERAL: var err error lval.floatVal, err = strconv.ParseFloat(p.t.Spelling, 64) if err != nil { p.Error(fmt.Sprintf("bad number '%s': %s", p.t.Spelling, err)) return INVALID } case DURATIONLITERAL: var err error lval.duration, err = time.ParseDuration(p.t.Spelling) if err != nil { p.Error(fmt.Sprintf("%s", err)) return INVALID } case LT, GT, LE, GE, NE, EQ, SHL, SHR, BITAND, BITOR, AND, OR, XOR, NOT, INC, DEC, DIV, MUL, MINUS, PLUS, ASSIGN, ADD_ASSIGN, POW, MOD, MATCH, NOT_MATCH: lval.op = int(p.t.Kind) default: lval.text = p.t.Spelling } return int(p.t.Kind) } func (p *parser) inRegex() { glog.V(2).Info("Entering regex") p.l.InRegex = true } func init() { // Initialise globals defined in generated parser.go, defaults to 0 and false flag.IntVar(&mtailDebug, "mtailDebug", 0, "Set parser debug level.") mtailErrorVerbose = true } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/lexer.go000066400000000000000000000266331460063571700250520ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package parser import ( "bufio" "errors" "fmt" "io" "sort" "strings" "unicode" "github.com/golang/glog" "github.com/google/mtail/internal/runtime/compiler/position" ) // List of keywords. Keep this list sorted! 
var keywords = map[string]Kind{ "after": AFTER, "as": AS, "buckets": BUCKETS, "by": BY, "const": CONST, "counter": COUNTER, "def": DEF, "del": DEL, "else": ELSE, "gauge": GAUGE, "hidden": HIDDEN, "histogram": HISTOGRAM, "limit": LIMIT, "next": NEXT, "otherwise": OTHERWISE, "stop": STOP, "text": TEXT, "timer": TIMER, } // List of builtin functions. Keep this list sorted! var builtins = []string{ "bool", "float", "getfilename", "int", "len", "settime", "string", "strptime", "strtol", "subst", "timestamp", "tolower", } // Dictionary returns a list of all keywords and builtins of the language. func Dictionary() (r []string) { for k := range keywords { r = append(r, k) } r = append(r, builtins...) return } // A stateFn represents each state the scanner can be in. type stateFn func(*Lexer) stateFn // A lexer holds the state of the scanner. type Lexer struct { name string // Name of program. input *bufio.Reader // Source program state stateFn // Current state function of the lexer. // The "read cursor" in the input. rune rune // The current rune. width int // Width in bytes. line int // The line position of the current rune. col int // The column position of the current rune. InRegex bool // Context aware flag from parser to say we're in a regex // The currently being lexed token. startcol int // Starting column of the current token. text strings.Builder // the text of the current token tokens chan Token // Output channel for tokens emitted. } // NewLexer creates a new scanner type that reads the input provided. func NewLexer(name string, input io.Reader) *Lexer { l := &Lexer{ name: name, input: bufio.NewReader(input), state: lexProg, tokens: make(chan Token, 2), } return l } // NextToken returns the next token in the input. When no token is available // to be returned it executes the next action in the state machine. 
func (l *Lexer) NextToken() Token { for { select { case tok := <-l.tokens: return tok default: l.state = l.state(l) } } } // emit passes a token to the client. func (l *Lexer) emit(kind Kind) { pos := position.Position{l.name, l.line, l.startcol, l.col - 1} glog.V(2).Infof("Emitting %v spelled %q at %v", kind, l.text.String(), pos) l.tokens <- Token{kind, l.text.String(), pos} // Reset the current token l.text.Reset() l.startcol = l.col } // Internal end of file value. const eof rune = -1 // next returns the next rune in the input. func (l *Lexer) next() rune { var err error l.rune, l.width, err = l.input.ReadRune() if errors.Is(err, io.EOF) { l.width = 1 l.rune = eof } if l.rune == '␤' { l.rune = eof } return l.rune } // backup indicates that we haven't yet dealt with the next rune. Use when // terminating tokens on unknown runes. func (l *Lexer) backup() { l.width = 0 if l.rune == eof { return } if err := l.input.UnreadRune(); err != nil { glog.Info(err) } } // stepCursor moves the read cursor. func (l *Lexer) stepCursor() { if l.rune == '\n' { l.line++ l.col = 0 } else { l.col += l.width } } // accept accepts the current rune and its position into the current token. func (l *Lexer) accept() { l.text.WriteRune(l.rune) l.stepCursor() } // skip does not accept the current rune into the current token's text, but // does accept its position into the token. Use only at the start or end of a // token. func (l *Lexer) skip() { l.stepCursor() } // ignore skips over the current rune, removing it from the text of the token, // and resetting the start position of the current token. Use only between // tokens. func (l *Lexer) ignore() { l.stepCursor() l.startcol = l.col } // errorf returns an error token and resets the scanner. 
func (l *Lexer) errorf(format string, args ...interface{}) stateFn { pos := position.Position{ Filename: l.name, Line: l.line, Startcol: l.startcol, Endcol: l.col - 1, } l.tokens <- Token{ Kind: INVALID, Spelling: fmt.Sprintf(format, args...), Pos: pos, } // Reset the current token l.text.Reset() l.startcol = l.col return lexProg } // State functions. // lexProg starts lexing a program. func lexProg(l *Lexer) stateFn { if l.InRegex { return lexRegex } switch r := l.next(); { case r == '\n': l.accept() l.emit(NL) case r == '#': return lexComment case isSpace(r): l.ignore() case r == '{': l.accept() l.emit(LCURLY) case r == '}': l.accept() l.emit(RCURLY) case r == '(': l.accept() l.emit(LPAREN) case r == ')': l.accept() l.emit(RPAREN) case r == '[': l.accept() l.emit(LSQUARE) case r == ']': l.accept() l.emit(RSQUARE) case r == ',': l.accept() l.emit(COMMA) case r == '-': l.accept() switch r = l.next(); { case r == '-': l.accept() l.emit(DEC) case isDigit(r): l.backup() return lexNumeric default: l.backup() l.emit(MINUS) } case r == '+': l.accept() switch l.next() { case '+': l.accept() l.emit(INC) case '=': l.accept() l.emit(ADD_ASSIGN) default: l.backup() l.emit(PLUS) } case r == '*': l.accept() switch l.next() { case '*': l.accept() l.emit(POW) default: l.backup() l.emit(MUL) } case r == '=': l.accept() switch l.next() { case '=': l.accept() l.emit(EQ) case '~': l.accept() l.emit(MATCH) default: l.backup() l.emit(ASSIGN) } case r == '<': l.accept() switch l.next() { case '=': l.accept() l.emit(LE) case '<': l.accept() l.emit(SHL) default: l.backup() l.emit(LT) } case r == '>': l.accept() switch l.next() { case '=': l.accept() l.emit(GE) case '>': l.accept() l.emit(SHR) default: l.backup() l.emit(GT) } case r == '!': l.accept() switch l.next() { case '=': l.accept() l.emit(NE) case '~': l.accept() l.emit(NOT_MATCH) default: l.backup() return l.errorf("Unexpected input: %q", r) } case r == '/': l.accept() l.emit(DIV) case r == '%': l.accept() l.emit(MOD) case r == 
'&': l.accept() switch l.next() { case '&': l.accept() l.emit(AND) default: l.backup() l.emit(BITAND) } case r == '|': l.accept() switch l.next() { case '|': l.accept() l.emit(OR) default: l.backup() l.emit(BITOR) } case r == '^': l.accept() l.emit(XOR) case r == '~': l.accept() l.emit(NOT) case r == '"': return lexQuotedString case r == '$': return lexCapref case r == '@': return lexDecorator case isDigit(r): l.backup() return lexNumeric case isAlpha(r): return lexIdentifier case r == eof: l.skip() l.emit(EOF) // Stop the machine, we're done. return nil case r == '.': l.backup() return lexNumeric default: l.accept() return l.errorf("Unexpected input: %q", r) } return lexProg } // Lex a comment. func lexComment(l *Lexer) stateFn { l.ignore() Loop: for { switch l.next() { case '\n': l.skip() fallthrough case eof: break Loop default: l.ignore() } } return lexProg } // Lex a numerical constant. func lexNumeric(l *Lexer) stateFn { r := l.next() for isDigit(r) { l.accept() r = l.next() } if r != '.' && r != 'E' && r != 'e' && !isDurationSuffix(r) { l.backup() l.emit(Kind(INTLITERAL)) return lexProg } if r == '.' { l.accept() r = l.next() for isDigit(r) { l.accept() r = l.next() } } if r == 'e' || r == 'E' { l.accept() r = l.next() if r == '+' || r == '-' { l.accept() r = l.next() } for isDigit(r) { l.accept() r = l.next() } } if isDurationSuffix(r) { l.accept() return lexDuration } l.backup() l.emit(Kind(FLOATLITERAL)) return lexProg } func isDurationSuffix(r rune) bool { switch r { case 's', 'm', 'h', 'd': return true } return false } func lexDuration(l *Lexer) stateFn { Loop: for { switch r := l.next(); { case isDigit(r): l.accept() case r == '.': l.accept() case r == '-': l.accept() case r == '+': l.accept() case isDurationSuffix(r): l.accept() default: l.backup() break Loop } } l.emit(DURATIONLITERAL) return lexProg } // Lex a quoted string. The text of a quoted string does not include the '"' quotes. 
func lexQuotedString(l *Lexer) stateFn { l.skip() // Skip leading quote Loop: for { switch l.next() { case '\\': l.skip() if r := l.next(); r != eof && r != '\n' { if r != '"' { l.text.WriteRune('\\') } l.accept() break } fallthrough case eof, '\n': return l.errorf("Unterminated quoted string: \"\\\"%s\"", l.text.String()) case '"': l.skip() // Skip trailing quote. break Loop default: l.accept() } } l.emit(STRING) return lexProg } // Lex a capture group reference. These are local variable references to // capture groups in the preceding regular expression. func lexCapref(l *Lexer) stateFn { l.skip() // Skip the leading $ named := false Loop: for { switch r := l.next(); { case isAlnum(r) || r == '_': l.accept() if !isDigit(r) { named = true } default: l.backup() break Loop } } if named { l.emit(CAPREF_NAMED) } else { l.emit(CAPREF) } return lexProg } // Lex an identifier, or builtin keyword. func lexIdentifier(l *Lexer) stateFn { l.accept() Loop: for { switch r := l.next(); { case isAlnum(r) || r == '_': l.accept() default: l.backup() break Loop } } if r, ok := keywords[l.text.String()]; ok { l.emit(r) } else if r := sort.SearchStrings(builtins, l.text.String()); r >= 0 && r < len(builtins) && builtins[r] == l.text.String() { l.emit(BUILTIN) } else { l.emit(ID) } return lexProg } // Lex a regular expression pattern. The text of the regular expression does // not include the '/' quotes. func lexRegex(l *Lexer) stateFn { // Exit regex mode when leaving this function. 
defer func() { glog.V(2).Info("Exiting regex") glog.V(2).Infof("Regex at line %d, startcol %d, col %d", l.line, l.startcol, l.col) l.InRegex = false }() Loop: for { switch l.next() { case '\\': l.skip() if r := l.next(); r != eof && r != '\n' { if r != '/' { l.text.WriteRune('\\') } l.accept() break } fallthrough case eof, '\n': return l.errorf("Unterminated regular expression: \"/%s\"", l.text.String()) case '/': l.backup() // Backup trailing slash on successful parse break Loop default: l.accept() } } l.emit(REGEX) return lexProg } // Lex a decorator name. These are functiony templatey wrappers around blocks // of rules. func lexDecorator(l *Lexer) stateFn { l.skip() // Skip the leading @ Loop: for { switch r := l.next(); { case isAlnum(r) || r == '_': l.accept() default: l.backup() break Loop } } l.emit(DECO) return lexProg } // Helper predicates. // isAlpha reports whether r is an alphabetical rune. func isAlpha(r rune) bool { return unicode.IsLetter(r) } // isAlnum reports whether r is an alphanumeric rune. func isAlnum(r rune) bool { return isAlpha(r) || isDigit(r) } // isDigit reports whether r is a numerical rune. func isDigit(r rune) bool { return unicode.IsDigit(r) } // isSpace reports whether r is whitespace. func isSpace(r rune) bool { return unicode.IsSpace(r) } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/lexer_test.go000066400000000000000000000330521460063571700261020ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package parser import ( "strings" "testing" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/google/mtail/internal/testutil" ) type lexerTest struct { name string input string tokens []Token } var lexerTests = []lexerTest{ {"empty", "", []Token{ {EOF, "", position.Position{"empty", 0, 0, 0}}, }}, {"spaces", " \t", []Token{ {EOF, "", position.Position{"spaces", 0, 2, 2}}, }}, {"newlines", "\n", []Token{ {NL, "\n", position.Position{"newlines", 1, 0, -1}}, {EOF, "", position.Position{"newlines", 1, 0, 0}}, }}, {"comment", "# comment", []Token{ {EOF, "", position.Position{"comment", 0, 9, 9}}, }}, {"comment not at col 1", " # comment", []Token{ {EOF, "", position.Position{"comment not at col 1", 0, 11, 11}}, }}, {"punctuation", "{}()[],", []Token{ {LCURLY, "{", position.Position{"punctuation", 0, 0, 0}}, {RCURLY, "}", position.Position{"punctuation", 0, 1, 1}}, {LPAREN, "(", position.Position{"punctuation", 0, 2, 2}}, {RPAREN, ")", position.Position{"punctuation", 0, 3, 3}}, {LSQUARE, "[", position.Position{"punctuation", 0, 4, 4}}, {RSQUARE, "]", position.Position{"punctuation", 0, 5, 5}}, {COMMA, ",", position.Position{"punctuation", 0, 6, 6}}, {EOF, "", position.Position{"punctuation", 0, 7, 7}}, }}, {"operators", "- + = ++ += < > <= >= == != * / << >> & | ^ ~ ** % || && =~ !~ --", []Token{ {MINUS, "-", position.Position{"operators", 0, 0, 0}}, {PLUS, "+", position.Position{"operators", 0, 2, 2}}, {ASSIGN, "=", position.Position{"operators", 0, 4, 4}}, {INC, "++", position.Position{"operators", 0, 6, 7}}, {ADD_ASSIGN, "+=", position.Position{"operators", 0, 9, 10}}, {LT, "<", position.Position{"operators", 0, 12, 12}}, {GT, ">", position.Position{"operators", 0, 14, 14}}, {LE, "<=", position.Position{"operators", 0, 16, 17}}, {GE, ">=", position.Position{"operators", 0, 19, 20}}, {EQ, "==", position.Position{"operators", 0, 22, 23}}, {NE, "!=", position.Position{"operators", 0, 25, 26}}, {MUL, "*", position.Position{"operators", 0, 28, 28}}, 
{DIV, "/", position.Position{"operators", 0, 30, 30}}, {SHL, "<<", position.Position{"operators", 0, 32, 33}}, {SHR, ">>", position.Position{"operators", 0, 35, 36}}, {BITAND, "&", position.Position{"operators", 0, 38, 38}}, {BITOR, "|", position.Position{"operators", 0, 40, 40}}, {XOR, "^", position.Position{"operators", 0, 42, 42}}, {NOT, "~", position.Position{"operators", 0, 44, 44}}, {POW, "**", position.Position{"operators", 0, 46, 47}}, {MOD, "%", position.Position{"operators", 0, 49, 49}}, {OR, "||", position.Position{"operators", 0, 51, 52}}, {AND, "&&", position.Position{"operators", 0, 54, 55}}, {MATCH, "=~", position.Position{"operators", 0, 57, 58}}, {NOT_MATCH, "!~", position.Position{"operators", 0, 60, 61}}, {DEC, "--", position.Position{"operators", 0, 63, 64}}, {EOF, "", position.Position{"operators", 0, 65, 65}}, }}, { "keywords", "counter\ngauge\nas\nby\nhidden\ndef\nnext\nconst\ntimer\notherwise\nelse\ndel\ntext\nafter\nstop\nhistogram\nbuckets\n", []Token{ {COUNTER, "counter", position.Position{"keywords", 0, 0, 6}}, {NL, "\n", position.Position{"keywords", 1, 7, -1}}, {GAUGE, "gauge", position.Position{"keywords", 1, 0, 4}}, {NL, "\n", position.Position{"keywords", 2, 5, -1}}, {AS, "as", position.Position{"keywords", 2, 0, 1}}, {NL, "\n", position.Position{"keywords", 3, 2, -1}}, {BY, "by", position.Position{"keywords", 3, 0, 1}}, {NL, "\n", position.Position{"keywords", 4, 2, -1}}, {HIDDEN, "hidden", position.Position{"keywords", 4, 0, 5}}, {NL, "\n", position.Position{"keywords", 5, 6, -1}}, {DEF, "def", position.Position{"keywords", 5, 0, 2}}, {NL, "\n", position.Position{"keywords", 6, 3, -1}}, {NEXT, "next", position.Position{"keywords", 6, 0, 3}}, {NL, "\n", position.Position{"keywords", 7, 4, -1}}, {CONST, "const", position.Position{"keywords", 7, 0, 4}}, {NL, "\n", position.Position{"keywords", 8, 5, -1}}, {TIMER, "timer", position.Position{"keywords", 8, 0, 4}}, {NL, "\n", position.Position{"keywords", 9, 5, -1}}, {OTHERWISE, 
"otherwise", position.Position{"keywords", 9, 0, 8}}, {NL, "\n", position.Position{"keywords", 10, 9, -1}}, {ELSE, "else", position.Position{"keywords", 10, 0, 3}}, {NL, "\n", position.Position{"keywords", 11, 4, -1}}, {DEL, "del", position.Position{"keywords", 11, 0, 2}}, {NL, "\n", position.Position{"keywords", 12, 3, -1}}, {TEXT, "text", position.Position{"keywords", 12, 0, 3}}, {NL, "\n", position.Position{"keywords", 13, 4, -1}}, {AFTER, "after", position.Position{"keywords", 13, 0, 4}}, {NL, "\n", position.Position{"keywords", 14, 5, -1}}, {STOP, "stop", position.Position{"keywords", 14, 0, 3}}, {NL, "\n", position.Position{"keywords", 15, 4, -1}}, {HISTOGRAM, "histogram", position.Position{"keywords", 15, 0, 8}}, {NL, "\n", position.Position{"keywords", 16, 9, -1}}, {BUCKETS, "buckets", position.Position{"keywords", 16, 0, 6}}, {NL, "\n", position.Position{"keywords", 17, 7, -1}}, {EOF, "", position.Position{"keywords", 17, 0, 0}}, }, }, { "builtins", "strptime\ntimestamp\ntolower\nlen\nstrtol\nsettime\ngetfilename\nint\nbool\nfloat\nstring\nsubst\n", []Token{ {BUILTIN, "strptime", position.Position{"builtins", 0, 0, 7}}, {NL, "\n", position.Position{"builtins", 1, 8, -1}}, {BUILTIN, "timestamp", position.Position{"builtins", 1, 0, 8}}, {NL, "\n", position.Position{"builtins", 2, 9, -1}}, {BUILTIN, "tolower", position.Position{"builtins", 2, 0, 6}}, {NL, "\n", position.Position{"builtins", 3, 7, -1}}, {BUILTIN, "len", position.Position{"builtins", 3, 0, 2}}, {NL, "\n", position.Position{"builtins", 4, 3, -1}}, {BUILTIN, "strtol", position.Position{"builtins", 4, 0, 5}}, {NL, "\n", position.Position{"builtins", 5, 6, -1}}, {BUILTIN, "settime", position.Position{"builtins", 5, 0, 6}}, {NL, "\n", position.Position{"builtins", 6, 7, -1}}, {BUILTIN, "getfilename", position.Position{"builtins", 6, 0, 10}}, {NL, "\n", position.Position{"builtins", 7, 11, -1}}, {BUILTIN, "int", position.Position{"builtins", 7, 0, 2}}, {NL, "\n", position.Position{"builtins", 8, 3, 
-1}}, {BUILTIN, "bool", position.Position{"builtins", 8, 0, 3}}, {NL, "\n", position.Position{"builtins", 9, 4, -1}}, {BUILTIN, "float", position.Position{"builtins", 9, 0, 4}}, {NL, "\n", position.Position{"builtins", 10, 5, -1}}, {BUILTIN, "string", position.Position{"builtins", 10, 0, 5}}, {NL, "\n", position.Position{"builtins", 11, 6, -1}}, {BUILTIN, "subst", position.Position{"builtins", 11, 0, 4}}, {NL, "\n", position.Position{"builtins", 12, 5, -1}}, {EOF, "", position.Position{"builtins", 12, 0, 0}}, }, }, {"numbers", "1 23 3.14 1.61.1 -1 -1.0 1h 0d 3d -1.5h 15m 24h0m0s 1e3 1e-3 .11 123.456e7", []Token{ {INTLITERAL, "1", position.Position{"numbers", 0, 0, 0}}, {INTLITERAL, "23", position.Position{"numbers", 0, 2, 3}}, {FLOATLITERAL, "3.14", position.Position{"numbers", 0, 5, 8}}, {FLOATLITERAL, "1.61", position.Position{"numbers", 0, 10, 13}}, {FLOATLITERAL, ".1", position.Position{"numbers", 0, 14, 15}}, {INTLITERAL, "-1", position.Position{"numbers", 0, 17, 18}}, {FLOATLITERAL, "-1.0", position.Position{"numbers", 0, 20, 23}}, {DURATIONLITERAL, "1h", position.Position{"numbers", 0, 25, 26}}, {DURATIONLITERAL, "0d", position.Position{"numbers", 0, 28, 29}}, {DURATIONLITERAL, "3d", position.Position{"numbers", 0, 31, 32}}, {DURATIONLITERAL, "-1.5h", position.Position{"numbers", 0, 34, 38}}, {DURATIONLITERAL, "15m", position.Position{"numbers", 0, 40, 42}}, {DURATIONLITERAL, "24h0m0s", position.Position{"numbers", 0, 44, 50}}, {FLOATLITERAL, "1e3", position.Position{"numbers", 0, 52, 54}}, {FLOATLITERAL, "1e-3", position.Position{"numbers", 0, 56, 59}}, {FLOATLITERAL, ".11", position.Position{"numbers", 0, 61, 63}}, {FLOATLITERAL, "123.456e7", position.Position{"numbers", 0, 65, 73}}, {EOF, "", position.Position{"numbers", 0, 74, 74}}, }}, {"identifier", "a be foo\nquux lines_total", []Token{ {ID, "a", position.Position{"identifier", 0, 0, 0}}, {ID, "be", position.Position{"identifier", 0, 2, 3}}, {ID, "foo", position.Position{"identifier", 0, 5, 7}}, {NL, 
"\n", position.Position{"identifier", 1, 8, -1}}, {ID, "quux", position.Position{"identifier", 1, 0, 3}}, {ID, "lines_total", position.Position{"identifier", 1, 5, 15}}, {EOF, "", position.Position{"identifier", 1, 16, 16}}, }}, {"regex", "/asdf/", []Token{ {DIV, "/", position.Position{"regex", 0, 0, 0}}, {REGEX, "asdf", position.Position{"regex", 0, 1, 4}}, {DIV, "/", position.Position{"regex", 0, 5, 5}}, {EOF, "", position.Position{"regex", 0, 6, 6}}, }}, {"regex with escape", `/asdf\//`, []Token{ {DIV, "/", position.Position{"regex with escape", 0, 0, 0}}, {REGEX, `asdf/`, position.Position{"regex with escape", 0, 1, 6}}, {DIV, "/", position.Position{"regex with escape", 0, 7, 7}}, {EOF, "", position.Position{"regex with escape", 0, 8, 8}}, }}, {"regex with escape and special char", `/foo\d\//`, []Token{ {DIV, "/", position.Position{"regex with escape and special char", 0, 0, 0}}, {REGEX, `foo\d/`, position.Position{"regex with escape and special char", 0, 1, 7}}, {DIV, "/", position.Position{"regex with escape and special char", 0, 8, 8}}, {EOF, "", position.Position{"regex with escape and special char", 0, 9, 9}}, }}, {"capref", "$foo $1", []Token{ {CAPREF_NAMED, "foo", position.Position{"capref", 0, 0, 3}}, {CAPREF, "1", position.Position{"capref", 0, 5, 6}}, {EOF, "", position.Position{"capref", 0, 7, 7}}, }}, {"numerical capref", "$1", []Token{ {CAPREF, "1", position.Position{"numerical capref", 0, 0, 1}}, {EOF, "", position.Position{"numerical capref", 0, 2, 2}}, }}, {"capref with trailing punc", "$foo,", []Token{ {CAPREF_NAMED, "foo", position.Position{"capref with trailing punc", 0, 0, 3}}, {COMMA, ",", position.Position{"capref with trailing punc", 0, 4, 4}}, {EOF, "", position.Position{"capref with trailing punc", 0, 5, 5}}, }}, {"quoted string", `"asdf"`, []Token{ {STRING, `asdf`, position.Position{"quoted string", 0, 0, 5}}, {EOF, "", position.Position{"quoted string", 0, 6, 6}}, }}, {"escaped quote in quoted string", `"\""`, []Token{ {STRING, `"`, 
position.Position{"escaped quote in quoted string", 0, 0, 3}}, {EOF, "", position.Position{"escaped quote in quoted string", 0, 4, 4}}, }}, {"decorator", `@foo`, []Token{ {DECO, "foo", position.Position{"decorator", 0, 0, 3}}, {EOF, "", position.Position{"decorator", 0, 4, 4}}, }}, { "large program", "/(?P[[:digit:]-\\/ ])/ {\n" + " strptime($date, \"%Y/%m/%d %H:%M:%S\")\n" + " foo++\n" + "}", []Token{ {DIV, "/", position.Position{"large program", 0, 0, 0}}, {REGEX, "(?P[[:digit:]-/ ])", position.Position{"large program", 0, 1, 25}}, {DIV, "/", position.Position{"large program", 0, 26, 26}}, {LCURLY, "{", position.Position{"large program", 0, 28, 28}}, {NL, "\n", position.Position{"large program", 1, 29, -1}}, {BUILTIN, "strptime", position.Position{"large program", 1, 2, 9}}, {LPAREN, "(", position.Position{"large program", 1, 10, 10}}, {CAPREF_NAMED, "date", position.Position{"large program", 1, 11, 15}}, {COMMA, ",", position.Position{"large program", 1, 16, 16}}, {STRING, "%Y/%m/%d %H:%M:%S", position.Position{"large program", 1, 18, 36}}, {RPAREN, ")", position.Position{"large program", 1, 37, 37}}, {NL, "\n", position.Position{"large program", 2, 38, -1}}, {ID, "foo", position.Position{"large program", 2, 2, 4}}, {INC, "++", position.Position{"large program", 2, 5, 6}}, {NL, "\n", position.Position{"large program", 3, 7, -1}}, {RCURLY, "}", position.Position{"large program", 3, 0, 0}}, {EOF, "", position.Position{"large program", 3, 1, 1}}, }, }, { "linecount", "# comment\n" + "# blank line\n" + "\n" + "foo", []Token{ {NL, "\n", position.Position{"linecount", 3, 12, -1}}, {ID, "foo", position.Position{"linecount", 3, 0, 2}}, {EOF, "", position.Position{"linecount", 3, 3, 3}}, }, }, // errors {"unexpected char", "?", []Token{ {INVALID, "Unexpected input: '?'", position.Position{"unexpected char", 0, 0, 0}}, {EOF, "", position.Position{"unexpected char", 0, 1, 1}}, }}, {"unterminated regex", "/foo\n", []Token{ {DIV, "/", position.Position{"unterminated regex", 
0, 0, 0}}, {INVALID, "Unterminated regular expression: \"/foo\"", position.Position{"unterminated regex", 0, 1, 3}}, {EOF, "", position.Position{"unterminated regex", 0, 4, 4}}, }}, {"unterminated quoted string", "\"foo\n", []Token{ {INVALID, "Unterminated quoted string: \"\\\"foo\"", position.Position{"unterminated quoted string", 0, 0, 3}}, {EOF, "", position.Position{"unterminated quoted string", 0, 4, 4}}, }}, } // collect gathers the emitted items into a slice. func collect(t *lexerTest) (tokens []Token) { // Hack to count divs seen for regex tests. inRegexSet := false l := NewLexer(t.name, strings.NewReader(t.input)) for { tok := l.NextToken() // Hack to simulate context signal from parser. if tok.Kind == DIV && (strings.Contains(t.name, "regex") || strings.HasPrefix(t.name, "large program")) && !inRegexSet { l.InRegex = true inRegexSet = true } tokens = append(tokens, tok) if tok.Kind == EOF { return } } } func TestLex(t *testing.T) { for _, tc := range lexerTests { tc := tc t.Run(tc.name, func(t *testing.T) { tokens := collect(&tc) testutil.ExpectNoDiff(t, tc.tokens, tokens, testutil.AllowUnexported(Token{}, position.Position{})) }) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/parser.go000066400000000000000000001055741460063571700252310ustar00rootroot00000000000000// Code generated by goyacc -v y.output -o parser.go -p mtail parser.y. DO NOT EDIT. 
//line parser.y:5 /* #nosec G104 generated code, errors reported do not make sense */ package parser import __yyfmt__ "fmt" //line parser.y:6 import ( "time" "github.com/golang/glog" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/position" ) //line parser.y:19 type mtailSymType struct { yys int intVal int64 floatVal float64 floats []float64 op int text string texts []string flag bool n ast.Node kind metrics.Kind duration time.Duration } const INVALID = 57346 const COUNTER = 57347 const GAUGE = 57348 const TIMER = 57349 const TEXT = 57350 const HISTOGRAM = 57351 const AFTER = 57352 const AS = 57353 const BY = 57354 const CONST = 57355 const HIDDEN = 57356 const DEF = 57357 const DEL = 57358 const NEXT = 57359 const OTHERWISE = 57360 const ELSE = 57361 const STOP = 57362 const BUCKETS = 57363 const LIMIT = 57364 const BUILTIN = 57365 const REGEX = 57366 const STRING = 57367 const CAPREF = 57368 const CAPREF_NAMED = 57369 const ID = 57370 const DECO = 57371 const INTLITERAL = 57372 const FLOATLITERAL = 57373 const DURATIONLITERAL = 57374 const INC = 57375 const DEC = 57376 const DIV = 57377 const MOD = 57378 const MUL = 57379 const MINUS = 57380 const PLUS = 57381 const POW = 57382 const SHL = 57383 const SHR = 57384 const LT = 57385 const GT = 57386 const LE = 57387 const GE = 57388 const EQ = 57389 const NE = 57390 const BITAND = 57391 const XOR = 57392 const BITOR = 57393 const NOT = 57394 const AND = 57395 const OR = 57396 const ADD_ASSIGN = 57397 const ASSIGN = 57398 const MATCH = 57399 const NOT_MATCH = 57400 const LCURLY = 57401 const RCURLY = 57402 const LPAREN = 57403 const RPAREN = 57404 const LSQUARE = 57405 const RSQUARE = 57406 const COMMA = 57407 const NL = 57408 var mtailToknames = [...]string{ "$end", "error", "$unk", "INVALID", "COUNTER", "GAUGE", "TIMER", "TEXT", "HISTOGRAM", "AFTER", "AS", "BY", "CONST", "HIDDEN", "DEF", "DEL", "NEXT", "OTHERWISE", 
"ELSE", "STOP", "BUCKETS", "LIMIT", "BUILTIN", "REGEX", "STRING", "CAPREF", "CAPREF_NAMED", "ID", "DECO", "INTLITERAL", "FLOATLITERAL", "DURATIONLITERAL", "INC", "DEC", "DIV", "MOD", "MUL", "MINUS", "PLUS", "POW", "SHL", "SHR", "LT", "GT", "LE", "GE", "EQ", "NE", "BITAND", "XOR", "BITOR", "NOT", "AND", "OR", "ADD_ASSIGN", "ASSIGN", "MATCH", "NOT_MATCH", "LCURLY", "RCURLY", "LPAREN", "RPAREN", "LSQUARE", "RSQUARE", "COMMA", "NL", } var mtailStatenames = [...]string{} const mtailEofCode = 1 const mtailErrCode = 2 const mtailInitialStackSize = 16 //line parser.y:733 // tokenpos returns the position of the current token. func tokenpos(mtaillex mtailLexer) position.Position { return mtaillex.(*parser).t.Pos } // markedpos returns the position recorded from the most recent mark_pos // production. func markedpos(mtaillex mtailLexer) position.Position { return mtaillex.(*parser).pos } // positionFromMark returns a position spanning from the last mark to the current position. func positionFromMark(mtaillex mtailLexer) position.Position { tp := tokenpos(mtaillex) mp := markedpos(mtaillex) return *position.Merge(&mp, &tp) } //line yacctab:1 var mtailExca = [...]int{ -1, 1, 1, -1, -2, 0, -1, 2, 1, 1, 5, 93, 6, 93, 7, 93, 8, 93, 9, 93, -2, 124, -1, 22, 66, 24, -2, 68, -1, 106, 5, 93, 6, 93, 7, 93, 8, 93, 9, 93, -2, 124, } const mtailPrivate = 57344 const mtailLast = 249 var mtailAct = [...]int{ 171, 88, 126, 28, 15, 91, 42, 44, 27, 30, 103, 127, 41, 24, 20, 86, 40, 167, 22, 128, 163, 182, 19, 26, 45, 29, 104, 25, 36, 34, 35, 43, 54, 38, 39, 87, 181, 85, 89, 125, 108, 46, 36, 34, 35, 43, 47, 38, 39, 90, 162, 163, 62, 63, 2, 31, 49, 87, 76, 77, 68, 130, 74, 73, 37, 138, 62, 63, 50, 112, 93, 94, 117, 97, 96, 118, 168, 50, 37, 119, 120, 70, 72, 71, 121, 122, 123, 66, 67, 124, 107, 129, 185, 184, 111, 79, 80, 81, 82, 83, 84, 169, 106, 131, 179, 135, 132, 175, 15, 133, 129, 43, 27, 110, 100, 101, 99, 134, 20, 102, 174, 135, 22, 173, 87, 129, 19, 160, 87, 151, 156, 142, 155, 157, 158, 
87, 87, 87, 164, 166, 165, 161, 153, 159, 140, 154, 152, 136, 139, 178, 177, 13, 141, 116, 66, 67, 115, 49, 105, 109, 11, 23, 1, 176, 10, 129, 180, 12, 64, 145, 13, 65, 36, 34, 35, 43, 75, 38, 39, 11, 23, 98, 183, 10, 95, 69, 12, 92, 61, 78, 18, 36, 34, 35, 43, 170, 38, 39, 143, 31, 56, 57, 58, 59, 60, 172, 144, 137, 37, 36, 34, 35, 43, 16, 38, 39, 146, 55, 31, 33, 51, 53, 114, 48, 9, 8, 7, 37, 49, 113, 6, 32, 16, 21, 52, 17, 31, 148, 147, 5, 50, 14, 4, 3, 0, 37, 0, 149, 150, } var mtailPact = [...]int{ -1000, -1000, 166, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 83, -1000, -1000, -13, 205, -1000, -34, 195, 13, 13, -1000, 54, -1000, 21, 32, -1000, 7, 1, -1000, 52, 184, -25, -1000, -1000, -1000, -1000, 184, -1000, -1000, 29, -1000, 35, -1000, 79, -40, 139, -1000, -13, -21, -1000, 85, -13, 17, -1000, 128, -1000, -1000, -1000, -1000, -1000, -40, -1000, -1000, -40, -1000, -1000, -1000, -40, -40, -1000, -1000, -1000, -40, -40, -40, -1000, -1000, -40, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54, -1000, 134, 184, -1, -1000, -40, -1000, -1000, -40, -1000, -1000, -40, -1000, -1000, -1000, -1000, -1000, -1000, -13, 147, -1000, 3, 120, -13, -1000, 121, 226, -1000, -1000, -1000, 184, 184, 83, 184, 184, 184, 17, 184, -14, -1000, 13, -1000, 33, -1000, 184, 184, 184, 21, 42, -1000, -1000, -1000, -45, 41, -1000, 69, -1000, -1000, -1000, -1000, 95, 82, 119, 74, 13, 32, -1000, -1000, -1000, 52, 13, 13, -1000, -1000, 29, -1000, 184, 35, 79, -1000, -1000, -1000, -1000, -29, -1000, -1000, -1000, -1000, -1000, -44, -1000, -1000, -1000, -1000, 95, 62, -1000, -1000, -1000, } var mtailPgo = [...]int{ 0, 54, 243, 39, 41, 242, 241, 239, 235, 3, 7, 6, 15, 5, 233, 9, 16, 27, 11, 231, 12, 13, 19, 230, 229, 226, 225, 25, 23, 224, 222, 219, 2, 217, 216, 206, 205, 0, 198, 195, 190, 189, 187, 185, 168, 184, 181, 176, 171, 169, 163, 162, 10, 1, 159, } var mtailR1 = [...]int{ 0, 51, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 5, 5, 6, 6, 6, 7, 7, 4, 8, 8, 14, 14, 18, 18, 18, 18, 44, 
44, 17, 17, 43, 43, 43, 15, 15, 41, 41, 41, 41, 41, 41, 16, 16, 42, 42, 11, 11, 45, 45, 28, 28, 47, 47, 22, 21, 21, 21, 10, 10, 46, 46, 46, 46, 13, 13, 12, 12, 48, 48, 9, 9, 9, 9, 9, 9, 9, 9, 19, 19, 20, 31, 31, 3, 3, 32, 32, 27, 23, 40, 40, 24, 24, 24, 24, 24, 30, 30, 33, 33, 33, 33, 33, 38, 39, 39, 37, 35, 34, 49, 50, 50, 50, 50, 25, 26, 29, 29, 36, 36, 53, 54, 52, 52, } var mtailR2 = [...]int{ 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 4, 2, 3, 1, 4, 1, 1, 2, 3, 1, 1, 4, 4, 1, 1, 4, 4, 1, 1, 1, 4, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 4, 1, 1, 4, 4, 1, 1, 1, 1, 4, 4, 1, 4, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 4, 1, 4, 5, 1, 3, 1, 1, 5, 3, 0, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 3, 1, 2, 2, 2, 1, 1, 3, 3, 4, 3, 5, 3, 1, 1, 0, 0, 0, 1, } var mtailChk = [...]int{ -1000, -51, -1, -2, -5, -7, -23, -25, -26, -29, 17, 13, 20, 4, -6, -53, 66, -8, -40, -22, -18, -14, -12, 14, -21, -17, -28, -13, -9, -27, -15, 52, -19, -31, 26, 27, 25, 61, 30, 31, -16, -20, -11, 28, -10, -20, -4, 59, 18, 23, 35, 15, 29, 16, 66, -33, 5, 6, 7, 8, 9, -44, 53, 54, -44, -48, 33, 34, 39, -43, 49, 51, 50, 56, 55, -47, 57, 58, -41, 43, 44, 45, 46, 47, 48, -13, -12, -9, -53, 63, -18, -13, -42, 41, 42, -45, 39, 38, -46, 37, 35, 36, 40, -52, 66, 19, -1, -4, 61, -54, 28, -4, -12, -24, -30, 28, 25, -52, -52, -52, -52, -52, -52, -52, -52, -3, -32, -18, -22, -53, 62, -52, -52, -52, -21, -53, -4, 60, 62, -3, 24, -4, 10, -38, -35, -49, -34, 12, 11, 21, 22, -18, -17, -28, -27, -20, -15, -18, -18, -22, -9, -16, 64, 65, -11, -10, -13, 62, 35, 32, -39, -37, -36, 28, 25, 25, -50, 31, 30, 30, -32, 65, 65, -37, 31, 30, } var mtailDef = [...]int{ 2, -2, -2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 12, 13, 0, 0, 20, 0, 0, 17, 19, 23, -2, 94, 58, 27, 28, 62, 70, 59, 33, 124, 74, 75, 76, 77, 78, 124, 80, 81, 38, 82, 46, 84, 50, 126, 15, 2, 0, 0, 125, 0, 0, 124, 21, 0, 102, 103, 104, 105, 106, 126, 31, 32, 126, 71, 72, 73, 126, 126, 35, 36, 37, 126, 126, 126, 56, 57, 126, 40, 41, 42, 43, 44, 
45, 69, 68, 70, 0, 124, 0, 62, 126, 48, 49, 126, 52, 53, 126, 64, 65, 66, 67, 124, 127, 0, -2, 16, 124, 0, 0, 119, 121, 92, 99, 100, 101, 124, 124, 124, 124, 124, 124, 124, 124, 0, 87, 89, 90, 0, 79, 124, 124, 124, 11, 0, 14, 22, 85, 0, 0, 118, 0, 95, 96, 97, 98, 0, 0, 0, 0, 18, 29, 30, 60, 61, 34, 25, 26, 54, 55, 39, 83, 124, 47, 51, 63, 86, 91, 120, 107, 108, 110, 122, 123, 111, 113, 114, 115, 112, 88, 0, 0, 109, 116, 117, } var mtailTok1 = [...]int{ 1, } var mtailTok2 = [...]int{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, } var mtailTok3 = [...]int{ 0, } var mtailErrorMessages = [...]struct { state int token int msg string }{ {109, 4, "unexpected end of file, expecting '/' to end regex"}, {15, 1, "unexpected end of file, expecting '}' to end block"}, {15, 1, "unexpected end of file, expecting '}' to end block"}, {15, 1, "unexpected end of file, expecting '}' to end block"}, {14, 63, "unexpected indexing of an expression"}, {14, 66, "statement with no effect, missing an assignment, `+' concatenation, or `{}' block?"}, } //line yaccpar:1 /* parser for yacc output */ var ( mtailDebug = 0 mtailErrorVerbose = false ) type mtailLexer interface { Lex(lval *mtailSymType) int Error(s string) } type mtailParser interface { Parse(mtailLexer) int Lookahead() int } type mtailParserImpl struct { lval mtailSymType stack [mtailInitialStackSize]mtailSymType char int } func (p *mtailParserImpl) Lookahead() int { return p.char } func mtailNewParser() mtailParser { return &mtailParserImpl{} } const mtailFlag = -1000 func mtailTokname(c int) string { if c >= 1 && c-1 < len(mtailToknames) { if mtailToknames[c-1] != "" { return mtailToknames[c-1] } } return __yyfmt__.Sprintf("tok-%v", c) } func mtailStatname(s int) string { if s >= 0 && s < len(mtailStatenames) { if 
mtailStatenames[s] != "" { return mtailStatenames[s] } } return __yyfmt__.Sprintf("state-%v", s) } func mtailErrorMessage(state, lookAhead int) string { const TOKSTART = 4 if !mtailErrorVerbose { return "syntax error" } for _, e := range mtailErrorMessages { if e.state == state && e.token == lookAhead { return "syntax error: " + e.msg } } res := "syntax error: unexpected " + mtailTokname(lookAhead) // To match Bison, suggest at most four expected tokens. expected := make([]int, 0, 4) // Look for shiftable tokens. base := mtailPact[state] for tok := TOKSTART; tok-1 < len(mtailToknames); tok++ { if n := base + tok; n >= 0 && n < mtailLast && mtailChk[mtailAct[n]] == tok { if len(expected) == cap(expected) { return res } expected = append(expected, tok) } } if mtailDef[state] == -2 { i := 0 for mtailExca[i] != -1 || mtailExca[i+1] != state { i += 2 } // Look for tokens that we accept or reduce. for i += 2; mtailExca[i] >= 0; i += 2 { tok := mtailExca[i] if tok < TOKSTART || mtailExca[i+1] == 0 { continue } if len(expected) == cap(expected) { return res } expected = append(expected, tok) } // If the default action is to accept or reduce, give up. 
if mtailExca[i+1] != 0 { return res } } for i, tok := range expected { if i == 0 { res += ", expecting " } else { res += " or " } res += mtailTokname(tok) } return res } func mtaillex1(lex mtailLexer, lval *mtailSymType) (char, token int) { token = 0 char = lex.Lex(lval) if char <= 0 { token = mtailTok1[0] goto out } if char < len(mtailTok1) { token = mtailTok1[char] goto out } if char >= mtailPrivate { if char < mtailPrivate+len(mtailTok2) { token = mtailTok2[char-mtailPrivate] goto out } } for i := 0; i < len(mtailTok3); i += 2 { token = mtailTok3[i+0] if token == char { token = mtailTok3[i+1] goto out } } out: if token == 0 { token = mtailTok2[1] /* unknown char */ } if mtailDebug >= 3 { __yyfmt__.Printf("lex %s(%d)\n", mtailTokname(token), uint(char)) } return char, token } func mtailParse(mtaillex mtailLexer) int { return mtailNewParser().Parse(mtaillex) } func (mtailrcvr *mtailParserImpl) Parse(mtaillex mtailLexer) int { var mtailn int var mtailVAL mtailSymType var mtailDollar []mtailSymType _ = mtailDollar // silence set and not used mtailS := mtailrcvr.stack[:] Nerrs := 0 /* number of errors */ Errflag := 0 /* error recovery flag */ mtailstate := 0 mtailrcvr.char = -1 mtailtoken := -1 // mtailrcvr.char translated into internal numbering defer func() { // Make sure we report no lookahead when not parsing. 
mtailstate = -1 mtailrcvr.char = -1 mtailtoken = -1 }() mtailp := -1 goto mtailstack ret0: return 0 ret1: return 1 mtailstack: /* put a state and value onto the stack */ if mtailDebug >= 4 { __yyfmt__.Printf("char %v in %v\n", mtailTokname(mtailtoken), mtailStatname(mtailstate)) } mtailp++ if mtailp >= len(mtailS) { nyys := make([]mtailSymType, len(mtailS)*2) copy(nyys, mtailS) mtailS = nyys } mtailS[mtailp] = mtailVAL mtailS[mtailp].yys = mtailstate mtailnewstate: mtailn = mtailPact[mtailstate] if mtailn <= mtailFlag { goto mtaildefault /* simple state */ } if mtailrcvr.char < 0 { mtailrcvr.char, mtailtoken = mtaillex1(mtaillex, &mtailrcvr.lval) } mtailn += mtailtoken if mtailn < 0 || mtailn >= mtailLast { goto mtaildefault } mtailn = mtailAct[mtailn] if mtailChk[mtailn] == mtailtoken { /* valid shift */ mtailrcvr.char = -1 mtailtoken = -1 mtailVAL = mtailrcvr.lval mtailstate = mtailn if Errflag > 0 { Errflag-- } goto mtailstack } mtaildefault: /* default state action */ mtailn = mtailDef[mtailstate] if mtailn == -2 { if mtailrcvr.char < 0 { mtailrcvr.char, mtailtoken = mtaillex1(mtaillex, &mtailrcvr.lval) } /* look through exception table */ xi := 0 for { if mtailExca[xi+0] == -1 && mtailExca[xi+1] == mtailstate { break } xi += 2 } for xi += 2; ; xi += 2 { mtailn = mtailExca[xi+0] if mtailn < 0 || mtailn == mtailtoken { break } } mtailn = mtailExca[xi+1] if mtailn < 0 { goto ret0 } } if mtailn == 0 { /* error ... attempt to resume parsing */ switch Errflag { case 0: /* brand new error */ mtaillex.Error(mtailErrorMessage(mtailstate, mtailtoken)) Nerrs++ if mtailDebug >= 1 { __yyfmt__.Printf("%s", mtailStatname(mtailstate)) __yyfmt__.Printf(" saw %s\n", mtailTokname(mtailtoken)) } fallthrough case 1, 2: /* incompletely recovered error ... 
try again */ Errflag = 3 /* find a state where "error" is a legal shift action */ for mtailp >= 0 { mtailn = mtailPact[mtailS[mtailp].yys] + mtailErrCode if mtailn >= 0 && mtailn < mtailLast { mtailstate = mtailAct[mtailn] /* simulate a shift of "error" */ if mtailChk[mtailstate] == mtailErrCode { goto mtailstack } } /* the current p has no shift on "error", pop stack */ if mtailDebug >= 2 { __yyfmt__.Printf("error recovery pops state %d\n", mtailS[mtailp].yys) } mtailp-- } /* there is no state on the stack with an error shift ... abort */ goto ret1 case 3: /* no shift yet; clobber input char */ if mtailDebug >= 2 { __yyfmt__.Printf("error recovery discards %s\n", mtailTokname(mtailtoken)) } if mtailtoken == mtailEofCode { goto ret1 } mtailrcvr.char = -1 mtailtoken = -1 goto mtailnewstate /* try again in the same state */ } } /* reduction by production mtailn */ if mtailDebug >= 2 { __yyfmt__.Printf("reduce %v in:\n\t%v\n", mtailn, mtailStatname(mtailstate)) } mtailnt := mtailn mtailpt := mtailp _ = mtailpt // guard against "declared and not used" mtailp -= mtailR2[mtailn] // mtailp is now the index of $0. Perform the default action. Iff the // reduced production is ε, $1 is possibly out of range. 
if mtailp+1 >= len(mtailS) { nyys := make([]mtailSymType, len(mtailS)*2) copy(nyys, mtailS) mtailS = nyys } mtailVAL = mtailS[mtailp+1] /* consult goto table to find next state */ mtailn = mtailR1[mtailn] mtailg := mtailPgo[mtailn] mtailj := mtailg + mtailS[mtailp].yys + 1 if mtailj >= mtailLast { mtailstate = mtailAct[mtailg] } else { mtailstate = mtailAct[mtailj] if mtailChk[mtailstate] != -mtailn { mtailstate = mtailAct[mtailg] } } // dummy call; replaced with literal code switch mtailnt { case 1: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:93 { mtaillex.(*parser).root = mtailDollar[1].n } case 2: mtailDollar = mtailS[mtailpt-0 : mtailpt+1] //line parser.y:101 { mtailVAL.n = &ast.StmtList{} } case 3: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:105 { mtailVAL.n = mtailDollar[1].n if mtailDollar[2].n != nil { mtailVAL.n.(*ast.StmtList).Children = append(mtailVAL.n.(*ast.StmtList).Children, mtailDollar[2].n) } } case 4: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:116 { mtailVAL.n = mtailDollar[1].n } case 5: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:118 { mtailVAL.n = mtailDollar[1].n } case 6: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:120 { mtailVAL.n = mtailDollar[1].n } case 7: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:122 { mtailVAL.n = mtailDollar[1].n } case 8: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:124 { mtailVAL.n = mtailDollar[1].n } case 9: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:126 { mtailVAL.n = mtailDollar[1].n } case 10: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:128 { mtailVAL.n = &ast.NextStmt{tokenpos(mtaillex)} } case 11: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:132 { mtailVAL.n = &ast.PatternFragment{ID: mtailDollar[2].n, Expr: mtailDollar[4].n} } case 12: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:136 { mtailVAL.n = &ast.StopStmt{tokenpos(mtaillex)} } 
case 13: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:140 { mtailVAL.n = &ast.Error{tokenpos(mtaillex), mtailDollar[1].text} } case 14: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:148 { mtailVAL.n = &ast.CondStmt{mtailDollar[1].n, mtailDollar[2].n, mtailDollar[4].n, nil} } case 15: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:152 { if mtailDollar[1].n != nil { mtailVAL.n = &ast.CondStmt{mtailDollar[1].n, mtailDollar[2].n, nil, nil} } else { mtailVAL.n = mtailDollar[2].n } } case 16: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:160 { o := &ast.OtherwiseStmt{positionFromMark(mtaillex)} mtailVAL.n = &ast.CondStmt{o, mtailDollar[3].n, nil, nil} } case 17: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:168 { mtailVAL.n = &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: mtailDollar[1].n, Op: MATCH} } case 18: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:172 { mtailVAL.n = &ast.BinaryExpr{ LHS: &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: mtailDollar[1].n, Op: MATCH}, RHS: mtailDollar[4].n, Op: mtailDollar[2].op, } } case 19: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:180 { mtailVAL.n = mtailDollar[1].n } case 20: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:186 { mtailVAL.n = nil } case 21: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:188 { mtailVAL.n = mtailDollar[1].n } case 22: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:194 { mtailVAL.n = mtailDollar[2].n } case 23: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:202 { mtailVAL.n = mtailDollar[1].n } case 24: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:204 { mtailVAL.n = mtailDollar[1].n } case 25: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:210 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 26: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:214 { mtailVAL.n 
= &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 27: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:222 { mtailVAL.n = mtailDollar[1].n } case 28: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:224 { mtailVAL.n = mtailDollar[1].n } case 29: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:226 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 30: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:230 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 31: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:237 { mtailVAL.op = mtailDollar[1].op } case 32: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:239 { mtailVAL.op = mtailDollar[1].op } case 33: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:245 { mtailVAL.n = mtailDollar[1].n } case 34: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:247 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 35: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:254 { mtailVAL.op = mtailDollar[1].op } case 36: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:256 { mtailVAL.op = mtailDollar[1].op } case 37: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:258 { mtailVAL.op = mtailDollar[1].op } case 38: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:264 { mtailVAL.n = mtailDollar[1].n } case 39: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:266 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 40: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:273 { mtailVAL.op = mtailDollar[1].op } case 41: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:275 { mtailVAL.op = mtailDollar[1].op } case 42: 
mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:277 { mtailVAL.op = mtailDollar[1].op } case 43: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:279 { mtailVAL.op = mtailDollar[1].op } case 44: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:281 { mtailVAL.op = mtailDollar[1].op } case 45: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:283 { mtailVAL.op = mtailDollar[1].op } case 46: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:289 { mtailVAL.n = mtailDollar[1].n } case 47: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:291 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 48: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:298 { mtailVAL.op = mtailDollar[1].op } case 49: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:300 { mtailVAL.op = mtailDollar[1].op } case 50: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:306 { mtailVAL.n = mtailDollar[1].n } case 51: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:308 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 52: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:315 { mtailVAL.op = mtailDollar[1].op } case 53: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:317 { mtailVAL.op = mtailDollar[1].op } case 54: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:323 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 55: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:327 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 56: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:334 { mtailVAL.op = mtailDollar[1].op } case 57: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:336 { mtailVAL.op = mtailDollar[1].op } case 
58: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:343 { mtailVAL.n = &ast.PatternExpr{Expr: mtailDollar[1].n} } case 59: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:351 { mtailVAL.n = mtailDollar[1].n } case 60: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:353 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: PLUS} } case 61: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:357 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: PLUS} } case 62: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:365 { mtailVAL.n = mtailDollar[1].n } case 63: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:367 { mtailVAL.n = &ast.BinaryExpr{LHS: mtailDollar[1].n, RHS: mtailDollar[4].n, Op: mtailDollar[2].op} } case 64: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:374 { mtailVAL.op = mtailDollar[1].op } case 65: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:376 { mtailVAL.op = mtailDollar[1].op } case 66: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:378 { mtailVAL.op = mtailDollar[1].op } case 67: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:380 { mtailVAL.op = mtailDollar[1].op } case 68: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:386 { mtailVAL.n = mtailDollar[1].n } case 69: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:388 { mtailVAL.n = &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: mtailDollar[2].n, Op: mtailDollar[1].op} } case 70: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:396 { mtailVAL.n = mtailDollar[1].n } case 71: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:398 { mtailVAL.n = &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: mtailDollar[1].n, Op: mtailDollar[2].op} } case 72: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:405 { mtailVAL.op = mtailDollar[1].op } case 73: mtailDollar = mtailS[mtailpt-1 : 
mtailpt+1] //line parser.y:407 { mtailVAL.op = mtailDollar[1].op } case 74: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:413 { mtailVAL.n = mtailDollar[1].n } case 75: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:415 { mtailVAL.n = mtailDollar[1].n } case 76: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:417 { mtailVAL.n = &ast.CaprefTerm{tokenpos(mtaillex), mtailDollar[1].text, false, nil} } case 77: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:421 { mtailVAL.n = &ast.CaprefTerm{tokenpos(mtaillex), mtailDollar[1].text, true, nil} } case 78: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:425 { mtailVAL.n = &ast.StringLit{tokenpos(mtaillex), mtailDollar[1].text} } case 79: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:429 { mtailVAL.n = mtailDollar[2].n } case 80: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:433 { mtailVAL.n = &ast.IntLit{tokenpos(mtaillex), mtailDollar[1].intVal} } case 81: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:437 { mtailVAL.n = &ast.FloatLit{tokenpos(mtaillex), mtailDollar[1].floatVal} } case 82: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:445 { // Build an empty IndexedExpr so that the recursive rule below doesn't need to handle the alternative. mtailVAL.n = &ast.IndexedExpr{LHS: mtailDollar[1].n, Index: &ast.ExprList{}} } case 83: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:450 { mtailVAL.n = mtailDollar[1].n mtailVAL.n.(*ast.IndexedExpr).Index.(*ast.ExprList).Children = append( mtailVAL.n.(*ast.IndexedExpr).Index.(*ast.ExprList).Children, mtailDollar[3].n.(*ast.ExprList).Children...) 
} case 84: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:461 { mtailVAL.n = &ast.IDTerm{tokenpos(mtaillex), mtailDollar[1].text, nil, false} } case 85: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:469 { mtailVAL.n = &ast.BuiltinExpr{P: positionFromMark(mtaillex), Name: mtailDollar[2].text, Args: nil} } case 86: mtailDollar = mtailS[mtailpt-5 : mtailpt+1] //line parser.y:473 { mtailVAL.n = &ast.BuiltinExpr{P: positionFromMark(mtaillex), Name: mtailDollar[2].text, Args: mtailDollar[4].n} } case 87: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:482 { mtailVAL.n = &ast.ExprList{} mtailVAL.n.(*ast.ExprList).Children = append(mtailVAL.n.(*ast.ExprList).Children, mtailDollar[1].n) } case 88: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:487 { mtailVAL.n = mtailDollar[1].n mtailVAL.n.(*ast.ExprList).Children = append(mtailVAL.n.(*ast.ExprList).Children, mtailDollar[3].n) } case 89: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:495 { mtailVAL.n = mtailDollar[1].n } case 90: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:497 { mtailVAL.n = mtailDollar[1].n } case 91: mtailDollar = mtailS[mtailpt-5 : mtailpt+1] //line parser.y:503 { mtailVAL.n = &ast.PatternLit{P: positionFromMark(mtaillex), Pattern: mtailDollar[4].text} } case 92: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:511 { mtailVAL.n = mtailDollar[3].n d := mtailVAL.n.(*ast.VarDecl) d.Kind = mtailDollar[2].kind d.Hidden = mtailDollar[1].flag } case 93: mtailDollar = mtailS[mtailpt-0 : mtailpt+1] //line parser.y:522 { mtailVAL.flag = false } case 94: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:526 { mtailVAL.flag = true } case 95: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:534 { mtailVAL.n = mtailDollar[1].n mtailVAL.n.(*ast.VarDecl).Keys = mtailDollar[2].texts } case 96: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:539 { mtailVAL.n = mtailDollar[1].n 
mtailVAL.n.(*ast.VarDecl).ExportedName = mtailDollar[2].text } case 97: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:544 { mtailVAL.n = mtailDollar[1].n mtailVAL.n.(*ast.VarDecl).Buckets = mtailDollar[2].floats } case 98: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:549 { mtailVAL.n = mtailDollar[1].n mtailVAL.n.(*ast.VarDecl).Limit = mtailDollar[2].intVal } case 99: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:554 { mtailVAL.n = mtailDollar[1].n } case 100: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:562 { mtailVAL.n = &ast.VarDecl{P: tokenpos(mtaillex), Name: mtailDollar[1].text} } case 101: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:566 { mtailVAL.n = &ast.VarDecl{P: tokenpos(mtaillex), Name: mtailDollar[1].text} } case 102: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:574 { mtailVAL.kind = metrics.Counter } case 103: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:578 { mtailVAL.kind = metrics.Gauge } case 104: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:582 { mtailVAL.kind = metrics.Timer } case 105: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:586 { mtailVAL.kind = metrics.Text } case 106: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:590 { mtailVAL.kind = metrics.Histogram } case 107: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:598 { mtailVAL.texts = mtailDollar[2].texts } case 108: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:605 { mtailVAL.texts = make([]string, 0) mtailVAL.texts = append(mtailVAL.texts, mtailDollar[1].text) } case 109: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:610 { mtailVAL.texts = mtailDollar[1].texts mtailVAL.texts = append(mtailVAL.texts, mtailDollar[3].text) } case 110: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:618 { mtailVAL.text = mtailDollar[1].text } case 111: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line 
parser.y:624 { mtailVAL.text = mtailDollar[2].text } case 112: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:631 { mtailVAL.intVal = mtailDollar[2].intVal } case 113: mtailDollar = mtailS[mtailpt-2 : mtailpt+1] //line parser.y:639 { mtailVAL.floats = mtailDollar[2].floats } case 114: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:645 { mtailVAL.floats = make([]float64, 0) mtailVAL.floats = append(mtailVAL.floats, mtailDollar[1].floatVal) } case 115: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:650 { mtailVAL.floats = make([]float64, 0) mtailVAL.floats = append(mtailVAL.floats, float64(mtailDollar[1].intVal)) } case 116: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:655 { mtailVAL.floats = mtailDollar[1].floats mtailVAL.floats = append(mtailVAL.floats, mtailDollar[3].floatVal) } case 117: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:660 { mtailVAL.floats = mtailDollar[1].floats mtailVAL.floats = append(mtailVAL.floats, float64(mtailDollar[3].intVal)) } case 118: mtailDollar = mtailS[mtailpt-4 : mtailpt+1] //line parser.y:668 { mtailVAL.n = &ast.DecoDecl{P: markedpos(mtaillex), Name: mtailDollar[3].text, Block: mtailDollar[4].n} } case 119: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:676 { mtailVAL.n = &ast.DecoStmt{markedpos(mtaillex), mtailDollar[2].text, mtailDollar[3].n, nil, nil} } case 120: mtailDollar = mtailS[mtailpt-5 : mtailpt+1] //line parser.y:684 { mtailVAL.n = &ast.DelStmt{P: positionFromMark(mtaillex), N: mtailDollar[3].n, Expiry: mtailDollar[5].duration} } case 121: mtailDollar = mtailS[mtailpt-3 : mtailpt+1] //line parser.y:688 { mtailVAL.n = &ast.DelStmt{P: positionFromMark(mtaillex), N: mtailDollar[3].n} } case 122: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:695 { mtailVAL.text = mtailDollar[1].text } case 123: mtailDollar = mtailS[mtailpt-1 : mtailpt+1] //line parser.y:699 { mtailVAL.text = mtailDollar[1].text } case 124: mtailDollar = 
mtailS[mtailpt-0 : mtailpt+1] //line parser.y:709 { glog.V(2).Infof("position marked at %v", tokenpos(mtaillex)) mtaillex.(*parser).pos = tokenpos(mtaillex) } case 125: mtailDollar = mtailS[mtailpt-0 : mtailpt+1] //line parser.y:719 { mtaillex.(*parser).inRegex() } } goto mtailstack /* stack new state and value */ } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/parser.y000066400000000000000000000376271460063571700250770ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. %{ /* #nosec G104 generated code, errors reported do not make sense */ package parser import ( "time" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/golang/glog" ) %} %union { intVal int64 floatVal float64 floats []float64 op int text string texts []string flag bool n ast.Node kind metrics.Kind duration time.Duration } %type stmt_list stmt arg_expr_list compound_stmt conditional_stmt conditional_expr expr_stmt %type expr primary_expr multiplicative_expr additive_expr postfix_expr unary_expr assign_expr %type rel_expr shift_expr bitwise_expr logical_expr indexed_expr id_expr concat_expr pattern_expr %type metric_declaration metric_decl_attr_spec decorator_declaration decoration_stmt regex_pattern match_expr %type delete_stmt metric_name_spec builtin_expr arg_expr %type metric_type_spec %type metric_limit_spec %type metric_as_spec id_or_string metric_by_expr %type metric_by_spec metric_by_expr_list %type metric_hide_spec %type rel_op shift_op bitwise_op logical_op add_op mul_op match_op postfix_op %type metric_buckets_spec metric_buckets_list // Tokens and types are defined here. 
// Invalid input %token INVALID // Types %token COUNTER GAUGE TIMER TEXT HISTOGRAM // Reserved words %token AFTER AS BY CONST HIDDEN DEF DEL NEXT OTHERWISE ELSE STOP BUCKETS LIMIT // Builtins %token BUILTIN // Literals: re2 syntax regular expression, quoted strings, regex capture group // references, identifiers, decorators, and numerical constants. %token REGEX %token STRING %token CAPREF CAPREF_NAMED %token ID %token DECO %token INTLITERAL %token FLOATLITERAL %token DURATIONLITERAL // Operators, in order of precedence %token INC DEC %token DIV MOD MUL MINUS PLUS POW %token SHL SHR %token LT GT LE GE EQ NE %token BITAND XOR BITOR NOT AND OR %token ADD_ASSIGN ASSIGN %token MATCH NOT_MATCH // Punctuation %token LCURLY RCURLY LPAREN RPAREN LSQUARE RSQUARE %token COMMA %token NL %start start // The %error directive takes a list of tokens describing a parser state in error, and an error message. // See "Generating LR syntax error messages from examples", Jeffery, ACM TOPLAS Volume 24 Issue 5 Sep 2003. %error stmt_list stmt expr_stmt mark_pos DIV in_regex INVALID : "unexpected end of file, expecting '/' to end regex" %error stmt_list stmt conditional_stmt conditional_expr LCURLY stmt_list $end : "unexpected end of file, expecting '}' to end block" %error stmt_list stmt conditional_stmt conditional_expr compound_stmt ELSE LCURLY stmt_list $end : "unexpected end of file, expecting '}' to end block" %error stmt_list stmt conditional_stmt OTHERWISE LCURLY stmt_list $end : "unexpected end of file, expecting '}' to end block" %error stmt_list stmt conditional_stmt conditional_expr compound_stmt conditional_stmt conditional_expr LSQUARE : "unexpected indexing of an expression" %error stmt_list stmt conditional_stmt pattern_expr NL : "statement with no effect, missing an assignment, `+' concatenation, or `{}' block?" %% /* An `mtail` program is a list of statements. 
*/ start : stmt_list { mtaillex.(*parser).root = $1 } ; /* A statement list is either empty, or recurses another statement list and a statement. */ stmt_list : /* empty */ { $$ = &ast.StmtList{} } | stmt_list stmt { $$ = $1 if ($2 != nil) { $$.(*ast.StmtList).Children = append($$.(*ast.StmtList).Children, $2) } } ; /* Types of statements. */ stmt : conditional_stmt { $$ = $1 } | expr_stmt { $$ = $1 } | metric_declaration { $$ = $1 } | decorator_declaration { $$ = $1 } | decoration_stmt { $$ = $1 } | delete_stmt { $$ = $1 } | NEXT { $$ = &ast.NextStmt{tokenpos(mtaillex)} } | CONST id_expr opt_nl concat_expr { $$ = &ast.PatternFragment{ID: $2, Expr: $4} } | STOP { $$ = &ast.StopStmt{tokenpos(mtaillex)} } | INVALID { $$ = &ast.Error{tokenpos(mtaillex), $1} } ; /* Conditional statement is a test condition, and then actions executed depending on the result of the test. */ conditional_stmt : conditional_expr compound_stmt ELSE compound_stmt { $$ = &ast.CondStmt{$1, $2, $4, nil} } | conditional_expr compound_stmt { if $1 != nil { $$ = &ast.CondStmt{$1, $2, nil, nil} } else { $$ = $2 } } | mark_pos OTHERWISE compound_stmt { o := &ast.OtherwiseStmt{positionFromMark(mtaillex)} $$ = &ast.CondStmt{o, $3, nil, nil} } ; conditional_expr : pattern_expr { $$ = &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: $1, Op: MATCH} } | pattern_expr logical_op opt_nl logical_expr { $$ = &ast.BinaryExpr{ LHS: &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: $1, Op: MATCH}, RHS: $4, Op: $2, } } | logical_expr { $$ = $1 } ; /* Expression statement is a statement that is also an expression. */ expr_stmt : NL { $$ = nil } | expr NL { $$ = $1 } ; /* Compound statement is a nested statement list with its own scope. */ compound_stmt : LCURLY stmt_list RCURLY { $$ = $2 } ; /* Expressions perform a computation and return a result. The expression tree is ordered in ascending associativity of the operator types. 
*/ expr : assign_expr { $$ = $1 } | postfix_expr { $$ = $1 } ; /* Assignment expressions assign a value to the left hand side from the results of the right hand side. */ assign_expr : unary_expr ASSIGN opt_nl logical_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } | unary_expr ADD_ASSIGN opt_nl logical_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; /* Logical expressions perform comparisons with logical operators. */ logical_expr : bitwise_expr { $$ = $1 } | match_expr { $$ = $1 } | logical_expr logical_op opt_nl bitwise_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } | logical_expr logical_op opt_nl match_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; logical_op : AND { $$ = $1 } | OR { $$ = $1 } ; /* Bitwise expression performs bitwise comparisons of the left and right hand sides. */ bitwise_expr : rel_expr { $$ = $1 } | bitwise_expr bitwise_op opt_nl rel_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; bitwise_op : BITAND { $$ = $1 } | BITOR { $$ = $1 } | XOR { $$ = $1 } ; /* Relational expressions perform relational comparisons, e.g. less than */ rel_expr : shift_expr { $$ = $1 } | rel_expr rel_op opt_nl shift_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; rel_op : LT { $$ = $1 } | GT { $$ = $1 } | LE { $$ = $1 } | GE { $$ = $1 } | EQ { $$ = $1 } | NE { $$ = $1 } ; /* Shift expressions perform bitshift operations on the left hand side. */ shift_expr : additive_expr { $$ = $1 } | shift_expr shift_op opt_nl additive_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; shift_op : SHL { $$ = $1 } | SHR { $$ = $1 } ; /* Additive expressions perform addition and subtraction */ additive_expr : multiplicative_expr { $$ = $1 } | additive_expr add_op opt_nl multiplicative_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; add_op : PLUS { $$ = $1 } | MINUS { $$ = $1 } ; /* Match expressions perform pattern matching against the left hand side. 
*/ match_expr : primary_expr match_op opt_nl pattern_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } | primary_expr match_op opt_nl primary_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; match_op : MATCH { $$ = $1 } | NOT_MATCH { $$ = $1 } ; /* Pattern expression constructs a regular expression. */ pattern_expr : concat_expr { $$ = &ast.PatternExpr{Expr: $1} } ; /* Concatenation expression forms a regular expression pattern from fragments. */ concat_expr : regex_pattern { $$ = $1 } | concat_expr PLUS opt_nl regex_pattern { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: PLUS} } | concat_expr PLUS opt_nl id_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: PLUS} } ; /* Multiplicative expression performs multiply and division operations. */ multiplicative_expr : unary_expr { $$ = $1 } | multiplicative_expr mul_op opt_nl unary_expr { $$ = &ast.BinaryExpr{LHS: $1, RHS: $4, Op: $2} } ; mul_op : MUL { $$ = $1 } | DIV { $$ = $1 } | MOD { $$ = $1 } | POW { $$ = $1 } ; /* Unary expressions perform negation */ unary_expr : postfix_expr { $$ = $1 } | NOT unary_expr { $$ = &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: $2, Op: $1} } ; /* Postfix expressions perform increment and decrement. */ postfix_expr : primary_expr { $$ = $1 } | postfix_expr postfix_op { $$ = &ast.UnaryExpr{P: tokenpos(mtaillex), Expr: $1, Op: $2} } ; postfix_op : INC { $$ = $1 } | DEC { $$ = $1 } ; /* Primary expression contains indexing, builtin calls, and terminal symbols. */ primary_expr : indexed_expr { $$ = $1 } | builtin_expr { $$ = $1 } | CAPREF { $$ = &ast.CaprefTerm{tokenpos(mtaillex), $1, false, nil} } | CAPREF_NAMED { $$ = &ast.CaprefTerm{tokenpos(mtaillex), $1, true, nil} } | STRING { $$ = &ast.StringLit{tokenpos(mtaillex), $1} } | LPAREN logical_expr RPAREN { $$ = $2 } | INTLITERAL { $$ = &ast.IntLit{tokenpos(mtaillex), $1} } | FLOATLITERAL { $$ = &ast.FloatLit{tokenpos(mtaillex), $1} } ; /* Indexed expression performs index lookup. 
*/ indexed_expr : id_expr { // Build an empty IndexedExpr so that the recursive rule below doesn't need to handle the alternative. $$ = &ast.IndexedExpr{LHS: $1, Index: &ast.ExprList{}} } | indexed_expr LSQUARE arg_expr_list RSQUARE { $$ = $1 $$.(*ast.IndexedExpr).Index.(*ast.ExprList).Children = append( $$.(*ast.IndexedExpr).Index.(*ast.ExprList).Children, $3.(*ast.ExprList).Children...) } ; /* Identifier expression names a variable. */ id_expr : ID { $$ = &ast.IDTerm{tokenpos(mtaillex), $1, nil, false} } ; /* Builtin expression describes the builtin function calls. */ builtin_expr : mark_pos BUILTIN LPAREN RPAREN { $$ = &ast.BuiltinExpr{P: positionFromMark(mtaillex), Name: $2, Args: nil} } | mark_pos BUILTIN LPAREN arg_expr_list RPAREN { $$ = &ast.BuiltinExpr{P: positionFromMark(mtaillex), Name: $2, Args: $4} } ; /* Argument expression list describes the part of a builtin call inside the parentheses. */ arg_expr_list : arg_expr { $$ = &ast.ExprList{} $$.(*ast.ExprList).Children = append($$.(*ast.ExprList).Children, $1) } | arg_expr_list COMMA arg_expr { $$ = $1 $$.(*ast.ExprList).Children = append($$.(*ast.ExprList).Children, $3) } ; arg_expr : logical_expr { $$ = $1 } | pattern_expr { $$ = $1 } ; /* Regular expression pattern describes a regular expression literal. */ regex_pattern : mark_pos DIV in_regex REGEX DIV { $$ = &ast.PatternLit{P: positionFromMark(mtaillex), Pattern: $4} } ; /* Declaration creates a new metric. */ metric_declaration : metric_hide_spec metric_type_spec metric_decl_attr_spec { $$ = $3 d := $$.(*ast.VarDecl) d.Kind = $2 d.Hidden = $1 } ; /* A hide specification can mark a metric as hidden from export. */ metric_hide_spec : /* empty */ { $$ = false } | HIDDEN { $$ = true } ; /* A declaration attribute specification adds attributes to the metric declaration such as index keys, exported name, or bucket enumerations. 
*/ metric_decl_attr_spec : metric_decl_attr_spec metric_by_spec { $$ = $1 $$.(*ast.VarDecl).Keys = $2 } | metric_decl_attr_spec metric_as_spec { $$ = $1 $$.(*ast.VarDecl).ExportedName = $2 } | metric_decl_attr_spec metric_buckets_spec { $$ = $1 $$.(*ast.VarDecl).Buckets = $2 } | metric_decl_attr_spec metric_limit_spec { $$ = $1 $$.(*ast.VarDecl).Limit = $2 } | metric_name_spec { $$ = $1 } ; /* Variable name spec names a metric in a declaration. */ metric_name_spec : ID { $$ = &ast.VarDecl{P: tokenpos(mtaillex), Name: $1} } | STRING { $$ = &ast.VarDecl{P: tokenpos(mtaillex), Name: $1} } ; /* Type specfication enumerates the type classification of a variable. */ metric_type_spec : COUNTER { $$ = metrics.Counter } | GAUGE { $$ = metrics.Gauge } | TIMER { $$ = metrics.Timer } | TEXT { $$ = metrics.Text } | HISTOGRAM { $$ = metrics.Histogram } ; /* By specification describes index keys for a multidimensional variable. */ metric_by_spec : BY metric_by_expr_list { $$ = $2 } ; metric_by_expr_list : metric_by_expr { $$ = make([]string, 0) $$ = append($$, $1) } | metric_by_expr_list COMMA metric_by_expr { $$ = $1 $$ = append($$, $3) } ; metric_by_expr : id_or_string { $$ = $1 } ; /* As specification describes how to rename a variable for export. */ metric_as_spec : AS STRING { $$ = $2 } ; metric_limit_spec : LIMIT INTLITERAL { $$ = $2 } ; /* Bucket specification describes the bucketing arrangement in a histogram type. */ metric_buckets_spec : BUCKETS metric_buckets_list { $$ = $2 } metric_buckets_list : FLOATLITERAL { $$ = make([]float64, 0) $$ = append($$, $1) } | INTLITERAL { $$ = make([]float64, 0) $$ = append($$, float64($1)) } | metric_buckets_list COMMA FLOATLITERAL { $$ = $1 $$ = append($$, $3) } | metric_buckets_list COMMA INTLITERAL { $$ = $1 $$ = append($$, float64($3)) } /* Decorator declaration parses the declaration and definition of a match decorator. 
*/ decorator_declaration : mark_pos DEF ID compound_stmt { $$ = &ast.DecoDecl{P: markedpos(mtaillex), Name: $3, Block: $4} } ; /* Decoration statement parses an instantiation of a decorator. */ decoration_stmt : mark_pos DECO compound_stmt { $$ = &ast.DecoStmt{markedpos(mtaillex), $2, $3, nil, nil} } ; /* Delete statement parses a delete command. */ delete_stmt : mark_pos DEL postfix_expr AFTER DURATIONLITERAL { $$ = &ast.DelStmt{P: positionFromMark(mtaillex), N: $3, Expiry: $5} } | mark_pos DEL postfix_expr { $$ = &ast.DelStmt{P: positionFromMark(mtaillex), N: $3} } /* Identifier or String parses where an ID or a string can be expected. */ id_or_string : ID { $$ = $1 } | STRING { $$ = $1 } ; // mark_pos is an epsilon (marker nonterminal) that records the current token // position as the parser position. Use markedpos() to fetch the position and // merge with tokenpos for exotic productions. mark_pos : /* empty */ { glog.V(2).Infof("position marked at %v", tokenpos(mtaillex)) mtaillex.(*parser).pos = tokenpos(mtaillex) } ; // in_regex is a marker nonterminal that tells the parser and lexer it is now // in a regular expression in_regex : /* empty */ { mtaillex.(*parser).inRegex() } ; // opt_nl optionally accepts a newline when a line break could occur inside an // expression for formatting. Newlines terminate expressions so must be // handled explicitly. opt_nl : /* empty */ | NL ; %% // tokenpos returns the position of the current token. func tokenpos(mtaillex mtailLexer) position.Position { return mtaillex.(*parser).t.Pos } // markedpos returns the position recorded from the most recent mark_pos // production. func markedpos(mtaillex mtailLexer) position.Position { return mtaillex.(*parser).pos } // positionFromMark returns a position spanning from the last mark to the current position. 
func positionFromMark(mtaillex mtailLexer) position.Position { tp := tokenpos(mtaillex) mp := markedpos(mtaillex) return *position.Merge(&mp, &tp) } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/parser_test.go000066400000000000000000000265551460063571700262710ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package parser import ( "flag" "strings" "testing" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/google/mtail/internal/testutil" ) var parserTestDebug = flag.Bool("parser_test_debug", false, "Turn on parser debug output if set.") var parserTests = []struct { name string program string }{ { "empty", "", }, { "newline", "\n", }, { "declare counter", "counter lines_total\n", }, { "declare counter string name", "counter lines_total as \"line-count\"\n", }, { "declare dimensioned counter", "counter foo by bar\n", }, { "declare dimensioned metric with limit", "counter foo by a, b limit 100", }, { "declare multi-dimensioned counter", "counter foo by bar, baz, quux\n", }, { "declare hidden counter", "hidden counter foo\n", }, { "declare gauge", "gauge foo\n", }, { "declare timer", "timer foo\n", }, { "declare text", "text stringy\n", }, { "declare histogram", "histogram foo buckets 0, 1, 2\n", }, { "declare histogram float", "histogram foo buckets 0, 0.01, 0.1, 1, 10\n", }, { "declare histogram by ", "histogram foo by code buckets 0, 1, 2\n", }, { "declare histogram reversed syntax ", "histogram foo buckets 0, 1, 2 by code\n", }, { "simple pattern action", "/foo/ {}\n", }, { "increment counter", "counter lines_total\n" + "/foo/ {\n" + " lines_total++\n" + "}\n", }, { "decrement counter", `counter i /foo/ { i-- } `, }, { "regex match includes escaped slashes", "counter foo\n" + "/foo\\// { foo++\n}\n", }, { "numeric capture group reference", "/(foo)/ {\n" + " $1++\n" + "}\n", }, { "strptime and 
capref", "/(.*)/ {\n" + "strptime($1, \"2006-01-02T15:04:05Z07:00\")\n" + " }\n", }, { "named capture group reference", "/(?P[[:digit:]-\\/ ])/ {\n" + " strptime($date, \"%Y/%m/%d %H:%M:%S\")\n" + "}\n", }, { "nested match conditions", "counter foo\n" + "counter bar\n" + "/match(\\d+)/ {\n" + " foo += $1\n" + " /^bleh (\\S+)/ {\n" + " bar++\n" + " $1++\n" + " }\n" + "}\n", }, { "nested scope", "counter foo\n" + "/fo(o)/ {\n" + " $1++\n" + " /bar(xxx)/ {\n" + " $1 += $1\n" + " foo = $1\n" + " }\n" + "}\n", }, { "comment then code", "# %d [%p]\n" + "/^(?P\\d+\\/\\d+\\/\\d+ \\d+:\\d+:\\d+) \\[(?P\\d+)\\] / {\n" + " strptime($1, \"2006/01/02 15:04:05\")\n" + "}\n", }, { "assignment", "counter variable\n" + "/(?P.*)/ {\n" + "variable = $foo\n" + "}\n", }, { "increment operator", "counter var\n" + "/foo/ {\n" + " var++\n" + "}\n", }, { "incby operator", "counter var\n" + "/foo/ {\n var += 2\n}\n", }, { "additive", "counter time_total\n" + "/(?P.*)/ {\n" + " time_total = timestamp() - time_total\n" + "}\n", }, { "multiplicative", "counter a\n" + "counter b\n" + " /foo/ {\n a = a * b\n" + " a = a ** b\n" + "}\n", }, { "additive and mem storage", "counter time_total\n" + "counter variable by foo\n" + "/(?P.*)/ {\n" + " time_total += timestamp() - variable[$foo]\n" + "}\n", }, { "conditional expressions", "counter foo\n" + "/(?P.*)/ {\n" + " $foo > 0 {\n" + " foo += $foo\n" + " }\n" + " $foo >= 0 {\n" + " foo += $foo\n" + " }\n" + " $foo < 0 {\n" + " foo += $foo\n" + " }\n" + " $foo <= 0 {\n" + " foo += $foo\n" + " }\n" + " $foo == 0 {\n" + " foo += $foo\n" + " }\n" + " $foo != 0 {\n" + " foo += $foo\n" + " }\n" + "}\n", }, { "decorator definition and invocation", "def foo { next\n }\n" + "@foo { }\n", }, { "const regex", "const X /foo/\n" + "/foo / + X + / bar/ {\n" + "}\n", }, { "multiline regex starting with newline", "const FOO\n" + "/some regex here/ +\n" + "/plus some other things/", }, { "multiline regex", "/foo / +\n" + "/barrr/ {\n" + "}\n", }, { "len", "/(?Pfoo)/ 
{\n" + "len($foo) > 0 {\n" + "}\n" + "}\n", }, { "def and next", "def foobar {/(?P.*)/ {" + " next" + "}" + "}", }, { "const", `const IP /\d+(\.\d+){3}/`, }, { "bitwise", `gauge a /foo(\d)/ { a = $1 & 7 a = $1 | 8 a = $1 << 4 a = $1 >> 20 a = $1 ^ 15 a = ~ 1 }`, }, { "logical", `0 || 1 && 0 { } `, }, { "floats", `gauge foo /foo/ { foo = 3.14 }`, }, { "simple otherwise action", "otherwise {}\n", }, { "pattern action then otherwise action", `counter lines_total by type /foo/ { lines_total["foo"]++ } otherwise { lines_total["misc"] += 10 }`, }, { "simple else clause", "/foo/ {} else {}", }, { "nested else clause", "/foo/ { / bar/ {} } else { /quux/ {} else {} }", }, { "mod operator", `gauge a /foo/ { a = 3 % 1 }`, }, { "delete", `counter foo by bar /foo/ { del foo[$1] }`, }, { "delete after", `counter foo by bar /foo/ { del foo[$1] after 168h }`, }, {"getfilename", ` getfilename() `}, {"indexed expression arg list", ` counter foo by a,b /(\d) (\d+)/ { foo[$1,$2]++ }`}, {"paren expr", ` (0) || (1 && 3) { }`}, {"regex cond expr", ` /(\d)/ && 1 { } `}, {"concat expr 1", ` const X /foo/ /bar/ + X { }`}, {"concat expr 2", ` const X /foo/ X { }`}, {"match expression 1", ` $foo =~ /bar/ { } $foo !~ /bar/ { } `}, {"match expression 2", ` $foo =~ /bar/ + X { }`}, {"match expression 3", ` const X /foo/ $foo =~ X { }`}, {"capref used in def", ` /(?P.*)/ && $x > 0 { }`}, {"match expr 4", ` /(?P.{6}) (?P.*)/ { $foo =~ $bar { } }`}, {"stop", ` // { stop }`}, {"substitution", ` /(\d,\d)/ { subst(",", ",", $1) }`}, {"pattern in arg expr list", ` /(\d,\d)/ { subst(/,/, "", $1) }`}, } func TestParserRoundTrip(t *testing.T) { if *parserTestDebug { mtailDebug = 3 } for _, tc := range parserTests { tc := tc t.Run(tc.name, func(t *testing.T) { p := newParser(tc.name, strings.NewReader(tc.program)) r := mtailParse(p) if r != 0 || p.root == nil || len(p.errors) > 0 { t.Error("1st pass parse errors:\n") for _, e := range p.errors { t.Errorf("\t%s\n", e) } t.Fatal() } if *parserTestDebug { s 
:= Sexp{} t.Log("AST:\n" + s.Dump(p.root)) } u := Unparser{} output := u.Unparse(p.root) p2 := newParser(tc.name+" 2", strings.NewReader(output)) r = mtailParse(p2) if r != 0 || p2.root == nil || len(p2.errors) > 0 { t.Errorf("2nd pass parse errors:\n") for _, e := range p2.errors { t.Errorf("\t%s\n", e) } t.Logf("2nd pass input was:\n%s", output) t.Logf("2nd pass diff:\n%s", testutil.Diff(tc.program, output)) t.Fatal() } u = Unparser{} output2 := u.Unparse(p2.root) testutil.ExpectNoDiff(t, output2, output) }) } } type parserInvalidProgram struct { name string program string errors []string } var parserInvalidPrograms = []parserInvalidProgram{ { "unknown character", "?\n", []string{"unknown character:1:1: Unexpected input: '?'"}, }, { "unterminated regex", "/foo\n", []string{ "unterminated regex:1:2-4: Unterminated regular expression: \"/foo\"", "unterminated regex:1:2-4: syntax error: unexpected end of file, expecting '/' to end regex", }, }, { "unterminated string", " \"foo }\n", []string{"unterminated string:1:2-7: Unterminated quoted string: \"\\\"foo }\""}, }, { "unterminated const regex", "const X /(?P", []string{ "unterminated const regex:1:10-17: Unterminated regular expression: \"/(?P\"", "unterminated const regex:1:10-17: syntax error: unexpected end of file, expecting '/' to end regex", }, }, { "unbalanced {", "/foo/ {\n", []string{"unbalanced {:2:1: syntax error: unexpected end of file, expecting '}' to end block"}, }, { "unbalanced else {", "/foo/ { } else {\n", []string{"unbalanced else {:2:1: syntax error: unexpected end of file, expecting '}' to end block"}, }, { "unbalanced otherwise {", "otherwise {\n", []string{"unbalanced otherwise {:2:1: syntax error: unexpected end of file, expecting '}' to end block"}, }, { "index of non-terminal 1", `// { foo++[$1]++ }`, []string{"index of non-terminal 1:2:7: syntax error: unexpected indexing of an expression"}, }, { "index of non-terminal 2", `// { 0[$1]++ }`, []string{"index of non-terminal 2:2:3: syntax 
error: unexpected indexing of an expression"}, }, { "index of pattern", `/foo/[0] `, []string{"index of pattern:1:6: syntax error: unexpected indexing of an expression"}, }, { "statement with no effect", `/(\d)foo/ { timestamp() - $1 }`, []string{"statement with no effect:3:18: syntax error: statement with no effect, missing an assignment, `+' concatenation, or `{}' block?"}, }, { "pattern without block", `/(?P.)/ `, []string{"pattern without block:2:11: syntax error: statement with no effect, missing an assignment, `+' concatenation, or `{}' block?"}, }, { "paired pattern without block", `/(?P.)/ /(?P.)/ {} `, []string{"paired pattern without block:2:11: syntax error: statement with no effect, missing an assignment, `+' concatenation, or `{}' block?"}, }, { "dimensioned limit per dimension", "counter foo by a limit 10, b", []string{"dimensioned limit per dimension:1:26: syntax error: unexpected COMMA"}, }, } func TestParseInvalidPrograms(t *testing.T) { if *parserTestDebug { mtailDebug = 3 } for _, tc := range parserInvalidPrograms { tc := tc t.Run(tc.name, func(t *testing.T) { p := newParser(tc.name, strings.NewReader(tc.program)) mtailParse(p) testutil.ExpectNoDiff(t, strings.Join(tc.errors, "\n"), // want strings.TrimRight(p.errors.Error(), "\n")) // got if p.errors.Error() == "no errors" && *parserTestDebug { s := Sexp{} t.Log("AST:\n" + s.Dump(p.root)) } }) } } var parsePositionTests = []struct { name string program string positions []*position.Position }{ { "empty", "", nil, }, { "variable", `counter foo`, []*position.Position{{"variable", 0, 8, 10}}, }, { "pattern", `const ID /foo/`, []*position.Position{{"pattern", 0, 9, 13}}, }, { "multiline regex", "const ID\n" + "/foo/ +\n" + "/bar/", // TODO: Update position for the first token to `1, 0, 4` when position tracking is fixed []*position.Position{{"multiline regex", 1, 4, 4}, {"multiline regex", 2, 0, 4}}, }, } func TestParsePositionTests(t *testing.T) { for _, tc := range parsePositionTests { tc := tc 
t.Run(tc.name, func(t *testing.T) { // Not t.Parallel() because the parser is not reentrant, and mtailDebug is a global. root, err := Parse(tc.name, strings.NewReader(tc.program)) testutil.FatalIfErr(t, err) p := &positionCollector{} ast.Walk(p, root) testutil.ExpectNoDiff(t, tc.positions, p.positions, testutil.AllowUnexported(position.Position{})) }) } } type positionCollector struct { positions []*position.Position } func (p *positionCollector) VisitBefore(node ast.Node) (ast.Visitor, ast.Node) { switch n := node.(type) { case *ast.VarDecl, *ast.PatternLit: p.positions = append(p.positions, n.Pos()) } return p, node } func (p *positionCollector) VisitAfter(node ast.Node) ast.Node { return node } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/sexp.go000066400000000000000000000112501460063571700246770ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // This file is available under the Apache license. package parser import ( "fmt" "strconv" "strings" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/runtime/compiler/ast" "github.com/google/mtail/internal/runtime/compiler/symbol" ) // Sexp is for converting program syntax trees into typed s-expression for printing. type Sexp struct { output strings.Builder // Accumulator for the result EmitTypes bool col int // column to indent current line to line strings.Builder } func (s *Sexp) indent() { s.col += 2 } func (s *Sexp) outdent() { s.col -= 2 } func (s *Sexp) prefix() (r string) { for i := 0; i < s.col; i++ { r += " " } return } func (s *Sexp) emit(str string) { s.line.WriteString(str) } func (s *Sexp) newline() { if s.line.Len() > 0 { s.output.WriteString(s.prefix()) s.output.WriteString(s.line.String()) } s.output.WriteString("\n") s.line.Reset() } // VisitBefore implements the astNode Visitor interface. 
func (s *Sexp) VisitBefore(n ast.Node) (ast.Visitor, ast.Node) { s.emit(fmt.Sprintf("( ;;%T ", n)) if s.EmitTypes { s.emit(fmt.Sprintf("<%s> ", n.Type())) } s.emit(fmt.Sprintf("@ %s", n.Pos())) s.newline() s.indent() switch v := n.(type) { case *ast.PatternFragment: s.emit("const ") ast.Walk(s, v.ID) s.emit(" ") case *ast.PatternLit: s.emit(fmt.Sprintf("%q", v.Pattern)) case *ast.BinaryExpr: switch v.Op { case LT: s.emit("<") case GT: s.emit(">") case LE: s.emit("<=") case GE: s.emit(">=") case EQ: s.emit("==") case NE: s.emit("!=") case SHL: s.emit("<<") case SHR: s.emit(">>") case BITAND: s.emit("&") case BITOR: s.emit("|") case XOR: s.emit("^") case NOT: s.emit("~") case AND: s.emit("&&") case OR: s.emit("||") case PLUS: s.emit("+") case MINUS: s.emit("-") case MUL: s.emit("*") case DIV: s.emit("/") case POW: s.emit("**") case ASSIGN: s.emit("=") case ADD_ASSIGN: s.emit("+=") case MOD: s.emit("%") case MATCH: s.emit("=~") case NOT_MATCH: s.emit("!~") default: s.emit(fmt.Sprintf("Unexpected op: %s", Kind(v.Op))) } s.newline() case *ast.IDTerm: s.emit("\"" + v.Name + "\"") case *ast.CaprefTerm: s.emit("\"" + v.Name + "\"") case *ast.BuiltinExpr: s.emit("\"" + v.Name + "\"") s.newline() case *ast.VarDecl: switch v.Kind { case metrics.Counter: s.emit("counter ") case metrics.Gauge: s.emit("gauge ") case metrics.Timer: s.emit("timer ") case metrics.Text: s.emit("text ") } s.emit(v.Name) if len(v.Keys) > 0 { s.emit(" (") s.emit(strings.Join(v.Keys, " ")) s.emit(")") } case *ast.UnaryExpr: switch v.Op { case INC: s.emit("increment") case DEC: s.emit("decrement") case NOT: s.emit("unary-not") case MATCH: s.emit("match") default: s.emit(fmt.Sprintf("Unexpected op: %s", Kind(v.Op))) } s.newline() case *ast.StringLit: s.emit("\"" + v.Text + "\"") case *ast.IntLit: s.emit(strconv.FormatInt(v.I, 10)) case *ast.FloatLit: s.emit(strconv.FormatFloat(v.F, 'g', -1, 64)) case *ast.NextStmt: s.emit("next") case *ast.OtherwiseStmt: s.emit("otherwise") case *ast.DelStmt: 
s.emit("del") if v.Expiry > 0 { s.emit(fmt.Sprintf(" after %s", v.Expiry)) } case *ast.ConvExpr: s.emit("conv") case *ast.Error: s.emit(fmt.Sprintf("error %q", v.Spelling)) case *ast.StopStmt: s.emit("stop") case *ast.DecoDecl: s.emit(fmt.Sprintf("%q", v.Name)) s.newline() s.emitScope(v.Scope) case *ast.DecoStmt: s.emit(fmt.Sprintf("%q", v.Name)) s.newline() case *ast.StmtList: s.emitScope(v.Scope) case *ast.CondStmt: s.emitScope(v.Scope) case *ast.IndexedExpr, *ast.ExprList, *ast.PatternExpr: // normal walk default: panic(fmt.Sprintf("sexp found undefined type %T", n)) } return s, n } // VisitAfter implements the astNode Visitor interface. func (s *Sexp) VisitAfter(node ast.Node) ast.Node { s.outdent() s.emit(")") s.newline() return node } func (s *Sexp) emitScope(scope *symbol.Scope) { s.emit(fmt.Sprintf("Scope: %p (", scope)) s.newline() if scope != nil { if scope.Parent != nil { s.indent() s.emit(fmt.Sprintf("Parent: %p", scope.Parent)) s.newline() s.outdent() } if len(scope.Symbols) > 0 { s.indent() for name, sym := range scope.Symbols { s.emit(fmt.Sprintf("%q: %v %q %v", name, sym.Kind, sym.Name, sym.Used)) s.newline() } s.outdent() } } s.emit(")") s.newline() } // Dump begins the dumping of the syntax tree, returning the s-expression as a single string. func (s *Sexp) Dump(n ast.Node) string { s.output.Reset() ast.Walk(s, n) return s.output.String() } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/tokens.go000066400000000000000000000017341460063571700252310ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package parser import ( "fmt" "github.com/google/mtail/internal/runtime/compiler/position" ) // Kind enumerates the types of lexical tokens in a mtail program. type Kind int // String returns a readable name of the token Kind. 
func (k Kind) String() string { // 0xE000 is the magic offset for the first token ID in goyacc, and 2 is // the offset of the internal tokens in the token table. Yes this is a // hack around what appears to be an original yacc bug. return mtailTokname(int(k) - 0xE000 + 2) } // Token describes a lexed Token from the input, containing its type, the // original text of the Token, and its position in the input. type Token struct { Kind Kind Spelling string Pos position.Position } // String returns a printable form of a Token. func (t Token) String() string { return fmt.Sprintf("%s(%q,%s)", t.Kind.String(), t.Spelling, t.Pos) } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/tokens_test.go000066400000000000000000000014221460063571700262620ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // This file is available under the Apache license. package parser import ( "fmt" "testing" "testing/quick" "github.com/google/mtail/internal/runtime/compiler/position" ) func TestKindHasString(t *testing.T) { for k := INVALID; k <= NL; k++ { if Kind(k).String() != mtailToknames[k-INVALID+3] { t.Errorf("kind string not match. expected %s, received %s", mtailToknames[k-INVALID], Kind(k).String()) } } } func TestTokenString(t *testing.T) { if err := quick.Check(func(kind Kind, spelling string, pos position.Position) bool { tok := Token{Kind: kind, Spelling: spelling, Pos: pos} return tok.String() == fmt.Sprintf("%s(%q,%s)", kind.String(), spelling, pos.String()) }, nil); err != nil { t.Error(err) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/unparser.go000066400000000000000000000122501460063571700255600ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
package parser

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/google/mtail/internal/metrics"
	"github.com/google/mtail/internal/runtime/compiler/ast"
)

// Unparser is for converting program syntax trees back to program text.
type Unparser struct {
	pos       int             // current indentation depth, in spaces
	output    strings.Builder // accumulated program text
	line      strings.Builder // text of the line currently being built
	emitTypes bool            // if set, wrap every node in a <type>(...) annotation
}

func (u *Unparser) indent() {
	u.pos += 2
}

func (u *Unparser) outdent() {
	u.pos -= 2
}

// prefix returns the whitespace for the current indentation level.
func (u *Unparser) prefix() (s string) {
	for i := 0; i < u.pos; i++ {
		s += " "
	}
	return
}

// emit appends text to the line currently being built.
func (u *Unparser) emit(s string) {
	u.line.WriteString(s)
}

// newline flushes the current line, with indentation, to the output.
func (u *Unparser) newline() {
	u.output.WriteString(u.prefix())
	u.output.WriteString(u.line.String())
	u.output.WriteString("\n")
	u.line.Reset()
}

// VisitBefore implements the ast.Visitor interface.
func (u *Unparser) VisitBefore(n ast.Node) (ast.Visitor, ast.Node) {
	if u.emitTypes {
		u.emit(fmt.Sprintf("<%s>(", n.Type()))
	}
	switch v := n.(type) {
	case *ast.StmtList:
		for _, child := range v.Children {
			ast.Walk(u, child)
			u.newline()
		}

	case *ast.ExprList:
		if len(v.Children) > 0 {
			ast.Walk(u, v.Children[0])
			for _, child := range v.Children[1:] {
				u.emit(", ")
				ast.Walk(u, child)
			}
		}

	case *ast.CondStmt:
		if v.Cond != nil {
			ast.Walk(u, v.Cond)
		}
		u.emit(" {")
		u.newline()
		u.indent()
		ast.Walk(u, v.Truth)
		if v.Else != nil {
			u.outdent()
			u.emit("} else {")
			u.newline()
			u.indent()
			ast.Walk(u, v.Else)
		}
		u.outdent()
		u.emit("}")

	case *ast.PatternFragment:
		u.emit("const ")
		ast.Walk(u, v.ID)
		u.emit(" ")
		ast.Walk(u, v.Expr)

	case *ast.PatternLit:
		// Escape any slashes inside the pattern before wrapping it in /.../
		u.emit("/" + strings.ReplaceAll(v.Pattern, "/", "\\/") + "/")

	case *ast.BinaryExpr:
		ast.Walk(u, v.LHS)
		switch v.Op {
		case LT:
			u.emit(" < ")
		case GT:
			u.emit(" > ")
		case LE:
			u.emit(" <= ")
		case GE:
			u.emit(" >= ")
		case EQ:
			u.emit(" == ")
		case NE:
			u.emit(" != ")
		case SHL:
			u.emit(" << ")
		case SHR:
			u.emit(" >> ")
		case BITAND:
			u.emit(" & ")
		case BITOR:
			u.emit(" | ")
		case XOR:
			u.emit(" ^ ")
		case NOT:
			u.emit(" ~ ")
		case AND:
			u.emit(" && ")
		case OR:
			u.emit(" || ")
		case PLUS:
			u.emit(" + ")
		case MINUS:
			u.emit(" - ")
		case MUL:
			u.emit(" * ")
		case DIV:
			u.emit(" / ")
		case POW:
			u.emit(" ** ")
		case ASSIGN:
			u.emit(" = ")
		case ADD_ASSIGN:
			u.emit(" += ")
		case MOD:
			u.emit(" % ")
		case MATCH:
			u.emit(" =~ ")
		case NOT_MATCH:
			u.emit(" !~ ")
		default:
			u.emit(fmt.Sprintf("Unexpected op: %v", v.Op))
		}
		ast.Walk(u, v.RHS)

	case *ast.IDTerm:
		u.emit(v.Name)

	case *ast.CaprefTerm:
		u.emit("$" + v.Name)

	case *ast.BuiltinExpr:
		u.emit(v.Name + "(")
		if v.Args != nil {
			ast.Walk(u, v.Args)
		}
		u.emit(")")

	case *ast.IndexedExpr:
		ast.Walk(u, v.LHS)
		if len(v.Index.(*ast.ExprList).Children) > 0 {
			u.emit("[")
			ast.Walk(u, v.Index)
			u.emit("]")
		}

	case *ast.VarDecl:
		switch v.Kind {
		case metrics.Counter:
			u.emit("counter ")
		case metrics.Gauge:
			u.emit("gauge ")
		case metrics.Timer:
			u.emit("timer ")
		case metrics.Text:
			u.emit("text ")
		case metrics.Histogram:
			u.emit("histogram ")
		}
		u.emit(v.Name)
		if len(v.Keys) > 0 {
			u.emit(" by " + strings.Join(v.Keys, ", "))
		}
		if v.Limit > 0 {
			u.emit(fmt.Sprintf(" limit %d", v.Limit))
		}
		if len(v.Buckets) > 0 {
			buckets := strings.Builder{}
			buckets.WriteString(" buckets ")
			for _, f := range v.Buckets {
				buckets.WriteString(fmt.Sprintf("%f, ", f))
			}
			// Trim the trailing ", " appended after the final bucket.
			u.emit(buckets.String()[:buckets.Len()-2])
		}

	case *ast.UnaryExpr:
		switch v.Op {
		case INC:
			ast.Walk(u, v.Expr)
			u.emit("++")
		case DEC:
			ast.Walk(u, v.Expr)
			u.emit("--")
		case NOT:
			u.emit(" ~")
			ast.Walk(u, v.Expr)
		case MATCH:
			ast.Walk(u, v.Expr)
		default:
			u.emit(fmt.Sprintf("Unexpected op: %s", Kind(v.Op)))
		}

	case *ast.StringLit:
		u.emit("\"" + v.Text + "\"")

	case *ast.IntLit:
		u.emit(strconv.FormatInt(v.I, 10))

	case *ast.FloatLit:
		u.emit(strconv.FormatFloat(v.F, 'g', -1, 64))

	case *ast.DecoDecl:
		u.emit(fmt.Sprintf("def %s {", v.Name))
		u.newline()
		u.indent()
		ast.Walk(u, v.Block)
		u.outdent()
		u.emit("}")

	case *ast.DecoStmt:
		u.emit(fmt.Sprintf("@%s {", v.Name))
		u.newline()
		u.indent()
		ast.Walk(u, v.Block)
		u.outdent()
		u.emit("}")

	case *ast.NextStmt:
		u.emit("next")

	case *ast.OtherwiseStmt:
		u.emit("otherwise")

	case *ast.DelStmt:
		u.emit("del ")
		ast.Walk(u, v.N)
		if v.Expiry > 0 {
			u.emit(fmt.Sprintf(" after %s", v.Expiry))
		}
		u.newline()

	case *ast.ConvExpr:
		ast.Walk(u, v.N)

	case *ast.PatternExpr:
		ast.Walk(u, v.Expr)

	case *ast.Error:
		u.emit("// error")
		u.newline()
		u.emit(v.Spelling)

	case *ast.StopStmt:
		u.emit("stop")

	default:
		// Message fixed from the garbled "unfound undefined type"; matches the
		// phrasing of the equivalent panic in the sexp dumper visitor.
		panic(fmt.Sprintf("unparser found undefined type %T", n))
	}
	if u.emitTypes {
		u.emit(")")
	}
	// Children are walked explicitly above, so stop the walker's descent here.
	return nil, n
}

// VisitAfter implements the ast.Visitor interface.
func (u *Unparser) VisitAfter(n ast.Node) ast.Node {
	return n
}

// Unparse begins the unparsing of the syntax tree, returning the program text as a single string.
func (u *Unparser) Unparse(n ast.Node) string {
	ast.Walk(u, n)
	return u.output.String()
}
mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/parser/y.output000066400000000000000000001007261460063571700251300ustar00rootroot00000000000000
state 0 $accept: .start $end stmt_list: . (2) . reduce 2 (src line 99) stmt_list goto 2 start goto 1 state 1 $accept: start.$end $end accept . error state 2 start: stmt_list. (1) stmt_list: stmt_list.stmt mark_pos: . (124) metric_hide_spec: . (93) $end reduce 1 (src line 91) INVALID shift 13 COUNTER reduce 93 (src line 520) GAUGE reduce 93 (src line 520) TIMER reduce 93 (src line 520) TEXT reduce 93 (src line 520) HISTOGRAM reduce 93 (src line 520) CONST shift 11 HIDDEN shift 23 NEXT shift 10 STOP shift 12 STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 NL shift 16 .
reduce 124 (src line 707) stmt goto 3 conditional_stmt goto 4 conditional_expr goto 14 expr_stmt goto 5 expr goto 17 primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 22 unary_expr goto 27 assign_expr goto 21 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 20 indexed_expr goto 32 id_expr goto 41 concat_expr goto 24 pattern_expr goto 19 metric_declaration goto 6 decorator_declaration goto 7 decoration_stmt goto 8 regex_pattern goto 29 match_expr goto 26 delete_stmt goto 9 builtin_expr goto 33 metric_hide_spec goto 18 mark_pos goto 15 state 3 stmt_list: stmt_list stmt. (3) . reduce 3 (src line 104) state 4 stmt: conditional_stmt. (4) . reduce 4 (src line 114) state 5 stmt: expr_stmt. (5) . reduce 5 (src line 117) state 6 stmt: metric_declaration. (6) . reduce 6 (src line 119) state 7 stmt: decorator_declaration. (7) . reduce 7 (src line 121) state 8 stmt: decoration_stmt. (8) . reduce 8 (src line 123) state 9 stmt: delete_stmt. (9) . reduce 9 (src line 125) state 10 stmt: NEXT. (10) . reduce 10 (src line 127) state 11 stmt: CONST.id_expr opt_nl concat_expr ID shift 43 . error id_expr goto 45 state 12 stmt: STOP. (12) . reduce 12 (src line 135) state 13 stmt: INVALID. (13) . reduce 13 (src line 139) state 14 conditional_stmt: conditional_expr.compound_stmt ELSE compound_stmt conditional_stmt: conditional_expr.compound_stmt LCURLY shift 47 . error compound_stmt goto 46 state 15 conditional_stmt: mark_pos.OTHERWISE compound_stmt builtin_expr: mark_pos.BUILTIN LPAREN RPAREN builtin_expr: mark_pos.BUILTIN LPAREN arg_expr_list RPAREN regex_pattern: mark_pos.DIV in_regex REGEX DIV decorator_declaration: mark_pos.DEF ID compound_stmt decoration_stmt: mark_pos.DECO compound_stmt delete_stmt: mark_pos.DEL postfix_expr AFTER DURATIONLITERAL delete_stmt: mark_pos.DEL postfix_expr DEF shift 51 DEL shift 53 OTHERWISE shift 48 BUILTIN shift 49 DECO shift 52 DIV shift 50 . error state 16 expr_stmt: NL. (20) . 
reduce 20 (src line 184) state 17 expr_stmt: expr.NL NL shift 54 . error state 18 metric_declaration: metric_hide_spec.metric_type_spec metric_decl_attr_spec COUNTER shift 56 GAUGE shift 57 TIMER shift 58 TEXT shift 59 HISTOGRAM shift 60 . error metric_type_spec goto 55 state 19 conditional_expr: pattern_expr. (17) conditional_expr: pattern_expr.logical_op opt_nl logical_expr AND shift 62 OR shift 63 . reduce 17 (src line 166) logical_op goto 61 state 20 conditional_expr: logical_expr. (19) logical_expr: logical_expr.logical_op opt_nl bitwise_expr logical_expr: logical_expr.logical_op opt_nl match_expr AND shift 62 OR shift 63 . reduce 19 (src line 179) logical_op goto 64 state 21 expr: assign_expr. (23) . reduce 23 (src line 200) state 22 expr: postfix_expr. (24) unary_expr: postfix_expr. (68) postfix_expr: postfix_expr.postfix_op INC shift 66 DEC shift 67 NL reduce 24 (src line 203) . reduce 68 (src line 384) postfix_op goto 65 state 23 metric_hide_spec: HIDDEN. (94) . reduce 94 (src line 525) state 24 pattern_expr: concat_expr. (58) concat_expr: concat_expr.PLUS opt_nl regex_pattern concat_expr: concat_expr.PLUS opt_nl id_expr PLUS shift 68 . reduce 58 (src line 341) state 25 logical_expr: bitwise_expr. (27) bitwise_expr: bitwise_expr.bitwise_op opt_nl rel_expr BITAND shift 70 XOR shift 72 BITOR shift 71 . reduce 27 (src line 220) bitwise_op goto 69 state 26 logical_expr: match_expr. (28) . reduce 28 (src line 223) state 27 assign_expr: unary_expr.ASSIGN opt_nl logical_expr assign_expr: unary_expr.ADD_ASSIGN opt_nl logical_expr multiplicative_expr: unary_expr. (62) ADD_ASSIGN shift 74 ASSIGN shift 73 . reduce 62 (src line 363) state 28 match_expr: primary_expr.match_op opt_nl pattern_expr match_expr: primary_expr.match_op opt_nl primary_expr postfix_expr: primary_expr. (70) MATCH shift 76 NOT_MATCH shift 77 . reduce 70 (src line 394) match_op goto 75 state 29 concat_expr: regex_pattern. (59) . reduce 59 (src line 349) state 30 bitwise_expr: rel_expr. 
(33) rel_expr: rel_expr.rel_op opt_nl shift_expr LT shift 79 GT shift 80 LE shift 81 GE shift 82 EQ shift 83 NE shift 84 . reduce 33 (src line 243) rel_op goto 78 state 31 unary_expr: NOT.unary_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 87 postfix_expr goto 86 unary_expr goto 85 indexed_expr goto 32 id_expr goto 41 builtin_expr goto 33 mark_pos goto 88 state 32 primary_expr: indexed_expr. (74) indexed_expr: indexed_expr.LSQUARE arg_expr_list RSQUARE LSQUARE shift 89 . reduce 74 (src line 411) state 33 primary_expr: builtin_expr. (75) . reduce 75 (src line 414) state 34 primary_expr: CAPREF. (76) . reduce 76 (src line 416) state 35 primary_expr: CAPREF_NAMED. (77) . reduce 77 (src line 420) state 36 primary_expr: STRING. (78) . reduce 78 (src line 424) state 37 primary_expr: LPAREN.logical_expr RPAREN mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 90 indexed_expr goto 32 id_expr goto 41 match_expr goto 26 builtin_expr goto 33 mark_pos goto 88 state 38 primary_expr: INTLITERAL. (80) . reduce 80 (src line 432) state 39 primary_expr: FLOATLITERAL. (81) . reduce 81 (src line 436) state 40 rel_expr: shift_expr. (38) shift_expr: shift_expr.shift_op opt_nl additive_expr SHL shift 93 SHR shift 94 . reduce 38 (src line 262) shift_op goto 92 state 41 indexed_expr: id_expr. (82) . reduce 82 (src line 443) state 42 shift_expr: additive_expr. (46) additive_expr: additive_expr.add_op opt_nl multiplicative_expr MINUS shift 97 PLUS shift 96 . reduce 46 (src line 287) add_op goto 95 state 43 id_expr: ID. 
(84) . reduce 84 (src line 459) state 44 additive_expr: multiplicative_expr. (50) multiplicative_expr: multiplicative_expr.mul_op opt_nl unary_expr DIV shift 100 MOD shift 101 MUL shift 99 POW shift 102 . reduce 50 (src line 304) mul_op goto 98 state 45 stmt: CONST id_expr.opt_nl concat_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 103 state 46 conditional_stmt: conditional_expr compound_stmt.ELSE compound_stmt conditional_stmt: conditional_expr compound_stmt. (15) ELSE shift 105 . reduce 15 (src line 151) state 47 compound_stmt: LCURLY.stmt_list RCURLY stmt_list: . (2) . reduce 2 (src line 99) stmt_list goto 106 state 48 conditional_stmt: mark_pos OTHERWISE.compound_stmt LCURLY shift 47 . error compound_stmt goto 107 state 49 builtin_expr: mark_pos BUILTIN.LPAREN RPAREN builtin_expr: mark_pos BUILTIN.LPAREN arg_expr_list RPAREN LPAREN shift 108 . error state 50 regex_pattern: mark_pos DIV.in_regex REGEX DIV in_regex: . (125) . reduce 125 (src line 717) in_regex goto 109 state 51 decorator_declaration: mark_pos DEF.ID compound_stmt ID shift 110 . error state 52 decoration_stmt: mark_pos DECO.compound_stmt LCURLY shift 47 . error compound_stmt goto 111 state 53 delete_stmt: mark_pos DEL.postfix_expr AFTER DURATIONLITERAL delete_stmt: mark_pos DEL.postfix_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 87 postfix_expr goto 112 indexed_expr goto 32 id_expr goto 41 builtin_expr goto 33 mark_pos goto 88 state 54 expr_stmt: expr NL. (21) . reduce 21 (src line 187) state 55 metric_declaration: metric_hide_spec metric_type_spec.metric_decl_attr_spec STRING shift 116 ID shift 115 . error metric_decl_attr_spec goto 113 metric_name_spec goto 114 state 56 metric_type_spec: COUNTER. (102) . reduce 102 (src line 572) state 57 metric_type_spec: GAUGE. (103) . 
reduce 103 (src line 577) state 58 metric_type_spec: TIMER. (104) . reduce 104 (src line 581) state 59 metric_type_spec: TEXT. (105) . reduce 105 (src line 585) state 60 metric_type_spec: HISTOGRAM. (106) . reduce 106 (src line 589) state 61 conditional_expr: pattern_expr logical_op.opt_nl logical_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 117 state 62 logical_op: AND. (31) . reduce 31 (src line 235) state 63 logical_op: OR. (32) . reduce 32 (src line 238) state 64 logical_expr: logical_expr logical_op.opt_nl bitwise_expr logical_expr: logical_expr logical_op.opt_nl match_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 118 state 65 postfix_expr: postfix_expr postfix_op. (71) . reduce 71 (src line 397) state 66 postfix_op: INC. (72) . reduce 72 (src line 403) state 67 postfix_op: DEC. (73) . reduce 73 (src line 406) state 68 concat_expr: concat_expr PLUS.opt_nl regex_pattern concat_expr: concat_expr PLUS.opt_nl id_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 119 state 69 bitwise_expr: bitwise_expr bitwise_op.opt_nl rel_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 120 state 70 bitwise_op: BITAND. (35) . reduce 35 (src line 252) state 71 bitwise_op: BITOR. (36) . reduce 36 (src line 255) state 72 bitwise_op: XOR. (37) . reduce 37 (src line 257) state 73 assign_expr: unary_expr ASSIGN.opt_nl logical_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 121 state 74 assign_expr: unary_expr ADD_ASSIGN.opt_nl logical_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 122 state 75 match_expr: primary_expr match_op.opt_nl pattern_expr match_expr: primary_expr match_op.opt_nl primary_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 123 state 76 match_op: MATCH. (56) . reduce 56 (src line 332) state 77 match_op: NOT_MATCH. (57) . 
reduce 57 (src line 335) state 78 rel_expr: rel_expr rel_op.opt_nl shift_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 124 state 79 rel_op: LT. (40) . reduce 40 (src line 271) state 80 rel_op: GT. (41) . reduce 41 (src line 274) state 81 rel_op: LE. (42) . reduce 42 (src line 276) state 82 rel_op: GE. (43) . reduce 43 (src line 278) state 83 rel_op: EQ. (44) . reduce 44 (src line 280) state 84 rel_op: NE. (45) . reduce 45 (src line 282) state 85 unary_expr: NOT unary_expr. (69) . reduce 69 (src line 387) state 86 unary_expr: postfix_expr. (68) postfix_expr: postfix_expr.postfix_op INC shift 66 DEC shift 67 . reduce 68 (src line 384) postfix_op goto 65 state 87 postfix_expr: primary_expr. (70) . reduce 70 (src line 394) state 88 builtin_expr: mark_pos.BUILTIN LPAREN RPAREN builtin_expr: mark_pos.BUILTIN LPAREN arg_expr_list RPAREN BUILTIN shift 49 . error state 89 indexed_expr: indexed_expr LSQUARE.arg_expr_list RSQUARE mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) arg_expr_list goto 125 primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 127 indexed_expr goto 32 id_expr goto 41 concat_expr goto 24 pattern_expr goto 128 regex_pattern goto 29 match_expr goto 26 builtin_expr goto 33 arg_expr goto 126 mark_pos goto 129 state 90 logical_expr: logical_expr.logical_op opt_nl bitwise_expr logical_expr: logical_expr.logical_op opt_nl match_expr primary_expr: LPAREN logical_expr.RPAREN AND shift 62 OR shift 63 RPAREN shift 130 . error logical_op goto 64 state 91 multiplicative_expr: unary_expr. (62) . reduce 62 (src line 363) state 92 shift_expr: shift_expr shift_op.opt_nl additive_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 131 state 93 shift_op: SHL. 
(48) . reduce 48 (src line 296) state 94 shift_op: SHR. (49) . reduce 49 (src line 299) state 95 additive_expr: additive_expr add_op.opt_nl multiplicative_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 132 state 96 add_op: PLUS. (52) . reduce 52 (src line 313) state 97 add_op: MINUS. (53) . reduce 53 (src line 316) state 98 multiplicative_expr: multiplicative_expr mul_op.opt_nl unary_expr opt_nl: . (126) NL shift 104 . reduce 126 (src line 727) opt_nl goto 133 state 99 mul_op: MUL. (64) . reduce 64 (src line 372) state 100 mul_op: DIV. (65) . reduce 65 (src line 375) state 101 mul_op: MOD. (66) . reduce 66 (src line 377) state 102 mul_op: POW. (67) . reduce 67 (src line 379) state 103 stmt: CONST id_expr opt_nl.concat_expr mark_pos: . (124) . reduce 124 (src line 707) concat_expr goto 134 regex_pattern goto 29 mark_pos goto 135 state 104 opt_nl: NL. (127) . reduce 127 (src line 729) state 105 conditional_stmt: conditional_expr compound_stmt ELSE.compound_stmt LCURLY shift 47 . error compound_stmt goto 136 state 106 stmt_list: stmt_list.stmt compound_stmt: LCURLY stmt_list.RCURLY mark_pos: . (124) metric_hide_spec: . (93) INVALID shift 13 COUNTER reduce 93 (src line 520) GAUGE reduce 93 (src line 520) TIMER reduce 93 (src line 520) TEXT reduce 93 (src line 520) HISTOGRAM reduce 93 (src line 520) CONST shift 11 HIDDEN shift 23 NEXT shift 10 STOP shift 12 STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 RCURLY shift 137 LPAREN shift 37 NL shift 16 . 
reduce 124 (src line 707) stmt goto 3 conditional_stmt goto 4 conditional_expr goto 14 expr_stmt goto 5 expr goto 17 primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 22 unary_expr goto 27 assign_expr goto 21 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 20 indexed_expr goto 32 id_expr goto 41 concat_expr goto 24 pattern_expr goto 19 metric_declaration goto 6 decorator_declaration goto 7 decoration_stmt goto 8 regex_pattern goto 29 match_expr goto 26 delete_stmt goto 9 builtin_expr goto 33 metric_hide_spec goto 18 mark_pos goto 15 state 107 conditional_stmt: mark_pos OTHERWISE compound_stmt. (16) . reduce 16 (src line 159) state 108 builtin_expr: mark_pos BUILTIN LPAREN.RPAREN builtin_expr: mark_pos BUILTIN LPAREN.arg_expr_list RPAREN mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 RPAREN shift 138 . reduce 124 (src line 707) arg_expr_list goto 139 primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 127 indexed_expr goto 32 id_expr goto 41 concat_expr goto 24 pattern_expr goto 128 regex_pattern goto 29 match_expr goto 26 builtin_expr goto 33 arg_expr goto 126 mark_pos goto 129 state 109 regex_pattern: mark_pos DIV in_regex.REGEX DIV REGEX shift 140 . error state 110 decorator_declaration: mark_pos DEF ID.compound_stmt LCURLY shift 47 . error compound_stmt goto 141 state 111 decoration_stmt: mark_pos DECO compound_stmt. (119) . reduce 119 (src line 674) state 112 postfix_expr: postfix_expr.postfix_op delete_stmt: mark_pos DEL postfix_expr.AFTER DURATIONLITERAL delete_stmt: mark_pos DEL postfix_expr. (121) AFTER shift 142 INC shift 66 DEC shift 67 . 
reduce 121 (src line 687) postfix_op goto 65 state 113 metric_declaration: metric_hide_spec metric_type_spec metric_decl_attr_spec. (92) metric_decl_attr_spec: metric_decl_attr_spec.metric_by_spec metric_decl_attr_spec: metric_decl_attr_spec.metric_as_spec metric_decl_attr_spec: metric_decl_attr_spec.metric_buckets_spec metric_decl_attr_spec: metric_decl_attr_spec.metric_limit_spec AS shift 148 BY shift 147 BUCKETS shift 149 LIMIT shift 150 . reduce 92 (src line 509) metric_limit_spec goto 146 metric_as_spec goto 144 metric_by_spec goto 143 metric_buckets_spec goto 145 state 114 metric_decl_attr_spec: metric_name_spec. (99) . reduce 99 (src line 553) state 115 metric_name_spec: ID. (100) . reduce 100 (src line 560) state 116 metric_name_spec: STRING. (101) . reduce 101 (src line 565) state 117 conditional_expr: pattern_expr logical_op opt_nl.logical_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 151 indexed_expr goto 32 id_expr goto 41 match_expr goto 26 builtin_expr goto 33 mark_pos goto 88 state 118 logical_expr: logical_expr logical_op opt_nl.bitwise_expr logical_expr: logical_expr logical_op opt_nl.match_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . 
reduce 124 (src line 707) primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 152 indexed_expr goto 32 id_expr goto 41 match_expr goto 153 builtin_expr goto 33 mark_pos goto 88 state 119 concat_expr: concat_expr PLUS opt_nl.regex_pattern concat_expr: concat_expr PLUS opt_nl.id_expr mark_pos: . (124) ID shift 43 . reduce 124 (src line 707) id_expr goto 155 regex_pattern goto 154 mark_pos goto 135 state 120 bitwise_expr: bitwise_expr bitwise_op opt_nl.rel_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 87 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 156 shift_expr goto 40 indexed_expr goto 32 id_expr goto 41 builtin_expr goto 33 mark_pos goto 88 state 121 assign_expr: unary_expr ASSIGN opt_nl.logical_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 157 indexed_expr goto 32 id_expr goto 41 match_expr goto 26 builtin_expr goto 33 mark_pos goto 88 state 122 assign_expr: unary_expr ADD_ASSIGN opt_nl.logical_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . 
reduce 124 (src line 707) primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 158 indexed_expr goto 32 id_expr goto 41 match_expr goto 26 builtin_expr goto 33 mark_pos goto 88 state 123 match_expr: primary_expr match_op opt_nl.pattern_expr match_expr: primary_expr match_op opt_nl.primary_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 160 indexed_expr goto 32 id_expr goto 41 concat_expr goto 24 pattern_expr goto 159 regex_pattern goto 29 builtin_expr goto 33 mark_pos goto 129 state 124 rel_expr: rel_expr rel_op opt_nl.shift_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 87 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 shift_expr goto 161 indexed_expr goto 32 id_expr goto 41 builtin_expr goto 33 mark_pos goto 88 state 125 indexed_expr: indexed_expr LSQUARE arg_expr_list.RSQUARE arg_expr_list: arg_expr_list.COMMA arg_expr RSQUARE shift 162 COMMA shift 163 . error state 126 arg_expr_list: arg_expr. (87) . reduce 87 (src line 480) state 127 logical_expr: logical_expr.logical_op opt_nl bitwise_expr logical_expr: logical_expr.logical_op opt_nl match_expr arg_expr: logical_expr. (89) AND shift 62 OR shift 63 . reduce 89 (src line 493) logical_op goto 64 state 128 arg_expr: pattern_expr. (90) . reduce 90 (src line 496) state 129 builtin_expr: mark_pos.BUILTIN LPAREN RPAREN builtin_expr: mark_pos.BUILTIN LPAREN arg_expr_list RPAREN regex_pattern: mark_pos.DIV in_regex REGEX DIV BUILTIN shift 49 DIV shift 50 . error state 130 primary_expr: LPAREN logical_expr RPAREN. (79) . 
reduce 79 (src line 428) state 131 shift_expr: shift_expr shift_op opt_nl.additive_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 87 multiplicative_expr goto 44 additive_expr goto 164 postfix_expr goto 86 unary_expr goto 91 indexed_expr goto 32 id_expr goto 41 builtin_expr goto 33 mark_pos goto 88 state 132 additive_expr: additive_expr add_op opt_nl.multiplicative_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 87 multiplicative_expr goto 165 postfix_expr goto 86 unary_expr goto 91 indexed_expr goto 32 id_expr goto 41 builtin_expr goto 33 mark_pos goto 88 state 133 multiplicative_expr: multiplicative_expr mul_op opt_nl.unary_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 87 postfix_expr goto 86 unary_expr goto 166 indexed_expr goto 32 id_expr goto 41 builtin_expr goto 33 mark_pos goto 88 state 134 stmt: CONST id_expr opt_nl concat_expr. (11) concat_expr: concat_expr.PLUS opt_nl regex_pattern concat_expr: concat_expr.PLUS opt_nl id_expr PLUS shift 68 . reduce 11 (src line 131) state 135 regex_pattern: mark_pos.DIV in_regex REGEX DIV DIV shift 50 . error state 136 conditional_stmt: conditional_expr compound_stmt ELSE compound_stmt. (14) . reduce 14 (src line 146) state 137 compound_stmt: LCURLY stmt_list RCURLY. (22) . reduce 22 (src line 192) state 138 builtin_expr: mark_pos BUILTIN LPAREN RPAREN. (85) . reduce 85 (src line 467) state 139 builtin_expr: mark_pos BUILTIN LPAREN arg_expr_list.RPAREN arg_expr_list: arg_expr_list.COMMA arg_expr RPAREN shift 167 COMMA shift 163 . 
error state 140 regex_pattern: mark_pos DIV in_regex REGEX.DIV DIV shift 168 . error state 141 decorator_declaration: mark_pos DEF ID compound_stmt. (118) . reduce 118 (src line 666) state 142 delete_stmt: mark_pos DEL postfix_expr AFTER.DURATIONLITERAL DURATIONLITERAL shift 169 . error state 143 metric_decl_attr_spec: metric_decl_attr_spec metric_by_spec. (95) . reduce 95 (src line 532) state 144 metric_decl_attr_spec: metric_decl_attr_spec metric_as_spec. (96) . reduce 96 (src line 538) state 145 metric_decl_attr_spec: metric_decl_attr_spec metric_buckets_spec. (97) . reduce 97 (src line 543) state 146 metric_decl_attr_spec: metric_decl_attr_spec metric_limit_spec. (98) . reduce 98 (src line 548) state 147 metric_by_spec: BY.metric_by_expr_list STRING shift 174 ID shift 173 . error id_or_string goto 172 metric_by_expr goto 171 metric_by_expr_list goto 170 state 148 metric_as_spec: AS.STRING STRING shift 175 . error state 149 metric_buckets_spec: BUCKETS.metric_buckets_list INTLITERAL shift 178 FLOATLITERAL shift 177 . error metric_buckets_list goto 176 state 150 metric_limit_spec: LIMIT.INTLITERAL INTLITERAL shift 179 . error state 151 conditional_expr: pattern_expr logical_op opt_nl logical_expr. (18) logical_expr: logical_expr.logical_op opt_nl bitwise_expr logical_expr: logical_expr.logical_op opt_nl match_expr AND shift 62 OR shift 63 . reduce 18 (src line 171) logical_op goto 64 state 152 logical_expr: logical_expr logical_op opt_nl bitwise_expr. (29) bitwise_expr: bitwise_expr.bitwise_op opt_nl rel_expr BITAND shift 70 XOR shift 72 BITOR shift 71 . reduce 29 (src line 225) bitwise_op goto 69 state 153 logical_expr: logical_expr logical_op opt_nl match_expr. (30) . reduce 30 (src line 229) state 154 concat_expr: concat_expr PLUS opt_nl regex_pattern. (60) . reduce 60 (src line 352) state 155 concat_expr: concat_expr PLUS opt_nl id_expr. (61) . reduce 61 (src line 356) state 156 bitwise_expr: bitwise_expr bitwise_op opt_nl rel_expr. 
(34) rel_expr: rel_expr.rel_op opt_nl shift_expr LT shift 79 GT shift 80 LE shift 81 GE shift 82 EQ shift 83 NE shift 84 . reduce 34 (src line 246) rel_op goto 78 state 157 assign_expr: unary_expr ASSIGN opt_nl logical_expr. (25) logical_expr: logical_expr.logical_op opt_nl bitwise_expr logical_expr: logical_expr.logical_op opt_nl match_expr AND shift 62 OR shift 63 . reduce 25 (src line 208) logical_op goto 64 state 158 assign_expr: unary_expr ADD_ASSIGN opt_nl logical_expr. (26) logical_expr: logical_expr.logical_op opt_nl bitwise_expr logical_expr: logical_expr.logical_op opt_nl match_expr AND shift 62 OR shift 63 . reduce 26 (src line 213) logical_op goto 64 state 159 match_expr: primary_expr match_op opt_nl pattern_expr. (54) . reduce 54 (src line 321) state 160 match_expr: primary_expr match_op opt_nl primary_expr. (55) . reduce 55 (src line 326) state 161 rel_expr: rel_expr rel_op opt_nl shift_expr. (39) shift_expr: shift_expr.shift_op opt_nl additive_expr SHL shift 93 SHR shift 94 . reduce 39 (src line 265) shift_op goto 92 state 162 indexed_expr: indexed_expr LSQUARE arg_expr_list RSQUARE. (83) . reduce 83 (src line 449) state 163 arg_expr_list: arg_expr_list COMMA.arg_expr mark_pos: . (124) STRING shift 36 CAPREF shift 34 CAPREF_NAMED shift 35 ID shift 43 INTLITERAL shift 38 FLOATLITERAL shift 39 NOT shift 31 LPAREN shift 37 . reduce 124 (src line 707) primary_expr goto 28 multiplicative_expr goto 44 additive_expr goto 42 postfix_expr goto 86 unary_expr goto 91 rel_expr goto 30 shift_expr goto 40 bitwise_expr goto 25 logical_expr goto 127 indexed_expr goto 32 id_expr goto 41 concat_expr goto 24 pattern_expr goto 128 regex_pattern goto 29 match_expr goto 26 builtin_expr goto 33 arg_expr goto 180 mark_pos goto 129 state 164 shift_expr: shift_expr shift_op opt_nl additive_expr. (47) additive_expr: additive_expr.add_op opt_nl multiplicative_expr MINUS shift 97 PLUS shift 96 . 
reduce 47 (src line 290) add_op goto 95 state 165 additive_expr: additive_expr add_op opt_nl multiplicative_expr. (51) multiplicative_expr: multiplicative_expr.mul_op opt_nl unary_expr DIV shift 100 MOD shift 101 MUL shift 99 POW shift 102 . reduce 51 (src line 307) mul_op goto 98 state 166 multiplicative_expr: multiplicative_expr mul_op opt_nl unary_expr. (63) . reduce 63 (src line 366) state 167 builtin_expr: mark_pos BUILTIN LPAREN arg_expr_list RPAREN. (86) . reduce 86 (src line 472) state 168 regex_pattern: mark_pos DIV in_regex REGEX DIV. (91) . reduce 91 (src line 501) state 169 delete_stmt: mark_pos DEL postfix_expr AFTER DURATIONLITERAL. (120) . reduce 120 (src line 682) state 170 metric_by_spec: BY metric_by_expr_list. (107) metric_by_expr_list: metric_by_expr_list.COMMA metric_by_expr COMMA shift 181 . reduce 107 (src line 596) state 171 metric_by_expr_list: metric_by_expr. (108) . reduce 108 (src line 603) state 172 metric_by_expr: id_or_string. (110) . reduce 110 (src line 616) state 173 id_or_string: ID. (122) . reduce 122 (src line 693) state 174 id_or_string: STRING. (123) . reduce 123 (src line 698) state 175 metric_as_spec: AS STRING. (111) . reduce 111 (src line 622) state 176 metric_buckets_spec: BUCKETS metric_buckets_list. (113) metric_buckets_list: metric_buckets_list.COMMA FLOATLITERAL metric_buckets_list: metric_buckets_list.COMMA INTLITERAL COMMA shift 182 . reduce 113 (src line 637) state 177 metric_buckets_list: FLOATLITERAL. (114) . reduce 114 (src line 643) state 178 metric_buckets_list: INTLITERAL. (115) . reduce 115 (src line 649) state 179 metric_limit_spec: LIMIT INTLITERAL. (112) . reduce 112 (src line 629) state 180 arg_expr_list: arg_expr_list COMMA arg_expr. (88) . reduce 88 (src line 486) state 181 metric_by_expr_list: metric_by_expr_list COMMA.metric_by_expr STRING shift 174 ID shift 173 . 
error id_or_string goto 172 metric_by_expr goto 183 state 182 metric_buckets_list: metric_buckets_list COMMA.FLOATLITERAL metric_buckets_list: metric_buckets_list COMMA.INTLITERAL INTLITERAL shift 185 FLOATLITERAL shift 184 . error state 183 metric_by_expr_list: metric_by_expr_list COMMA metric_by_expr. (109) . reduce 109 (src line 609) state 184 metric_buckets_list: metric_buckets_list COMMA FLOATLITERAL. (116) . reduce 116 (src line 654) state 185 metric_buckets_list: metric_buckets_list COMMA INTLITERAL. (117) . reduce 117 (src line 659) 66 terminals, 55 nonterminals 128 grammar rules, 186/16000 states 0 shift/reduce, 0 reduce/reduce conflicts reported 104 working sets used memory: parser 397/240000 169 extra closures 282 shift entries, 13 exceptions 116 goto entries 193 entries saved by goto default Optimizer space used: output 249/240000 249 table entries, 2 zero maximum spread: 66, maximum offset: 181 mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/position/000077500000000000000000000000001460063571700237425ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/position/position.go000066400000000000000000000026421460063571700261410ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. // Package position implements a data structure for storing source code positions. package position import "fmt" // A Position is the location in the source program that a token appears. It // can specify a single character in the pinput, in which case the start and // end columns are the same, or a span of sequential characters on one line. type Position struct { Filename string // Source filename in which this token appears. Line int // Line in the source for this token. Startcol int // Starting and ending columns in the source for this token. Endcol int } // String formats a position to be useful for printing messages associated with // this position, e.g. compiler errors. 
func (p Position) String() string { r := fmt.Sprintf("%s:%d:%d", p.Filename, p.Line+1, p.Startcol+1) if p.Endcol > p.Startcol { r += fmt.Sprintf("-%d", p.Endcol+1) } return r } // MergePosition returns the union of two positions such that the result contains both inputs. func Merge(a, b *Position) *Position { if a == nil { return b } if b == nil { return a } if a.Filename != b.Filename { return a } // TODO(jaq): handle multi-line positions if a.Line != b.Line { return a } r := *a if b.Startcol < r.Startcol { r.Startcol = b.Startcol } if b.Endcol > r.Endcol { r.Endcol = b.Endcol } return &r } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/symbol/000077500000000000000000000000001460063571700234035ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/symbol/symtab.go000066400000000000000000000075611460063571700252420ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. package symbol import ( "bytes" "fmt" "github.com/google/mtail/internal/runtime/compiler/position" "github.com/google/mtail/internal/runtime/compiler/types" ) // Kind enumerates the kind of a Symbol. type Kind int // Kind enumerates the kinds of symbols found in the program text. const ( VarSymbol Kind = iota // Variables CaprefSymbol // Capture group references DecoSymbol // Decorators PatternSymbol // Named pattern constants endSymbol // for testing ) func (k Kind) String() string { switch k { case VarSymbol: return "variable" case CaprefSymbol: return "capture group reference" case DecoSymbol: return "decorator" case PatternSymbol: return "named pattern constant" default: panic("unexpected symbolkind") } } // Symbol describes a named program object. 
type Symbol struct { Name string // identifier name Kind Kind // kind of program object Type types.Type // object's type Pos *position.Position // Source file position of definition Binding interface{} // binding to storage allocated in runtime Addr int // Address offset in another structure, object specific Used bool // Optional marker that this symbol is used after declaration. } // NewSymbol creates a record of a given symbol kind, named name, found at loc. func NewSymbol(name string, kind Kind, pos *position.Position) (sym *Symbol) { return &Symbol{name, kind, types.Undef, pos, nil, 0, false} } // Scope maintains a record of the identifiers declared in the current program // scope, and a link to the parent scope. type Scope struct { Parent *Scope Symbols map[string]*Symbol } // NewScope creates a new scope within the parent scope. func NewScope(parent *Scope) *Scope { return &Scope{parent, make(map[string]*Symbol)} } // Insert attempts to insert a symbol into the scope. If the scope already // contains an object alt with the same name, the scope is unchanged and the // function returns alt. Otherwise the symbol is inserted, and returns nil. func (s *Scope) Insert(sym *Symbol) (alt *Symbol) { if alt = s.Symbols[sym.Name]; alt == nil { s.Symbols[sym.Name] = sym } return } // InsertAlias attempts to insert a duplicate name for an existing symbol into // the scope. If the scope already contains an object alt with the alias, the // scope is unchanged and the function returns alt. Otherwise, the symbol is // inserted and the function returns nil. func (s *Scope) InsertAlias(sym *Symbol, alias string) (alt *Symbol) { if alt := s.Symbols[alias]; alt == nil { s.Symbols[alias] = sym } return } // Lookup returns the symbol with the given name if it is found in this or any // parent scope, otherwise nil. 
func (s *Scope) Lookup(name string, kind Kind) *Symbol { for scope := s; scope != nil; scope = scope.Parent { if sym := scope.Symbols[name]; sym != nil && sym.Kind == kind { return sym } } return nil } // String prints the current scope and all parents to a string, recursing up to // the root scope. This method is only used for debugging. func (s *Scope) String() string { var buf bytes.Buffer fmt.Fprintf(&buf, "scope %p {", s) if s != nil { fmt.Fprintln(&buf) if len(s.Symbols) > 0 { for name, sym := range s.Symbols { fmt.Fprintf(&buf, "\t%q: %v %q %v\n", name, sym.Kind, sym.Name, sym.Used) } } if s.Parent != nil { fmt.Fprintf(&buf, "%s", s.Parent.String()) } } fmt.Fprintf(&buf, "}\n") return buf.String() } // CopyFrom copies all the symbols from another scope object into this one. // It recurses up the input scope copying all visible symbols into one. func (s *Scope) CopyFrom(o *Scope) { for _, sym := range o.Symbols { s.Insert(sym) } if o.Parent != nil { s.CopyFrom(o.Parent) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/symbol/symtab_test.go000066400000000000000000000033301460063571700262670ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. package symbol import ( "math/rand" "reflect" "testing" "testing/quick" "github.com/google/mtail/internal/testutil" ) func TestInsertLookup(t *testing.T) { s := NewScope(nil) sym1 := NewSymbol("foo", VarSymbol, nil) if r := s.Insert(sym1); r != nil { t.Errorf("Insert already had sym1: %v", r) } r1 := s.Lookup("foo", VarSymbol) testutil.ExpectNoDiff(t, r1, sym1) } // Generate implements the quick.Generator interface for SymbolKind. func (Kind) Generate(rand *rand.Rand, _ int) reflect.Value { return reflect.ValueOf(Kind(rand.Intn(int(endSymbol)))) } func TestInsertLookupQuick(t *testing.T) { testutil.SkipIfShort(t) check := func(name string, kind Kind) bool { // Create a new scope each run because scope doesn't overwrite on insert. 
scope := NewScope(nil) sym := NewSymbol(name, kind, nil) a := scope.Insert(sym) if a != nil { return false } b := scope.Lookup(name, kind) diff := testutil.Diff(a, b) return diff != "" } q := &quick.Config{MaxCount: 100000} if err := quick.Check(check, q); err != nil { t.Error(err) } } func TestNestedScope(t *testing.T) { s := NewScope(nil) s1 := NewScope(s) sym1 := NewSymbol("bar", VarSymbol, nil) if r := s.Insert(sym1); r != nil { t.Errorf("Insert already had sym1: %v", r) } sym2 := NewSymbol("foo", VarSymbol, nil) if r1 := s1.Insert(sym2); r1 != nil { t.Errorf("Insert already had sym2: %v", r1) } if s1.Lookup("foo", VarSymbol) == nil { t.Errorf("foo not found in s1") } if s.Lookup("foo", VarSymbol) != nil { t.Errorf("foo found in s") } if s1.Lookup("bar", VarSymbol) == nil { t.Errorf("bar not found from s1") } } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/types/000077500000000000000000000000001460063571700232425ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/types/regexp.go000066400000000000000000000006441460063571700250670ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // This file is available under the Apache license. package types import ( "regexp/syntax" ) // ParseRegexp ensures we use the same regexp syntax.Flags across all // invocations of this method. func ParseRegexp(pattern string) (re *syntax.Regexp, err error) { re, err = syntax.Parse(pattern, syntax.Perl) if err != nil { return } re = re.Simplify() return } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/types/types.go000066400000000000000000000365371460063571700247530ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. package types import ( "errors" "fmt" "regexp/syntax" "strings" "sync" "github.com/golang/glog" ) // Type represents a type in the mtail program. type Type interface { // Root returns an exemplar Type after unification occurs. 
If the type
	// system is complete after unification, Root will be a TypeOperator. Root
	// is the equivalent of Find in the union-find algorithm.
	Root() Type

	// String returns a string representation of a Type.
	String() string
}

// TypeError describes an error in which a type was expected, but another was encountered.
type TypeError struct {
	error    error
	expected Type
	received Type
}

// Sentinel errors wrapped inside a TypeError; retrievable via Unwrap/errors.Is.
var (
	ErrRecursiveUnification = errors.New("recursive unification error")
	ErrTypeMismatch         = errors.New("type mismatch")
	ErrInternal             = errors.New("internal error")
)

// Root implements Type; a TypeError is its own root exemplar.
func (e *TypeError) Root() Type {
	return e
}

// String renders the error with the expected and received types, substituting
// "incomplete type" for any side that still contains unbound type variables.
func (e *TypeError) String() string {
	if e == nil || e.error == nil {
		return "type error"
	}
	var estr, rstr string
	if IsComplete(e.expected) {
		estr = e.expected.String()
	} else {
		estr = "incomplete type"
	}
	if IsComplete(e.received) {
		rstr = e.received.String()
	} else {
		rstr = "incomplete type"
	}
	glog.V(2).Infof("%s: expected %q received %q", e.error, e.expected, e.received)
	return fmt.Sprintf("%s; expected %s received %s", e.error, estr, rstr)
}

// Error implements the error interface.  Note the value receiver: both
// TypeError and *TypeError satisfy error.
func (e TypeError) Error() string {
	return e.String()
}

// Unwrap exposes the wrapped sentinel error for errors.Is/errors.As.
func (e *TypeError) Unwrap() error {
	return e.error
}

// AsTypeError behaves like `errors.As`, attempting to cast the type `t` into a
// provided `target` TypeError and returning if it was successful.
func AsTypeError(t Type, target **TypeError) (ok bool) {
	*target, ok = t.(*TypeError)
	return ok
}

// IsTypeError behaves like `errors.Is`, indicating that the type is a TypeError.
func IsTypeError(t Type) bool {
	var e *TypeError
	return AsTypeError(t, &e)
}

// Guarded counter used to hand out unique Variable IDs.
var (
	nextVariableIDMu sync.Mutex
	nextVariableID   int
)

// Variable represents an unbound type variable in the type system.
type Variable struct {
	ID int

	// Instance is set if this variable has been bound to a type.
	instanceMu sync.RWMutex
	Instance   Type
}

// NewVariable constructs a new unique TypeVariable.
func NewVariable() *Variable {
	// Serialize ID allocation so every Variable gets a distinct ID.
	nextVariableIDMu.Lock()
	id := nextVariableID
	nextVariableID++
	nextVariableIDMu.Unlock()
	return &Variable{ID: id}
}

// Root returns an exemplar of this TypeVariable, in this case the root of the unification tree.
func (t *Variable) Root() Type {
	t.instanceMu.Lock()
	defer t.instanceMu.Unlock()
	if t.Instance == nil {
		return t
	}
	// Path compression: point Instance directly at the root so later
	// lookups are shorter.
	r := t.Instance.Root()
	t.Instance = r
	return r
}

// String prints the bound instance if there is one, otherwise a placeholder
// name derived from the variable's ID.
func (t *Variable) String() string {
	t.instanceMu.RLock()
	defer t.instanceMu.RUnlock()
	if t.Instance != nil {
		return t.Instance.String()
	}
	return fmt.Sprintf("typeVar%d", t.ID)
}

// SetInstance sets the exemplar instance of this TypeVariable, during
// unification. SetInstance is the equivalent of Union in the Union-Find
// algorithm.
func (t *Variable) SetInstance(t1 Type) {
	t.instanceMu.Lock()
	defer t.instanceMu.Unlock()
	t.Instance = t1
}

// Operator represents a type scheme in the type system.
type Operator struct {
	// Name is a common name for this operator
	Name string
	// Args is the sequence of types that are parameters to this type.  They
	// may be fully bound type operators, or partially defined (i.e. contain
	// TypeVariables) in which case they represent polymorphism in the operator
	// they are arguments to.
	Args []Type
}

// Root returns an exemplar of a TypeOperator, i.e. itself.
func (t *Operator) Root() Type {
	return t
}

// String formats the operator: nullary/unary forms are prefix
// ("Name arg"), while two or more args are printed infix with the
// operator name as separator (e.g. "Int→Int").
func (t *Operator) String() (s string) {
	switch l := len(t.Args); {
	case l < 2:
		s = t.Name
		for _, a := range t.Args {
			s += " " + a.String()
		}
	default:
		s = t.Args[0].String()
		for _, a := range t.Args[1:] {
			s += t.Name + a.String()
		}
	}
	return s
}

// Operator names for the built-in type schemes.
const (
	functionName  = "→"
	dimensionName = "⨯"
	alternateName = "|"
)

// Function is a convenience method, which instantiates a new Function type
// scheme, with the given args as parameters.
func Function(args ...Type) *Operator {
	return &Operator{functionName, args}
}

// IsFunction returns true if the given type is a Function type.
func IsFunction(t Type) bool { if v, ok := t.(*Operator); ok { return v.Name == functionName } return false } // Dimension is a convenience method which instantiates a new Dimension type // scheme, with the given args as the dimensions of the type. (This type looks // a lot like a Product type.) func Dimension(args ...Type) *Operator { return &Operator{dimensionName, args} } // IsDimension returns true if the given type is a Dimension type. func IsDimension(t Type) bool { if v, ok := t.(*Operator); ok { return v.Name == dimensionName } return false } // Alternate is a convenience method which instantiates a new Alternate type // scheme, with the given args as the possible types this type may take. (You // might know this sort of type by the name Sum type.) func Alternate(args ...Type) *Operator { return &Operator{alternateName, args} } // IsAlternate returns true if the given type is an Alternate type. func IsAlternate(t Type) bool { if v, ok := t.(*Operator); ok { return v.Name == alternateName } return false } // IsComplete returns true if the type and all its arguments have non-variable exemplars. func IsComplete(t Type) bool { switch v := t.Root().(type) { case *Variable: return false case *Operator: for _, a := range v.Args { if !IsComplete(a) { return false } } return true } return false } // Builtin type constants. var ( Error = &TypeError{} InternalError = &TypeError{error: ErrInternal} Undef = &Operator{"Undef", []Type{}} None = &Operator{"None", []Type{}} Bool = &Operator{"Bool", []Type{}} Int = &Operator{"Int", []Type{}} Float = &Operator{"Float", []Type{}} String = &Operator{"String", []Type{}} Pattern = &Operator{"Pattern", []Type{}} // TODO(jaq): use composite type so we can typecheck the bucket directly, e.g. hist[j] = i. Buckets = &Operator{"Buckets", []Type{}} // Numeric types can be either Int or Float. Numeric = Alternate(Int, Float) ) // Builtins is a mapping of the builtin language functions to their type definitions. 
var Builtins = map[string]Type{
	"int":         Function(NewVariable(), Int),
	"bool":        Function(NewVariable(), Bool),
	"float":       Function(NewVariable(), Float),
	"string":      Function(NewVariable(), String),
	"timestamp":   Function(Int),
	"len":         Function(String, Int),
	"settime":     Function(Int, None),
	"strptime":    Function(String, String, None),
	"strtol":      Function(String, Int, Int),
	"tolower":     Function(String, String),
	"getfilename": Function(String),
	"subst":       Function(Pattern, String, String, String),
}

// FreshType returns a new type from the provided type scheme, replacing any
// unbound type variables with new type variables.
func FreshType(t Type) Type {
	// mappings keeps track of replaced variables in this type so that t -> t
	// becomes q -> q not q -> r
	mappings := make(map[*Variable]*Variable)

	var freshRec func(Type) Type
	freshRec = func(tp Type) Type {
		p := tp.Root()
		switch p1 := p.(type) {
		case *Variable:
			if _, ok := mappings[p1]; !ok {
				mappings[p1] = NewVariable()
			}
			return mappings[p1]
		case *Operator:
			// Rebuild the operator, freshening each argument recursively.
			args := make([]Type, 0, len(p1.Args))
			for _, arg := range p1.Args {
				args = append(args, freshRec(arg))
			}
			return &Operator{p1.Name, args}
		default:
			glog.V(1).Infof("Unexpected type p1: %v", p1)
		}
		return tp
	}
	return freshRec(t)
}

// OccursIn returns true if `v` is in any of `types`.
func OccursIn(v Type, types []Type) bool {
	for _, t2 := range types {
		if occursInType(v, t2) {
			return true
		}
	}
	return false
}

// occursInType returns true if `v` is `t2` or recursively contained within `t2`.
func occursInType(v Type, t2 Type) bool {
	root := t2.Root()
	if Equals(root, v) {
		return true
	}
	if to, ok := root.(*Operator); ok {
		return OccursIn(v, to.Args)
	}
	return false
}

// Equals compares two types, testing for equality.
// Equals operates on the roots of both types.  Two Variables are equal iff
// they share an ID; a Variable and a non-Variable are equal if the Variable
// occurs within the other type; Operators compare by name and arguments;
// TypeErrors never compare equal (not even to themselves).
func Equals(t1, t2 Type) bool {
	t1, t2 = t1.Root(), t2.Root()
	switch t1 := t1.(type) {
	case *Variable:
		r2, ok := t2.(*Variable)
		if !ok {
			return occursInType(t1, t2)
		}
		return t1.ID == r2.ID
	case *Operator:
		t2, ok := t2.(*Operator)
		if !ok {
			return false
		}
		if t1.Name != t2.Name {
			return false
		}
		if len(t1.Args) != len(t2.Args) {
			return false
		}
		for i := range t1.Args {
			if !Equals(t1.Args[i], t2.Args[i]) {
				return false
			}
		}
		return true
	case *TypeError:
		return false
	}
	return true
}

// Unify performs type unification of both parameter Types.  It returns the
// least upper bound of both types, the most general type that is capable of
// representing both parameters.  If either type is a type variable, then that
// variable is unified with the LUB.  In reporting errors, it is assumed that a
// is the expected type and b is the type observed.
func Unify(a, b Type) Type {
	glog.V(2).Infof("Unifying %v and %v", a, b)
	aR, bR := a.Root(), b.Root()
	switch aT := aR.(type) {
	case *Variable:
		switch bT := bR.(type) {
		case *Variable:
			// Two distinct variables: bind a's to b's.
			if aT.ID != bT.ID {
				glog.V(2).Infof("Making %q type %q", aT, bR)
				aT.SetInstance(bR)
				return bR
			}
			return aT
		case *Operator:
			// Occurs check prevents constructing an infinite type.
			if occursInType(aT, bT) {
				return &TypeError{ErrRecursiveUnification, aT, bT}
			}
			glog.V(2).Infof("Making %q type %q", aT, bR)
			aT.SetInstance(bR)
			return bR
		}
	case *Operator:
		switch bT := bR.(type) {
		case *Variable:
			// reverse args, to recurse the pattern above
			t := Unify(b, a)
			var e *TypeError
			if AsTypeError(t, &e) {
				// Re-reverse from the recursion
				return &TypeError{ErrTypeMismatch, e.received, e.expected}
			}
			return t
		case *Operator:
			switch {
			case IsAlternate(aT) && !IsAlternate(bT):
				// A concrete type unifies with an Alternate that contains it.
				if OccursIn(bT, aT.Args) {
					return bT
				}
				return &TypeError{ErrTypeMismatch, aT, bT}
			case IsAlternate(bT) && !IsAlternate(aT):
				t := Unify(b, a)
				var e *TypeError
				if AsTypeError(t, &e) {
					// We flipped the args, flip them back.
					return &TypeError{e.error, e.received, e.expected}
				}
				return t
			case IsAlternate(aT) && IsAlternate(bT):
				// Both are Alternates, find intersection of type arguments.
				var args []Type
				for _, arg := range bT.Args {
					if OccursIn(arg, aT.Args) {
						args = append(args, arg)
					}
				}
				if len(args) == 0 {
					return &TypeError{ErrTypeMismatch, aT, bT}
				}
				if len(args) == 1 {
					// A single surviving alternative collapses to that type.
					return args[0]
				}
				return &Operator{alternateName, args}
			default:
				// Two plain operators: arities must agree.
				if len(aT.Args) != len(bT.Args) {
					return &TypeError{ErrTypeMismatch, aT, bT}
				}
				var rType *Operator
				if aT.Name != bT.Name {
					// Differently-named operators may still coerce; defer to
					// the LUB computation.
					t := LeastUpperBound(a, b)
					glog.V(2).Infof("Got LUB = %#v", t)
					var e *TypeError
					if AsTypeError(t, &e) {
						return e
					}
					var ok bool
					if rType, ok = t.(*Operator); !ok {
						return &TypeError{ErrRecursiveUnification, aT, bT}
					}
				} else {
					rType = &Operator{aT.Name, []Type{}}
				}
				// Unify pairwise arguments into the result operator.
				rType.Args = make([]Type, len(aT.Args))
				for i, argA := range aT.Args {
					t := Unify(argA, bT.Args[i])
					var e *TypeError
					if AsTypeError(t, &e) {
						return e
					}
					rType.Args[i] = t
				}
				return rType
			}
		}
	}
	return &TypeError{ErrInternal, a, b}
}

// TypeCoercion records that type sub may be implicitly widened to type sup.
type TypeCoercion struct {
	sub, sup Type
}

// type coercions for builtin types
var typeCoercions = []TypeCoercion{
	{Bool, Int},
	{Bool, Float}, // contentious
	{Int, Float},  // contentious
	{Bool, String},
	{Int, String},
	{Float, String},
	{String, Pattern},
	{Int, Bool}, // an integer using C style cast to bool
}

// LeastUpperBound returns the smallest type that may contain both parameter types.
func LeastUpperBound(a, b Type) Type {
	a1, b1 := a.Root(), b.Root()
	glog.V(2).Infof("Computing LUB(%q, %q)", a1, b1)
	// Equal types are their own LUB.
	if Equals(a1, b1) {
		return a1
	}
	// If either is a TypeVariable, the other is the lub
	if _, ok := a1.(*Variable); ok {
		return b1
	}
	if _, ok := b1.(*Variable); ok {
		return a1
	}
	// If either is Undef, other is the lub
	if Equals(a1, Undef) {
		return b1
	}
	if Equals(b1, Undef) {
		return a1
	}
	// Easy substitutions
	for _, pair := range typeCoercions {
		if (Equals(a1, pair.sub) && Equals(b1, pair.sup)) ||
			(Equals(b1, pair.sub) && Equals(a1, pair.sup)) {
			return pair.sup
		}
	}
	// Patterns imply match status, which is boolean.
	if (Equals(a1, Pattern) && Equals(b1, Bool)) ||
		(Equals(a1, Bool) && Equals(b1, Pattern)) {
		return Bool
	}
	if (Equals(a1, Bool) && Equals(b1, Int)) ||
		(Equals(a1, Int) && Equals(b1, Bool)) {
		return Int
	}
	// A Numeric can be an Int, or a Float, but not vice versa.
	if (Equals(a1, Numeric) && Equals(b1, Int)) ||
		(Equals(a1, Int) && Equals(b1, Numeric)) {
		return Int
	}
	if (Equals(a1, Numeric) && Equals(b1, Float)) ||
		(Equals(a1, Float) && Equals(b1, Numeric)) {
		return Float
	}
	// A string can be a pattern, but not vice versa.
	if (Equals(a1, String) && Equals(b1, Pattern)) ||
		(Equals(a1, Pattern) && Equals(b1, String)) {
		return Pattern
	}
	// A pattern and an Int are Bool
	if (Equals(a1, Pattern) && Equals(b1, Int)) ||
		(Equals(a1, Int) && Equals(b1, Pattern)) {
		return Bool
	}
	return &TypeError{ErrTypeMismatch, a, b}
}

// InferCaprefType determines a type for the nth capturing group in re, based on contents
// of that capture group.
func InferCaprefType(re *syntax.Regexp, n int) Type {
	group := getCaptureGroup(re, n)
	if group == nil {
		// No such capture group in this pattern.
		return None
	}
	if group.Op != syntax.OpAlternate {
		return inferGroupType(group)
	}
	// For an alternation, the group's type is the LUB of each branch's type.
	subType := Type(Undef)
	for _, sub := range group.Sub {
		subType = LeastUpperBound(subType, inferGroupType(sub))
	}
	return subType
}

// inferGroupType guesses a type for a single (non-alternating) regexp node by
// examining the set of runes it can match: sign characters only -> String,
// signs and digits -> Int, signs/digits/decimal point/exponent -> Float,
// anything else -> String.
func inferGroupType(group *syntax.Regexp) Type {
	switch {
	case groupOnlyMatches(group, "+-"):
		return String
	case groupOnlyMatches(group, "+-0123456789"):
		// Must be at least one digit in the group.
		if !strings.ContainsAny(group.String(), "0123456789") {
			return String
		}
		if group.Op == syntax.OpAlternate || group.Op == syntax.OpCharClass {
			return String
		}
		return Int
	case groupOnlyMatches(group, "+-0123456789.eE"):
		// Only one decimal point allowed.
		if strings.Count(group.String(), ".") > 1 {
			return String
		}
		return Float
	}
	return String
}

// getCaptureGroup returns the Regexp node of the capturing group numbered cgID
// in re.
func getCaptureGroup(re *syntax.Regexp, cgID int) *syntax.Regexp {
	if re.Op == syntax.OpCapture && re.Cap == cgID {
		return re.Sub[0]
	}
	// Depth-first search of subexpressions for the numbered group.
	for _, sub := range re.Sub {
		r := getCaptureGroup(sub, cgID)
		if r != nil {
			return r
		}
	}
	return nil
}

// groupOnlyMatches returns true iff group only matches runes in s.
func groupOnlyMatches(group *syntax.Regexp, s string) bool { switch group.Op { case syntax.OpLiteral: for _, r := range group.Rune { if !strings.ContainsRune(s, r) { return false } } return true case syntax.OpCharClass: for i := 0; i < len(group.Rune); i += 2 { lo, hi := group.Rune[i], group.Rune[i+1] for r := lo; r <= hi; r++ { if !strings.ContainsRune(s, r) { return false } } } return true case syntax.OpStar, syntax.OpPlus, syntax.OpRepeat, syntax.OpQuest, syntax.OpCapture: return groupOnlyMatches(group.Sub[0], s) case syntax.OpConcat, syntax.OpAlternate: for _, sub := range group.Sub { if !groupOnlyMatches(sub, s) { return false } } default: return false } return true } mtail-3.0.0~rc54+git0ff5/internal/runtime/compiler/types/types_test.go000066400000000000000000000137041460063571700260010ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // This file is available under the Apache license. package types import ( "errors" "fmt" "testing" "github.com/google/mtail/internal/testutil" ) var typeUnificationTests = []struct { a, b Type expected Type }{ // The unification of None with None is still None. { None, None, None, }, // The unification of a type T with itself is T. { String, String, String, }, { Int, Int, Int, }, { Float, Float, Float, }, { &Variable{ID: 0}, &Variable{ID: 0}, &Variable{ID: 0}, }, // The unification of any type operator with a type variable is the type operator { &Variable{}, None, None, }, { &Variable{}, Float, Float, }, { &Variable{}, Int, Int, }, { &Variable{}, String, String, }, { None, &Variable{}, None, }, { Float, &Variable{}, Float, }, { Int, &Variable{}, Int, }, { String, &Variable{}, String, }, // Unification of an Alternate with a concrete operator should be concrete. 
{ Alternate(Bool, Int, Float), Bool, Bool, }, { Pattern, Alternate(Bool, Pattern), Pattern, }, // Unification of an Alternate with an Alternate operator should be a reduced Alternate { Alternate(Bool, Int, Float), Alternate(Bool, Int), Alternate(Bool, Int), }, // Decompose an alternate to an operator. { Alternate(Pattern), Alternate(Bool, Pattern), Pattern, }, { &Variable{}, Alternate(Int, Float), Alternate(Int, Float), }, // The lub of Int and Float is Float. { Int, Float, Float, }, { Float, Int, Float, }, // The lub of Int and String is String. { Int, String, String, }, { String, Int, String, }, // The lub of Float and String is String. { Float, String, String, }, { String, Float, String, }, // lub of Bool and Int is an Int. { Bool, Int, Int, }, { Int, Bool, Int, }, // Strings can be Patterns. { Pattern, String, Pattern, }, { String, Pattern, Pattern, }, // Patterns and Ints can only be bool. { Pattern, Int, Bool, }, // Undef secedes to other { Undef, Int, Int, }, { String, Undef, String, }, { Undef, Undef, Undef, }, // TypeError supercedes other. { Pattern, &TypeError{}, &TypeError{}, }, { &TypeError{}, Float, &TypeError{}, }, // Numeric seceds to the concrete type { Numeric, Int, Int, }, { Int, Numeric, Int, }, { Numeric, Float, Float, }, { Float, Numeric, Float, }, } func TestTypeUnification(t *testing.T) { for _, tc := range typeUnificationTests { tc := tc t.Run(fmt.Sprintf("%s %s", tc.a, tc.b), func(t *testing.T) { tU := Unify(tc.a, tc.b) /* Type Errors never equal. 
*/ if IsTypeError(tc.expected) && IsTypeError(tU) { return } if !Equals(tc.expected, tU) { t.Errorf("want %q, got %q", tc.expected, tU) } }) } } var groupOnlyMatchesTests = []struct { pattern string check string expected bool }{ { `\d+`, "0123456789", true, }, { `[0123456789]`, "0123456789", true, }, { `(0|1|2|3|4|5|6|7|8|9)`, "0123456789", true, }, { `(\+|-)?\d+(\.\d+)?`, "0123456789", false, }, { `(\d+\.\d+)`, "0123456789.eE+-", true, }, { `(\+|-)?\d+(\.\d+)?`, "0123456789.eE+-", true, }, { `(?P-?\d+\.\d+)`, "0123456789.eE+-", true, }, { `(?P-)`, "+-", true, }, { `(?P-)`, "+-0123456789", true, }, { `\-`, "+-", true, }, { `\-`, "+-0123456789", true, }, { `\-|[0-9]`, "+-", false, }, } func TestGroupOnlyMatches(t *testing.T) { for _, tc := range groupOnlyMatchesTests { r, err := ParseRegexp(tc.pattern) testutil.FatalIfErr(t, err) result := groupOnlyMatches(r, tc.check) if result != tc.expected { t.Errorf("Pattern %q didn't only match check %q: expected %+v, received %+v", tc.pattern, tc.check, tc.expected, result) } } } var inferCaprefTypeTests = []struct { pattern string typ Type }{ { `\d+`, Int, }, { `-?\d+`, Int, }, { `[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?`, Float, }, { `-?\d+\.\d+`, Float, }, { `(\d+\.\d+)`, Float, }, { `\d+\.\d+\.\d+\.\d+`, String, }, { `-`, String, }, { `\-`, String, }, // A single - is not an Int, so the whole class cannot be Int. { `[-0-9]`, String, }, // Fun fact! This test gets simplified into `[\-0-9]` because the character // class is also an alternation. 
{ `-|[0-9]`, String, }, { `\d+\.\d+|\-`, String, }, { `\-|\d+\.\d+`, String, }, } func TestInferCaprefType(t *testing.T) { for _, tc := range inferCaprefTypeTests { tc := tc t.Run(tc.pattern, func(t *testing.T) { re, err := ParseRegexp(`(` + tc.pattern + `)`) testutil.FatalIfErr(t, err) r := InferCaprefType(re, 1) if !Equals(tc.typ, r) { t.Errorf("Types don't match: %q inferred %v, not %v", tc.pattern, r, tc.typ) } }) } } func TestTypeEquals(t *testing.T) { if Equals(NewVariable(), NewVariable()) { t.Error("Type variables are not same") } var e *TypeError t1 := NewVariable() t2 := NewVariable() ty := Unify(t1, t2) if AsTypeError(ty, &e) { t.Fatal(e) } if !Equals(t1, t2) { t.Errorf("Unified variables should be same: %v %v", t1, t2) } if !Equals(Int, Int) { t.Errorf("type constants not same") } t3 := NewVariable() if Equals(t3, Int) { t.Error("ununified type const and var") } ty = Unify(Int, t3) if AsTypeError(ty, &e) { t.Fatal(e) } if !Equals(t3, Int) { t.Error("unified variable and const not same") } typeErr := &TypeError{} if Equals(typeErr, typeErr) { t.Error("error type equals itself") } } func TestAsTypeError(t *testing.T) { e := &TypeError{ErrTypeMismatch, Int, Bool} var e1 *TypeError if !AsTypeError(e, &e1) { t.Errorf("want type error, got: %#v", e1) } if !errors.Is(e1.error, ErrTypeMismatch) { t.Errorf("want ErrTypeMismatch, got: %#v", e1.error) } if e.expected != e1.expected || e.received != e1.received { t.Errorf("want %#v, got: %#v", e.expected, e1.expected) t.Errorf("want %#v, got: %#v", e.received, e1.received) } } mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz.go000066400000000000000000000034511460063571700216140ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // This file is available under the Apache license. 
//go:build gofuzz // +build gofuzz package runtime import ( "bufio" "bytes" "context" "flag" "fmt" "github.com/google/mtail/internal/logline" "github.com/google/mtail/internal/runtime/compiler" "github.com/google/mtail/internal/runtime/vm" ) // U+2424 SYMBOL FOR NEWLINE const SEP = "␤" // Enable this when debugging with a fuzz crash artifact; it slows the fuzzer down when enabled. const dumpDebug = false func Fuzz(data []byte) int { // Data contains the program and sample input, separated by SEP. offset := bytes.Index(data, []byte(SEP)) if offset < 0 { // If no SEP, then append one and an empty line of input. offset = len(data) data = append(data, []byte(SEP+"\n")...) } fmt.Printf("data len %d, offset is %d, input starts at %d\n", len(data), offset, offset+len(SEP)) cOpts := []compiler.Option{} if dumpDebug { cOpts = append(cOpts, compiler.EmitAst(), compiler.EmitAstTypes()) } c, err := compiler.New(cOpts...) if err != nil { fmt.Println(err) return 0 } obj, err := c.Compile("fuzz", bytes.NewReader(data[:offset])) if err != nil { fmt.Println(err) return 0 // false } v := vm.New("fuzz", obj, false, nil, dumpDebug, dumpDebug) if dumpDebug { fmt.Println(v.DumpByteCode()) } v.HardCrash = true scanner := bufio.NewScanner(bytes.NewBuffer(data[offset+len(SEP):])) for scanner.Scan() { v.ProcessLogLine(context.Background(), logline.New(context.Background(), "fuzz", scanner.Text())) } return 1 } func init() { // We need to successfully parse flags to initialize the glog logger used // by the compiler, but the fuzzer gets called with flags captured by the // libfuzzer main, which we don't want to intercept here. 
flag.CommandLine.Parse([]string{}) } mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/000077500000000000000000000000001460063571700212625ustar00rootroot00000000000000mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/1.mtail000066400000000000000000000000041460063571700224440ustar00rootroot000000000000001{} mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/284.mtail000066400000000000000000000000721460063571700226260ustar00rootroot00000000000000counter c by x /"(?P\S+)"/ + /$/ { c[$x]++ } ␤ "a" mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/capref-double-regexp-in-cond.mtail000066400000000000000000000000371460063571700276370ustar00rootroot000000000000000||0||//||/;0/{$0||//||/;0/{}} mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/cmp-to-none.mtail000066400000000000000000000000241460063571700244420ustar00rootroot00000000000000strptime("","")<5{} mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/const-a.mtail000066400000000000000000000000361460063571700236550ustar00rootroot00000000000000const A /n/ A { } A && 1 { } mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/const-as-cond.mtail000066400000000000000000000000221460063571700247540ustar00rootroot00000000000000const A /n/ A { } mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/const-unused.mtail000066400000000000000000000000241460063571700247350ustar00rootroot00000000000000const l /l/ + /f/ l mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/datum-string-concat.mtail000066400000000000000000000000141460063571700261700ustar00rootroot00000000000000text l l+=l mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/len.mtail000066400000000000000000000000071460063571700230650ustar00rootroot00000000000000len(2) mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/match-01e1.mtail000066400000000000000000000000141460063571700240450ustar00rootroot00000000000000882=~01e1{} mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/match-str.mtail000066400000000000000000000000101460063571700242030ustar00rootroot00000000000000(0=~"") 
mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/match-to-int.mtail000066400000000000000000000000071460063571700246130ustar00rootroot000000000000006=~0{} mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/negate-none.mtail000066400000000000000000000000271460063571700245110ustar00rootroot00000000000000~strptime("",""){} ␤ mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/recursion-depth.mtail000066400000000000000000001107301460063571700254270ustar00rootroot00000000000000settime(111*01*01*.8*01*4/1*11*062222.8111*0.1*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*
062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*
01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*
4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1
*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*
.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/
1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/
1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.
8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222
.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1
*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*
01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4
/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/
101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/
1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01
*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4
/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4
/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*
.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*4/101*.8*01*4/1*11*062222.8111*0.1*4/1*11*01*01*.8*01*4/1*.8*01*4/1*1*01*.8*01*4/1*.8*1*01*4/1*.8*01*4/1*1*01*.8*1*2/1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*1*11*4294967297*32769*.8*32769*4/1*.8*01*4/0*1*01*.8*32770*0/10411190411.83) mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/retval-from-dec.mtail000066400000000000000000000000241460063571700252750ustar00rootroot00000000000000timer l l-- - l { } 
mtail-3.0.0~rc54+git0ff5/internal/runtime/fuzz/uninitialised.mtail000066400000000000000000000000461460063571700251530ustar00rootroot00000000000000gauge time/()/{time=$1 }settime(time) mtail-3.0.0~rc54+git0ff5/internal/runtime/httpstatus.go000066400000000000000000000057161460063571700230470ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // This file is available under the Apache license. package runtime import ( "fmt" "html/template" "io" "net/http" "github.com/google/mtail/internal/runtime/vm" ) const loaderTemplate = `

Program Loader

{{range $name, $errors := $.Errors}} {{end}}
program name errors load errors load successes unloads runtime errors last runtime error
{{ if index $.ProgLoaded $name}}{{$name}}{{else}}{{$name}}{{end}} {{if $errors}} {{$errors}} {{else}} No compile errors {{end}} {{index $.Loaderrors $name}} {{index $.Loadsuccess $name}} {{index $.Unloads $name}} {{index $.RuntimeErrors $name}}
{{index $.RuntimeErrorString $name}}
` // WriteStatusHTML writes the current state of the loader as HTML to the given writer w. func (r *Runtime) WriteStatusHTML(w io.Writer) error { t, err := template.New("loader").Parse(loaderTemplate) if err != nil { return err } r.programErrorMu.RLock() defer r.programErrorMu.RUnlock() data := struct { ProgLoaded map[string]bool Errors map[string]error Loaderrors map[string]string Loadsuccess map[string]string Unloads map[string]string RuntimeErrors map[string]string RuntimeErrorString map[string]string }{ make(map[string]bool), r.programErrors, make(map[string]string), make(map[string]string), make(map[string]string), make(map[string]string), make(map[string]string), } for name := range r.programErrors { if ProgLoadErrors.Get(name) != nil { data.Loaderrors[name] = ProgLoadErrors.Get(name).String() } if ProgLoads.Get(name) != nil { data.Loadsuccess[name] = ProgLoads.Get(name).String() } if ProgUnloads.Get(name) != nil { data.Unloads[name] = ProgUnloads.Get(name).String() } if vm.ProgRuntimeErrors.Get(name) != nil { data.RuntimeErrors[name] = vm.ProgRuntimeErrors.Get(name).String() } r.handleMu.RLock() if h, ok := r.handles[name]; ok { data.ProgLoaded[name] = true data.RuntimeErrorString[name] = h.vm.RuntimeErrorString() } r.handleMu.RUnlock() } return t.Execute(w, data) } func (r *Runtime) ProgzHandler(w http.ResponseWriter, req *http.Request) { prog := req.URL.Query().Get("prog") if prog != "" { r.handleMu.RLock() handle, ok := r.handles[prog] r.handleMu.RUnlock() if !ok { http.Error(w, "No program found", http.StatusNotFound) return } fmt.Fprint(w, handle.vm.DumpByteCode()) fmt.Fprintf(w, "\nLast runtime error:\n%s", handle.vm.RuntimeErrorString()) return } r.handleMu.RLock() defer r.handleMu.RUnlock() w.Header().Add("Content-type", "text/html") fmt.Fprintf(w, "
") } mtail-3.0.0~rc54+git0ff5/internal/runtime/options.go000066400000000000000000000057461460063571700223220ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // This file is available under the Apache license. package runtime import ( "time" "github.com/google/mtail/internal/runtime/compiler" "github.com/google/mtail/internal/runtime/vm" "github.com/prometheus/client_golang/prometheus" ) // Option configures a new program Runtime. type Option func(*Runtime) error // OverrideLocation sets the timezone location for the VM. func OverrideLocation(loc *time.Location) Option { return func(r *Runtime) error { r.overrideLocation = loc return nil } } // CompileOnly sets the Runtime to compile programs only, without executing them. func CompileOnly() Option { return func(r *Runtime) error { r.compileOnly = true return ErrorsAbort()(r) } } // ErrorsAbort sets the Runtime to abort the Runtime on compile errors. func ErrorsAbort() Option { return func(r *Runtime) error { r.errorsAbort = true return nil } } // DumpAst emits the AST after program compilation. func DumpAst() Option { return func(r *Runtime) error { r.cOpts = append(r.cOpts, compiler.EmitAst()) return nil } } // DumpAstTypes emits the AST after type checking. func DumpAstTypes() Option { return func(r *Runtime) error { r.cOpts = append(r.cOpts, compiler.EmitAstTypes()) return nil } } // DumpBytecode instructs the loader to print the compiled bytecode after code generation. func DumpBytecode() Option { return func(r *Runtime) error { r.dumpBytecode = true return nil } } // SyslogUseCurrentYear instructs the VM to annotate yearless timestamps with the current year. func SyslogUseCurrentYear() Option { return func(r *Runtime) error { r.syslogUseCurrentYear = true return nil } } // MaxRegexpLength sets the maximum length an mtail regular expression can have, in terms of characters. 
func MaxRegexpLength(maxRegexpLength int) Option { return func(r *Runtime) error { r.cOpts = append(r.cOpts, compiler.MaxRegexpLength(maxRegexpLength)) return nil } } // MaxRecursionDepth sets the maximum depth the abstract syntax tree built during lexation can have. func MaxRecursionDepth(maxRecursionDepth int) Option { return func(r *Runtime) error { r.cOpts = append(r.cOpts, compiler.MaxRecursionDepth(maxRecursionDepth)) return nil } } // OmitMetricSource instructs the Runtime to not annotate metrics with their program source when added to the metric store. func OmitMetricSource() Option { return func(r *Runtime) error { r.omitMetricSource = true return nil } } // PrometheusRegisterer passes in a registry for setting up exported metrics. func PrometheusRegisterer(reg prometheus.Registerer) Option { return func(r *Runtime) error { r.reg = reg r.reg.MustRegister(vm.LineProcessingDurations) return nil } } // LogRuntimeErrors instructs the VM to emit runtime errors into the log. func LogRuntimeErrors() Option { return func(r *Runtime) error { r.logRuntimeErrors = true return nil } } func TraceExecution() Option { return func(r *Runtime) error { r.trace = true return nil } } mtail-3.0.0~rc54+git0ff5/internal/runtime/runtime.go000066400000000000000000000241341460063571700223020ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // This file is available under the Apache license. package runtime // mtail programs may be created, updated, and deleted while mtail is running, and they will be // reloaded without having to restart the mtail process -- mtail will handle these on a HUP signal. 
import ( "bytes" "crypto/sha256" "expvar" "io" "os" "os/signal" "path/filepath" "strings" "sync" "syscall" "time" "github.com/golang/glog" "github.com/google/mtail/internal/logline" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/runtime/compiler" "github.com/google/mtail/internal/runtime/vm" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" ) var ( // LineCount counts the number of lines received by the program loader. LineCount = expvar.NewInt("lines_total") // ProgLoads counts the number of program load events. ProgLoads = expvar.NewMap("prog_loads_total") // ProgUnloads counts the number of program unload events. ProgUnloads = expvar.NewMap("prog_unloads_total") // ProgLoadErrors counts the number of program load errors. ProgLoadErrors = expvar.NewMap("prog_load_errors_total") ) const ( fileExt = ".mtail" ) // LoadAllPrograms loads all programs in a directory and starts watching the // directory for filesystem changes. Any compile errors are stored for later retrieival. // This function returns an error if an internal error occurs. 
func (r *Runtime) LoadAllPrograms() error { if r.programPath == "" { glog.V(2).Info("Programpath is empty, loading nothing") return nil } s, err := os.Stat(r.programPath) if err != nil { return errors.Wrapf(err, "failed to stat %q", r.programPath) } switch { case s.IsDir(): dirents, rerr := os.ReadDir(r.programPath) if rerr != nil { return errors.Wrapf(rerr, "Failed to list programs in %q", r.programPath) } markDeleted := make(map[string]struct{}) r.handleMu.RLock() for name := range r.handles { glog.Infof("added %s", name) markDeleted[name] = struct{}{} } r.handleMu.RUnlock() for _, dirent := range dirents { if dirent.IsDir() { continue } err = r.LoadProgram(filepath.Join(r.programPath, dirent.Name())) if err != nil { if r.errorsAbort { return err } glog.Warning(err) } glog.Infof("unmarking %s", filepath.Base(dirent.Name())) delete(markDeleted, filepath.Base(dirent.Name())) } for name := range markDeleted { glog.Infof("unloading %s", name) r.UnloadProgram(name) } default: err = r.LoadProgram(r.programPath) if err != nil { if r.errorsAbort { return err } glog.Warning(err) } } return nil } // LoadProgram loads or reloads a program from the full pathname programPath. The name of // the program is the basename of the file. 
func (r *Runtime) LoadProgram(programPath string) error { name := filepath.Base(programPath) if strings.HasPrefix(name, ".") { glog.V(2).Infof("Skipping %s because it is a hidden file.", programPath) return nil } if filepath.Ext(name) != fileExt { glog.V(2).Infof("Skipping %s due to file extension.", programPath) return nil } f, err := os.OpenFile(filepath.Clean(programPath), os.O_RDONLY, 0o600) if err != nil { ProgLoadErrors.Add(name, 1) return errors.Wrapf(err, "Failed to read program %q", programPath) } defer func() { if err := f.Close(); err != nil { glog.Warning(err) } }() r.programErrorMu.Lock() defer r.programErrorMu.Unlock() r.programErrors[name] = r.CompileAndRun(name, f) if r.programErrors[name] != nil { if r.errorsAbort { return r.programErrors[name] } glog.Infof("Compile errors for %s:\n%s", name, r.programErrors[name]) } return nil } // CompileAndRun compiles a program read from the input, starting execution if // it succeeds. If an existing virtual machine of the same name already // exists, the previous virtual machine is terminated and the new loaded over // it. If the new program fails to compile, any existing virtual machine with // the same name remains running. 
func (r *Runtime) CompileAndRun(name string, input io.Reader) error { glog.V(2).Infof("CompileAndRun %s", name) var buf bytes.Buffer tee := io.TeeReader(input, &buf) hasher := sha256.New() if _, err := io.Copy(hasher, tee); err != nil { ProgLoadErrors.Add(name, 1) return errors.Wrapf(err, "hashing failed for %q", name) } contentHash := hasher.Sum(nil) r.handleMu.RLock() vh, ok := r.handles[name] r.handleMu.RUnlock() if ok && bytes.Equal(vh.contentHash, contentHash) { glog.V(1).Infof("contents match, not recompiling %q", name) return nil } obj, errs := r.c.Compile(name, &buf) if errs != nil { ProgLoadErrors.Add(name, 1) return errors.Errorf("compile failed for %s:\n%s", name, errs) } if obj == nil { ProgLoadErrors.Add(name, 1) return errors.Errorf("internal error: compilation failed for %s: no program returned, but no errors", name) } v := vm.New(name, obj, r.syslogUseCurrentYear, r.overrideLocation, r.logRuntimeErrors, r.trace) if r.dumpBytecode { glog.Info("Dumping program objects and bytecode\n", v.DumpByteCode()) } // Load the metrics from the compilation into the global metric storage for export. for _, m := range v.Metrics { if !m.Hidden { if r.omitMetricSource { m.Source = "" } err := r.ms.Add(m) if err != nil { return err } } } ProgLoads.Add(name, 1) glog.Infof("Loaded program %s", name) if r.compileOnly { return nil } r.handleMu.Lock() defer r.handleMu.Unlock() // Terminates the existing vm. if handle, ok := r.handles[name]; ok { close(handle.lines) } lines := make(chan *logline.LogLine) r.handles[name] = &vmHandle{contentHash: contentHash, vm: v, lines: lines} r.wg.Add(1) go v.Run(lines, &r.wg) return nil } type vmHandle struct { contentHash []byte vm *vm.VM lines chan *logline.LogLine } // Runtime handles the lifecycle of programs and virtual machines, by watching // the configured program source directory, compiling changes to programs, and // managing the virtual machines. 
type Runtime struct { wg sync.WaitGroup // used to await vm shutdown ms *metrics.Store // pointer to metrics.Store to pass to compiler reg prometheus.Registerer // plce to reg metrics cOpts []compiler.Option // options for constructing `c` c *compiler.Compiler programPath string // Path that contains mtail programs. handleMu sync.RWMutex // guards accesses to handles handles map[string]*vmHandle // map of program names to virtual machines programErrorMu sync.RWMutex // guards access to programErrors programErrors map[string]error // errors from the last compile attempt of the program overrideLocation *time.Location // Instructs the vm to override the timezone with the specified zone. compileOnly bool // Only compile programs and report errors, do not load VMs. errorsAbort bool // Compiler errors abort the loader. dumpBytecode bool // Instructs the loader to dump to stdout the compiled program after compilation. syslogUseCurrentYear bool // Instructs the VM to overwrite zero years with the current year in a strptime instruction. omitMetricSource bool logRuntimeErrors bool // Instruct the VM to emit runtime errors to the log. trace bool // Trace execution of each VM. signalQuit chan struct{} // When closed stops the signal handler goroutine. } var ( ErrNeedsStore = errors.New("loader needs a store") ErrNeedsWaitgroup = errors.New("loader needs a WaitGroup") ) // New creates a new program loader that reads programs from programPath. 
func New(lines <-chan *logline.LogLine, wg *sync.WaitGroup, programPath string, store *metrics.Store, options ...Option) (*Runtime, error) { if store == nil { return nil, ErrNeedsStore } if wg == nil { return nil, ErrNeedsWaitgroup } r := &Runtime{ ms: store, programPath: programPath, handles: make(map[string]*vmHandle), programErrors: make(map[string]error), signalQuit: make(chan struct{}), } initDone := make(chan struct{}) defer close(initDone) var err error if err = r.SetOption(options...); err != nil { return nil, err } if r.c, err = compiler.New(r.cOpts...); err != nil { return nil, err } // Defer shutdown handling to avoid a race on r.wg. wg.Add(1) defer func() { go func() { defer wg.Done() <-initDone r.wg.Wait() }() }() // This goroutine is the main consumer/producer loop. r.wg.Add(1) go func() { defer r.wg.Done() // signal to owner we're done <-initDone for line := range lines { LineCount.Add(1) r.handleMu.RLock() for prog := range r.handles { r.handles[prog].lines <- line } r.handleMu.RUnlock() } glog.Info("END OF LINE") glog.Infof("processed %s lines", LineCount.String()) close(r.signalQuit) r.handleMu.Lock() for prog := range r.handles { close(r.handles[prog].lines) delete(r.handles, prog) } r.handleMu.Unlock() }() if r.programPath == "" { glog.Info("No program path specified, no programs will be loaded.") return r, nil } // Create one goroutine that handles reload signals. r.wg.Add(1) go func() { defer r.wg.Done() <-initDone if r.programPath == "" { glog.Info("no program reload on SIGHUP without programPath") return } n := make(chan os.Signal, 1) signal.Notify(n, syscall.SIGHUP) defer signal.Stop(n) for { select { case <-r.signalQuit: return case <-n: if err := r.LoadAllPrograms(); err != nil { glog.Info(err) } } } }() // Guarantee all existing programmes get loaded before we leave. if err := r.LoadAllPrograms(); err != nil { return nil, err } return r, nil } // SetOption takes one or more option functions and applies them in order to Runtime. 
func (r *Runtime) SetOption(options ...Option) error { for _, option := range options { if err := option(r); err != nil { return err } } return nil } // UnloadProgram removes the named program, any currently running VM goroutine. func (r *Runtime) UnloadProgram(pathname string) { name := filepath.Base(pathname) r.handleMu.Lock() defer r.handleMu.Unlock() close(r.handles[name].lines) delete(r.handles, name) ProgUnloads.Add(name, 1) } mtail-3.0.0~rc54+git0ff5/internal/runtime/runtime_integration_test.go000066400000000000000000000471201460063571700257440ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // This file is available under the Apache license. package runtime import ( "bufio" "context" "math" "strings" "sync" "testing" "github.com/google/mtail/internal/logline" "github.com/google/mtail/internal/metrics" "github.com/google/mtail/internal/metrics/datum" "github.com/google/mtail/internal/testutil" ) var vmTests = []struct { name string prog string log string errs int64 metrics metrics.MetricSlice }{ { "single-dash-parseint", `counter c /(?P-)/ { $x == "-" { c++ } } `, `123 a - b `, 0, metrics.MetricSlice{ { Name: "c", Program: "single-dash-parseint", Kind: metrics.Counter, Type: metrics.Int, Hidden: false, Keys: []string{}, LabelValues: []*metrics.LabelValue{ { Value: &datum.Int{Value: 1}, }, }, }, }, }, { "histogram", `histogram hist1 buckets 1, 2, 4, 8 histogram hist2 by code buckets 0, 1, 2, 4, 8 histogram hist3 by f buckets -1, 0, 1 /^(.) (\d+)/ { hist1 = $2 hist2[$1] = $2 } /^(?P[a-z]+) (?P

Metrics: json, graphite, prometheus