pax_global_header00006660000000000000000000000064133655754660014536gustar00rootroot0000000000000052 comment=0339245739b727d2c0b8ff213dcd42acff0e0f35 prometheus-process-exporter-0.4.0+ds/000077500000000000000000000000001336557546600176565ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/.circleci/000077500000000000000000000000001336557546600215115ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/.circleci/config.yml000066400000000000000000000010441336557546600235000ustar00rootroot00000000000000version: 2 jobs: build: docker: - image: circleci/golang:1.10 working_directory: /go/src/github.com/ncabatoff/process-exporter environment: DEP_VERSION: "0.5.0" steps: - checkout - setup_remote_docker - run: curl -L -s -o dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 - run: chmod +x dep - run: ./dep ensure -vendor-only - run: sudo apt-get -qq update - run: sudo apt-get install -y rpm - run: make style vet test build integ dockerprometheus-process-exporter-0.4.0+ds/.gitignore000066400000000000000000000000771336557546600216520ustar00rootroot00000000000000.*.sw? process-exporter load-generator integration-tester dist prometheus-process-exporter-0.4.0+ds/.goreleaser.yml000066400000000000000000000020171336557546600226070ustar00rootroot00000000000000builds: - main: cmd/process-exporter/main.go binary: process-exporter flags: -tags netgo goos: - linux goarch: - amd64 - 386 - arm - arm64 - ppc64 - ppc64le archive: name_template: "process-exporter-{{ .Version }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" wrap_in_directory: true nfpm: homepage: https://github.com/ncabatoff/process-exporter maintainer: nick.cabatoff+procexp@gmail.com description: Prometheus exporter to report on processes running license: MIT formats: - deb - rpm bindir: /usr/bin files: "packaging/process-exporter.service": "/lib/systemd/system/process-exporter.service" config_files: "packaging/conf/all.yaml": "/etc/process-exporter/all.yaml" scripts: postinstall: "packaging/scripts/postinstall.sh" postremove: "packaging/scripts/postremove.sh" preremove: "packaging/scripts/preremove.sh" release: github: owner: ncabatoff name: process-exporter draft: false prerelease: true prometheus-process-exporter-0.4.0+ds/.travis.yml000066400000000000000000000013141336557546600217660ustar00rootroot00000000000000services: - docker language: go env: - IMAGE_TAG=`echo $TRAVIS_TAG|sed s/v//` go: - 1.10.x before_install: - sudo apt-get -qq update - sudo apt-get install -y rpm go_import_path: github.com/ncabatoff/process-exporter script: - make style vet test build smoke docker - if [ -n "$IMAGE_TAG" ]; then make docker DOCKER_IMAGE_TAG=$IMAGE_TAG; fi after_success: - docker login -u $DOCKER_USER -p "$DOCKER_PASSWORD" - > test -n "$TRAVIS_TAG" && docker tag ncabatoff/process-exporter:$IMAGE_TAG ncabatoff/process-exporter:latest && docker push ncabatoff/process-exporter:$IMAGE_TAG && docker push ncabatoff/process-exporter:latest && curl -sL http://git.io/goreleaser | bash prometheus-process-exporter-0.4.0+ds/Dockerfile000066400000000000000000000013141336557546600216470ustar00rootroot00000000000000# Start from a Debian image with the latest version of Go installed # and a workspace (GOPATH) configured at /go. FROM golang:1.10 AS build #RUN curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $GOPATH/bin/dep #RUN chmod +x $GOPATH/bin/dep WORKDIR /go/src/github.com/ncabatoff/process-exporter ADD . . 
#RUN dep ensure # Build the process-exporter command inside the container. RUN make FROM scratch COPY --from=build /go/src/github.com/ncabatoff/process-exporter/process-exporter /bin/process-exporter # Run the process-exporter command by default when the container starts. ENTRYPOINT ["/bin/process-exporter"] # Document that the service listens on port 9256. EXPOSE 9256 prometheus-process-exporter-0.4.0+ds/Dockerfile.cloudbuild000066400000000000000000000001551336557546600237760ustar00rootroot00000000000000FROM scratch COPY gopath/bin/process-exporter /process-exporter ENTRYPOINT ["/process-exporter"] EXPOSE 9256 prometheus-process-exporter-0.4.0+ds/LICENSE000066400000000000000000000020641336557546600206650ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2016 ncabatoff Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. prometheus-process-exporter-0.4.0+ds/Makefile000066400000000000000000000030341336557546600213160ustar00rootroot00000000000000pkgs = $(shell go list ./... | grep -v /vendor/) PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_NAME ?= ncabatoff/process-exporter DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) SMOKE_TEST = -config.path packaging/conf/all.yaml -once-to-stdout-delay 1s |grep -q 'namedprocess_namegroup_memory_bytes{groupname="process-exporte",memtype="virtual"}' all: format vet test build smoke style: @echo ">> checking code style" @! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' test: @echo ">> running short tests" go test -short $(pkgs) format: @echo ">> formatting code" go fmt $(pkgs) vet: @echo ">> vetting code" go vet $(pkgs) build: @echo ">> building code" cd cmd/process-exporter; CGO_ENABLED=0 go build -o ../../process-exporter -a -tags netgo smoke: @echo ">> smoke testing process-exporter" ./process-exporter $(SMOKE_TEST) integ: @echo ">> integration testing process-exporter" go build -o integration-tester cmd/integration-tester/main.go go build -o load-generator cmd/load-generator/main.go ./integration-tester -write-size-bytes 65536 install: @echo ">> installing binary" cd cmd/process-exporter; CGO_ENABLED=0 go install -a -tags netgo docker: @echo ">> building docker image" docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . 
	docker run --rm -v `pwd`/packaging:/packaging "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(SMOKE_TEST)

.PHONY: all style format test vet build integ docker
prometheus-process-exporter-0.4.0+ds/README.md000066400000000000000000000311021336557546600211320ustar00rootroot00000000000000# process-exporter

Prometheus exporter that mines /proc to report on selected processes.

[release]: https://github.com/ncabatoff/process-exporter/releases/latest
[![Release](https://img.shields.io/github/release/ncabatoff/process-exporter.svg?style=flat-square)][release]
[![Build Status](https://travis-ci.org/ncabatoff/process-exporter.svg?branch=master)](https://travis-ci.org/ncabatoff/process-exporter)
[![Powered By: GoReleaser](https://img.shields.io/badge/powered%20by-goreleaser-green.svg?branch=master)](https://github.com/goreleaser)

Some apps are impractical to instrument directly, either because you don't control the code or they're written in a language that isn't easy to instrument with Prometheus. We must instead resort to mining /proc.

## Installation

Either grab a package for your OS from the [Releases][release] page, or install via [docker](https://hub.docker.com/r/ncabatoff/process-exporter/).

## Running

Usage:

```
process-exporter [options] -config.path filename.yml
```

or via docker:

```
docker run -d --rm -p 9256:9256 --privileged -v /proc:/host/proc -v `pwd`:/config ncabatoff/process-exporter --procfs /host/proc -config.path /config/filename.yml
```

Important options (run process-exporter --help for the full list):

-children (default:true) makes it so that any process that otherwise isn't part of its own group becomes part of the first group found (if any) when walking the process tree upwards. In other words, resource usage of subprocesses is added to their parent's usage unless the subprocess identifies as a different group name.

-recheck (default:false) means that on each scrape the process names are re-evaluated. This is disabled by default as an optimization, but since processes can choose to change their names, this may result in a process falling into the wrong group if we happen to see it for the first time before it has assumed its proper name.

-procnames is intended as a quick alternative to using a config file. Details in the following section.

## Configuration and group naming

To select and group the processes to monitor, either provide command-line arguments or use a YAML configuration file. The recommended option is to use a config file via -config.path, but for convenience and backwards compatibility the -procnames/-namemapping options exist as an alternative.

### Using a config file

The general format of the -config.path YAML file is a top-level `process_names` section, containing a list of name matchers:

```
process_names:
  - matcher1
  - matcher2
  ...
  - matcherN
```

The default config shipped with the deb/rpm packages is:

```
process_names:
  - name: "{{.Comm}}"
    cmdline:
    - '.+'
```

A process may only belong to one group: even if multiple items would match, the first one listed in the file wins.

(Side note: to avoid confusion with the cmdline YAML element, we'll refer to the command-line arguments of a process `/proc/<pid>/cmdline` as the array `argv[]`.)

#### Using a config file: group name

Each item in `process_names` gives a recipe for identifying and naming processes. The optional `name` tag defines a template to use to name matching processes; if not specified, `name` defaults to `{{.ExeBase}}`.
Template variables available:

- `{{.Comm}}` contains the basename of the original executable, i.e. the 2nd field in `/proc/<pid>/stat`
- `{{.ExeBase}}` contains the basename of the executable
- `{{.ExeFull}}` contains the fully qualified path of the executable
- `{{.Username}}` contains the username of the effective user
- `{{.Matches}}` map contains all the matches resulting from applying cmdline regexps

#### Using a config file: process selectors

Each item in `process_names` must contain one or more selectors (`comm`, `exe` or `cmdline`); if more than one selector is present, they must all match. Each selector is a list of strings to match against a process's `comm` or `argv[0]`, or in the case of `cmdline`, a regexp to apply to the command line. The cmdline regexp uses the [Go syntax](https://golang.org/pkg/regexp).

For `comm` and `exe`, the list of strings is an OR, meaning any process matching any of the strings will be added to the item's group. For `cmdline`, the list of regexes is an AND, meaning they all must match. Any capturing groups in a regexp must use the `?P<name>` option to assign a name to the capture, which is used to populate `.Matches`.

Performance tip: give an exe or comm clause in addition to any cmdline clause, so you avoid executing the regexp when the executable name doesn't match.

```
process_names:
  # comm is the second field of /proc/<pid>/stat minus parens.
  # It is the base executable name, truncated at 15 chars.
  # It cannot be modified by the program, unlike exe.
  - comm:
    - bash
  # exe is argv[0]. If no slashes, only basename of argv[0] need match.
  # If exe contains slashes, argv[0] must match exactly.
  - exe:
    - postgres
    - /usr/local/bin/prometheus
  # cmdline is a list of regexps applied to argv.
  # Each must match, and any captures are added to the .Matches map.
  - name: "{{.ExeFull}}:{{.Matches.Cfgfile}}"
    exe:
    - /usr/local/bin/process-exporter
    cmdline:
    - -config.path\s+(?P<Cfgfile>\S+)
```

Here's the config I use on my home machine:

```
process_names:
  - comm:
    - chromium-browse
    - bash
    - prometheus
    - gvim
  - exe:
    - /sbin/upstart
    cmdline:
    - --user
    name: upstart:-user
```

### Using -procnames/-namemapping instead of config.path

Every name in the procnames list becomes a process group. The default name of a process is the value found in the second field of /proc/<pid>/stat ("comm"), which is truncated at 15 chars. Usually this is the same as the name of the executable.

If -namemapping isn't provided, every process with a comm value present in -procnames is assigned to a group based on that name, and any other processes are ignored.

The -namemapping option is a comma-separated list of alternating name,regexp values. It allows assigning a name to a process based on a combination of the process name and command line. For example, using

    -namemapping "python2,([^/]+)\.py,java,-jar\s+([^/]+).jar"

will make it so that each different python2 and java -jar invocation will be tracked with distinct metrics. Processes whose remapped name is absent from the procnames list will be ignored.

On an Ubuntu Xenial machine being used as a workstation, here's a good way of tracking resource usage for a few different key user apps:

    process-exporter -namemapping "upstart,(--user)" \
        -procnames chromium-browse,bash,gvim,prometheus,process-exporter,upstart:-user

Since upstart --user is the parent process of the X11 session, this will make all apps started by the user fall into the group named "upstart:-user", unless they're one of the others named explicitly with -procnames, like gvim.
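However you select your groups, Prometheus still has to be told to scrape the exporter. A minimal scrape-config sketch (the job name and target address are illustrative, assuming the default -web.listen-address of :9256):

```
scrape_configs:
  - job_name: process-exporter
    static_configs:
      - targets: ['localhost:9256']
```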
## Group Metrics

There's no meaningful way to name a process such that the name will only ever apply to a single process, so process-exporter assumes that every metric will be attached to a group of processes - not a [process group](https://en.wikipedia.org/wiki/Process_group) in the technical sense, just one or more processes that meet a configuration's specification of what should be monitored and how to name it.

All these metrics start with `namedprocess_namegroup_` and have at minimum the label `groupname`.

### num_procs gauge

Number of processes in this group.

### cpu_user_seconds_total counter

CPU usage based on /proc/[pid]/stat field utime(14) i.e. user time. A value of 1 indicates that the processes in this group have been scheduled in user mode for a total of 1 second on a single virtual CPU.

### cpu_system_seconds_total counter

CPU usage based on /proc/[pid]/stat field stime(15) i.e. system time.

### read_bytes_total counter

Bytes read based on /proc/[pid]/io field read_bytes. The man page says

> Attempt to count the number of bytes which this process really did cause to be fetched from the storage layer. This is accurate for block-backed filesystems.

but I would take it with a grain of salt.

### write_bytes_total counter

Bytes written based on /proc/[pid]/io field write_bytes. As with read_bytes, somewhat dubious. May be useful for isolating which processes are doing the most I/O, but probably not for measuring just how much I/O is happening.

### major_page_faults_total counter

Number of major page faults based on /proc/[pid]/stat field majflt(12).

### minor_page_faults_total counter

Number of minor page faults based on /proc/[pid]/stat field minflt(10).

### context_switches_total counter

Number of context switches based on /proc/[pid]/status fields voluntary_ctxt_switches and nonvoluntary_ctxt_switches. The extra label `ctxswitchtype` can have two values: `voluntary` and `nonvoluntary`.

### memory_bytes gauge

Number of bytes of memory used. The extra label `memtype` can have three values:

*resident*: Field rss(24) from /proc/[pid]/stat, whose doc says:

> This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out.

*virtual*: Field vsize(23) from /proc/[pid]/stat, virtual memory size.

*swapped*: Field VmSwap from /proc/[pid]/status, translated from KB to bytes.

### open_filedesc gauge

Number of file descriptors, based on counting how many entries are in the directory /proc/[pid]/fd.

### worst_fd_ratio gauge

Worst ratio of open filedescs to filedesc limit, amongst all the procs in the group. The limit is the fd soft limit based on /proc/[pid]/limits.

Normally Prometheus metrics ought to be as "basic" as possible (i.e. the raw values rather than a derived ratio), but we use a ratio here because nothing else makes sense. Suppose there are 10 procs in a given group, each with a soft limit of 4096; one of them has 4000 open fds and the others all have 40. Their total fdcount is 4360 and their total soft limit is 40960, so the ratio is about 1:10 - but in fact one of the procs is about to run out of fds. With worst_fd_ratio we're able to know this: in the above example it would be 0.97, rather than the 0.10 you'd see if you computed sum(open_filedesc) / sum(limit_filedesc).

### oldest_start_time_seconds gauge

Epoch time (seconds since 1970/1/1) at which the oldest process in the group started. This is derived from field starttime(22) from /proc/[pid]/stat, added to boot time to make it relative to the epoch.
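For example, to see how long the oldest process in each group has been running, a PromQL sketch:

```
time() - namedprocess_namegroup_oldest_start_time_seconds
```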
### num_threads gauge

Sum of the number of threads of all processes in the group. Based on field num_threads(20) from /proc/[pid]/stat.

### states gauge

Number of processes in the group in each of various states, based on the field state(3) from /proc/[pid]/stat. The extra label `state` can have these values: `Running`, `Sleeping`, `Waiting`, `Zombie`, `Other`.

## Group Thread Metrics

All these metrics start with `namedprocess_namegroup_` and have at minimum the labels `groupname` and `threadname`. `threadname` is field comm(2) from /proc/[pid]/stat. Just as groupname breaks the set of processes down into groups, threadname breaks a given process group down into subgroups.

### thread_count gauge

Number of threads in this thread subgroup.

### thread_cpu_seconds_total counter

Same as cpu_user_seconds_total and cpu_system_seconds_total, but broken down per-thread subgroup. Unlike cpu_user_seconds_total/cpu_system_seconds_total, the label `cpumode` is used to distinguish between `user` and `system` time.

### thread_io_bytes_total counter

Same as read_bytes_total and write_bytes_total, but broken down per-thread subgroup. Unlike read_bytes_total/write_bytes_total, the label `iomode` is used to distinguish between `read` and `write` bytes.

### thread_major_page_faults_total counter

Same as major_page_faults_total, but broken down per-thread subgroup.

### thread_minor_page_faults_total counter

Same as minor_page_faults_total, but broken down per-thread subgroup.

### thread_context_switches_total counter

Same as context_switches_total, but broken down per-thread subgroup.

## Instrumentation cost

process-exporter will consume CPU in proportion to the number of processes in the system and the rate at which new ones are created. The most expensive parts - applying regexps and executing templates - are only applied once per process seen, unless the command-line option -recheck is provided.

If you have mostly long-running processes, process-exporter's overhead should be minimal: each time a scrape occurs, it will parse /proc/$pid/stat and /proc/$pid/cmdline for every process being monitored and add up a few numbers.
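One way to observe this cost directly is to have process-exporter watch itself. A minimal config sketch (recall that comm is truncated to 15 characters, so the binary's name loses its final letter):

```
process_names:
  - comm:
    - process-exporte
```

The resulting group's cpu_user_seconds_total and cpu_system_seconds_total then report what the exporter itself is consuming.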
## Dashboards An example Grafana dashboard to view the metrics is available at https://grafana.net/dashboards/249 ## Building Install [dep](https://github.com/golang/dep), then: ``` dep ensure make ```prometheus-process-exporter-0.4.0+ds/cloudbuild.release.yaml000066400000000000000000000031171336557546600243110ustar00rootroot00000000000000steps: # - name: string # args: string # env: string # dir: string # id: string # waitFor: string # entrypoint: string # secretEnv: string # Setup the workspace - name: gcr.io/cloud-builders/go env: ['PROJECT_ROOT=github.com/ncabatoff/process-exporter'] args: ['env'] # Build project - name: gcr.io/cloud-builders/docker entrypoint: 'bash' args: ['-c', 'docker build -t ncabatoff/process-exporter:`echo $TAG_NAME|sed s/^v//` .'] # Login to docker hub - name: gcr.io/cloud-builders/docker entrypoint: 'bash' args: ['-c', 'docker login --username=ncabatoff --password=$$DOCKER_PASSWORD'] secretEnv: ['DOCKER_PASSWORD'] # Push to docker hub - name: gcr.io/cloud-builders/docker entrypoint: 'bash' args: ['-c', 'docker push ncabatoff/process-exporter:`echo $TAG_NAME|sed s/^v//`'] # Create github release - name: goreleaser/goreleaser entrypoint: /bin/sh dir: gopath/src/github.com env: ['GOPATH=/workspace/gopath'] args: ['-c', 'cd ncabatoff/process-exporter && git tag $TAG_NAME && /goreleaser' ] secretEnv: ['GITHUB_TOKEN'] secrets: - kmsKeyName: projects/process-exporter/locations/global/keyRings/cloudbuild/cryptoKeys/mykey secretEnv: DOCKER_PASSWORD: | CiQAeHUuEinm1h2j9mp8r0NjPw1l1bBwzDG+JHPUPf3GvtmdjXESMAD3wUauaxWrxid/zPunG67x 5+1CYedV5exh0XwQ32eu4UkniS7HHJNWBudklaG0JA== GITHUB_TOKEN: | CiQAeHUuEhEKAvfIHlUZrCgHNScm0mDKI8Z1w/N3OzDk8Ql6kAUSUQD3wUau7qRc+H7OnTUo6b2Z DKA1eMKHNg729KfHj2ZMqZXinrJloYMbZcZRXP9xv91xCq6QJB5UoFoyYDnXGdvgXC08YUstR6UB H0bwHhe1GQ== prometheus-process-exporter-0.4.0+ds/cloudbuild.yaml000066400000000000000000000022251336557546600226710ustar00rootroot00000000000000steps: # - name: string # args: string # env: string # dir: string # id: string # waitFor: string # entrypoint: string # secretEnv: string # - name: gcr.io/cloud-builders/curl # args: ['-L', '-s', '-o', 'dep', 'https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64'] # - name: ubuntu # args: ['chmod', '+x', 'dep'] # Setup the workspace - name: gcr.io/cloud-builders/go env: ['PROJECT_ROOT=github.com/ncabatoff/process-exporter'] args: ['env'] # Run dep in the workspace created in previous step # - name: gcr.io/cloud-builders/go # entrypoint: /bin/sh # dir: gopath/src/github.com # env: ['GOPATH=/workspace/gopath'] # args: ['-c', 'cd ncabatoff/process-exporter && /workspace/dep ensure -vendor-only' ] - name: gcr.io/cloud-builders/go entrypoint: /bin/sh dir: gopath/src/github.com env: ['GOPATH=/workspace/gopath'] args: ['-c', 'make -C ncabatoff/process-exporter style vet test build integ install' ] - name: gcr.io/cloud-builders/docker args: ['build', '--tag=gcr.io/$PROJECT_ID/process-exporter', '.', '-f', 'Dockerfile.cloudbuild'] images: ['gcr.io/$PROJECT_ID/process-exporter'] prometheus-process-exporter-0.4.0+ds/cmd/000077500000000000000000000000001336557546600204215ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/cmd/integration-tester/000077500000000000000000000000001336557546600242505ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/cmd/integration-tester/main.go000066400000000000000000000133451336557546600255310ustar00rootroot00000000000000package main import ( "bufio" "bytes" "context" "flag" "fmt" "log" "os" "os/exec" "path/filepath" "strconv" 
"strings" "time" ) // You wouldn't think our child could start before us, but I have observed it; maybe due to rounding? var start = time.Now().Unix() - 1 func main() { var ( flagProcessExporter = flag.String("process-exporter", "./process-exporter", "path to process-exporter") flagLoadGenerator = flag.String("load-generator", "./load-generator", "path to load-generator") flagAttempts = flag.Int("attempts", 3, "try this many times before returning failure") flagWriteSizeBytes = flag.Int("write-size-bytes", 1024*1024, "how many bytes to write each cycle") ) flag.Parse() ctx, cancel := context.WithCancel(context.Background()) defer cancel() cmdlg := exec.CommandContext(ctx, *flagLoadGenerator, "-write-size-bytes", strconv.Itoa(*flagWriteSizeBytes)) var buf = &bytes.Buffer{} cmdlg.Stdout = buf err := cmdlg.Start() if err != nil { log.Fatalf("Error launching load generator %q: %v", *flagLoadGenerator, err) } for !strings.HasPrefix(buf.String(), "ready") { time.Sleep(time.Second) } success := false for i := 0; i < *flagAttempts; i++ { comm := filepath.Base(*flagLoadGenerator) cmdpe := exec.CommandContext(ctx, *flagProcessExporter, "-once-to-stdout-delay", "20s", "-procnames", comm) out, err := cmdpe.Output() if err != nil { log.Fatalf("Error launching process-exporter %q: %v", *flagProcessExporter, err) } log.Println(string(out)) results := getResults(comm, string(out)) if verify(results) { success = true break } log.Printf("try %d/%d failed", i+1, *flagAttempts) } cancel() cmdlg.Wait() if !success { os.Exit(1) } } type result struct { name string labels map[string]string value float64 } func getResults(group string, out string) map[string][]result { results := make(map[string][]result) skiplabel := fmt.Sprintf(`groupname="%s"`, group) lines := bufio.NewScanner(strings.NewReader(out)) lines.Split(bufio.ScanLines) for lines.Scan() { line := lines.Text() metric, value := "", 0.0 _, err := fmt.Sscanf(line, "namedprocess_namegroup_%s %f", &metric, &value) if err != nil { continue } pos := strings.IndexByte(metric, '{') if pos == -1 { log.Fatalf("cannot parse metric %q, no open curly found", metric) } name, labelstr := metric[:pos], metric[pos+1:] labelstr = labelstr[:len(labelstr)-1] labels := make(map[string]string) for _, kv := range strings.Split(labelstr, ",") { if kv != skiplabel { pieces := strings.SplitN(kv, "=", 2) labelname, labelvalue := pieces[0], pieces[1][1:len(pieces[1])-1] labels[labelname] = labelvalue } } results[name] = append(results[name], result{name, labels, value}) } return results } func verify(results map[string][]result) bool { success := true assertExact := func(name string, got, want float64) { if got != want { success = false log.Printf("expected %s to be %f, got %f", name, want, got) } } assertGreaterOrEqual := func(name string, got, want float64) { if got < want { success = false log.Printf("expected %s to have at least %f, got %f", name, want, got) } } assertExact("num_procs", results["num_procs"][0].value, 1) // Four locked threads plus go runtime means more than 7, but we'll say 7 to play it safe. assertGreaterOrEqual("num_threads", results["num_threads"][0].value, 7) // Our child must have started later than us. 
assertGreaterOrEqual("oldest_start_time_seconds", results["oldest_start_time_seconds"][0].value, float64(start)) for _, result := range results["states"] { switch state := result.labels["state"]; state { case "Other", "Zombie": assertExact("state "+state, result.value, 0) case "Running": assertGreaterOrEqual("state "+state, result.value, 2) case "Waiting": assertGreaterOrEqual("state "+state, result.value, 0) case "Sleeping": assertGreaterOrEqual("state "+state, result.value, 4) } } for _, result := range results["thread_count"] { switch tname := result.labels["threadname"]; tname { case "blocking", "sysbusy", "userbusy", "waiting": assertExact("thread_count "+tname, result.value, 1) case "main": assertGreaterOrEqual("thread_count "+tname, result.value, 3) } } for _, result := range results["thread_cpu_seconds_total"] { if result.labels["cpumode"] == "system" { switch tname := result.labels["threadname"]; tname { case "sysbusy", "blocking": assertGreaterOrEqual("thread_cpu_seconds_total system "+tname, result.value, 0.00001) default: assertGreaterOrEqual("thread_cpu_seconds_total system "+tname, result.value, 0) } } else if result.labels["cpumode"] == "user" { switch tname := result.labels["threadname"]; tname { case "userbusy": assertGreaterOrEqual("thread_cpu_seconds_total user "+tname, result.value, 0.00001) default: assertGreaterOrEqual("thread_cpu_seconds_total user "+tname, result.value, 0) } } } for _, result := range results["thread_io_bytes_total"] { tname, iomode := result.labels["threadname"], result.labels["iomode"] rname := fmt.Sprintf("%s %s %s", "thread_io_bytes_total", iomode, tname) switch tname { case "blocking", "sysbusy": assertGreaterOrEqual(rname, result.value, 0.00001) default: assertExact(rname, result.value, 0) } } otherwchan := 0.0 for _, result := range results["threads_wchan"] { switch wchan := result.labels["wchan"]; wchan { case "poll_schedule_timeout": assertGreaterOrEqual(wchan, result.value, 1) case "futex_wait_queue_me": assertGreaterOrEqual(wchan, result.value, 4) default: // The specific wchan involved for the blocking thread varies by filesystem. 
otherwchan++ } } // assertGreaterOrEqual("other wchan", otherwchan, 1) return success } prometheus-process-exporter-0.4.0+ds/cmd/load-generator/000077500000000000000000000000001336557546600233245ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/cmd/load-generator/main.go000066400000000000000000000047001336557546600246000ustar00rootroot00000000000000package main import ( "flag" "fmt" "io/ioutil" "math/rand" "runtime" "syscall" "unsafe" ) var ready = make(chan struct{}) func init() { var ( flagWaiting = flag.Int("waiting", 1, "minimum number of waiting threads") flagUserBusy = flag.Int("userbusy", 1, "minimum number of userbusy threads") flagSysBusy = flag.Int("sysbusy", 1, "minimum number of sysbusy threads") flagBlocking = flag.Int("blocking", 1, "minimum number of io blocking threads") flagWriteSizeBytes = flag.Int("write-size-bytes", 1024*1024, "how many bytes to write each cycle") ) flag.Parse() runtime.LockOSThread() for i := 0; i < *flagWaiting; i++ { go waiting() <-ready } for i := 0; i < *flagUserBusy; i++ { go userbusy() <-ready } for i := 0; i < *flagSysBusy; i++ { go diskio(false, *flagWriteSizeBytes) <-ready } for i := 0; i < *flagBlocking; i++ { go diskio(true, *flagWriteSizeBytes) <-ready } } func main() { c := make(chan struct{}) fmt.Println("ready") <-c } func setPrName(name string) error { bytes := append([]byte(name), 0) ptr := unsafe.Pointer(&bytes[0]) _, _, errno := syscall.RawSyscall6(syscall.SYS_PRCTL, syscall.PR_SET_NAME, uintptr(ptr), 0, 0, 0, 0) if errno != 0 { return syscall.Errno(errno) } return nil } func waiting() { runtime.LockOSThread() setPrName("waiting") ready <- struct{}{} c := make(chan struct{}) <-c } func userbusy() { runtime.LockOSThread() setPrName("userbusy") ready <- struct{}{} i := 1.0000001 for { i *= i } } func diskio(sync bool, writesize int) { runtime.LockOSThread() if sync { setPrName("blocking") } else { setPrName("sysbusy") } // Use random data because if we're on a filesystem that does compression like ZFS, // using zeroes is almost a no-op. 
	b := make([]byte, writesize)
	_, err := rand.Read(b)
	if err != nil {
		panic("unable to get rands: " + err.Error())
	}
	f, err := ioutil.TempFile("", "loadgen")
	if err != nil {
		panic("unable to create tempfile: " + err.Error())
	}
	defer f.Close()
	sentready := false
	offset := int64(0)
	for {
		_, err = f.WriteAt(b, offset)
		if err != nil {
			panic("unable to write tempfile: " + err.Error())
		}
		if sync {
			err = f.Sync()
			if err != nil {
				panic("unable to sync tempfile: " + err.Error())
			}
		}
		_, err = f.ReadAt(b, 0)
		if err != nil {
			panic("unable to read tempfile: " + err.Error())
		}
		if !sentready {
			ready <- struct{}{}
			sentready = true
		}
		offset++
	}
}
prometheus-process-exporter-0.4.0+ds/cmd/process-exporter/000077500000000000000000000000001336557546600237455ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/cmd/process-exporter/main.go000066400000000000000000000433411336557546600252250ustar00rootroot00000000000000package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof"
	"regexp"
	"strings"
	"time"

	"github.com/ncabatoff/fakescraper"
	common "github.com/ncabatoff/process-exporter"
	"github.com/ncabatoff/process-exporter/config"
	"github.com/ncabatoff/process-exporter/proc"
	"github.com/prometheus/client_golang/prometheus"
)

func printManual() {
	fmt.Print(`Usage:
  process-exporter [options] -config.path filename.yml

or

  process-exporter [options] -procnames name1,...,nameN [-namemapping k1,v1,...,kN,vN]

The recommended option is to use a config file, but for convenience and
backwards compatibility the -procnames/-namemapping options exist as an
alternative.

The -children option (default:true) makes it so that any process that
otherwise isn't part of its own group becomes part of the first group found
(if any) when walking the process tree upwards. In other words, resource usage
of subprocesses is added to their parent's usage unless the subprocess
identifies as a different group name.

Command-line process selection (procnames/namemapping):

  Every process not in the procnames list is ignored. Otherwise, all processes
  found are reported on as a group based on the process name they share. Here
  'process name' refers to the value found in the second field of
  /proc/<pid>/stat, which is truncated at 15 chars.

  The -namemapping option allows assigning a group name based on a combination
  of the process name and command line. For example, using

    -namemapping "python2,([^/]+)\.py,java,-jar\s+([^/]+).jar"

  will make it so that each different python2 and java -jar invocation will be
  tracked with distinct metrics. Processes whose remapped name is absent from
  the procnames list will be ignored.

  Here's an example that I run on my home machine (Ubuntu Xenial):

    process-exporter -namemapping "upstart,(--user)" \
        -procnames chromium-browse,bash,prometheus,prombench,gvim,upstart:-user

  Since it appears that upstart --user is the parent process of my X11 session,
  this will make all apps I start count against it, unless they're one of the
  others named explicitly with -procnames.

Config file process selection (filename.yml):

  See README.md.
` + "\n") } var ( numprocsDesc = prometheus.NewDesc( "namedprocess_namegroup_num_procs", "number of processes in this group", []string{"groupname"}, nil) cpuUserSecsDesc = prometheus.NewDesc( "namedprocess_namegroup_cpu_user_seconds_total", "Cpu user usage in seconds", []string{"groupname"}, nil) cpuSystemSecsDesc = prometheus.NewDesc( "namedprocess_namegroup_cpu_system_seconds_total", "Cpu system usage in seconds", []string{"groupname"}, nil) readBytesDesc = prometheus.NewDesc( "namedprocess_namegroup_read_bytes_total", "number of bytes read by this group", []string{"groupname"}, nil) writeBytesDesc = prometheus.NewDesc( "namedprocess_namegroup_write_bytes_total", "number of bytes written by this group", []string{"groupname"}, nil) majorPageFaultsDesc = prometheus.NewDesc( "namedprocess_namegroup_major_page_faults_total", "Major page faults", []string{"groupname"}, nil) minorPageFaultsDesc = prometheus.NewDesc( "namedprocess_namegroup_minor_page_faults_total", "Minor page faults", []string{"groupname"}, nil) contextSwitchesDesc = prometheus.NewDesc( "namedprocess_namegroup_context_switches_total", "Context switches", []string{"groupname", "ctxswitchtype"}, nil) membytesDesc = prometheus.NewDesc( "namedprocess_namegroup_memory_bytes", "number of bytes of memory in use", []string{"groupname", "memtype"}, nil) openFDsDesc = prometheus.NewDesc( "namedprocess_namegroup_open_filedesc", "number of open file descriptors for this group", []string{"groupname"}, nil) worstFDRatioDesc = prometheus.NewDesc( "namedprocess_namegroup_worst_fd_ratio", "the worst (closest to 1) ratio between open fds and max fds among all procs in this group", []string{"groupname"}, nil) startTimeDesc = prometheus.NewDesc( "namedprocess_namegroup_oldest_start_time_seconds", "start time in seconds since 1970/01/01 of oldest process in group", []string{"groupname"}, nil) numThreadsDesc = prometheus.NewDesc( "namedprocess_namegroup_num_threads", "Number of threads", []string{"groupname"}, nil) statesDesc = prometheus.NewDesc( "namedprocess_namegroup_states", "Number of processes in states Running, Sleeping, Waiting, Zombie, or Other", []string{"groupname", "state"}, nil) scrapeErrorsDesc = prometheus.NewDesc( "namedprocess_scrape_errors", "general scrape errors: no proc metrics collected during a cycle", nil, nil) scrapeProcReadErrorsDesc = prometheus.NewDesc( "namedprocess_scrape_procread_errors", "incremented each time a proc's metrics collection fails", nil, nil) scrapePartialErrorsDesc = prometheus.NewDesc( "namedprocess_scrape_partial_errors", "incremented each time a tracked proc's metrics collection fails partially, e.g. 
unreadable I/O stats", nil, nil)

	threadWchanDesc = prometheus.NewDesc(
		"namedprocess_namegroup_threads_wchan",
		"Number of threads in this group waiting on each wchan",
		[]string{"groupname", "wchan"},
		nil)

	threadCountDesc = prometheus.NewDesc(
		"namedprocess_namegroup_thread_count",
		"Number of threads in this group with same threadname",
		[]string{"groupname", "threadname"},
		nil)

	threadCpuSecsDesc = prometheus.NewDesc(
		"namedprocess_namegroup_thread_cpu_seconds_total",
		"Cpu user/system usage in seconds",
		[]string{"groupname", "threadname", "cpumode"},
		nil)

	threadIoBytesDesc = prometheus.NewDesc(
		"namedprocess_namegroup_thread_io_bytes_total",
		"number of bytes read/written by these threads",
		[]string{"groupname", "threadname", "iomode"},
		nil)

	threadMajorPageFaultsDesc = prometheus.NewDesc(
		"namedprocess_namegroup_thread_major_page_faults_total",
		"Major page faults for these threads",
		[]string{"groupname", "threadname"},
		nil)

	threadMinorPageFaultsDesc = prometheus.NewDesc(
		"namedprocess_namegroup_thread_minor_page_faults_total",
		"Minor page faults for these threads",
		[]string{"groupname", "threadname"},
		nil)

	threadContextSwitchesDesc = prometheus.NewDesc(
		"namedprocess_namegroup_thread_context_switches_total",
		"Context switches for these threads",
		[]string{"groupname", "threadname", "ctxswitchtype"},
		nil)
)

type (
	prefixRegex struct {
		prefix string
		regex  *regexp.Regexp
	}

	nameMapperRegex struct {
		mapping map[string]*prefixRegex
	}
)

func (nmr *nameMapperRegex) String() string {
	return fmt.Sprintf("%+v", nmr.mapping)
}

// Create a nameMapperRegex based on a string given as the -namemapping argument.
func parseNameMapper(s string) (*nameMapperRegex, error) {
	mapper := make(map[string]*prefixRegex)
	if s == "" {
		return &nameMapperRegex{mapper}, nil
	}

	toks := strings.Split(s, ",")
	if len(toks)%2 == 1 {
		return nil, fmt.Errorf("bad namemapper: odd number of tokens")
	}

	for i, tok := range toks {
		if tok == "" {
			return nil, fmt.Errorf("bad namemapper: token %d is empty", i)
		}
		if i%2 == 1 {
			name, regexstr := toks[i-1], tok
			matchName := name
			prefix := name + ":"

			if r, err := regexp.Compile(regexstr); err != nil {
				return nil, fmt.Errorf("error compiling regexp '%s': %v", regexstr, err)
			} else {
				mapper[matchName] = &prefixRegex{prefix: prefix, regex: r}
			}
		}
	}

	return &nameMapperRegex{mapper}, nil
}

func (nmr *nameMapperRegex) MatchAndName(nacl common.ProcAttributes) (bool, string) {
	if pregex, ok := nmr.mapping[nacl.Name]; ok {
		if pregex == nil {
			return true, nacl.Name
		}
		matches := pregex.regex.FindStringSubmatch(strings.Join(nacl.Cmdline, " "))
		if len(matches) > 1 {
			for _, matchstr := range matches[1:] {
				if matchstr != "" {
					return true, pregex.prefix + matchstr
				}
			}
		}
	}
	return false, ""
}

func main() {
	var (
		listenAddress = flag.String("web.listen-address", ":9256",
			"Address on which to expose metrics and web interface.")
		metricsPath = flag.String("web.telemetry-path", "/metrics",
			"Path under which to expose metrics.")
		onceToStdoutDelay = flag.Duration("once-to-stdout-delay", 0,
			"Don't bind, just wait this much time, print the metrics once to stdout, and exit")
		procNames = flag.String("procnames", "",
			"comma-separated list of process names to monitor")
		procfsPath = flag.String("procfs", "/proc",
			"path to read proc data from")
		nameMapping = flag.String("namemapping", "",
			"comma-separated list, alternating process name and capturing regex to apply to cmdline")
		children = flag.Bool("children", true,
			"if a proc is tracked, track with it any children that aren't part of their own group")
		man = flag.Bool("man", false,
"print manual") configPath = flag.String("config.path", "", "path to YAML config file") recheck = flag.Bool("recheck", false, "recheck process names on each scrape") debug = flag.Bool("debug", false, "log debugging information to stdout") ) flag.Parse() if *man { printManual() return } var matchnamer common.MatchNamer if *configPath != "" { if *nameMapping != "" || *procNames != "" { log.Fatalf("-config.path cannot be used with -namemapping or -procnames") } cfg, err := config.ReadFile(*configPath, *debug) if err != nil { log.Fatalf("error reading config file %q: %v", *configPath, err) } log.Printf("Reading metrics from %s based on %q", *procfsPath, *configPath) matchnamer = cfg.MatchNamers if *debug { log.Printf("using config matchnamer: %v", cfg.MatchNamers) } } else { namemapper, err := parseNameMapper(*nameMapping) if err != nil { log.Fatalf("Error parsing -namemapping argument '%s': %v", *nameMapping, err) } var names []string for _, s := range strings.Split(*procNames, ",") { if s != "" { if _, ok := namemapper.mapping[s]; !ok { namemapper.mapping[s] = nil } names = append(names, s) } } log.Printf("Reading metrics from %s for procnames: %v", *procfsPath, names) if *debug { log.Printf("using cmdline matchnamer: %v", namemapper) } matchnamer = namemapper } pc, err := NewProcessCollector(*procfsPath, *children, matchnamer, *recheck, *debug) if err != nil { log.Fatalf("Error initializing: %v", err) } prometheus.MustRegister(pc) if *onceToStdoutDelay != 0 { // We throw away the first result because that first collection primes the pump, and // otherwise we won't see our counter metrics. This is specific to the implementation // of NamedProcessCollector.Collect(). fscraper := fakescraper.NewFakeScraper() fscraper.Scrape() time.Sleep(*onceToStdoutDelay) fmt.Print(fscraper.Scrape()) return } http.Handle(*metricsPath, prometheus.Handler()) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(` Named Process Exporter

			<body>
			<h1>Named Process Exporter</h1>
			<p><a href="` + *metricsPath + `">Metrics</a></p>
			</body>
			</html>

`)) }) if err := http.ListenAndServe(*listenAddress, nil); err != nil { log.Fatalf("Unable to setup HTTP server: %v", err) } } type ( scrapeRequest struct { results chan<- prometheus.Metric done chan struct{} } NamedProcessCollector struct { scrapeChan chan scrapeRequest *proc.Grouper source proc.Source scrapeErrors int scrapeProcReadErrors int scrapePartialErrors int debug bool } ) func NewProcessCollector( procfsPath string, children bool, n common.MatchNamer, recheck bool, debug bool, ) (*NamedProcessCollector, error) { fs, err := proc.NewFS(procfsPath, debug) if err != nil { return nil, err } p := &NamedProcessCollector{ scrapeChan: make(chan scrapeRequest), Grouper: proc.NewGrouper(n, children, recheck, debug), source: fs, debug: debug, } colErrs, _, err := p.Update(p.source.AllProcs()) if err != nil { if debug { log.Print(err) } return nil, err } p.scrapePartialErrors += colErrs.Partial p.scrapeProcReadErrors += colErrs.Read go p.start() return p, nil } // Describe implements prometheus.Collector. func (p *NamedProcessCollector) Describe(ch chan<- *prometheus.Desc) { ch <- cpuUserSecsDesc ch <- cpuSystemSecsDesc ch <- numprocsDesc ch <- readBytesDesc ch <- writeBytesDesc ch <- membytesDesc ch <- openFDsDesc ch <- worstFDRatioDesc ch <- startTimeDesc ch <- majorPageFaultsDesc ch <- minorPageFaultsDesc ch <- contextSwitchesDesc ch <- numThreadsDesc ch <- statesDesc ch <- scrapeErrorsDesc ch <- scrapeProcReadErrorsDesc ch <- scrapePartialErrorsDesc ch <- threadWchanDesc ch <- threadCountDesc ch <- threadCpuSecsDesc ch <- threadIoBytesDesc ch <- threadMajorPageFaultsDesc ch <- threadMinorPageFaultsDesc ch <- threadContextSwitchesDesc } // Collect implements prometheus.Collector. func (p *NamedProcessCollector) Collect(ch chan<- prometheus.Metric) { req := scrapeRequest{results: ch, done: make(chan struct{})} p.scrapeChan <- req <-req.done } func (p *NamedProcessCollector) start() { for req := range p.scrapeChan { ch := req.results p.scrape(ch) req.done <- struct{}{} } } func (p *NamedProcessCollector) scrape(ch chan<- prometheus.Metric) { permErrs, groups, err := p.Update(p.source.AllProcs()) p.scrapePartialErrors += permErrs.Partial if err != nil { p.scrapeErrors++ log.Printf("error reading procs: %v", err) } else { for gname, gcounts := range groups { ch <- prometheus.MustNewConstMetric(numprocsDesc, prometheus.GaugeValue, float64(gcounts.Procs), gname) ch <- prometheus.MustNewConstMetric(membytesDesc, prometheus.GaugeValue, float64(gcounts.Memory.ResidentBytes), gname, "resident") ch <- prometheus.MustNewConstMetric(membytesDesc, prometheus.GaugeValue, float64(gcounts.Memory.VirtualBytes), gname, "virtual") ch <- prometheus.MustNewConstMetric(membytesDesc, prometheus.GaugeValue, float64(gcounts.Memory.VmSwapBytes), gname, "swapped") ch <- prometheus.MustNewConstMetric(startTimeDesc, prometheus.GaugeValue, float64(gcounts.OldestStartTime.Unix()), gname) ch <- prometheus.MustNewConstMetric(openFDsDesc, prometheus.GaugeValue, float64(gcounts.OpenFDs), gname) ch <- prometheus.MustNewConstMetric(worstFDRatioDesc, prometheus.GaugeValue, float64(gcounts.WorstFDratio), gname) ch <- prometheus.MustNewConstMetric(cpuUserSecsDesc, prometheus.CounterValue, gcounts.CPUUserTime, gname) ch <- prometheus.MustNewConstMetric(cpuSystemSecsDesc, prometheus.CounterValue, gcounts.CPUSystemTime, gname) ch <- prometheus.MustNewConstMetric(readBytesDesc, prometheus.CounterValue, float64(gcounts.ReadBytes), gname) ch <- prometheus.MustNewConstMetric(writeBytesDesc, prometheus.CounterValue, 
float64(gcounts.WriteBytes), gname) ch <- prometheus.MustNewConstMetric(majorPageFaultsDesc, prometheus.CounterValue, float64(gcounts.MajorPageFaults), gname) ch <- prometheus.MustNewConstMetric(minorPageFaultsDesc, prometheus.CounterValue, float64(gcounts.MinorPageFaults), gname) ch <- prometheus.MustNewConstMetric(contextSwitchesDesc, prometheus.CounterValue, float64(gcounts.CtxSwitchVoluntary), gname, "voluntary") ch <- prometheus.MustNewConstMetric(contextSwitchesDesc, prometheus.CounterValue, float64(gcounts.CtxSwitchNonvoluntary), gname, "nonvoluntary") ch <- prometheus.MustNewConstMetric(numThreadsDesc, prometheus.GaugeValue, float64(gcounts.NumThreads), gname) ch <- prometheus.MustNewConstMetric(statesDesc, prometheus.GaugeValue, float64(gcounts.States.Running), gname, "Running") ch <- prometheus.MustNewConstMetric(statesDesc, prometheus.GaugeValue, float64(gcounts.States.Sleeping), gname, "Sleeping") ch <- prometheus.MustNewConstMetric(statesDesc, prometheus.GaugeValue, float64(gcounts.States.Waiting), gname, "Waiting") ch <- prometheus.MustNewConstMetric(statesDesc, prometheus.GaugeValue, float64(gcounts.States.Zombie), gname, "Zombie") ch <- prometheus.MustNewConstMetric(statesDesc, prometheus.GaugeValue, float64(gcounts.States.Other), gname, "Other") for wchan, count := range gcounts.Wchans { ch <- prometheus.MustNewConstMetric(threadWchanDesc, prometheus.GaugeValue, float64(count), gname, wchan) } for _, thr := range gcounts.Threads { ch <- prometheus.MustNewConstMetric(threadCountDesc, prometheus.GaugeValue, float64(thr.NumThreads), gname, thr.Name) ch <- prometheus.MustNewConstMetric(threadCpuSecsDesc, prometheus.CounterValue, float64(thr.CPUUserTime), gname, thr.Name, "user") ch <- prometheus.MustNewConstMetric(threadCpuSecsDesc, prometheus.CounterValue, float64(thr.CPUSystemTime), gname, thr.Name, "system") ch <- prometheus.MustNewConstMetric(threadIoBytesDesc, prometheus.CounterValue, float64(thr.ReadBytes), gname, thr.Name, "read") ch <- prometheus.MustNewConstMetric(threadIoBytesDesc, prometheus.CounterValue, float64(thr.WriteBytes), gname, thr.Name, "write") ch <- prometheus.MustNewConstMetric(threadMajorPageFaultsDesc, prometheus.CounterValue, float64(thr.MajorPageFaults), gname, thr.Name) ch <- prometheus.MustNewConstMetric(threadMinorPageFaultsDesc, prometheus.CounterValue, float64(thr.MinorPageFaults), gname, thr.Name) ch <- prometheus.MustNewConstMetric(threadContextSwitchesDesc, prometheus.CounterValue, float64(thr.CtxSwitchVoluntary), gname, thr.Name, "voluntary") ch <- prometheus.MustNewConstMetric(threadContextSwitchesDesc, prometheus.CounterValue, float64(thr.CtxSwitchNonvoluntary), gname, thr.Name, "nonvoluntary") } } } ch <- prometheus.MustNewConstMetric(scrapeErrorsDesc, prometheus.CounterValue, float64(p.scrapeErrors)) ch <- prometheus.MustNewConstMetric(scrapeProcReadErrorsDesc, prometheus.CounterValue, float64(p.scrapeProcReadErrors)) ch <- prometheus.MustNewConstMetric(scrapePartialErrorsDesc, prometheus.CounterValue, float64(p.scrapePartialErrors)) } prometheus-process-exporter-0.4.0+ds/common.go000066400000000000000000000004651336557546600215020ustar00rootroot00000000000000package common import "fmt" type ( ProcAttributes struct { Name string Cmdline []string Username string } MatchNamer interface { // MatchAndName returns false if the match failed, otherwise // true and the resulting name. 
MatchAndName(ProcAttributes) (bool, string) fmt.Stringer } ) prometheus-process-exporter-0.4.0+ds/config/000077500000000000000000000000001336557546600211235ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/config/base_test.go000066400000000000000000000003071336557546600234230ustar00rootroot00000000000000package config import ( "testing" . "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { TestingT(t) } type MySuite struct{} var _ = Suite(&MySuite{}) prometheus-process-exporter-0.4.0+ds/config/config.go000066400000000000000000000145521336557546600227260ustar00rootroot00000000000000package config import ( "bytes" "fmt" "io/ioutil" "log" "path/filepath" "regexp" "strings" "text/template" common "github.com/ncabatoff/process-exporter" "gopkg.in/yaml.v2" ) type ( Matcher interface { // Match returns empty string for no match, or the group name on success. Match(common.ProcAttributes) bool } FirstMatcher struct { matchers []common.MatchNamer } Config struct { MatchNamers FirstMatcher } commMatcher struct { comms map[string]struct{} } exeMatcher struct { exes map[string]string } cmdlineMatcher struct { regexes []*regexp.Regexp captures map[string]string } andMatcher []Matcher templateNamer struct { template *template.Template } matchNamer struct { andMatcher templateNamer } templateParams struct { Comm string ExeBase string ExeFull string Username string Matches map[string]string } ) func (c *cmdlineMatcher) String() string { return fmt.Sprintf("cmdlines: %+v", c.regexes) } func (e *exeMatcher) String() string { return fmt.Sprintf("exes: %+v", e.exes) } func (c *commMatcher) String() string { var comms = make([]string, 0, len(c.comms)) for cm := range c.comms { comms = append(comms, cm) } return fmt.Sprintf("comms: %+v", comms) } func (f FirstMatcher) String() string { return fmt.Sprintf("%v", f.matchers) } func (f FirstMatcher) MatchAndName(nacl common.ProcAttributes) (bool, string) { for _, m := range f.matchers { if matched, name := m.MatchAndName(nacl); matched { return true, name } } return false, "" } func (m *matchNamer) String() string { return fmt.Sprintf("%+v", m.andMatcher) } func (m *matchNamer) MatchAndName(nacl common.ProcAttributes) (bool, string) { if !m.Match(nacl) { return false, "" } matches := make(map[string]string) for _, m := range m.andMatcher { if mc, ok := m.(*cmdlineMatcher); ok { for k, v := range mc.captures { matches[k] = v } } } exebase, exefull := nacl.Name, nacl.Name if len(nacl.Cmdline) > 0 { exefull = nacl.Cmdline[0] exebase = filepath.Base(exefull) } var buf bytes.Buffer m.template.Execute(&buf, &templateParams{ Comm: nacl.Name, ExeBase: exebase, ExeFull: exefull, Matches: matches, Username: nacl.Username, }) return true, buf.String() } func (m *commMatcher) Match(nacl common.ProcAttributes) bool { _, found := m.comms[nacl.Name] return found } func (m *exeMatcher) Match(nacl common.ProcAttributes) bool { if len(nacl.Cmdline) == 0 { return false } thisbase := filepath.Base(nacl.Cmdline[0]) fqpath, found := m.exes[thisbase] if !found { return false } if fqpath == "" { return true } return fqpath == nacl.Cmdline[0] } func (m *cmdlineMatcher) Match(nacl common.ProcAttributes) bool { for _, regex := range m.regexes { captures := regex.FindStringSubmatch(strings.Join(nacl.Cmdline, " ")) if m.captures == nil { return false } subexpNames := regex.SubexpNames() if len(subexpNames) != len(captures) { return false } for i, name := range subexpNames { m.captures[name] = captures[i] } } return true } func (m 
andMatcher) Match(nacl common.ProcAttributes) bool { for _, matcher := range m { if !matcher.Match(nacl) { return false } } return true } // ReadRecipesFile opens the named file and extracts recipes from it. func ReadFile(cfgpath string, debug bool) (*Config, error) { content, err := ioutil.ReadFile(cfgpath) if err != nil { return nil, fmt.Errorf("error reading config file %q: %v", cfgpath, err) } if debug { log.Printf("Config file %q contents:\n%s", cfgpath, content) } return GetConfig(string(content), debug) } // GetConfig extracts Config from content by parsing it as YAML. func GetConfig(content string, debug bool) (*Config, error) { var yamldata map[string]interface{} err := yaml.Unmarshal([]byte(content), &yamldata) if err != nil { return nil, err } yamlProcnames, ok := yamldata["process_names"] if !ok { return nil, fmt.Errorf("error parsing YAML config: no top-level 'process_names' key") } procnames, ok := yamlProcnames.([]interface{}) if !ok { return nil, fmt.Errorf("error parsing YAML config: 'process_names' is not a list") } var cfg Config for i, procname := range procnames { mn, err := getMatchNamer(procname) if err != nil { return nil, fmt.Errorf("unable to parse process_name entry %d: %v", i, err) } cfg.MatchNamers.matchers = append(cfg.MatchNamers.matchers, mn) } return &cfg, nil } func getMatchNamer(yamlmn interface{}) (common.MatchNamer, error) { nm, ok := yamlmn.(map[interface{}]interface{}) if !ok { return nil, fmt.Errorf("not a map") } var smap = make(map[string][]string) var nametmpl string for k, v := range nm { key, ok := k.(string) if !ok { return nil, fmt.Errorf("non-string key %v", k) } if key == "name" { value, ok := v.(string) if !ok { return nil, fmt.Errorf("non-string value %v for key %q", v, key) } nametmpl = value } else { vals, ok := v.([]interface{}) if !ok { return nil, fmt.Errorf("non-string array value %v for key %q", v, key) } var strs []string for i, si := range vals { s, ok := si.(string) if !ok { return nil, fmt.Errorf("non-string value %v in list[%d] for key %q", v, i, key) } strs = append(strs, s) } smap[key] = strs } } var matchers andMatcher if comm, ok := smap["comm"]; ok { comms := make(map[string]struct{}) for _, c := range comm { comms[c] = struct{}{} } matchers = append(matchers, &commMatcher{comms}) } if exe, ok := smap["exe"]; ok { exes := make(map[string]string) for _, e := range exe { if strings.Contains(e, "/") { exes[filepath.Base(e)] = e } else { exes[e] = "" } } matchers = append(matchers, &exeMatcher{exes}) } if cmdline, ok := smap["cmdline"]; ok { var rs []*regexp.Regexp for _, c := range cmdline { r, err := regexp.Compile(c) if err != nil { return nil, fmt.Errorf("bad cmdline regex %q: %v", c, err) } rs = append(rs, r) } matchers = append(matchers, &cmdlineMatcher{ regexes: rs, captures: make(map[string]string), }) } if len(matchers) == 0 { return nil, fmt.Errorf("no matchers provided") } if nametmpl == "" { nametmpl = "{{.ExeBase}}" } tmpl := template.New("cmdname") tmpl, err := tmpl.Parse(nametmpl) if err != nil { return nil, fmt.Errorf("bad name template %q: %v", nametmpl, err) } return &matchNamer{matchers, templateNamer{tmpl}}, nil } prometheus-process-exporter-0.4.0+ds/config/config_test.go000066400000000000000000000044761336557546600237710ustar00rootroot00000000000000package config import ( // "github.com/kylelemons/godebug/pretty" common "github.com/ncabatoff/process-exporter" . 
"gopkg.in/check.v1" ) func (s MySuite) TestConfigBasic(c *C) { yml := ` process_names: - exe: - bash - exe: - sh - exe: - /bin/ksh ` cfg, err := GetConfig(yml, false) c.Assert(err, IsNil) c.Check(cfg.MatchNamers.matchers, HasLen, 3) bash := common.ProcAttributes{Name: "bash", Cmdline: []string{"/bin/bash"}} sh := common.ProcAttributes{Name: "sh", Cmdline: []string{"sh"}} ksh := common.ProcAttributes{Name: "ksh", Cmdline: []string{"/bin/ksh"}} found, name := cfg.MatchNamers.matchers[0].MatchAndName(bash) c.Check(found, Equals, true) c.Check(name, Equals, "bash") found, name = cfg.MatchNamers.matchers[0].MatchAndName(sh) c.Check(found, Equals, false) found, name = cfg.MatchNamers.matchers[0].MatchAndName(ksh) c.Check(found, Equals, false) found, name = cfg.MatchNamers.matchers[1].MatchAndName(bash) c.Check(found, Equals, false) found, name = cfg.MatchNamers.matchers[1].MatchAndName(sh) c.Check(found, Equals, true) c.Check(name, Equals, "sh") found, name = cfg.MatchNamers.matchers[1].MatchAndName(ksh) c.Check(found, Equals, false) found, name = cfg.MatchNamers.matchers[2].MatchAndName(bash) c.Check(found, Equals, false) found, name = cfg.MatchNamers.matchers[2].MatchAndName(sh) c.Check(found, Equals, false) found, name = cfg.MatchNamers.matchers[2].MatchAndName(ksh) c.Check(found, Equals, true) c.Check(name, Equals, "ksh") } func (s MySuite) TestConfigTemplates(c *C) { yml := ` process_names: - exe: - postmaster cmdline: - "-D\\s+.+?(?P[^/]+)(?:$|\\s)" name: "{{.ExeBase}}:{{.Matches.Path}}" - exe: - prometheus name: "{{.ExeFull}}" ` cfg, err := GetConfig(yml, false) c.Assert(err, IsNil) c.Check(cfg.MatchNamers.matchers, HasLen, 2) postgres := common.ProcAttributes{Name: "postmaster", Cmdline: []string{"/usr/bin/postmaster", "-D", "/data/pg"}} found, name := cfg.MatchNamers.matchers[0].MatchAndName(postgres) c.Check(found, Equals, true) c.Check(name, Equals, "postmaster:pg") pm := common.ProcAttributes{Name: "prometheus", Cmdline: []string{"/usr/local/bin/prometheus"}} found, name = cfg.MatchNamers.matchers[1].MatchAndName(pm) c.Check(found, Equals, true) c.Check(name, Equals, "/usr/local/bin/prometheus") } prometheus-process-exporter-0.4.0+ds/fixtures/000077500000000000000000000000001336557546600215275ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/000077500000000000000000000000001336557546600222075ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/cmdline000066400000000000000000000000431336557546600235420ustar00rootroot00000000000000./process-exporter-procnamesbashprometheus-process-exporter-0.4.0+ds/fixtures/14804/comm000066400000000000000000000000201336557546600230550ustar00rootroot00000000000000process-exporte 
prometheus-process-exporter-0.4.0+ds/fixtures/14804/exe000077700000000000000000000000001336557546600276152/usr/bin/process-exporterustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/fd/000077500000000000000000000000001336557546600226005ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/fd/0000077700000000000000000000000001336557546600270332../../symlinktargets/abcustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/fd/1000077700000000000000000000000001336557546600270452../../symlinktargets/defustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/fd/10000077700000000000000000000000001336557546600272212../../symlinktargets/xyzustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/fd/2000077700000000000000000000000001336557546600270572../../symlinktargets/ghiustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/fd/3000077700000000000000000000000001336557546600271322../../symlinktargets/uvwustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/14804/io000066400000000000000000000001521336557546600225370ustar00rootroot00000000000000rchar: 1605958 wchar: 69 syscr: 5534 syscw: 1 read_bytes: 1814455 write_bytes: 0 cancelled_write_bytes: 0 prometheus-process-exporter-0.4.0+ds/fixtures/14804/limits000066400000000000000000000024531336557546600234370ustar00rootroot00000000000000Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds Max file size unlimited unlimited bytes Max data size unlimited unlimited bytes Max stack size 8388608 unlimited bytes Max core file size 0 unlimited bytes Max resident set unlimited unlimited bytes Max processes 31421 31421 processes Max open files 1024 65536 files Max locked memory 65536 65536 bytes Max address space unlimited unlimited bytes Max file locks unlimited unlimited locks Max pending signals 31421 31421 signals Max msgqueue size 819200 819200 bytes Max nice priority 0 0 Max realtime priority 0 0 Max realtime timeout unlimited unlimited us prometheus-process-exporter-0.4.0+ds/fixtures/14804/stat000066400000000000000000000005071336557546600231070ustar00rootroot0000000000000014804 (process-exporte) S 10884 14804 10884 34834 14895 1077936128 1603 0 767 0 10 4 0 0 20 0 7 0 324219 17174528 1969 18446744073709551615 4194304 7971236 140736389529632 140736389529064 4564099 0 0 0 2143420159 0 0 0 17 4 0 0 2 0 0 10805248 11036864 42311680 140736389534279 140736389534314 140736389534314 140736389537765 0 prometheus-process-exporter-0.4.0+ds/fixtures/14804/status000066400000000000000000000017101336557546600234540ustar00rootroot00000000000000Name: process-exporte State: S (sleeping) Tgid: 14804 Ngid: 0 Pid: 14804 PPid: 10884 TracerPid: 0 Uid: 1000 1000 1000 1000 Gid: 1000 1000 1000 1000 FDSize: 256 Groups: 4 24 27 30 46 110 111 127 1000 NStgid: 14804 NSpid: 14804 NSpgid: 14804 NSsid: 10884 VmPeak: 16772 kB VmSize: 16772 kB VmLck: 0 kB VmPin: 0 kB VmHWM: 7876 kB VmRSS: 7876 kB VmData: 9956 kB VmStk: 132 kB VmExe: 3692 kB VmLib: 0 kB VmPTE: 48 kB VmPMD: 20 kB VmSwap: 10 kB HugetlbPages: 0 kB Threads: 7 SigQ: 0/31421 SigPnd: 0000000000000000 ShdPnd: 0000000000000000 SigBlk: 0000000000000000 SigIgn: 0000000000000000 SigCgt: fffffffe7fc1feff CapInh: 0000000000000000 CapPrm: 0000000000000000 CapEff: 0000000000000000 CapBnd: 0000003fffffffff CapAmb: 0000000000000000 Seccomp: 0 Cpus_allowed: ff Cpus_allowed_list: 0-7 Mems_allowed: 00000000,00000001 Mems_allowed_list: 0 
voluntary_ctxt_switches: 72 nonvoluntary_ctxt_switches: 6 prometheus-process-exporter-0.4.0+ds/fixtures/stat000066400000000000000000000027131336557546600224300ustar00rootroot00000000000000cpu 258072 10128 55919 2163830 6946 0 2336 0 0 0 cpu0 44237 138 12166 358089 1410 0 306 0 0 0 cpu1 39583 23 11894 363839 1027 0 230 0 0 0 cpu2 44817 2670 9943 355700 1509 0 824 0 0 0 cpu3 41434 3808 6188 363646 886 0 250 0 0 0 cpu4 46320 2279 9630 356546 1342 0 312 0 0 0 cpu5 41680 1209 6096 366008 769 0 412 0 0 0 intr 16484556 45 2 0 0 0 0 0 2 1 0 0 0 4 0 0 988 219000 4 3 1601 0 0 247107 0 0 0 0 771839 691840 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 30119844 btime 1508450329 processes 28048 procs_running 2 procs_blocked 0 softirq 5524311 18 1594113 712 780657 248302 0 24642 1420512 0 1455355 prometheus-process-exporter-0.4.0+ds/fixtures/symlinktargets/000077500000000000000000000000001336557546600246075ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/symlinktargets/README000066400000000000000000000002211336557546600254620ustar00rootroot00000000000000This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
They are otherwise ignored by the tests prometheus-process-exporter-0.4.0+ds/fixtures/symlinktargets/abc000066400000000000000000000000001336557546600252450ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/symlinktargets/def000066400000000000000000000000001336557546600252560ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/symlinktargets/ghi000066400000000000000000000000001336557546600252670ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/symlinktargets/uvw000066400000000000000000000000001336557546600253410ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/fixtures/symlinktargets/xyz000066400000000000000000000000001336557546600253520ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/packaging/000077500000000000000000000000001336557546600216025ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/packaging/conf/000077500000000000000000000000001336557546600225275ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/packaging/conf/all.yaml000066400000000000000000000000751336557546600241650ustar00rootroot00000000000000process_names: - name: "{{.Comm}}" cmdline: - '.+'prometheus-process-exporter-0.4.0+ds/packaging/process-exporter.service000066400000000000000000000004061336557546600265100ustar00rootroot00000000000000[Unit] Description=Process Exporter for Prometheus [Service] User=root Type=simple ExecStart=/usr/bin/process-exporter --config.path /etc/process-exporter/all.yaml --web.listen-address=:9256 KillMode=process Restart=always [Install] WantedBy=multi-user.target prometheus-process-exporter-0.4.0+ds/packaging/scripts/000077500000000000000000000000001336557546600232715ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/packaging/scripts/postinstall.sh000077500000000000000000000001551336557546600262050ustar00rootroot00000000000000systemctl daemon-reload systemctl enable process-exporter.service systemctl restart process-exporter.service prometheus-process-exporter-0.4.0+ds/packaging/scripts/postremove.sh000077500000000000000000000000301336557546600260240ustar00rootroot00000000000000systemctl daemon-reload prometheus-process-exporter-0.4.0+ds/packaging/scripts/preremove.sh000077500000000000000000000001231336557546600256300ustar00rootroot00000000000000systemctl stop process-exporter.service systemctl disable process-exporter.service prometheus-process-exporter-0.4.0+ds/proc/000077500000000000000000000000001336557546600206215ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/proc/base_test.go000066400000000000000000000046651336557546600231340ustar00rootroot00000000000000package proc import ( "fmt" "time" common "github.com/ncabatoff/process-exporter" ) type msi map[string]int // procinfo reads the ProcIdInfo for a proc and returns it or a zero value plus // an error. 
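// A minimal usage sketch (illustrative only, not part of the original tests;
// it assumes a readable /proc):
//
//	fs, _ := NewFS("/proc", false)
//	it := fs.AllProcs()
//	for it.Next() {
//		if info, err := procinfo(it); err == nil {
//			fmt.Println(info)
//		}
//	}
//	_ = it.Close()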
func procinfo(p Proc) (IDInfo, error) { id, err := p.GetProcID() if err != nil { return IDInfo{}, err } static, err := p.GetStatic() if err != nil { return IDInfo{}, err } metrics, _, err := p.GetMetrics() if err != nil { return IDInfo{}, err } return IDInfo{id, static, metrics, nil}, nil } // read everything in the iterator func consumeIter(pi Iter) ([]IDInfo, error) { infos := []IDInfo{} for pi.Next() { info, err := procinfo(pi) if err != nil { return nil, err } infos = append(infos, info) } return infos, nil } type namer map[string]struct{} func newNamer(names ...string) namer { nr := make(namer, len(names)) for _, name := range names { nr[name] = struct{}{} } return nr } func (n namer) String() string { var ss = make([]string, 0, len(n)) for s := range n { ss = append(ss, s) } return fmt.Sprintf("%v", ss) } func (n namer) MatchAndName(nacl common.ProcAttributes) (bool, string) { if _, ok := n[nacl.Name]; ok { return true, nacl.Name } return false, "" } func newProcIDStatic(pid, ppid int, startTime uint64, name string, cmdline []string) (ID, Static) { return ID{pid, startTime}, Static{name, cmdline, ppid, time.Unix(int64(startTime), 0).UTC(), 1000} } func newProc(pid int, name string, m Metrics) IDInfo { id, static := newProcIDStatic(pid, 0, 0, name, nil) return IDInfo{id, static, m, nil} } func newProcStart(pid int, name string, startTime uint64) IDInfo { id, static := newProcIDStatic(pid, 0, startTime, name, nil) return IDInfo{id, static, Metrics{}, nil} } func newProcParent(pid int, name string, ppid int) IDInfo { id, static := newProcIDStatic(pid, ppid, 0, name, nil) return IDInfo{id, static, Metrics{}, nil} } func piinfot(pid int, name string, c Counts, m Memory, f Filedesc, threads []Thread) IDInfo { pii := piinfo(pid, name, c, m, f, len(threads)) pii.Threads = threads return pii } func piinfo(pid int, name string, c Counts, m Memory, f Filedesc, t int) IDInfo { return piinfost(pid, name, c, m, f, t, States{}) } func piinfost(pid int, name string, c Counts, m Memory, f Filedesc, t int, s States) IDInfo { id, static := newProcIDStatic(pid, 0, 0, name, nil) return IDInfo{ ID: id, Static: static, Metrics: Metrics{c, m, f, uint64(t), s, ""}, } } prometheus-process-exporter-0.4.0+ds/proc/grouper.go000066400000000000000000000115771336557546600226460ustar00rootroot00000000000000package proc import ( "time" seq "github.com/ncabatoff/go-seq/seq" common "github.com/ncabatoff/process-exporter" ) type ( // Grouper is the top-level interface to the process metrics. All tracked // procs sharing the same group name are aggregated. Grouper struct { // groupAccum records the historical accumulation of a group so that // we can avoid ever decreasing the counts we return. groupAccum map[string]Counts tracker *Tracker threadAccum map[string]map[string]Threads debug bool } // GroupByName maps group name to group metrics. GroupByName map[string]Group // Threads collects metrics for threads in a group sharing a thread name. Threads struct { Name string NumThreads int Counts } // Group describes the metrics of a single group. Group struct { Counts States Wchans map[string]int Procs int Memory OldestStartTime time.Time OpenFDs uint64 WorstFDratio float64 NumThreads uint64 Threads []Threads } ) // Returns true if x < y. Test designers should ensure they always have // a unique name/numthreads combination for each group. func lessThreads(x, y Threads) bool { return seq.Compare(x, y) < 0 } // NewGrouper creates a grouper. 
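// A hedged wiring sketch (assumed usage, not copied from this repo's main):
//
//	gr := NewGrouper(namer, false, false, false)
//	cerrs, groups, err := gr.Update(fs.AllProcs())
//
// where namer is any common.MatchNamer and fs comes from NewFS; groups maps
// each group name to its aggregated Group metrics.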
func NewGrouper(namer common.MatchNamer, trackChildren, alwaysRecheck, debug bool) *Grouper { g := Grouper{ groupAccum: make(map[string]Counts), threadAccum: make(map[string]map[string]Threads), tracker: NewTracker(namer, trackChildren, alwaysRecheck, debug), debug: debug, } return &g } func groupadd(grp Group, ts Update) Group { var zeroTime time.Time grp.Procs++ grp.Memory.ResidentBytes += ts.Memory.ResidentBytes grp.Memory.VirtualBytes += ts.Memory.VirtualBytes grp.Memory.VmSwapBytes += ts.Memory.VmSwapBytes if ts.Filedesc.Open != -1 { grp.OpenFDs += uint64(ts.Filedesc.Open) } openratio := float64(ts.Filedesc.Open) / float64(ts.Filedesc.Limit) if grp.WorstFDratio < openratio { grp.WorstFDratio = openratio } grp.NumThreads += ts.NumThreads grp.Counts.Add(ts.Latest) grp.States.Add(ts.States) if grp.OldestStartTime == zeroTime || ts.Start.Before(grp.OldestStartTime) { grp.OldestStartTime = ts.Start } if grp.Wchans == nil { grp.Wchans = make(map[string]int) } for wchan, count := range ts.Wchans { grp.Wchans[wchan] += count } return grp } // Update asks the tracker to report on each tracked process by name. // These are aggregated by groupname, augmented by accumulated counts // from the past, and returned. Note that while the Tracker reports // only what counts have changed since last cycle, Grouper.Update // returns counts that never decrease. Even once the last process // with name X disappears, name X will still appear in the results // with the same counts as before; of course, all non-count metrics // will be zero. func (g *Grouper) Update(iter Iter) (CollectErrors, GroupByName, error) { cerrs, tracked, err := g.tracker.Update(iter) if err != nil { return cerrs, nil, err } return cerrs, g.groups(tracked), nil } // Translate the updates into a new GroupByName and update internal history. func (g *Grouper) groups(tracked []Update) GroupByName { groups := make(GroupByName) threadsByGroup := make(map[string][]ThreadUpdate) for _, update := range tracked { groups[update.GroupName] = groupadd(groups[update.GroupName], update) if update.Threads != nil { threadsByGroup[update.GroupName] = append(threadsByGroup[update.GroupName], update.Threads...) } } // Add any accumulated counts to what was just observed, // and update the accumulators. for gname, group := range groups { if oldcounts, ok := g.groupAccum[gname]; ok { group.Counts.Add(Delta(oldcounts)) } g.groupAccum[gname] = group.Counts group.Threads = g.threads(gname, threadsByGroup[gname]) groups[gname] = group } // Now add any groups that were observed in the past but aren't running now. for gname, gcounts := range g.groupAccum { if _, ok := groups[gname]; !ok { groups[gname] = Group{Counts: gcounts} } } return groups } func (g *Grouper) threads(gname string, tracked []ThreadUpdate) []Threads { if len(tracked) == 0 { delete(g.threadAccum, gname) return nil } ret := make([]Threads, 0, len(tracked)) threads := make(map[string]Threads) // First aggregate the thread metrics by thread name. for _, nc := range tracked { curthr := threads[nc.ThreadName] curthr.NumThreads++ curthr.Counts.Add(nc.Latest) curthr.Name = nc.ThreadName threads[nc.ThreadName] = curthr } // Add any accumulated counts to what was just observed, // and update the accumulators. 
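// Worked example of the merge below: if a prior cycle left the "t1"
// accumulator at CPUUserTime=5 and this cycle's deltas contributed 2,
// the entry reported for "t1" becomes 7, so per-name thread counts stay
// monotonic even as individual thread IDs come and go.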
if history := g.threadAccum[gname]; history != nil { for tname := range threads { if oldcounts, ok := history[tname]; ok { counts := threads[tname] counts.Add(Delta(oldcounts.Counts)) threads[tname] = counts } } } g.threadAccum[gname] = threads for _, thr := range threads { ret = append(ret, thr) } return ret } prometheus-process-exporter-0.4.0+ds/proc/grouper_test.go000066400000000000000000000160401336557546600236730ustar00rootroot00000000000000package proc import ( "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" ) type grouptest struct { grouper *Grouper procs Iter want GroupByName } //func (gt grouptest) run(c *C) { // _, err := gt.grouper.Update(gt.procs) // c.Assert(err, IsNil) // // got := gt.grouper.curgroups() // c.Check(got, DeepEquals, gt.want, Commentf("diff %s", pretty.Compare(got, gt.want))) //} func rungroup(t *testing.T, gr *Grouper, procs Iter) GroupByName { _, groups, err := gr.Update(procs) if err != nil { t.Fatalf("group.Update error: %v", err) } return groups } // TestGrouperBasic tests core Update/curgroups functionality on single-proc // groups: the grouper adds to counts and updates the other tracked metrics like // Memory. func TestGrouperBasic(t *testing.T) { p1, p2 := 1, 2 n1, n2 := "g1", "g2" starttime := time.Unix(0, 0).UTC() tests := []struct { procs []IDInfo want GroupByName }{ { []IDInfo{ piinfost(p1, n1, Counts{1, 2, 3, 4, 5, 6, 0, 0}, Memory{7, 8, 0}, Filedesc{4, 400}, 2, States{Other: 1}), piinfost(p2, n2, Counts{2, 3, 4, 5, 6, 7, 0, 0}, Memory{8, 9, 0}, Filedesc{40, 400}, 3, States{Waiting: 1}), }, GroupByName{ "g1": Group{Counts{}, States{Other: 1}, msi{}, 1, Memory{7, 8, 0}, starttime, 4, 0.01, 2, nil}, "g2": Group{Counts{}, States{Waiting: 1}, msi{}, 1, Memory{8, 9, 0}, starttime, 40, 0.1, 3, nil}, }, }, { []IDInfo{ piinfost(p1, n1, Counts{2, 3, 4, 5, 6, 7, 0, 0}, Memory{6, 7, 0}, Filedesc{100, 400}, 4, States{Zombie: 1}), piinfost(p2, n2, Counts{4, 5, 6, 7, 8, 9, 0, 0}, Memory{9, 8, 0}, Filedesc{400, 400}, 2, States{Running: 1}), }, GroupByName{ "g1": Group{Counts{1, 1, 1, 1, 1, 1, 0, 0}, States{Zombie: 1}, msi{}, 1, Memory{6, 7, 0}, starttime, 100, 0.25, 4, nil}, "g2": Group{Counts{2, 2, 2, 2, 2, 2, 0, 0}, States{Running: 1}, msi{}, 1, Memory{9, 8, 0}, starttime, 400, 1, 2, nil}, }, }, } gr := NewGrouper(newNamer(n1, n2), false, false, false) for i, tc := range tests { got := rungroup(t, gr, procInfoIter(tc.procs...)) if diff := cmp.Diff(got, tc.want); diff != "" { t.Errorf("%d: curgroups differs: (-got +want)\n%s", i, diff) } } } // TestGrouperProcJoin tests the appearance of a new process in a group, // and that all procs metrics contribute to a group. func TestGrouperProcJoin(t *testing.T) { p1, p2 := 1, 2 n1, n2 := "g1", "g1" starttime := time.Unix(0, 0).UTC() tests := []struct { procs []IDInfo want GroupByName }{ { []IDInfo{ piinfo(p1, n1, Counts{1, 2, 3, 4, 5, 6, 0, 0}, Memory{3, 4, 0}, Filedesc{4, 400}, 2), }, GroupByName{ "g1": Group{Counts{}, States{}, msi{}, 1, Memory{3, 4, 0}, starttime, 4, 0.01, 2, nil}, }, }, { // The counts for pid2 won't be factored into the total yet because we only add // to counts starting with the second time we see a proc. Memory and FDs are // affected though. 
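// (Concretely: p2's first observation, Counts{1, ...}, only sets its
// baseline; the group totals below grow by p2's increases starting from
// the next cycle, while its Memory and Filedesc count right away.)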
[]IDInfo{ piinfost(p1, n1, Counts{3, 4, 5, 6, 7, 8, 0, 0}, Memory{3, 4, 0}, Filedesc{4, 400}, 2, States{Running: 1}), piinfost(p2, n2, Counts{1, 1, 1, 1, 1, 1, 0, 0}, Memory{1, 2, 0}, Filedesc{40, 400}, 3, States{Sleeping: 1}), }, GroupByName{ "g1": Group{Counts{2, 2, 2, 2, 2, 2, 0, 0}, States{Running: 1, Sleeping: 1}, msi{}, 2, Memory{4, 6, 0}, starttime, 44, 0.1, 5, nil}, }, }, { []IDInfo{ piinfost(p1, n1, Counts{4, 5, 6, 7, 8, 9, 0, 0}, Memory{1, 5, 0}, Filedesc{4, 400}, 2, States{Running: 1}), piinfost(p2, n2, Counts{2, 2, 2, 2, 2, 2, 0, 0}, Memory{2, 4, 0}, Filedesc{40, 400}, 3, States{Running: 1}), }, GroupByName{ "g1": Group{Counts{4, 4, 4, 4, 4, 4, 0, 0}, States{Running: 2}, msi{}, 2, Memory{3, 9, 0}, starttime, 44, 0.1, 5, nil}, }, }, } gr := NewGrouper(newNamer(n1), false, false, false) for i, tc := range tests { got := rungroup(t, gr, procInfoIter(tc.procs...)) if diff := cmp.Diff(got, tc.want); diff != "" { t.Errorf("%d: curgroups differs: (-got +want)\n%s", i, diff) } } } // TestGrouperNonDecreasing tests the disappearance of a process. Its previous // contribution to the counts should not go away when that happens. func TestGrouperNonDecreasing(t *testing.T) { p1, p2 := 1, 2 n1, n2 := "g1", "g1" starttime := time.Unix(0, 0).UTC() tests := []struct { procs []IDInfo want GroupByName }{ { []IDInfo{ piinfo(p1, n1, Counts{3, 4, 5, 6, 7, 8, 0, 0}, Memory{3, 4, 0}, Filedesc{4, 400}, 2), piinfo(p2, n2, Counts{1, 1, 1, 1, 1, 1, 0, 0}, Memory{1, 2, 0}, Filedesc{40, 400}, 3), }, GroupByName{ "g1": Group{Counts{}, States{}, msi{}, 2, Memory{4, 6, 0}, starttime, 44, 0.1, 5, nil}, }, }, { []IDInfo{ piinfo(p1, n1, Counts{4, 5, 6, 7, 8, 9, 0, 0}, Memory{1, 5, 0}, Filedesc{4, 400}, 2), }, GroupByName{ "g1": Group{Counts{1, 1, 1, 1, 1, 1, 0, 0}, States{}, msi{}, 1, Memory{1, 5, 0}, starttime, 4, 0.01, 2, nil}, }, }, { []IDInfo{}, GroupByName{ "g1": Group{Counts{1, 1, 1, 1, 1, 1, 0, 0}, States{}, nil, 0, Memory{}, time.Time{}, 0, 0, 0, nil}, }, }, } gr := NewGrouper(newNamer(n1), false, false, false) for i, tc := range tests { got := rungroup(t, gr, procInfoIter(tc.procs...)) if diff := cmp.Diff(got, tc.want); diff != "" { t.Errorf("%d: curgroups differs: (-got +want)\n%s", i, diff) } } } func TestGrouperThreads(t *testing.T) { p, n, tm := 1, "g1", time.Unix(0, 0).UTC() tests := []struct { proc IDInfo want GroupByName }{ { piinfot(p, n, Counts{}, Memory{}, Filedesc{1, 1}, []Thread{ {ThreadID(ID{p, 0}), "t1", Counts{1, 2, 3, 4, 5, 6, 0, 0}, "", States{}}, {ThreadID(ID{p + 1, 0}), "t2", Counts{1, 1, 1, 1, 1, 1, 0, 0}, "", States{}}, }), GroupByName{ "g1": Group{Counts{}, States{}, msi{}, 1, Memory{}, tm, 1, 1, 2, []Threads{ Threads{"t1", 1, Counts{}}, Threads{"t2", 1, Counts{}}, }}, }, }, { piinfot(p, n, Counts{}, Memory{}, Filedesc{1, 1}, []Thread{ {ThreadID(ID{p, 0}), "t1", Counts{2, 3, 4, 5, 6, 7, 0, 0}, "", States{}}, {ThreadID(ID{p + 1, 0}), "t2", Counts{2, 2, 2, 2, 2, 2, 0, 0}, "", States{}}, {ThreadID(ID{p + 2, 0}), "t2", Counts{1, 1, 1, 1, 1, 1, 0, 0}, "", States{}}, }), GroupByName{ "g1": Group{Counts{}, States{}, msi{}, 1, Memory{}, tm, 1, 1, 3, []Threads{ Threads{"t1", 1, Counts{1, 1, 1, 1, 1, 1, 0, 0}}, Threads{"t2", 2, Counts{1, 1, 1, 1, 1, 1, 0, 0}}, }}, }, }, { piinfot(p, n, Counts{}, Memory{}, Filedesc{1, 1}, []Thread{ {ThreadID(ID{p + 1, 0}), "t2", Counts{4, 4, 4, 4, 4, 4, 0, 0}, "", States{}}, {ThreadID(ID{p + 2, 0}), "t2", Counts{2, 3, 4, 5, 6, 7, 0, 0}, "", States{}}, }), GroupByName{ "g1": Group{Counts{}, States{}, msi{}, 1, Memory{}, tm, 1, 1, 2, []Threads{ Threads{"t2", 2, 
Counts{4, 5, 6, 7, 8, 9, 0, 0}}, }}, }, }, } opts := cmpopts.SortSlices(lessThreads) gr := NewGrouper(newNamer(n), false, false, false) for i, tc := range tests { got := rungroup(t, gr, procInfoIter(tc.proc)) if diff := cmp.Diff(got, tc.want, opts); diff != "" { t.Errorf("%d: curgroups differs: (-got +want)\n%s", i, diff) } } } prometheus-process-exporter-0.4.0+ds/proc/read.go000066400000000000000000000320441336557546600220660ustar00rootroot00000000000000package proc import ( "fmt" "os" "path/filepath" "strconv" "time" "github.com/ncabatoff/procfs" ) // ErrProcNotExist indicates a process couldn't be read because it doesn't exist, // typically because it disappeared while we were reading it. var ErrProcNotExist = fmt.Errorf("process does not exist") type ( // ID uniquely identifies a process. ID struct { // UNIX process id Pid int // The time the process started after system boot; the value is expressed // in clock ticks. StartTimeRel uint64 } ThreadID ID // Static contains data read from /proc/pid/* Static struct { Name string Cmdline []string ParentPid int StartTime time.Time EffectiveUID int } // Counts are metric counters common to threads, processes, and groups. Counts struct { CPUUserTime float64 CPUSystemTime float64 ReadBytes uint64 WriteBytes uint64 MajorPageFaults uint64 MinorPageFaults uint64 CtxSwitchVoluntary uint64 CtxSwitchNonvoluntary uint64 } // Memory describes a proc's memory usage. Memory struct { ResidentBytes uint64 VirtualBytes uint64 VmSwapBytes uint64 } // Filedesc describes a proc's file descriptor usage and soft limit. Filedesc struct { // Open is the count of open file descriptors, -1 if unknown. Open int64 // Limit is the fd soft limit for the process. Limit uint64 } // States counts how many threads are in each state. States struct { Running int Sleeping int Waiting int Zombie int Other int } // Metrics contains data read from /proc/pid/* Metrics struct { Counts Memory Filedesc NumThreads uint64 States Wchan string } // Thread contains per-thread data. Thread struct { ThreadID ThreadName string Counts Wchan string States } // IDInfo groups all info for a single process. IDInfo struct { ID Static Metrics Threads []Thread } // ProcIdInfoThreads struct { // ProcIdInfo // Threads []ProcThread // } // Proc wraps the details of the underlying procfs-reading library. // Any of these methods may fail if the process has disappeared. // We try to return as much as possible rather than an error, e.g. // if some /proc files are unreadable. Proc interface { // GetPid() returns the POSIX PID (process id). PIDs may be reused over time. GetPid() int // GetProcID() returns (pid,starttime), which can be considered a unique process id. GetProcID() (ID, error) // GetStatic() returns various details read from files under /proc/<pid>/. Technically // name may not be static, but we'll pretend it is. GetStatic() (Static, error) // GetMetrics() returns various metrics read from files under /proc/<pid>/. // It returns an error on complete failure. Otherwise, it returns metrics // and 0 on complete success, 1 if some (like I/O) couldn't be read. GetMetrics() (Metrics, int, error) GetStates() (States, error) GetWchan() (string, error) GetCounts() (Counts, int, error) GetThreads() ([]Thread, error) } // proccache implements the Proc interface by acting as a wrapper for procfs.Proc // that caches results of some reads. 
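// (For example, GetProcID, GetStatic and GetMetrics all consult
// /proc/<pid>/stat; caching the first successful read lets one scrape
// read that file once per process instead of once per accessor.)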
proccache struct { procfs.Proc procid *ID stat *procfs.ProcStat status *procfs.ProcStatus cmdline []string io *procfs.ProcIO fs *FS wchan *string } proc struct { proccache } // procs is a fancier []Proc that saves on some copying. procs interface { get(int) Proc length() int } // procfsprocs implements procs using procfs. procfsprocs struct { Procs []procfs.Proc fs *FS } // Iter is an iterator over a sequence of procs. Iter interface { // Next returns true if the iterator is not exhausted. Next() bool // Close releases any resources the iterator uses. Close() error // The iterator satisfies the Proc interface. Proc } // procIterator implements the Iter interface procIterator struct { // procs is the list of Proc we're iterating over. procs // idx is the current iteration, i.e. it's an index into procs. idx int // err is set with an error when Next() fails. It is not affected by failures accessing // the current iteration variable, e.g. with GetProcId. err error // Proc is the current iteration variable, or nil if Next() has never been called or the // iterator is exhausted. Proc } // Source is a source of procs. Source interface { // AllProcs returns all the processes in this source at this moment in time. AllProcs() Iter } // FS implements Source. FS struct { procfs.FS BootTime uint64 MountPoint string debug bool } ) func (ii IDInfo) String() string { return fmt.Sprintf("%+v:%+v", ii.ID, ii.Static) } // Add adds c2 to the counts. func (c *Counts) Add(c2 Delta) { c.CPUUserTime += c2.CPUUserTime c.CPUSystemTime += c2.CPUSystemTime c.ReadBytes += c2.ReadBytes c.WriteBytes += c2.WriteBytes c.MajorPageFaults += c2.MajorPageFaults c.MinorPageFaults += c2.MinorPageFaults c.CtxSwitchVoluntary += c2.CtxSwitchVoluntary c.CtxSwitchNonvoluntary += c2.CtxSwitchNonvoluntary } // Sub subtracts c2 from the counts. func (c Counts) Sub(c2 Counts) Delta { c.CPUUserTime -= c2.CPUUserTime c.CPUSystemTime -= c2.CPUSystemTime c.ReadBytes -= c2.ReadBytes c.WriteBytes -= c2.WriteBytes c.MajorPageFaults -= c2.MajorPageFaults c.MinorPageFaults -= c2.MinorPageFaults c.CtxSwitchVoluntary -= c2.CtxSwitchVoluntary c.CtxSwitchNonvoluntary -= c2.CtxSwitchNonvoluntary return Delta(c) } func (s *States) Add(s2 States) { s.Other += s2.Other s.Running += s2.Running s.Sleeping += s2.Sleeping s.Waiting += s2.Waiting s.Zombie += s2.Zombie } func (p IDInfo) GetThreads() ([]Thread, error) { return p.Threads, nil } // GetPid implements Proc. func (p IDInfo) GetPid() int { return p.ID.Pid } // GetProcID implements Proc. func (p IDInfo) GetProcID() (ID, error) { return p.ID, nil } // GetStatic implements Proc. func (p IDInfo) GetStatic() (Static, error) { return p.Static, nil } // GetCounts implements Proc. func (p IDInfo) GetCounts() (Counts, int, error) { return p.Metrics.Counts, 0, nil } // GetMetrics implements Proc. func (p IDInfo) GetMetrics() (Metrics, int, error) { return p.Metrics, 0, nil } // GetStates implements Proc. 
func (p IDInfo) GetStates() (States, error) { return p.States, nil } func (p IDInfo) GetWchan() (string, error) { return p.Wchan, nil } func (p *proccache) GetPid() int { return p.Proc.PID } func (p *proccache) getStat() (procfs.ProcStat, error) { if p.stat == nil { stat, err := p.Proc.NewStat() if err != nil { return procfs.ProcStat{}, err } p.stat = &stat } return *p.stat, nil } func (p *proccache) getStatus() (procfs.ProcStatus, error) { if p.status == nil { status, err := p.Proc.NewStatus() if err != nil { return procfs.ProcStatus{}, err } p.status = &status } return *p.status, nil } // GetProcID implements Proc. func (p *proccache) GetProcID() (ID, error) { if p.procid == nil { stat, err := p.getStat() if err != nil { return ID{}, err } p.procid = &ID{Pid: p.GetPid(), StartTimeRel: stat.Starttime} } return *p.procid, nil } func (p *proccache) getCmdLine() ([]string, error) { if p.cmdline == nil { cmdline, err := p.Proc.CmdLine() if err != nil { return nil, err } p.cmdline = cmdline } return p.cmdline, nil } func (p *proccache) getWchan() (string, error) { if p.wchan == nil { wchan, err := p.Proc.Wchan() if err != nil { return "", err } p.wchan = &wchan } return *p.wchan, nil } func (p *proccache) getIo() (procfs.ProcIO, error) { if p.io == nil { io, err := p.Proc.NewIO() if err != nil { return procfs.ProcIO{}, err } p.io = &io } return *p.io, nil } // GetStatic returns the Static details corresponding to this proc. func (p *proccache) GetStatic() (Static, error) { // /proc/<pid>/cmdline is normally world-readable. cmdline, err := p.getCmdLine() if err != nil { return Static{}, err } // /proc/<pid>/stat is normally world-readable. stat, err := p.getStat() if err != nil { return Static{}, err } startTime := time.Unix(int64(p.fs.BootTime), 0).UTC() startTime = startTime.Add(time.Second / userHZ * time.Duration(stat.Starttime)) // /proc/<pid>/status is normally world-readable. status, err := p.getStatus() if err != nil { return Static{}, err } return Static{ Name: stat.Comm, Cmdline: cmdline, ParentPid: stat.PPID, StartTime: startTime, EffectiveUID: status.UIDEffective, }, nil } func (p proc) GetCounts() (Counts, int, error) { stat, err := p.getStat() if err != nil { if err == os.ErrNotExist { err = ErrProcNotExist } return Counts{}, 0, err } status, err := p.getStatus() if err != nil { if err == os.ErrNotExist { err = ErrProcNotExist } return Counts{}, 0, err } io, err := p.getIo() softerrors := 0 if err != nil { softerrors++ } return Counts{ CPUUserTime: float64(stat.UTime) / userHZ, CPUSystemTime: float64(stat.STime) / userHZ, ReadBytes: io.ReadBytes, WriteBytes: io.WriteBytes, MajorPageFaults: uint64(stat.MajFlt), MinorPageFaults: uint64(stat.MinFlt), CtxSwitchVoluntary: uint64(status.VoluntaryCtxtSwitches), CtxSwitchNonvoluntary: uint64(status.NonvoluntaryCtxtSwitches), }, softerrors, nil } func (p proc) GetWchan() (string, error) { return p.getWchan() } func (p proc) GetStates() (States, error) { stat, err := p.getStat() if err != nil { return States{}, err } var s States switch stat.State { case "R": s.Running++ case "S": s.Sleeping++ case "D": s.Waiting++ case "Z": s.Zombie++ default: s.Other++ } return s, nil } // GetMetrics returns the current metrics for the proc. The results are // not cached. func (p proc) GetMetrics() (Metrics, int, error) { counts, softerrors, err := p.GetCounts() if err != nil { return Metrics{}, 0, err } // We don't need to check for error here because p will have cached // the successful result of calling getStat in GetCounts. 
// Since GetMetrics isn't a pointer receiver method, our callers // won't see the effect of the caching between calls. stat, _ := p.getStat() // Ditto for states states, _ := p.GetStates() status, err := p.getStatus() if err != nil { return Metrics{}, 0, err } numfds, err := p.Proc.FileDescriptorsLen() if err != nil { numfds = -1 softerrors |= 1 } limits, err := p.Proc.NewLimits() if err != nil { return Metrics{}, 0, err } wchan, err := p.getWchan() if err != nil { softerrors |= 1 } return Metrics{ Counts: counts, Memory: Memory{ ResidentBytes: uint64(stat.ResidentMemory()), VirtualBytes: uint64(stat.VirtualMemory()), VmSwapBytes: uint64(status.VmSwapKB * 1024), }, Filedesc: Filedesc{ Open: int64(numfds), Limit: uint64(limits.OpenFiles), }, NumThreads: uint64(stat.NumThreads), States: states, Wchan: wchan, }, softerrors, nil } func (p proc) GetThreads() ([]Thread, error) { fs, err := p.fs.threadFs(p.PID) if err != nil { return nil, err } threads := []Thread{} iter := fs.AllProcs() for iter.Next() { var id ID id, err = iter.GetProcID() if err != nil { continue } var static Static static, err = iter.GetStatic() if err != nil { continue } var counts Counts counts, _, err = iter.GetCounts() if err != nil { continue } wchan, _ := iter.GetWchan() states, _ := iter.GetStates() threads = append(threads, Thread{ ThreadID: ThreadID(id), ThreadName: static.Name, Counts: counts, Wchan: wchan, States: states, }) } err = iter.Close() if err != nil { return nil, err } if len(threads) < 2 { return nil, nil } return threads, nil } // See https://github.com/prometheus/procfs/blob/master/proc_stat.go for details on userHZ. const userHZ = 100 // NewFS returns a new FS mounted under the given mountPoint. It will error // if the mount point can't be read. func NewFS(mountPoint string, debug bool) (*FS, error) { fs, err := procfs.NewFS(mountPoint) if err != nil { return nil, err } stat, err := fs.NewStat() if err != nil { return nil, err } return &FS{fs, stat.BootTime, mountPoint, debug}, nil } func (fs *FS) threadFs(pid int) (*FS, error) { mountPoint := filepath.Join(fs.MountPoint, strconv.Itoa(pid), "task") tfs, err := procfs.NewFS(mountPoint) if err != nil { return nil, err } return &FS{tfs, fs.BootTime, mountPoint, false}, nil } // AllProcs implements Source. func (fs *FS) AllProcs() Iter { procs, err := fs.FS.AllProcs() if err != nil { err = fmt.Errorf("Error reading procs: %v", err) } return &procIterator{procs: procfsprocs{procs, fs}, err: err, idx: -1} } // get implements procs. func (p procfsprocs) get(i int) Proc { return &proc{proccache{Proc: p.Procs[i], fs: p.fs}} } // length implements procs. func (p procfsprocs) length() int { return len(p.Procs) } // Next implements Iter. func (pi *procIterator) Next() bool { pi.idx++ if pi.idx < pi.procs.length() { pi.Proc = pi.procs.get(pi.idx) } else { pi.Proc = nil } return pi.idx < pi.procs.length() } // Close implements Iter. func (pi *procIterator) Close() error { pi.Next() pi.procs = nil pi.Proc = nil return pi.err } prometheus-process-exporter-0.4.0+ds/proc/read_test.go000066400000000000000000000115041336557546600231230ustar00rootroot00000000000000package proc import ( "fmt" "os" "os/exec" "testing" "time" "github.com/google/go-cmp/cmp" ) type ( // procIDInfos implements procs using a slice of already // populated ProcIdInfo. Used for testing. 
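// (procInfoIter below wraps a procIDInfos in a procIterator; this is how
// the tests in this package fabricate an Iter without touching /proc.)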
procIDInfos []IDInfo ) func (p procIDInfos) get(i int) Proc { return &p[i] } func (p procIDInfos) length() int { return len(p) } func procInfoIter(ps ...IDInfo) *procIterator { return &procIterator{procs: procIDInfos(ps), idx: -1} } func allprocs(procpath string) Iter { fs, err := NewFS(procpath, false) if err != nil { cwd, _ := os.Getwd() panic("can't read " + procpath + ", cwd=" + cwd + ", err=" + fmt.Sprintf("%v", err)) } return fs.AllProcs() } func TestReadFixture(t *testing.T) { procs := allprocs("../fixtures") var pii IDInfo count := 0 for procs.Next() { count++ var err error pii, err = procinfo(procs) noerr(t, err) } err := procs.Close() noerr(t, err) if count != 1 { t.Fatalf("got %d procs, want 1", count) } wantprocid := ID{Pid: 14804, StartTimeRel: 0x4f27b} if diff := cmp.Diff(pii.ID, wantprocid); diff != "" { t.Errorf("procid differs: (-got +want)\n%s", diff) } stime, _ := time.Parse(time.RFC3339Nano, "2017-10-19T22:52:51.19Z") wantstatic := Static{ Name: "process-exporte", Cmdline: []string{"./process-exporter", "-procnames", "bash"}, ParentPid: 10884, StartTime: stime, EffectiveUID: 1000, } if diff := cmp.Diff(pii.Static, wantstatic); diff != "" { t.Errorf("static differs: (-got +want)\n%s", diff) } wantmetrics := Metrics{ Counts: Counts{ CPUUserTime: 0.1, CPUSystemTime: 0.04, ReadBytes: 1814455, WriteBytes: 0, MajorPageFaults: 0x2ff, MinorPageFaults: 0x643, CtxSwitchVoluntary: 72, CtxSwitchNonvoluntary: 6, }, Memory: Memory{ ResidentBytes: 0x7b1000, VirtualBytes: 0x1061000, VmSwapBytes: 0x2800, }, Filedesc: Filedesc{ Open: 5, Limit: 0x400, }, NumThreads: 7, States: States{Sleeping: 1}, } if diff := cmp.Diff(pii.Metrics, wantmetrics); diff != "" { t.Errorf("metrics differs: (-got +want)\n%s", diff) } } func noerr(t *testing.T, err error) { if err != nil { t.Fatalf("error: %v", err) } } // Basic test of proc reading: does AllProcs return at least two procs, one of which is us. func TestAllProcs(t *testing.T) { procs := allprocs("/proc") count := 0 for procs.Next() { count++ if procs.GetPid() != os.Getpid() { continue } procid, err := procs.GetProcID() noerr(t, err) if procid.Pid != os.Getpid() { t.Errorf("got %d, want %d", procid.Pid, os.Getpid()) } static, err := procs.GetStatic() noerr(t, err) if static.ParentPid != os.Getppid() { t.Errorf("got %d, want %d", static.ParentPid, os.Getppid()) } metrics, _, err := procs.GetMetrics() noerr(t, err) if metrics.ResidentBytes == 0 { t.Errorf("got 0 bytes resident, want nonzero") } // All Go programs have multiple threads. if metrics.NumThreads < 2 { t.Errorf("got %d threads, want >1", metrics.NumThreads) } var zstates States if metrics.States == zstates { t.Errorf("got empty states") } threads, err := procs.GetThreads() if len(threads) < 2 { t.Errorf("got %d thread details, want >1", len(threads)) } } err := procs.Close() noerr(t, err) if count == 0 { t.Errorf("got %d, want 0", count) } } // Test that we can observe the absence of a child process before it spawns and after it exits, // and its presence during its lifetime. 
func TestAllProcsSpawn(t *testing.T) { childprocs := func() []IDInfo { found := []IDInfo{} procs := allprocs("/proc") mypid := os.Getpid() for procs.Next() { procid, err := procs.GetProcID() if err != nil { continue } static, err := procs.GetStatic() if err != nil { continue } if static.ParentPid == mypid { found = append(found, IDInfo{procid, static, Metrics{}, nil}) } } err := procs.Close() if err != nil { t.Fatalf("error closing procs iterator: %v", err) } return found } foundcat := func(procs []IDInfo) bool { for _, proc := range procs { if proc.Name == "cat" { return true } } return false } if foundcat(childprocs()) { t.Errorf("found cat before spawning it") } cmd := exec.Command("/bin/cat") wc, err := cmd.StdinPipe() noerr(t, err) err = cmd.Start() noerr(t, err) if !foundcat(childprocs()) { t.Errorf("didn't find cat after spawning it") } err = wc.Close() noerr(t, err) err = cmd.Wait() noerr(t, err) if foundcat(childprocs()) { t.Errorf("found cat after exit") } } func TestIterator(t *testing.T) { p1 := newProc(1, "p1", Metrics{}) p2 := newProc(2, "p2", Metrics{}) want := []IDInfo{p1, p2} pis := procInfoIter(want...) got, err := consumeIter(pis) noerr(t, err) if diff := cmp.Diff(got, want); diff != "" { t.Errorf("procs differs: (-got +want)\n%s", diff) } } prometheus-process-exporter-0.4.0+ds/proc/tracker.go000066400000000000000000000303111336557546600226010ustar00rootroot00000000000000package proc import ( "fmt" "log" "os/user" "strconv" "time" seq "github.com/ncabatoff/go-seq/seq" common "github.com/ncabatoff/process-exporter" ) type ( // Tracker tracks processes and records metrics. Tracker struct { // namer determines what processes to track and names them namer common.MatchNamer // tracked holds the processes are being monitored. Processes // may be blacklisted such that they no longer get tracked by // setting their value in the tracked map to nil. tracked map[ID]*trackedProc // procIds is a map from pid to ProcId. This is a convenience // to allow finding the Tracked entry of a parent process. procIds map[int]ID // trackChildren makes Tracker track descendants of procs the // namer wanted tracked. trackChildren bool // never ignore processes, i.e. always re-check untracked processes in case comm has changed alwaysRecheck bool username map[int]string debug bool } // Delta is an alias of Counts used to signal that its contents are not // totals, but rather the result of subtracting two totals. Delta Counts trackedThread struct { name string accum Counts latest Delta lastUpdate time.Time wchan string } // trackedProc accumulates metrics for a process, as well as // remembering an optional GroupName tag associated with it. trackedProc struct { // lastUpdate is used internally during the update cycle to find which procs have exited lastUpdate time.Time // static static Static metrics Metrics // lastaccum is the increment to the counters seen in the last update. lastaccum Delta // groupName is the tag for this proc given by the namer. groupName string threads map[ThreadID]trackedThread } // ThreadUpdate describes what's changed for a thread since the last cycle. ThreadUpdate struct { // ThreadName is the name of the thread based on field of stat. ThreadName string // Latest is how much the counts increased since last cycle. Latest Delta } // Update reports on the latest stats for a process. Update struct { // GroupName is the name given by the namer to the process. GroupName string // Latest is how much the counts increased since last cycle. 
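// (E.g. a proc whose total CPUUserTime moved from 5.0s to 5.3s between
// scrapes yields Latest.CPUUserTime == 0.3.)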
Latest Delta // Memory is the current memory usage. Memory // Filedesc is the current fd usage/limit. Filedesc // Start is the time the process started. Start time.Time // NumThreads is the number of threads. NumThreads uint64 // States is how many processes are in which run state. States // Wchans is how many threads are in each non-zero wchan. Wchans map[string]int // Threads are the thread updates for this process. Threads []ThreadUpdate } // CollectErrors describes non-fatal errors found while collecting proc // metrics. CollectErrors struct { // Read is incremented every time GetMetrics() returns an error. // This means we failed to load even the basics for the process, // and not just because it disappeared on us. Read int // Partial is incremented every time we're unable to collect // some metrics (e.g. I/O) for a tracked proc, but we're still able // to get the basic stuff like cmdline and core stats. Partial int } ) func lessUpdateGroupName(x, y Update) bool { return x.GroupName < y.GroupName } func lessThreadUpdate(x, y ThreadUpdate) bool { return seq.Compare(x, y) < 0 } func lessCounts(x, y Counts) bool { return seq.Compare(x, y) < 0 } func (tp *trackedProc) getUpdate() Update { u := Update{ GroupName: tp.groupName, Latest: tp.lastaccum, Memory: tp.metrics.Memory, Filedesc: tp.metrics.Filedesc, Start: tp.static.StartTime, NumThreads: tp.metrics.NumThreads, States: tp.metrics.States, Wchans: make(map[string]int), } if tp.metrics.Wchan != "" { u.Wchans[tp.metrics.Wchan] = 1 } if len(tp.threads) > 1 { for _, tt := range tp.threads { u.Threads = append(u.Threads, ThreadUpdate{tt.name, tt.latest}) if tt.wchan != "" { u.Wchans[tt.wchan]++ } } } return u } // NewTracker creates a Tracker. func NewTracker(namer common.MatchNamer, trackChildren, alwaysRecheck, debug bool) *Tracker { return &Tracker{ namer: namer, tracked: make(map[ID]*trackedProc), procIds: make(map[int]ID), trackChildren: trackChildren, alwaysRecheck: alwaysRecheck, username: make(map[int]string), debug: debug, } } func (t *Tracker) track(groupName string, idinfo IDInfo) { tproc := trackedProc{ groupName: groupName, static: idinfo.Static, metrics: idinfo.Metrics, } if len(idinfo.Threads) > 0 { tproc.threads = make(map[ThreadID]trackedThread) for _, thr := range idinfo.Threads { tproc.threads[thr.ThreadID] = trackedThread{ thr.ThreadName, thr.Counts, Delta{}, time.Time{}, thr.Wchan} } } t.tracked[idinfo.ID] = &tproc } func (t *Tracker) ignore(id ID) { // only ignore ID if we didn't set recheck to true if t.alwaysRecheck == false { t.tracked[id] = nil } } func (tp *trackedProc) update(metrics Metrics, now time.Time, cerrs *CollectErrors, threads []Thread) { // newcounts: resource consumption since last cycle newcounts := metrics.Counts tp.lastaccum = newcounts.Sub(tp.metrics.Counts) tp.metrics = metrics tp.lastUpdate = now if len(threads) > 1 { if tp.threads == nil { tp.threads = make(map[ThreadID]trackedThread) } for _, thr := range threads { tt := trackedThread{thr.ThreadName, thr.Counts, Delta{}, now, thr.Wchan} if old, ok := tp.threads[thr.ThreadID]; ok { tt.latest, tt.accum = thr.Counts.Sub(old.accum), thr.Counts } tp.threads[thr.ThreadID] = tt } for id, tt := range tp.threads { if tt.lastUpdate != now { delete(tp.threads, id) } } } else { tp.threads = nil } } // handleProc updates the tracker if it's a known and not ignored proc. // If it's neither known nor ignored, newProc will be non-nil. 
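// A nil newProc with empty CollectErrors therefore just means there was
// nothing to report: the proc was ignored, already tracked and updated
// in place, or gone before it could be read.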
// It is not an error if the process disappears while we are reading // its info out of /proc, it just means nothing will be returned and // the tracker will be unchanged. func (t *Tracker) handleProc(proc Proc, updateTime time.Time) (*IDInfo, CollectErrors) { var cerrs CollectErrors procID, err := proc.GetProcID() if err != nil { return nil, cerrs } // Do nothing if we're ignoring this proc. last, known := t.tracked[procID] if known && last == nil { return nil, cerrs } metrics, softerrors, err := proc.GetMetrics() if err != nil { if t.debug { log.Printf("error reading metrics for %+v: %v", procID, err) } // This usually happens due to the proc having exited, i.e. // we lost the race. We don't count that as an error. if err != ErrProcNotExist { cerrs.Read++ } return nil, cerrs } var threads []Thread threads, err = proc.GetThreads() if err != nil { softerrors |= 1 } cerrs.Partial += softerrors if len(threads) > 0 { metrics.Counts.CtxSwitchNonvoluntary, metrics.Counts.CtxSwitchVoluntary = 0, 0 for _, thread := range threads { metrics.Counts.CtxSwitchNonvoluntary += thread.Counts.CtxSwitchNonvoluntary metrics.Counts.CtxSwitchVoluntary += thread.Counts.CtxSwitchVoluntary metrics.States.Add(thread.States) } } var newProc *IDInfo if known { last.update(metrics, updateTime, &cerrs, threads) } else { static, err := proc.GetStatic() if err != nil { if t.debug { log.Printf("error reading static details for %+v: %v", procID, err) } return nil, cerrs } newProc = &IDInfo{procID, static, metrics, threads} if t.debug { log.Printf("found new proc: %s", newProc) } // Is this a new process with the same pid as one we already know? // Then delete it from the known map, otherwise the cleanup in Update() // will remove the ProcIds entry we're creating here. if oldProcID, ok := t.procIds[procID.Pid]; ok { delete(t.tracked, oldProcID) } t.procIds[procID.Pid] = procID } return newProc, cerrs } // update scans procs and updates metrics for those which are tracked. Processes // that have gone away get removed from the Tracked map. New processes are // returned, along with the count of nonfatal errors. func (t *Tracker) update(procs Iter) ([]IDInfo, CollectErrors, error) { var newProcs []IDInfo var colErrs CollectErrors var now = time.Now() for procs.Next() { newProc, cerrs := t.handleProc(procs, now) if newProc != nil { newProcs = append(newProcs, *newProc) } colErrs.Read += cerrs.Read colErrs.Partial += cerrs.Partial } err := procs.Close() if err != nil { return nil, colErrs, fmt.Errorf("Error reading procs: %v", err) } // Rather than allocating a new map each time to detect procs that have // disappeared, we bump the last update time on those that are still // present. Then as a second pass we traverse the map looking for // stale procs and removing them. for procID, pinfo := range t.tracked { if pinfo == nil { // TODO is this a bug? we're not tracking the proc so we don't see it go away so ProcIds // and Tracked are leaking? continue } if pinfo.lastUpdate != now { delete(t.tracked, procID) delete(t.procIds, procID.Pid) } } return newProcs, colErrs, nil } // checkAncestry walks the process tree recursively towards the root, // stopping at pid 1 or upon finding a parent that's already tracked // or ignored. If we find a tracked parent track this one too; if not, // ignore this one. 
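// A concrete walk, with hypothetical pids: if pid 100 is tracked under
// group "app" and two new procs appear, 101 (parent 100) and 102
// (parent 101), then starting from 102 we recurse to 101, find 100
// tracked, and so track both 101 and 102 under "app".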
func (t *Tracker) checkAncestry(idinfo IDInfo, newprocs map[ID]IDInfo) string { ppid := idinfo.ParentPid pProcID := t.procIds[ppid] if pProcID.Pid < 1 { if t.debug { log.Printf("ignoring unmatched proc with no matched parent: %+v", idinfo) } // Reached root of process tree without finding a tracked parent. t.ignore(idinfo.ID) return "" } // Is the parent already known to the tracker? if ptproc, ok := t.tracked[pProcID]; ok { if ptproc != nil { if t.debug { log.Printf("matched as %q because child of %+v: %+v", ptproc.groupName, pProcID, idinfo) } // We've found a tracked parent. t.track(ptproc.groupName, idinfo) return ptproc.groupName } // We've found an untracked parent. t.ignore(idinfo.ID) return "" } // Is the parent another new process? if pinfoid, ok := newprocs[pProcID]; ok { if name := t.checkAncestry(pinfoid, newprocs); name != "" { if t.debug { log.Printf("matched as %q because child of %+v: %+v", name, pProcID, idinfo) } // We've found a tracked parent, which implies this entire lineage should be tracked. t.track(name, idinfo) return name } } // Parent is dead, i.e. we never saw it, or there's no tracked proc in our ancestry. if t.debug { log.Printf("ignoring unmatched proc with no matched parent: %+v", idinfo) } t.ignore(idinfo.ID) return "" } func (t *Tracker) lookupUid(uid int) string { if name, ok := t.username[uid]; ok { return name } var name string uidstr := strconv.Itoa(uid) u, err := user.LookupId(uidstr) if err != nil { name = uidstr } else { name = u.Username } t.username[uid] = name return name } // Update modifies the tracker's internal state based on what it reads from // iter. Tracks any new procs the namer wants tracked, and updates // its metrics for existing tracked procs. Returns nonfatal errors // and the status of all tracked procs, or an error if fatal. func (t *Tracker) Update(iter Iter) (CollectErrors, []Update, error) { newProcs, colErrs, err := t.update(iter) if err != nil { return colErrs, nil, err } // Step 1: track any new proc that should be tracked based on its name and cmdline. untracked := make(map[ID]IDInfo) for _, idinfo := range newProcs { nacl := common.ProcAttributes{ Name: idinfo.Name, Cmdline: idinfo.Cmdline, Username: t.lookupUid(idinfo.EffectiveUID), } wanted, gname := t.namer.MatchAndName(nacl) if wanted { if t.debug { log.Printf("matched as %q: %+v", gname, idinfo) } t.track(gname, idinfo) } else { untracked[idinfo.ID] = idinfo } } // Step 2: track any untracked new proc that should be tracked because its parent is tracked. if t.trackChildren { for _, idinfo := range untracked { if _, ok := t.tracked[idinfo.ID]; ok { // Already tracked or ignored in an earlier iteration continue } t.checkAncestry(idinfo, untracked) } } tp := []Update{} for _, tproc := range t.tracked { if tproc != nil { tp = append(tp, tproc.getUpdate()) } } return colErrs, tp, nil } prometheus-process-exporter-0.4.0+ds/proc/tracker_test.go000066400000000000000000000127661336557546600236560ustar00rootroot00000000000000package proc import ( "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" ) // Verify that the tracker finds and tracks or ignores procs based on the // namer, and that it can distinguish between two procs with the same pid // but different start time. 
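// (The last case below exercises exactly that: pid 1 reappears with
// StartTimeRel 3 under name g4, so the tracker reports it as a fresh
// process rather than a continuation of the old pid 1.)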
func TestTrackerBasic(t *testing.T) { p1, p2, p3 := 1, 2, 3 n1, n2, n3, n4 := "g1", "g2", "g3", "g4" t1, t2, t3 := time.Unix(1, 0).UTC(), time.Unix(2, 0).UTC(), time.Unix(3, 0).UTC() tests := []struct { procs []IDInfo want []Update }{ { []IDInfo{newProcStart(p1, n1, 1), newProcStart(p3, n3, 1)}, []Update{{GroupName: n1, Start: t1, Wchans: msi{}}}, }, { // p3 (ignored) has exited and p2 has appeared []IDInfo{newProcStart(p1, n1, 1), newProcStart(p2, n2, 2)}, []Update{{GroupName: n1, Start: t1, Wchans: msi{}}, {GroupName: n2, Start: t2, Wchans: msi{}}}, }, { // p1 has exited and a new proc with a new name has taken its pid []IDInfo{newProcStart(p1, n4, 3), newProcStart(p2, n2, 2)}, []Update{{GroupName: n4, Start: t3, Wchans: msi{}}, {GroupName: n2, Start: t2, Wchans: msi{}}}, }, } // Note that n3 should not be tracked according to our namer. tr := NewTracker(newNamer(n1, n2, n4), false, false, false) opts := cmpopts.SortSlices(lessUpdateGroupName) for i, tc := range tests { _, got, err := tr.Update(procInfoIter(tc.procs...)) noerr(t, err) if diff := cmp.Diff(got, tc.want, opts); diff != "" { t.Errorf("%d: update differs: (-got +want)\n%s", i, diff) } } } // TestTrackerChildren verifies that when the tracker is asked to track // children, processes not selected by the namer are still tracked if // they're children of ones that are. func TestTrackerChildren(t *testing.T) { p1, p2, p3 := 1, 2, 3 n1, n2, n3 := "g1", "g2", "g3" // In this test everything starts at time t1 for simplicity t1 := time.Unix(0, 0).UTC() tests := []struct { procs []IDInfo want []Update }{ { []IDInfo{ newProcParent(p1, n1, 0), newProcParent(p2, n2, p1), }, []Update{{GroupName: n2, Start: t1, Wchans: msi{}}}, }, { []IDInfo{ newProcParent(p1, n1, 0), newProcParent(p2, n2, p1), newProcParent(p3, n3, p2), }, []Update{{GroupName: n2, Start: t1, Wchans: msi{}}, {GroupName: n2, Start: t1, Wchans: msi{}}}, }, } // Only n2 and children of n2s should be tracked tr := NewTracker(newNamer(n2), true, false, false) for i, tc := range tests { _, got, err := tr.Update(procInfoIter(tc.procs...)) noerr(t, err) if diff := cmp.Diff(got, tc.want); diff != "" { t.Errorf("%d: update differs: (-got +want)\n%s", i, diff) } } } // TestTrackerMetrics verifies that the updates returned by the tracker // match the input we're giving it. 
func TestTrackerMetrics(t *testing.T) { p, n, tm := 1, "g1", time.Unix(0, 0).UTC() tests := []struct { proc IDInfo want Update }{ { piinfost(p, n, Counts{1, 2, 3, 4, 5, 6, 0, 0}, Memory{7, 8, 0}, Filedesc{1, 10}, 9, States{Sleeping: 1}), Update{n, Delta{}, Memory{7, 8, 0}, Filedesc{1, 10}, tm, 9, States{Sleeping: 1}, msi{}, nil}, }, { piinfost(p, n, Counts{2, 3, 4, 5, 6, 7, 0, 0}, Memory{1, 2, 0}, Filedesc{2, 20}, 1, States{Running: 1}), Update{n, Delta{1, 1, 1, 1, 1, 1, 0, 0}, Memory{1, 2, 0}, Filedesc{2, 20}, tm, 1, States{Running: 1}, msi{}, nil}, }, } tr := NewTracker(newNamer(n), false, false, false) for i, tc := range tests { _, got, err := tr.Update(procInfoIter(tc.proc)) noerr(t, err) if diff := cmp.Diff(got, []Update{tc.want}); diff != "" { t.Errorf("%d: update differs: (-got +want)\n%s", i, diff) } } } func TestTrackerThreads(t *testing.T) { p, n, tm := 1, "g1", time.Unix(0, 0).UTC() tests := []struct { proc IDInfo want Update }{ { piinfo(p, n, Counts{}, Memory{}, Filedesc{1, 1}, 1), Update{n, Delta{}, Memory{}, Filedesc{1, 1}, tm, 1, States{}, msi{}, nil}, }, { piinfot(p, n, Counts{}, Memory{}, Filedesc{1, 1}, []Thread{ {ThreadID(ID{p, 0}), "t1", Counts{1, 2, 3, 4, 5, 6, 0, 0}, "", States{}}, {ThreadID(ID{p + 1, 0}), "t2", Counts{1, 1, 1, 1, 1, 1, 0, 0}, "", States{}}, }), Update{n, Delta{}, Memory{}, Filedesc{1, 1}, tm, 2, States{}, msi{}, []ThreadUpdate{ {"t1", Delta{}}, {"t2", Delta{}}, }, }, }, { piinfot(p, n, Counts{}, Memory{}, Filedesc{1, 1}, []Thread{ {ThreadID(ID{p, 0}), "t1", Counts{2, 3, 4, 5, 6, 7, 0, 0}, "", States{}}, {ThreadID(ID{p + 1, 0}), "t2", Counts{2, 2, 2, 2, 2, 2, 0, 0}, "", States{}}, {ThreadID(ID{p + 2, 0}), "t2", Counts{1, 1, 1, 1, 1, 1, 0, 0}, "", States{}}, }), Update{n, Delta{}, Memory{}, Filedesc{1, 1}, tm, 3, States{}, msi{}, []ThreadUpdate{ {"t1", Delta{1, 1, 1, 1, 1, 1, 0, 0}}, {"t2", Delta{1, 1, 1, 1, 1, 1, 0, 0}}, {"t2", Delta{}}, }, }, }, { piinfot(p, n, Counts{}, Memory{}, Filedesc{1, 1}, []Thread{ {ThreadID(ID{p, 0}), "t1", Counts{2, 3, 4, 5, 6, 7, 0, 0}, "", States{}}, {ThreadID(ID{p + 2, 0}), "t2", Counts{1, 2, 3, 4, 5, 6, 0, 0}, "", States{}}, }), Update{n, Delta{}, Memory{}, Filedesc{1, 1}, tm, 2, States{}, msi{}, []ThreadUpdate{ {"t1", Delta{}}, {"t2", Delta{0, 1, 2, 3, 4, 5, 0, 0}}, }, }, }, } tr := NewTracker(newNamer(n), false, false, false) opts := cmpopts.SortSlices(lessThreadUpdate) for i, tc := range tests { _, got, err := tr.Update(procInfoIter(tc.proc)) noerr(t, err) if diff := cmp.Diff(got, []Update{tc.want}, opts); diff != "" { t.Errorf("%d: update differs: (-got +want)\n%s, %v, %v", i, diff, got[0].Threads, tc.want.Threads) } } } prometheus-process-exporter-0.4.0+ds/vendor/000077500000000000000000000000001336557546600211535ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/000077500000000000000000000000001336557546600232125ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/000077500000000000000000000000001336557546600251355ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/fakescraper/000077500000000000000000000000001336557546600274235ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/fakescraper/README.md000066400000000000000000000012631336557546600307040ustar00rootroot00000000000000# fakescraper Scrape Prometheus metrics from inside the app. Handy for testing. I use this when I'm writing Prometheus exporters and want to test them from the command line. 
Before this I would start the daemon, run curl to fetch the metrics, then kill it. Now I simply do something like: ``` func main() { var ( onceToStdout = flag.Bool("once-to-stdout", false, "Don't bind, instead just print the metrics once to stdout and exit") ) flag.Parse() if *onceToStdout { fs := fakescraper.NewFakeScraper() fmt.Print(fs.Scrape()) return } ... if err := http.ListenAndServe(*listenAddress, nil); err != nil { log.Fatalf("Unable to setup HTTP server: %v", err) } } ``` prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/fakescraper/fakescraper.go000066400000000000000000000015111336557546600322360ustar00rootroot00000000000000package fakescraper import ( "bytes" "github.com/prometheus/client_golang/prometheus" "log" "net/http" ) type ( dummyResponseWriter struct { bytes.Buffer header http.Header } FakeScraper struct { dummyResponseWriter } ) func (d *dummyResponseWriter) Header() http.Header { return d.header } func (d *dummyResponseWriter) WriteHeader(code int) { } func NewFakeScraper() *FakeScraper { return &FakeScraper{dummyResponseWriter{header: make(http.Header)}} } // Ask prometheus to handle a scrape request so we can capture and return the output. func (fs *FakeScraper) Scrape() string { httpreq, err := http.NewRequest("GET", "/metrics", nil) if err != nil { log.Fatalf("Error building request: %v", err) } prometheus.Handler().ServeHTTP(&fs.dummyResponseWriter, httpreq) s := fs.String() fs.Truncate(0) return s } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/go-seq/000077500000000000000000000000001336557546600263305ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/go-seq/LICENSE000066400000000000000000000020521336557546600273340ustar00rootroot00000000000000MIT License Copyright (c) 2018 ncabatoff Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/go-seq/seq/000077500000000000000000000000001336557546600271205ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/go-seq/seq/compare.go000066400000000000000000000077641336557546600311130ustar00rootroot00000000000000package seq import ( "fmt" "reflect" "sort" ) // Compare returns 0 if a and b are equal, -1 if a < b, or 1 if a > b. // Panics if a and b are not of the same type, or are of a type not listed here. // * Bools are compared assuming false < true. // * Strings, integer and float values are compared as Go compares them. 
// * Two nil pointers are equal; one nil pointer is treated as smaller than a non-nil pointer. // * Non-nil pointers are compared by comparing the values they point to. // * Structures are compared by comparing their fields in order. // * Slices are compared by comparing elements sequentially. If the slices are of different // length and all elements are the same up to the shorter length, the shorter slice is treated as // smaller. // * Maps can only be compared if they have string keys, in which case the ordered lists of // keys are first compared as string slices, and if they're equal then the values are compared // sequentially in key order. func Compare(a, b interface{}) int { return compareValue(reflect.ValueOf(a), reflect.ValueOf(b)) } func boolToInt(b bool) int { if b { return 1 } return 0 } func compareValue(ir1, ir2 reflect.Value) int { var zerovalue reflect.Value r1, r2 := reflect.Indirect(ir1), reflect.Indirect(ir2) if r1 == zerovalue { if r2 == zerovalue { return 0 } return -1 } if r2 == zerovalue { return 1 } switch r1.Kind() { case reflect.Bool: v1, v2 := boolToInt(r1.Bool()), boolToInt(r2.Bool()) if v1 < v2 { return -1 } if v1 > v2 { return 1 } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: v1, v2 := r1.Int(), r2.Int() if v1 < v2 { return -1 } if v1 > v2 { return 1 } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: v1, v2 := r1.Uint(), r2.Uint() if v1 < v2 { return -1 } if v1 > v2 { return 1 } case reflect.Float32, reflect.Float64: v1, v2 := r1.Float(), r2.Float() if v1 < v2 { return -1 } if v1 > v2 { return 1 } case reflect.Map: return compareMap(r1, r2) case reflect.Struct: return compareStruct(r1, r2) case reflect.Slice: if r1.Type().Elem().Kind() == reflect.Uint8 { // Not using bytes.Compare because that fails on unexported fields: // return bytes.Compare(r1.Interface().([]byte), r2.Interface().([]byte)) var s string strtype := reflect.TypeOf(s) v1, v2 := r1.Convert(strtype).String(), r2.Convert(strtype).String() if v1 < v2 { return -1 } if v1 > v2 { return 1 } } return compareSlice(r1, r2) case reflect.String: v1, v2 := r1.String(), r2.String() if v1 < v2 { return -1 } if v1 > v2 { return 1 } default: panic(fmt.Sprintf("don't know how to compare values of type %v", r1.Type())) } return 0 } func compareStruct(r1, r2 reflect.Value) int { if r1.Type() != r2.Type() { panic(fmt.Sprintf("s1 and s2 are not of the same type: %v, %v", r1.Type(), r2.Type())) } n := r1.NumField() for i := 0; i < n; i++ { c := compareValue(r1.Field(i), r2.Field(i)) if c != 0 { return c } } return 0 } func compareSlice(r1, r2 reflect.Value) int { maxlen := r1.Len() if r2.Len() < maxlen { maxlen = r2.Len() } for i := 0; i < maxlen; i++ { c := compareValue(r1.Index(i), r2.Index(i)) if c != 0 { return c } } if r1.Len() > maxlen { return 1 } if r2.Len() > maxlen { return -1 } return 0 } func sortedKeys(r1 reflect.Value) []string { keys := make([]string, 0, r1.Len()) for _, k := range r1.MapKeys() { keys = append(keys, k.String()) } sort.Strings(keys) return keys } func compareMap(r1, r2 reflect.Value) int { if r1.Type().Key().Kind() != reflect.String { panic("can only compare maps with keys of type string") } s1, s2 := sortedKeys(r1), sortedKeys(r2) c := compareSlice(reflect.ValueOf(s1), reflect.ValueOf(s2)) if c != 0 { return c } for _, k := range s1 { vk := reflect.ValueOf(k) c := compareValue(r1.MapIndex(vk), r2.MapIndex(vk)) if c != 0 { return c } } return 0 }
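// compareExample is an illustrative sketch (added for documentation; it is
// not part of the upstream go-seq API, and the local type pair is
// hypothetical). It demonstrates the ordering rules documented on Compare.
func compareExample() []int {
	type pair struct {
		Name  string
		Count int
	}
	return []int{
		// Struct fields are compared in declaration order: Name ties, so Count decides.
		Compare(pair{Name: "a", Count: 1}, pair{Name: "a", Count: 2}), // -1
		// A shorter slice that matches the longer one element-for-element is smaller.
		Compare([]int{1, 2}, []int{1, 2, 3}), // -1
		// String-keyed maps: the sorted key lists are compared first, then values in key order.
		Compare(map[string]int{"x": 1}, map[string]int{"x": 1}), // 0
	}
}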
prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/000077500000000000000000000000001336557546600264315ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/.gitignore000066400000000000000000000000131336557546600304130ustar00rootroot00000000000000/fixtures/ prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/.travis.yml000066400000000000000000000002271336557546600305430ustar00rootroot00000000000000sudo: false language: go go: - 1.9.x - 1.10.x go_import_path: github.com/prometheus/procfs script: - make style check_license vet test staticcheck prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/CONTRIBUTING.md000066400000000000000000000015461336557546600306700ustar00rootroot00000000000000# Contributing Prometheus uses GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) the maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/LICENSE000066400000000000000000000261351336557546600274450ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/MAINTAINERS.md000066400000000000000000000000441336557546600305230ustar00rootroot00000000000000* Tobias Schmidt prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/Makefile000066400000000000000000000045211336557546600300730ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Ensure GOBIN is not set during build so that staticcheck is installed to the correct path unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck pkgs = $(shell $(GO) list ./... | grep -v /vendor/) PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) ifdef DEBUG bindata_flags = -debug endif STATICCHECK_IGNORE = all: format staticcheck build test style: @echo ">> checking code style" @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' check_license: @echo ">> checking license header" @./scripts/check_license.sh test: fixtures/.unpacked sysfs/fixtures/.unpacked @echo ">> running all tests" @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples) format: @echo ">> formatting code" @$(GO) fmt $(pkgs) vet: @echo ">> vetting code" @$(GO) vet $(pkgs) staticcheck: $(STATICCHECK) @echo ">> running staticcheck" @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) %/.unpacked: %.ttar ./ttar -C $(dir $*) -x -f $*.ttar touch $@ update_fixtures: fixtures.ttar sysfs/fixtures.ttar %fixtures.ttar: %/fixtures rm -v $(dir $*)fixtures/.unpacked ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ $(FIRST_GOPATH)/bin/staticcheck: @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck .PHONY: all style check_license format test vet staticcheck # Declaring the binaries at their default locations as PHONY targets is a hack # to ensure the latest version is downloaded on every make execution. # If this is not desired, copy/symlink these binaries to a different path and # set the respective environment variables. .PHONY: $(FIRST_GOPATH)/bin/staticcheck prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/NOTICE000066400000000000000000000003551336557546600273400ustar00rootroot00000000000000procfs provides functions to retrieve system, kernel and process metrics from the pseudo-filesystem proc. Copyright 2014-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/README.md000066400000000000000000000012171336557546600277110ustar00rootroot00000000000000# procfs This procfs package provides functions to retrieve system, kernel and process metrics from the pseudo-filesystem proc. *WARNING*: This package is a work in progress. Its API may still break in backwards-incompatible ways without warnings. Use it at your own risk. [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/buddyinfo.go000066400000000000000000000045671336557546600307550ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bufio" "fmt" "io" "os" "strconv" "strings" ) // A BuddyInfo is the details parsed from /proc/buddyinfo. // The data is comprised of an array of free fragments of each size. // The sizes are 2^n*PAGE_SIZE, where n is the array index. type BuddyInfo struct { Node string Zone string Sizes []float64 } // NewBuddyInfo reads the buddyinfo statistics. func NewBuddyInfo() ([]BuddyInfo, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return nil, err } return fs.NewBuddyInfo() } // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { file, err := os.Open(fs.Path("buddyinfo")) if err != nil { return nil, err } defer file.Close() return parseBuddyInfo(file) } func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { var ( buddyInfo = []BuddyInfo{} scanner = bufio.NewScanner(r) bucketCount = -1 ) for scanner.Scan() { var err error line := scanner.Text() parts := strings.Fields(line) if len(parts) < 4 { return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") } node := strings.TrimRight(parts[1], ",") zone := strings.TrimRight(parts[3], ",") arraySize := len(parts[4:]) if bucketCount == -1 { bucketCount = arraySize } else { if bucketCount != arraySize { return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) } } sizes := make([]float64, arraySize) for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) } } buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) } return buddyInfo, scanner.Err() } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/doc.go000066400000000000000000000025041336557546600275260ustar00rootroot00000000000000// Copyright 2014 Prometheus Team // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package procfs provides functions to retrieve system, kernel and process // metrics from the pseudo-filesystem proc. 
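// // Most of the data accessors come in two forms: a package-level function (for example NewBuddyInfo) that reads from the DefaultMountPoint of /proc, and an equivalent method on an FS value created via NewFS for a procfs mounted elsewhere.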
// // Example: // // package main // // import ( // "fmt" // "log" // // "github.com/prometheus/procfs" // ) // // func main() { // p, err := procfs.Self() // if err != nil { // log.Fatalf("could not get process: %s", err) // } // // stat, err := p.NewStat() // if err != nil { // log.Fatalf("could not get process stat: %s", err) // } // // fmt.Printf("command: %s\n", stat.Comm) // fmt.Printf("cpu time: %fs\n", stat.CPUTime()) // fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) // fmt.Printf("rss: %dB\n", stat.ResidentMemory()) // } // package procfs prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/fixtures.ttar000066400000000000000000000555211336557546600312060ustar00rootroot00000000000000# Archive created by ttar -c -f fixtures.ttar fixtures/ Directory: fixtures Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/26231 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/cmdline Lines: 1 vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/comm Lines: 1 vim Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/exe SymlinkTo: /usr/bin/vim # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/26231/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/fd/10 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/io Lines: 7 rchar: 750339 wchar: 818609 syscr: 7405 syscw: 5245 read_bytes: 1024 write_bytes: 2048 cancelled_write_bytes: -1024 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/limits Lines: 17 Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds Max file size unlimited unlimited bytes Max data size unlimited unlimited bytes Max stack size 8388608 unlimited bytes Max core file size 0 unlimited bytes Max resident set unlimited unlimited bytes Max processes 62898 62898 processes Max open files 2048 4096 files Max locked memory 65536 65536 bytes Max address space 8589934592 unlimited bytes Max file locks unlimited unlimited locks Max pending signals 62898 62898 signals Max msgqueue size 819200 819200 bytes Max nice priority 0 0 Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/mountstats Lines: 19 device rootfs mounted on / with fstype rootfs device sysfs mounted on /sys with fstype sysfs device proc mounted on /proc with fstype proc device /dev/sda1 mounted on / with fstype ext4 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 
statvers=1.1 opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured sec: flavor=1,pseudoflavor=1 events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 bytes: 1207640230 0 0 0 1210214218 0 295483 0 RPC iostats version: 1.0 p/v: 100003/4 (nfs) xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 per-op statistics NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/26231/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/net/dev Lines: 4 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/26231/ns Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/ns/mnt SymlinkTo: mnt:[4026531840] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/ns/net SymlinkTo: net:[4026531993] # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26231/status Lines: 50 Name: gvim State: S (sleeping) Tgid: 26231 Ngid: 0 Pid: 26231 PPid: 6429 TracerPid: 0 Uid: 1000 1000 1000 1000 Gid: 1000 1000 1000 1000 FDSize: 64 Groups: 999 1000 1001 NStgid: 26231 NSpid: 26231 NSpgid: 26231 NSsid: 26231 VmPeak: 768040 kB VmSize: 713068 kB VmLck: 0 kB VmPin: 0 kB VmHWM: 30652 kB VmRSS: 30632 kB VmData: 377304 kB VmStk: 132 kB VmExe: 2712 kB VmLib: 69672 kB VmPTE: 632 kB VmPMD: 16 kB VmSwap: 0 kB HugetlbPages: 0 kB Threads: 4 SigQ: 1/128288 SigPnd: 0000000000000000 ShdPnd: 0000000000000000 SigBlk: 0000000000000000 SigIgn: 0000000000003001 SigCgt: 00000001ed824efe CapInh: 0000000000000000 CapPrm: 0000000000000000 CapEff: 0000000000000000 CapBnd: 0000003fffffffff CapAmb: 0000000000000000 Seccomp: 0 Speculation_Store_Bypass: thread vulnerable Cpus_allowed: ffff Cpus_allowed_list: 0-15 Mems_allowed: 00000000,00000001 Mems_allowed_list: 0 voluntary_ctxt_switches: 6340 nonvoluntary_ctxt_switches: 361 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/26232 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/cmdline Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/comm Lines: 1 ata_sff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: 
fixtures/26232/fd Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/fd/0 SymlinkTo: ../../symlinktargets/abc # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/fd/1 SymlinkTo: ../../symlinktargets/def # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/fd/2 SymlinkTo: ../../symlinktargets/ghi # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/fd/3 SymlinkTo: ../../symlinktargets/uvw # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/fd/4 SymlinkTo: ../../symlinktargets/xyz # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/limits Lines: 17 Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds Max file size unlimited unlimited bytes Max data size unlimited unlimited bytes Max stack size 8388608 unlimited bytes Max core file size 0 unlimited bytes Max resident set unlimited unlimited bytes Max processes 29436 29436 processes Max open files 1024 4096 files Max locked memory 65536 65536 bytes Max address space unlimited unlimited bytes Max file locks unlimited unlimited locks Max pending signals 29436 29436 signals Max msgqueue size 819200 819200 bytes Max nice priority 0 0 Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26232/stat Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/26233/cmdline Lines: 1 com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/584/stat Lines: 2 1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 #!/bin/cat /proc/self/stat Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/buddyinfo Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/buddyinfo/short Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - Path: fixtures/buddyinfo/short/buddyinfo Lines: 3 Node 0, zone Node 0, zone Node 0, zone Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/buddyinfo/sizemismatch Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/buddyinfo/sizemismatch/buddyinfo Lines: 3 Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/buddyinfo/valid Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/buddyinfo/valid/buddyinfo Lines: 3 Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/fs/xfs/stat Lines: 23 extent_alloc 92447 97589 92448 93751 abt 0 0 0 0 blk_map 1767055 188820 184891 92447 92448 2140766 0 bmbt 0 0 0 0 dir 185039 92447 92444 136422 trans 706 944304 0 ig 185045 58807 0 126238 0 33637 22 log 2883 113448 9 17360 739 push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 xstrat 92447 0 rw 107739 94045 attr 4 0 0 0 icluster 8677 7849 135802 vnodes 92601 0 0 0 92444 92444 92444 0 buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 qm 0 0 0 0 0 0 0 0 xpc 399724544 92823103 86219234 debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/mdstat Lines: 26 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] md127 : active raid1 sdi2[0] sdj2[1] 312319552 blocks [2/2] [UU] md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] md4 : inactive raid1 sda3[0] sdb3[1] 4883648 blocks [2/2] [UU] md6 : active raid1 sdb2[2] sda2[0] 195310144 blocks [2/1] [U_] [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec md8 : active raid1 sdb1[1] sda1[0] 195310144 blocks [2/2] [UU] [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk unused devices: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/net/dev Lines: 6 Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/net/ip_vs Lines: 21 IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP C0A80016:0CEA wlc -> C0A85216:0CEA Tunnel 100 248 2 -> C0A85318:0CEA Tunnel 100 248 2 -> C0A85315:0CEA Tunnel 100 248 1 TCP C0A80039:0CEA wlc -> C0A85416:0CEA Tunnel 0 0 0 -> C0A85215:0CEA Tunnel 100 1499 0 -> C0A83215:0CEA Tunnel 100 1498 0 TCP C0A80037:0CEA wlc -> C0A8321A:0CEA Tunnel 0 0 0 -> C0A83120:0CEA Tunnel 100 0 0 TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 FWM 10001000 wlc -> C0A8321A:0CEA Route 0 0 1 -> C0A83215:0CEA Route 0 0 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/net/ip_vs_stats Lines: 6 Total Incoming Outgoing Incoming Outgoing Conns Packets Packets Bytes Bytes 16AA370 E33656E5 0 51D8C8883AB3 0 Conns/s Pkts/s Pkts/s Bytes/s Bytes/s 4 1FB3C 0 1282A8F 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/net/rpc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/net/rpc/nfs Lines: 5 net 18628 0 18628 6 rpc 4329785 0 4338291 proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/net/rpc/nfsd Lines: 11 rc 0 6 18622 fh 0 0 0 0 0 io 157286400 0 th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 ra 32 0 0 0 0 0 0 0 0 0 0 0 net 18628 0 18628 6 rpc 18628 0 0 0 0 proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/net/xfrm_stat Lines: 28 XfrmInError 1 XfrmInBufferError 2 XfrmInHdrError 4 XfrmInNoStates 3 XfrmInStateProtoError 40 XfrmInStateModeError 100 XfrmInStateSeqError 6000 XfrmInStateExpired 4 XfrmInStateMismatch 23451 
XfrmInStateInvalid 55555 XfrmInTmplMismatch 51 XfrmInNoPols 65432 XfrmInPolBlock 100 XfrmInPolError 10000 XfrmOutError 1000000 XfrmOutBundleGenError 43321 XfrmOutBundleCheckError 555 XfrmOutNoStates 869 XfrmOutStateProtoError 4542 XfrmOutStateModeError 4 XfrmOutStateSeqError 543 XfrmOutStateExpired 565 XfrmOutPolBlock 43456 XfrmOutPolDead 7656 XfrmOutPolError 1454 XfrmFwdHdrError 6654 XfrmOutStateInvalid 28765 XfrmAcquireError 24532 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/self SymlinkTo: 26231 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/stat Lines: 16 cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 cpu1 47869 23 16474 1110787 591 0 46 0 0 0 cpu2 46504 36 15916 1112321 441 0 326 0 0 0 cpu3 47054 102 15683 1113230 533 0 60 0 0 0 cpu4 28413 25 10776 1140321 217 0 8 0 0 0 cpu5 29271 101 11586 1136270 672 0 30 0 0 0 cpu6 29152 36 10276 1139721 319 0 29 0 0 0 cpu7 29098 268 10164 1139282 555 0 31 0 0 0 intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 38014093 btime 1418183276 processes 26442 procs_running 2 procs_blocked 1 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/symlinktargets Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/symlinktargets/README Lines: 2 This directory contains some empty files that are the symlinks the files in the "fd" directory point to. 
They are otherwise ignored by the tests Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/symlinktargets/abc Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/symlinktargets/def Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/symlinktargets/ghi Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/symlinktargets/uvw Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/symlinktargets/xyz Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/fs.go000066400000000000000000000043371336557546600273770ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "fmt" "os" "path" "github.com/ncabatoff/procfs/nfs" "github.com/ncabatoff/procfs/xfs" ) // FS represents the pseudo-filesystem proc, which provides an interface to // kernel data structures. type FS string // DefaultMountPoint is the common mount point of the proc filesystem. const DefaultMountPoint = "/proc" // NewFS returns a new FS mounted under the given mountPoint. It will error // if the mount point can't be read. func NewFS(mountPoint string) (FS, error) { info, err := os.Stat(mountPoint) if err != nil { return "", fmt.Errorf("could not read %s: %s", mountPoint, err) } if !info.IsDir() { return "", fmt.Errorf("mount point %s is not a directory", mountPoint) } return FS(mountPoint), nil } // Path returns the path of the given subsystem relative to the procfs root. func (fs FS) Path(p ...string) string { return path.Join(append([]string{string(fs)}, p...)...) } // XFSStats retrieves XFS filesystem runtime statistics. func (fs FS) XFSStats() (*xfs.Stats, error) { f, err := os.Open(fs.Path("fs/xfs/stat")) if err != nil { return nil, err } defer f.Close() return xfs.ParseStats(f) } // NFSClientRPCStats retrieves NFS client RPC statistics. func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { f, err := os.Open(fs.Path("net/rpc/nfs")) if err != nil { return nil, err } defer f.Close() return nfs.ParseClientRPCStats(f) } // NFSdServerRPCStats retrieves NFS daemon RPC statistics. 
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { f, err := os.Open(fs.Path("net/rpc/nfsd")) if err != nil { return nil, err } defer f.Close() return nfs.ParseServerRPCStats(f) } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/internal/000077500000000000000000000000001336557546600302455ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/internal/util/000077500000000000000000000000001336557546600312225ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/internal/util/parse.go000066400000000000000000000023261336557546600326660ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package util import "strconv" // ParseUint32s parses a slice of strings into a slice of uint32s. func ParseUint32s(ss []string) ([]uint32, error) { us := make([]uint32, 0, len(ss)) for _, s := range ss { u, err := strconv.ParseUint(s, 10, 32) if err != nil { return nil, err } us = append(us, uint32(u)) } return us, nil } // ParseUint64s parses a slice of strings into a slice of uint64s. func ParseUint64s(ss []string) ([]uint64, error) { us := make([]uint64, 0, len(ss)) for _, s := range ss { u, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } us = append(us, u) } return us, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/ipvs.go000066400000000000000000000145131336557546600277450ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bufio" "encoding/hex" "errors" "fmt" "io" "io/ioutil" "net" "os" "strconv" "strings" ) // IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. type IPVSStats struct { // Total count of connections. Connections uint64 // Total incoming packets processed. IncomingPackets uint64 // Total outgoing packets processed. OutgoingPackets uint64 // Total incoming traffic. IncomingBytes uint64 // Total outgoing traffic. OutgoingBytes uint64 } // IPVSBackendStatus holds current metrics of one virtual / real address pair. type IPVSBackendStatus struct { // The local (virtual) IP address. LocalAddress net.IP // The remote (real) IP address. RemoteAddress net.IP // The local (virtual) port. LocalPort uint16 // The remote (real) port. RemotePort uint16 // The local firewall mark. LocalMark string // The transport protocol (TCP, UDP).
Proto string // The current number of active connections for this virtual/real address pair. ActiveConn uint64 // The current number of inactive connections for this virtual/real address pair. InactConn uint64 // The current weight of this virtual/real address pair. Weight uint64 } // NewIPVSStats reads the IPVS statistics. func NewIPVSStats() (IPVSStats, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return IPVSStats{}, err } return fs.NewIPVSStats() } // NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. func (fs FS) NewIPVSStats() (IPVSStats, error) { file, err := os.Open(fs.Path("net/ip_vs_stats")) if err != nil { return IPVSStats{}, err } defer file.Close() return parseIPVSStats(file) } // parseIPVSStats performs the actual parsing of `ip_vs_stats`. func parseIPVSStats(file io.Reader) (IPVSStats, error) { var ( statContent []byte statLines []string statFields []string stats IPVSStats ) statContent, err := ioutil.ReadAll(file) if err != nil { return IPVSStats{}, err } statLines = strings.SplitN(string(statContent), "\n", 4) if len(statLines) != 4 { return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") } statFields = strings.Fields(statLines[2]) if len(statFields) != 5 { return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") } stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) if err != nil { return IPVSStats{}, err } stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) if err != nil { return IPVSStats{}, err } stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) if err != nil { return IPVSStats{}, err } stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) if err != nil { return IPVSStats{}, err } stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) if err != nil { return IPVSStats{}, err } return stats, nil } // NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return []IPVSBackendStatus{}, err } return fs.NewIPVSBackendStatus() } // NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { file, err := os.Open(fs.Path("net/ip_vs")) if err != nil { return nil, err } defer file.Close() return parseIPVSBackendStatus(file) } func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { var ( status []IPVSBackendStatus scanner = bufio.NewScanner(file) proto string localMark string localAddress net.IP localPort uint16 err error ) for scanner.Scan() { fields := strings.Fields(scanner.Text()) if len(fields) == 0 { continue } switch { case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": continue case fields[0] == "TCP" || fields[0] == "UDP": if len(fields) < 2 { continue } proto = fields[0] localMark = "" localAddress, localPort, err = parseIPPort(fields[1]) if err != nil { return nil, err } case fields[0] == "FWM": if len(fields) < 2 { continue } proto = fields[0] localMark = fields[1] localAddress = nil localPort = 0 case fields[0] == "->": if len(fields) < 6 { continue } remoteAddress, remotePort, err := parseIPPort(fields[1]) if err != nil { return nil, err } weight, err := strconv.ParseUint(fields[3], 10, 64) if err != nil { return nil, err } activeConn, err := strconv.ParseUint(fields[4], 10, 64) if err != nil { return nil, err } inactConn, err := strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } status = append(status, IPVSBackendStatus{ LocalAddress: localAddress, LocalPort: localPort, LocalMark: localMark, RemoteAddress: remoteAddress, RemotePort: remotePort, Proto: proto, Weight: weight, ActiveConn: activeConn, InactConn: inactConn, }) } } return status, nil } func parseIPPort(s string) (net.IP, uint16, error) { var ( ip net.IP err error ) switch len(s) { case 13: ip, err = hex.DecodeString(s[0:8]) if err != nil { return nil, 0, err } case 46: ip = net.ParseIP(s[1:40]) if ip == nil { return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) } default: return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) } portString := s[len(s)-4:] if len(portString) != 4 { return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { return nil, 0, err } return ip, uint16(port), nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/mdstat.go000066400000000000000000000101551336557546600302560ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "fmt" "io/ioutil" "regexp" "strconv" "strings" ) var ( statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) ) // MDStat holds info parsed from /proc/mdstat. type MDStat struct { // Name of the device. Name string // activity-state of the device. ActivityState string // Number of active disks. DisksActive int64 // Total number of disks the device consists of. DisksTotal int64 // Number of blocks the device holds. 
BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 } // ParseMDStat parses an mdstat file and returns a slice of structs with the relevant info. func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { mdStatusFilePath := fs.Path("mdstat") content, err := ioutil.ReadFile(mdStatusFilePath) if err != nil { return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } mdStates := []MDStat{} lines := strings.Split(string(content), "\n") for i, l := range lines { if l == "" { continue } if l[0] == ' ' { continue } if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { continue } mainLine := strings.Split(l, " ") if len(mainLine) < 3 { return mdStates, fmt.Errorf("error parsing mdline: %s", l) } mdName := mainLine[0] activityState := mainLine[2] if len(lines) <= i+3 { return mdStates, fmt.Errorf( "error parsing %s: too few lines for md device %s", mdStatusFilePath, mdName, ) } active, total, size, err := evalStatusline(lines[i+1]) if err != nil { return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } // j is the line number of the syncing-line. j := i + 2 if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line j = i + 3 } // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. syncedBlocks := size if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { syncedBlocks, err = evalBuildline(lines[j]) if err != nil { return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) } } mdStates = append(mdStates, MDStat{ Name: mdName, ActivityState: activityState, DisksActive: active, DisksTotal: total, BlocksTotal: size, BlocksSynced: syncedBlocks, }) } return mdStates, nil } func evalStatusline(statusline string) (active, total, size int64, err error) { matches := statuslineRE.FindStringSubmatch(statusline) if len(matches) != 4 { return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) } size, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) } return active, total, size, nil } func evalBuildline(buildline string) (syncedBlocks int64, err error) { matches := buildlineRE.FindStringSubmatch(buildline) if len(matches) != 2 { return 0, fmt.Errorf("unexpected buildline: %s", buildline) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { return 0, fmt.Errorf("%s in buildline: %s", err, buildline) } return syncedBlocks, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/mountstats.go000066400000000000000000000434111336557546600312040ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. package procfs // While implementing parsing of /proc/[pid]/mountstats, this blog was used // heavily as a reference: // https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex // // Special thanks to Chris Siebenmann for all of his posts explaining the // various statistics available for NFS. import ( "bufio" "fmt" "io" "strconv" "strings" "time" ) // Constants shared between multiple functions. const ( deviceEntryLen = 8 fieldBytesLen = 8 fieldEventsLen = 27 statVersion10 = "1.0" statVersion11 = "1.1" fieldTransport10TCPLen = 10 fieldTransport10UDPLen = 7 fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. type Mount struct { // Name of the device. Device string // The mount point of the device. Mount string // The filesystem type used by the device. Type string // If available additional statistics related to this Mount. // Use a type assertion to determine if additional statistics are available. Stats MountStats } // A MountStats is a type which contains detailed statistics for a specific // type of Mount. type MountStats interface { mountStats() } // A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. type MountStatsNFS struct { // The version of statistics provided. StatVersion string // The age of the NFS mount. Age time.Duration // Statistics related to byte counters for various operations. Bytes NFSBytesStats // Statistics related to various NFS event occurrences. Events NFSEventsStats // Statistics broken down by filesystem operation. Operations []NFSOperationStats // Statistics about the NFS RPC transport. Transport NFSTransportStats } // mountStats implements MountStats. func (m MountStatsNFS) mountStats() {} // A NFSBytesStats contains statistics about the number of bytes read and written // by an NFS client to and from an NFS server. type NFSBytesStats struct { // Number of bytes read using the read() syscall. Read uint64 // Number of bytes written using the write() syscall. Write uint64 // Number of bytes read using the read() syscall in O_DIRECT mode. DirectRead uint64 // Number of bytes written using the write() syscall in O_DIRECT mode. DirectWrite uint64 // Number of bytes read from the NFS server, in total. ReadTotal uint64 // Number of bytes written to the NFS server, in total. WriteTotal uint64 // Number of pages read directly via mmap()'d files. ReadPages uint64 // Number of pages written directly via mmap()'d files. WritePages uint64 } // A NFSEventsStats contains statistics about NFS event occurrences. type NFSEventsStats struct { // Number of times cached inode attributes are re-validated from the server. InodeRevalidate uint64 // Number of times cached dentry nodes are re-validated from the server. DnodeRevalidate uint64 // Number of times an inode cache is cleared. DataInvalidate uint64 // Number of times cached inode attributes are invalidated. AttributeInvalidate uint64 // Number of times files or directories have been open()'d. VFSOpen uint64 // Number of times a directory lookup has occurred. VFSLookup uint64 // Number of times permissions have been checked. VFSAccess uint64 // Number of updates (and potential writes) to pages. VFSUpdatePage uint64 // Number of pages read directly via mmap()'d files. VFSReadPage uint64 // Number of times a group of pages have been read. VFSReadPages uint64 // Number of pages written directly via mmap()'d files. 
VFSWritePage uint64 // Number of times a group of pages have been written. VFSWritePages uint64 // Number of times directory entries have been read with getdents(). VFSGetdents uint64 // Number of times attributes have been set on inodes. VFSSetattr uint64 // Number of pending writes that have been forcefully flushed to the server. VFSFlush uint64 // Number of times fsync() has been called on directories and files. VFSFsync uint64 // Number of times locking has been attempted on a file. VFSLock uint64 // Number of times files have been closed and released. VFSFileRelease uint64 // Unknown. Possibly unused. CongestionWait uint64 // Number of times files have been truncated. Truncation uint64 // Number of times a file has been grown due to writes beyond its existing end. WriteExtension uint64 // Number of times a file was removed while still open by another process. SillyRename uint64 // Number of times the NFS server gave less data than expected while reading. ShortRead uint64 // Number of times the NFS server wrote less data than expected while writing. ShortWrite uint64 // Number of times the NFS server indicated EJUKEBOX; retrieving data from // offline storage. JukeboxDelay uint64 // Number of NFS v4.1+ pNFS reads. PNFSRead uint64 // Number of NFS v4.1+ pNFS writes. PNFSWrite uint64 } // A NFSOperationStats contains statistics for a single operation. type NFSOperationStats struct { // The name of the operation. Operation string // Number of requests performed for this operation. Requests uint64 // Number of times an actual RPC request has been transmitted for this operation. Transmissions uint64 // Number of times a request has had a major timeout. MajorTimeouts uint64 // Number of bytes sent for this operation, including RPC headers and payload. BytesSent uint64 // Number of bytes received for this operation, including RPC headers and payload. BytesReceived uint64 // Duration all requests spent queued for transmission before they were sent. CumulativeQueueTime time.Duration // Duration it took to get a reply back after the request was transmitted. CumulativeTotalResponseTime time.Duration // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestTime time.Duration } // A NFSTransportStats contains statistics for the NFS mount RPC requests and // responses. type NFSTransportStats struct { // The transport protocol used for the NFS mount. Protocol string // The local port used for the NFS mount. Port uint64 // Number of times the client has had to establish a connection from scratch // to the NFS server. Bind uint64 // Number of times the client has made a TCP connection to the NFS server. Connect uint64 // Duration (in jiffies, a kernel internal unit of time) the NFS mount has // spent waiting for connections to the server to be established. ConnectIdleTime uint64 // Duration since the NFS mount last saw any RPC traffic. IdleTime time.Duration // Number of RPC requests for this mount sent to the NFS server. Sends uint64 // Number of RPC responses for this mount received from the NFS server. Receives uint64 // Number of times the NFS server sent a response with a transaction ID // unknown to this client. BadTransactionIDs uint64 // A running counter, incremented on each request as the current difference // between sends and receives. CumulativeActiveRequests uint64 // A running counter, incremented on each request by the current backlog // queue size. CumulativeBacklog uint64 // Stats below only available with stat version 1.1.
// Maximum number of simultaneously active RPC requests ever used. MaximumRPCSlotsUsed uint64 // A running counter, incremented on each request as the current size of the // sending queue. CumulativeSendingQueue uint64 // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice // of Mount structures containing detailed information about each mount. // If available, statistics for each mount are parsed as well. func parseMountStats(r io.Reader) ([]*Mount, error) { const ( device = "device" statVersionPrefix = "statvers=" nfs3Type = "nfs" nfs4Type = "nfs4" ) var mounts []*Mount s := bufio.NewScanner(r) for s.Scan() { // Only look for device entries in this function ss := strings.Fields(string(s.Bytes())) if len(ss) == 0 || ss[0] != device { continue } m, err := parseMount(ss) if err != nil { return nil, err } // Does this mount also possess statistics information? if len(ss) > deviceEntryLen { // Only NFSv3 and v4 are supported for parsing statistics if m.Type != nfs3Type && m.Type != nfs4Type { return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) } statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) stats, err := parseMountStatsNFS(s, statVersion) if err != nil { return nil, err } m.Stats = stats } mounts = append(mounts, m) } return mounts, s.Err() } // parseMount parses an entry in /proc/[pid]/mountstats in the format: // device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { return nil, fmt.Errorf("invalid device entry: %v", ss) } // Check for specific words appearing at specific indices to ensure // the format is consistent with what we expect format := []struct { i int s string }{ {i: 0, s: "device"}, {i: 2, s: "mounted"}, {i: 3, s: "on"}, {i: 5, s: "with"}, {i: 6, s: "fstype"}, } for _, f := range format { if ss[f.i] != f.s { return nil, fmt.Errorf("invalid device entry: %v", ss) } } return &Mount{ Device: ss[1], Mount: ss[4], Type: ss[7], }, nil } // parseMountStatsNFS parses a MountStatsNFS by scanning additional information // related to NFS statistics. 
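// An abridged, illustrative mountstats entry of the kind handled here (all
// values hypothetical, and several lines such as opts: and caps: omitted).
// The "device ..." line is consumed by parseMount above; the indented lines
// that follow are consumed by parseMountStatsNFS below:
//
//	device fs.example.com:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1
//	        age:    13968
//	        bytes:  1207640230 0 0 0 1210214218 0 295483 0
//	        xprt:   tcp 832 0 1 0 11 6428 6427 0 12154 0 24 26 5726
//	        per-op statistics
//	                NULL: 0 0 0 0 0 0 0 0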
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { // Field indicators for parsing specific types of data const ( fieldAge = "age:" fieldBytes = "bytes:" fieldEvents = "events:" fieldPerOpStats = "per-op" fieldTransport = "xprt:" ) stats := &MountStatsNFS{ StatVersion: statVersion, } for s.Scan() { ss := strings.Fields(string(s.Bytes())) if len(ss) == 0 { break } if len(ss) < 2 { return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) } switch ss[0] { case fieldAge: // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") if err != nil { return nil, err } stats.Age = d case fieldBytes: bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { return nil, err } stats.Bytes = *bstats case fieldEvents: estats, err := parseNFSEventsStats(ss[1:]) if err != nil { return nil, err } stats.Events = *estats case fieldTransport: if len(ss) < 3 { return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) } tstats, err := parseNFSTransportStats(ss[1:], statVersion) if err != nil { return nil, err } stats.Transport = *tstats } // When encountering "per-operation statistics", we must break this // loop and parse them separately to ensure we can terminate parsing // before reaching another device entry; hence why this 'if' statement // is not just another switch case if ss[0] == fieldPerOpStats { break } } if err := s.Err(); err != nil { return nil, err } // NFS per-operation stats appear last before the next device entry perOpStats, err := parseNFSOperationStats(s) if err != nil { return nil, err } stats.Operations = perOpStats return stats, nil } // parseNFSBytesStats parses a NFSBytesStats line using an input set of // integer fields. func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { if len(ss) != fieldBytesLen { return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) } ns := make([]uint64, 0, fieldBytesLen) for _, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } ns = append(ns, n) } return &NFSBytesStats{ Read: ns[0], Write: ns[1], DirectRead: ns[2], DirectWrite: ns[3], ReadTotal: ns[4], WriteTotal: ns[5], ReadPages: ns[6], WritePages: ns[7], }, nil } // parseNFSEventsStats parses a NFSEventsStats line using an input set of // integer fields. func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { if len(ss) != fieldEventsLen { return nil, fmt.Errorf("invalid NFS events stats: %v", ss) } ns := make([]uint64, 0, fieldEventsLen) for _, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } ns = append(ns, n) } return &NFSEventsStats{ InodeRevalidate: ns[0], DnodeRevalidate: ns[1], DataInvalidate: ns[2], AttributeInvalidate: ns[3], VFSOpen: ns[4], VFSLookup: ns[5], VFSAccess: ns[6], VFSUpdatePage: ns[7], VFSReadPage: ns[8], VFSReadPages: ns[9], VFSWritePage: ns[10], VFSWritePages: ns[11], VFSGetdents: ns[12], VFSSetattr: ns[13], VFSFlush: ns[14], VFSFsync: ns[15], VFSLock: ns[16], VFSFileRelease: ns[17], CongestionWait: ns[18], Truncation: ns[19], WriteExtension: ns[20], SillyRename: ns[21], ShortRead: ns[22], ShortWrite: ns[23], JukeboxDelay: ns[24], PNFSRead: ns[25], PNFSWrite: ns[26], }, nil } // parseNFSOperationStats parses a slice of NFSOperationStats by scanning // additional information about per-operation statistics until an empty // line is reached. 
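// Illustrative per-op statistics lines (counters hypothetical): each line is
// an operation name followed by the eight counters mapped below, nine fields
// in total, and a blank line terminates the block:
//
//	per-op statistics
//	        NULL: 1 1 0 44 24 0 0 0
//	        READ: 51 51 0 6720 6896 0 83 85
//	       WRITE: 0 0 0 0 0 0 0 0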
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { const ( // Number of expected fields in each per-operation statistics set numFields = 9 ) var ops []NFSOperationStats for s.Scan() { ss := strings.Fields(string(s.Bytes())) if len(ss) == 0 { // Must break when reading a blank line after per-operation stats to // enable top-level function to parse the next device entry break } if len(ss) != numFields { return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) } // Skip string operation name for integers ns := make([]uint64, 0, numFields-1) for _, st := range ss[1:] { n, err := strconv.ParseUint(st, 10, 64) if err != nil { return nil, err } ns = append(ns, n) } ops = append(ops, NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], Transmissions: ns[1], MajorTimeouts: ns[2], BytesSent: ns[3], BytesReceived: ns[4], CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, }) } return ops, s.Err() } // parseNFSTransportStats parses a NFSTransportStats line using an input set of // integer fields matched to a specific stats version. func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { // Extract the protocol field. It is the only string value in the line protocol := ss[0] ss = ss[1:] switch statVersion { case statVersion10: var expectedLength int if protocol == "tcp" { expectedLength = fieldTransport10TCPLen } else if protocol == "udp" { expectedLength = fieldTransport10UDPLen } else { return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) } if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) } case statVersion11: var expectedLength int if protocol == "tcp" { expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen } else { return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) } if len(ss) != expectedLength { return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) } default: return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay // in a v1.0 response. Since the stat length is bigger for TCP stats, we use // the TCP length here. // // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. ns := make([]uint64, fieldTransport11TCPLen) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { return nil, err } ns[i] = n } // The fields differ depending on the transport protocol (TCP or UDP) // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt // // For the udp RPC transport there is no connection count, connect idle time, // or idle time (fields #3, #4, and #5); all other fields are the same. So // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) 
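// Worked example (illustrative): for a v1.0 UDP transport ns begins
// [port, bind, sends, receives, ...]; the splice above re-bases it as
// [port, bind, 0, 0, 0, sends, receives, ...], inserting zeros for the
// TCP-only connect count, connect idle time, and idle time fields so that
// the index assignments below are valid for both protocols.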
} return &NFSTransportStats{ Protocol: protocol, Port: ns[0], Bind: ns[1], Connect: ns[2], ConnectIdleTime: ns[3], IdleTime: time.Duration(ns[4]) * time.Second, Sends: ns[5], Receives: ns[6], BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], MaximumRPCSlotsUsed: ns[10], CumulativeSendingQueue: ns[11], CumulativePendingQueue: ns[12], }, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/net_dev.go000066400000000000000000000150751336557546600304140ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bufio" "errors" "os" "sort" "strconv" "strings" ) // NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. type NetDevLine struct { Name string `json:"name"` // The name of the interface. RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. } // NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys // are interface names. type NetDev map[string]NetDevLine // NewNetDev returns kernel/system statistics read from /proc/net/dev. func NewNetDev() (NetDev, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return nil, err } return fs.NewNetDev() } // NewNetDev returns kernel/system statistics read from /proc/net/dev. func (fs FS) NewNetDev() (NetDev, error) { return newNetDev(fs.Path("net/dev")) } // NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. func (p Proc) NewNetDev() (NetDev, error) { return newNetDev(p.path("net/dev")) } // newNetDev creates a new NetDev from the contents of the given file. 
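// An abridged, illustrative /proc/net/dev file (counters hypothetical and
// columns elided): two header lines, then one line per interface with the
// interface name, a colon, and sixteen counters:
//
//	Inter-|   Receive                            ...|  Transmit
//	 face |bytes    packets errs drop fifo frame ...|bytes    packets ...
//	    lo:  436968    4148    0    0    0     0 ...  436968    4148 ...
//	  eth0: 9845881   11337    0    0    0     0 ... 2419393    4135 ...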
func newNetDev(file string) (NetDev, error) { f, err := os.Open(file) if err != nil { return NetDev{}, err } defer f.Close() nd := NetDev{} s := bufio.NewScanner(f) for n := 0; s.Scan(); n++ { // Skip the 2 header lines. if n < 2 { continue } line, err := nd.parseLine(s.Text()) if err != nil { return nd, err } nd[line.Name] = *line } return nd, s.Err() } // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { parts := strings.SplitN(rawLine, ":", 2) if len(parts) != 2 { return nil, errors.New("invalid net/dev line, missing colon") } fields := strings.Fields(strings.TrimSpace(parts[1])) var err error line := &NetDevLine{} // Interface Name line.Name = strings.TrimSpace(parts[0]) if line.Name == "" { return nil, errors.New("invalid net/dev line, empty interface name") } // RX line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) if err != nil { return nil, err } line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) if err != nil { return nil, err } line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) if err != nil { return nil, err } line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) if err != nil { return nil, err } line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) if err != nil { return nil, err } line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) if err != nil { return nil, err } line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) if err != nil { return nil, err } line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) if err != nil { return nil, err } // TX line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) if err != nil { return nil, err } line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) if err != nil { return nil, err } line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) if err != nil { return nil, err } line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) if err != nil { return nil, err } line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) if err != nil { return nil, err } line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) if err != nil { return nil, err } line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) if err != nil { return nil, err } line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) if err != nil { return nil, err } return line, nil } // Total aggregates the values across interfaces and returns a new NetDevLine. // The Name field will be a sorted comma separated list of interface names. 
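// Illustrative usage sketch (not part of upstream procfs), assuming a host
// with interfaces eth0 and lo:
//
//	nd, err := procfs.NewNetDev()
//	if err != nil {
//		// handle error
//	}
//	t := nd.Total()
//	// t.Name == "eth0, lo" (sorted), and t.RxBytes holds the sum of
//	// RxBytes over both interfaces.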
func (nd NetDev) Total() NetDevLine { total := NetDevLine{} names := make([]string, 0, len(nd)) for _, ifc := range nd { names = append(names, ifc.Name) total.RxBytes += ifc.RxBytes total.RxPackets += ifc.RxPackets total.RxErrors += ifc.RxErrors total.RxDropped += ifc.RxDropped total.RxFIFO += ifc.RxFIFO total.RxFrame += ifc.RxFrame total.RxCompressed += ifc.RxCompressed total.RxMulticast += ifc.RxMulticast total.TxBytes += ifc.TxBytes total.TxPackets += ifc.TxPackets total.TxErrors += ifc.TxErrors total.TxDropped += ifc.TxDropped total.TxFIFO += ifc.TxFIFO total.TxCollisions += ifc.TxCollisions total.TxCarrier += ifc.TxCarrier total.TxCompressed += ifc.TxCompressed } sort.Strings(names) total.Name = strings.Join(names, ", ") return total } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/nfs/000077500000000000000000000000001336557546600272175ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/nfs/nfs.go000066400000000000000000000141731336557546600303420ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package nfs implements parsing of /proc/net/rpc/nfsd. // Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ package nfs // ReplyCache models the "rc" line. type ReplyCache struct { Hits uint64 Misses uint64 NoCache uint64 } // FileHandles models the "fh" line. type FileHandles struct { Stale uint64 TotalLookups uint64 AnonLookups uint64 DirNoCache uint64 NoDirNoCache uint64 } // InputOutput models the "io" line. type InputOutput struct { Read uint64 Write uint64 } // Threads models the "th" line. type Threads struct { Threads uint64 FullCnt uint64 } // ReadAheadCache models the "ra" line. type ReadAheadCache struct { CacheSize uint64 CacheHistogram []uint64 NotFound uint64 } // Network models the "net" line. type Network struct { NetCount uint64 UDPCount uint64 TCPCount uint64 TCPConnect uint64 } // ClientRPC models the nfs "rpc" line. type ClientRPC struct { RPCCount uint64 Retransmissions uint64 AuthRefreshes uint64 } // ServerRPC models the nfsd "rpc" line. type ServerRPC struct { RPCCount uint64 BadCnt uint64 BadFmt uint64 BadAuth uint64 BadcInt uint64 } // V2Stats models the "proc2" line. type V2Stats struct { Null uint64 GetAttr uint64 SetAttr uint64 Root uint64 Lookup uint64 ReadLink uint64 Read uint64 WrCache uint64 Write uint64 Create uint64 Remove uint64 Rename uint64 Link uint64 SymLink uint64 MkDir uint64 RmDir uint64 ReadDir uint64 FsStat uint64 } // V3Stats models the "proc3" line. type V3Stats struct { Null uint64 GetAttr uint64 SetAttr uint64 Lookup uint64 Access uint64 ReadLink uint64 Read uint64 Write uint64 Create uint64 MkDir uint64 SymLink uint64 MkNod uint64 Remove uint64 RmDir uint64 Rename uint64 Link uint64 ReadDir uint64 ReadDirPlus uint64 FsStat uint64 FsInfo uint64 PathConf uint64 Commit uint64 } // ClientV4Stats models the nfs "proc4" line.
type ClientV4Stats struct { Null uint64 Read uint64 Write uint64 Commit uint64 Open uint64 OpenConfirm uint64 OpenNoattr uint64 OpenDowngrade uint64 Close uint64 Setattr uint64 FsInfo uint64 Renew uint64 SetClientID uint64 SetClientIDConfirm uint64 Lock uint64 Lockt uint64 Locku uint64 Access uint64 Getattr uint64 Lookup uint64 LookupRoot uint64 Remove uint64 Rename uint64 Link uint64 Symlink uint64 Create uint64 Pathconf uint64 StatFs uint64 ReadLink uint64 ReadDir uint64 ServerCaps uint64 DelegReturn uint64 GetACL uint64 SetACL uint64 FsLocations uint64 ReleaseLockowner uint64 Secinfo uint64 FsidPresent uint64 ExchangeID uint64 CreateSession uint64 DestroySession uint64 Sequence uint64 GetLeaseTime uint64 ReclaimComplete uint64 LayoutGet uint64 GetDeviceInfo uint64 LayoutCommit uint64 LayoutReturn uint64 SecinfoNoName uint64 TestStateID uint64 FreeStateID uint64 GetDeviceList uint64 BindConnToSession uint64 DestroyClientID uint64 Seek uint64 Allocate uint64 DeAllocate uint64 LayoutStats uint64 Clone uint64 } // ServerV4Stats models the nfsd "proc4" line. type ServerV4Stats struct { Null uint64 Compound uint64 } // V4Ops models the "proc4ops" line: NFSv4 operations // Variable list, see: // v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) // v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) // v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) type V4Ops struct { //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? Op0Unused uint64 Op1Unused uint64 Op2Future uint64 Access uint64 Close uint64 Commit uint64 Create uint64 DelegPurge uint64 DelegReturn uint64 GetAttr uint64 GetFH uint64 Link uint64 Lock uint64 Lockt uint64 Locku uint64 Lookup uint64 LookupRoot uint64 Nverify uint64 Open uint64 OpenAttr uint64 OpenConfirm uint64 OpenDgrd uint64 PutFH uint64 PutPubFH uint64 PutRootFH uint64 Read uint64 ReadDir uint64 ReadLink uint64 Remove uint64 Rename uint64 Renew uint64 RestoreFH uint64 SaveFH uint64 SecInfo uint64 SetAttr uint64 Verify uint64 Write uint64 RelLockOwner uint64 } // ClientRPCStats models all stats from /proc/net/rpc/nfs. type ClientRPCStats struct { Network Network ClientRPC ClientRPC V2Stats V2Stats V3Stats V3Stats ClientV4Stats ClientV4Stats } // ServerRPCStats models all stats from /proc/net/rpc/nfsd. type ServerRPCStats struct { ReplyCache ReplyCache FileHandles FileHandles InputOutput InputOutput Threads Threads ReadAheadCache ReadAheadCache Network Network ServerRPC ServerRPC V2Stats V2Stats V3Stats V3Stats ServerV4Stats ServerV4Stats V4Ops V4Ops } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/nfs/parse.go000066400000000000000000000162311336557546600306630ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
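// The parse helpers in this file all receive the numeric fields of a single
// /proc/net/rpc/nfs or /proc/net/rpc/nfsd line. For the "procN" lines the
// first value announces how many counters follow, e.g. (illustrative):
//
//	proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
//
// Here v[0] == 22 and v[1:] holds the 22 per-operation counters, which is
// exactly the invariant parseV3Stats checks before mapping them to fields.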
package nfs import ( "fmt" ) func parseReplyCache(v []uint64) (ReplyCache, error) { if len(v) != 3 { return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) } return ReplyCache{ Hits: v[0], Misses: v[1], NoCache: v[2], }, nil } func parseFileHandles(v []uint64) (FileHandles, error) { if len(v) != 5 { return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) } return FileHandles{ Stale: v[0], TotalLookups: v[1], AnonLookups: v[2], DirNoCache: v[3], NoDirNoCache: v[4], }, nil } func parseInputOutput(v []uint64) (InputOutput, error) { if len(v) != 2 { return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) } return InputOutput{ Read: v[0], Write: v[1], }, nil } func parseThreads(v []uint64) (Threads, error) { if len(v) != 2 { return Threads{}, fmt.Errorf("invalid Threads line %q", v) } return Threads{ Threads: v[0], FullCnt: v[1], }, nil } func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { if len(v) != 12 { return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) } return ReadAheadCache{ CacheSize: v[0], CacheHistogram: v[1:11], NotFound: v[11], }, nil } func parseNetwork(v []uint64) (Network, error) { if len(v) != 4 { return Network{}, fmt.Errorf("invalid Network line %q", v) } return Network{ NetCount: v[0], UDPCount: v[1], TCPCount: v[2], TCPConnect: v[3], }, nil } func parseServerRPC(v []uint64) (ServerRPC, error) { if len(v) != 5 { return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) } return ServerRPC{ RPCCount: v[0], BadCnt: v[1], BadFmt: v[2], BadAuth: v[3], BadcInt: v[4], }, nil } func parseClientRPC(v []uint64) (ClientRPC, error) { if len(v) != 3 { return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) } return ClientRPC{ RPCCount: v[0], Retransmissions: v[1], AuthRefreshes: v[2], }, nil } func parseV2Stats(v []uint64) (V2Stats, error) { values := int(v[0]) if len(v[1:]) != values || values != 18 { return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) } return V2Stats{ Null: v[1], GetAttr: v[2], SetAttr: v[3], Root: v[4], Lookup: v[5], ReadLink: v[6], Read: v[7], WrCache: v[8], Write: v[9], Create: v[10], Remove: v[11], Rename: v[12], Link: v[13], SymLink: v[14], MkDir: v[15], RmDir: v[16], ReadDir: v[17], FsStat: v[18], }, nil } func parseV3Stats(v []uint64) (V3Stats, error) { values := int(v[0]) if len(v[1:]) != values || values != 22 { return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) } return V3Stats{ Null: v[1], GetAttr: v[2], SetAttr: v[3], Lookup: v[4], Access: v[5], ReadLink: v[6], Read: v[7], Write: v[8], Create: v[9], MkDir: v[10], SymLink: v[11], MkNod: v[12], Remove: v[13], RmDir: v[14], Rename: v[15], Link: v[16], ReadDir: v[17], ReadDirPlus: v[18], FsStat: v[19], FsInfo: v[20], PathConf: v[21], Commit: v[22], }, nil } func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { values := int(v[0]) if len(v[1:]) != values { return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) } // This function currently supports mapping 59 NFS v4 client stats. Older // kernels may emit fewer stats, so we must detect this and pad out the // values to match the expected slice size. 
if values < 59 { newValues := make([]uint64, 60) copy(newValues, v) v = newValues } return ClientV4Stats{ Null: v[1], Read: v[2], Write: v[3], Commit: v[4], Open: v[5], OpenConfirm: v[6], OpenNoattr: v[7], OpenDowngrade: v[8], Close: v[9], Setattr: v[10], FsInfo: v[11], Renew: v[12], SetClientID: v[13], SetClientIDConfirm: v[14], Lock: v[15], Lockt: v[16], Locku: v[17], Access: v[18], Getattr: v[19], Lookup: v[20], LookupRoot: v[21], Remove: v[22], Rename: v[23], Link: v[24], Symlink: v[25], Create: v[26], Pathconf: v[27], StatFs: v[28], ReadLink: v[29], ReadDir: v[30], ServerCaps: v[31], DelegReturn: v[32], GetACL: v[33], SetACL: v[34], FsLocations: v[35], ReleaseLockowner: v[36], Secinfo: v[37], FsidPresent: v[38], ExchangeID: v[39], CreateSession: v[40], DestroySession: v[41], Sequence: v[42], GetLeaseTime: v[43], ReclaimComplete: v[44], LayoutGet: v[45], GetDeviceInfo: v[46], LayoutCommit: v[47], LayoutReturn: v[48], SecinfoNoName: v[49], TestStateID: v[50], FreeStateID: v[51], GetDeviceList: v[52], BindConnToSession: v[53], DestroyClientID: v[54], Seek: v[55], Allocate: v[56], DeAllocate: v[57], LayoutStats: v[58], Clone: v[59], }, nil } func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { values := int(v[0]) if len(v[1:]) != values || values != 2 { return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) } return ServerV4Stats{ Null: v[1], Compound: v[2], }, nil } func parseV4Ops(v []uint64) (V4Ops, error) { values := int(v[0]) if len(v[1:]) != values || values < 39 { return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) } stats := V4Ops{ Op0Unused: v[1], Op1Unused: v[2], Op2Future: v[3], Access: v[4], Close: v[5], Commit: v[6], Create: v[7], DelegPurge: v[8], DelegReturn: v[9], GetAttr: v[10], GetFH: v[11], Link: v[12], Lock: v[13], Lockt: v[14], Locku: v[15], Lookup: v[16], LookupRoot: v[17], Nverify: v[18], Open: v[19], OpenAttr: v[20], OpenConfirm: v[21], OpenDgrd: v[22], PutFH: v[23], PutPubFH: v[24], PutRootFH: v[25], Read: v[26], ReadDir: v[27], ReadLink: v[28], Remove: v[29], Rename: v[30], Renew: v[31], RestoreFH: v[32], SaveFH: v[33], SecInfo: v[34], SetAttr: v[35], Verify: v[36], Write: v[37], RelLockOwner: v[38], } return stats, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/nfs/parse_nfs.go000066400000000000000000000035611336557546600315330ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
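// Illustrative usage sketch of the parser defined in this file (not part of
// upstream procfs):
//
//	f, err := os.Open("/proc/net/rpc/nfs")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()
//	stats, err := nfs.ParseClientRPCStats(f)
//	if err != nil {
//		// handle error
//	}
//	fmt.Printf("rpc count: %d\n", stats.ClientRPC.RPCCount)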
package nfs import ( "bufio" "fmt" "io" "strings" "github.com/ncabatoff/procfs/internal/util" ) // ParseClientRPCStats returns stats read from /proc/net/rpc/nfs func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { stats := &ClientRPCStats{} scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) // require at least if len(parts) < 2 { return nil, fmt.Errorf("invalid NFS metric line %q", line) } values, err := util.ParseUint64s(parts[1:]) if err != nil { return nil, fmt.Errorf("error parsing NFS metric line: %s", err) } switch metricLine := parts[0]; metricLine { case "net": stats.Network, err = parseNetwork(values) case "rpc": stats.ClientRPC, err = parseClientRPC(values) case "proc2": stats.V2Stats, err = parseV2Stats(values) case "proc3": stats.V3Stats, err = parseV3Stats(values) case "proc4": stats.ClientV4Stats, err = parseClientV4Stats(values) default: return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) } if err != nil { return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) } } if err := scanner.Err(); err != nil { return nil, fmt.Errorf("error scanning NFS file: %s", err) } return stats, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/nfs/parse_nfsd.go000066400000000000000000000047451336557546600317040ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package nfs import ( "bufio" "fmt" "io" "strings" "github.com/ncabatoff/procfs/internal/util" ) // ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { stats := &ServerRPCStats{} scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) // require at least if len(parts) < 2 { return nil, fmt.Errorf("invalid NFSd metric line %q", line) } label := parts[0] var values []uint64 var err error if label == "th" { if len(parts) < 3 { return nil, fmt.Errorf("invalid NFSd th metric line %q", line) } values, err = util.ParseUint64s(parts[1:3]) } else { values, err = util.ParseUint64s(parts[1:]) } if err != nil { return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) } switch metricLine := parts[0]; metricLine { case "rc": stats.ReplyCache, err = parseReplyCache(values) case "fh": stats.FileHandles, err = parseFileHandles(values) case "io": stats.InputOutput, err = parseInputOutput(values) case "th": stats.Threads, err = parseThreads(values) case "ra": stats.ReadAheadCache, err = parseReadAheadCache(values) case "net": stats.Network, err = parseNetwork(values) case "rpc": stats.ServerRPC, err = parseServerRPC(values) case "proc2": stats.V2Stats, err = parseV2Stats(values) case "proc3": stats.V3Stats, err = parseV3Stats(values) case "proc4": stats.ServerV4Stats, err = parseServerV4Stats(values) case "proc4ops": stats.V4Ops, err = parseV4Ops(values) default: return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) } if err != nil { return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) } } if err := scanner.Err(); err != nil { return nil, fmt.Errorf("error scanning NFSd file: %s", err) } return stats, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/proc.go000066400000000000000000000132741336557546600277320ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bytes" "fmt" "io/ioutil" "os" "strconv" "strings" ) // Proc provides information about a running process. type Proc struct { // The process ID. PID int fs FS } // Procs represents a list of Proc structs. type Procs []Proc func (p Procs) Len() int { return len(p) } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return Proc{}, err } return fs.Self() } // NewProc returns a process for the given pid under /proc. func NewProc(pid int) (Proc, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return Proc{}, err } return fs.NewProc(pid) } // AllProcs returns a list of all currently available processes under /proc. 
func AllProcs() (Procs, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return Procs{}, err } return fs.AllProcs() } // Self returns a process for the current process. func (fs FS) Self() (Proc, error) { p, err := os.Readlink(fs.Path("self")) if err != nil { return Proc{}, err } pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) if err != nil { return Proc{}, err } return fs.NewProc(pid) } // NewProc returns a process for the given pid. func (fs FS) NewProc(pid int) (Proc, error) { if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } return Proc{PID: pid, fs: fs}, nil } // AllProcs returns a list of all currently available processes. func (fs FS) AllProcs() (Procs, error) { d, err := os.Open(fs.Path()) if err != nil { return Procs{}, err } defer d.Close() names, err := d.Readdirnames(-1) if err != nil { return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) } p := Procs{} for _, n := range names { pid, err := strconv.ParseInt(n, 10, 64) if err != nil { continue } p = append(p, Proc{PID: int(pid), fs: fs}) } return p, nil } // CmdLine returns the command line of a process. func (p Proc) CmdLine() ([]string, error) { f, err := os.Open(p.path("cmdline")) if err != nil { return nil, err } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { return nil, err } if len(data) < 1 { return []string{}, nil } return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil } // Wchan returns the wchan (wait channel) of a process. func (p Proc) Wchan() (string, error) { f, err := os.Open(p.path("wchan")) if err != nil { return "", err } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { return "", err } wchan := string(data) if wchan == "" || wchan == "0" { return "", nil } return wchan, nil } // Comm returns the command name of a process. func (p Proc) Comm() (string, error) { f, err := os.Open(p.path("comm")) if err != nil { return "", err } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { return "", err } return strings.TrimSpace(string(data)), nil } // Executable returns the absolute path of the executable command of a process. func (p Proc) Executable() (string, error) { exe, err := os.Readlink(p.path("exe")) if os.IsNotExist(err) { return "", nil } return exe, err } // FileDescriptors returns the currently open file descriptors of a process. func (p Proc) FileDescriptors() ([]uintptr, error) { names, err := p.fileDescriptors() if err != nil { return nil, err } fds := make([]uintptr, len(names)) for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { return nil, fmt.Errorf("could not parse fd %s: %s", n, err) } fds[i] = uintptr(fd) } return fds, nil } // FileDescriptorTargets returns the targets of all file descriptors of a process. // If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. func (p Proc) FileDescriptorTargets() ([]string, error) { names, err := p.fileDescriptors() if err != nil { return nil, err } targets := make([]string, len(names)) for i, name := range names { target, err := os.Readlink(p.path("fd", name)) if err == nil { targets[i] = target } } return targets, nil } // FileDescriptorsLen returns the number of currently open file descriptors of // a process. 
func (p Proc) FileDescriptorsLen() (int, error) { fds, err := p.fileDescriptors() if err != nil { return 0, err } return len(fds), nil } // MountStats retrieves statistics and configuration for mount points in a // process's namespace. func (p Proc) MountStats() ([]*Mount, error) { f, err := os.Open(p.path("mountstats")) if err != nil { return nil, err } defer f.Close() return parseMountStats(f) } func (p Proc) fileDescriptors() ([]string, error) { d, err := os.Open(p.path("fd")) if err != nil { return nil, err } defer d.Close() names, err := d.Readdirnames(-1) if err != nil { return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) } return names, nil } func (p Proc) path(pa ...string) string { return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/proc_io.go000066400000000000000000000032341336557546600304140ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "fmt" "io/ioutil" "os" ) // ProcIO models the content of /proc//io. type ProcIO struct { // Chars read. RChar uint64 // Chars written. WChar uint64 // Read syscalls. SyscR uint64 // Write syscalls. SyscW uint64 // Bytes read. ReadBytes uint64 // Bytes written. WriteBytes uint64 // Bytes written, but taking into account truncation. See // Documentation/filesystems/proc.txt in the kernel sources for // detailed explanation. CancelledWriteBytes int64 } // NewIO creates a new ProcIO instance from a given Proc instance. func (p Proc) NewIO() (ProcIO, error) { pio := ProcIO{} f, err := os.Open(p.path("io")) if err != nil { return pio, err } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { return pio, err } ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + "cancelled_write_bytes: %d\n" _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) return pio, err } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/proc_limits.go000066400000000000000000000107051336557546600313070ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bufio" "fmt" "os" "regexp" "strconv" ) // ProcLimits represents the soft limits for each of the process's resource // limits. 
For more information see getrlimit(2): // http://man7.org/linux/man-pages/man2/getrlimit.2.html. type ProcLimits struct { // CPU time limit in seconds. CPUTime int64 // Maximum size of files that the process may create. FileSize int64 // Maximum size of the process's data segment (initialized data, // uninitialized data, and heap). DataSize int64 // Maximum size of the process stack in bytes. StackSize int64 // Maximum size of a core file. CoreFileSize int64 // Limit of the process's resident set in pages. ResidentSet int64 // Maximum number of processes that can be created for the real user ID of // the calling process. Processes int64 // Value one greater than the maximum file descriptor number that can be // opened by this process. OpenFiles int64 // Maximum number of bytes of memory that may be locked into RAM. LockedMemory int64 // Maximum size of the process's virtual memory address space in bytes. AddressSpace int64 // Limit on the combined number of flock(2) locks and fcntl(2) leases that // this process may establish. FileLocks int64 // Limit of signals that may be queued for the real user ID of the calling // process. PendingSignals int64 // Limit on the number of bytes that can be allocated for POSIX message // queues for the real user ID of the calling process. MsqqueueSize int64 // Limit of the nice priority set using setpriority(2) or nice(2). NicePriority int64 // Limit of the real-time priority set using sched_setscheduler(2) or // sched_setparam(2). RealtimePriority int64 // Limit (in microseconds) on the amount of CPU time that a process // scheduled under a real-time scheduling policy may consume without making // a blocking system call. RealtimeTimeout int64 } const ( limitsFields = 3 limitsUnlimited = "unlimited" ) var ( limitsDelimiter = regexp.MustCompile("  +") ) // NewLimits returns the current soft limits of the process.
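// An abridged, illustrative /proc/[pid]/limits file of the form parsed below
// (values vary per system):
//
//	Limit                     Soft Limit           Hard Limit           Units
//	Max cpu time              unlimited            unlimited            seconds
//	Max file size             unlimited            unlimited            bytes
//	Max open files            1024                 4096                 files
//
// Each line is split on runs of two or more spaces into at most three
// fields, so the limit name (which contains single spaces) stays intact in
// fields[0] and fields[1] carries the soft limit recorded here.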
func (p Proc) NewLimits() (ProcLimits, error) { f, err := os.Open(p.path("limits")) if err != nil { return ProcLimits{}, err } defer f.Close() var ( l = ProcLimits{} s = bufio.NewScanner(f) ) for s.Scan() { fields := limitsDelimiter.Split(s.Text(), limitsFields) if len(fields) != limitsFields { return ProcLimits{}, fmt.Errorf( "couldn't parse %s line %s", f.Name(), s.Text()) } switch fields[0] { case "Max cpu time": l.CPUTime, err = parseInt(fields[1]) case "Max file size": l.FileSize, err = parseInt(fields[1]) case "Max data size": l.DataSize, err = parseInt(fields[1]) case "Max stack size": l.StackSize, err = parseInt(fields[1]) case "Max core file size": l.CoreFileSize, err = parseInt(fields[1]) case "Max resident set": l.ResidentSet, err = parseInt(fields[1]) case "Max processes": l.Processes, err = parseInt(fields[1]) case "Max open files": l.OpenFiles, err = parseInt(fields[1]) case "Max locked memory": l.LockedMemory, err = parseInt(fields[1]) case "Max address space": l.AddressSpace, err = parseInt(fields[1]) case "Max file locks": l.FileLocks, err = parseInt(fields[1]) case "Max pending signals": l.PendingSignals, err = parseInt(fields[1]) case "Max msgqueue size": l.MsqqueueSize, err = parseInt(fields[1]) case "Max nice priority": l.NicePriority, err = parseInt(fields[1]) case "Max realtime priority": l.RealtimePriority, err = parseInt(fields[1]) case "Max realtime timeout": l.RealtimeTimeout, err = parseInt(fields[1]) } if err != nil { return ProcLimits{}, err } } return l, s.Err() } func parseInt(s string) (int64, error) { if s == limitsUnlimited { return -1, nil } i, err := strconv.ParseInt(s, 10, 64) if err != nil { return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) } return i, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/proc_ns.go000066400000000000000000000036421336557546600304300ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "fmt" "os" "strconv" "strings" ) // Namespace represents a single namespace of a process. type Namespace struct { Type string // Namespace type. Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. } // Namespaces contains all of the namespaces that the process is contained in. type Namespaces map[string]Namespace // NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the // process is a member.
func (p Proc) NewNamespaces() (Namespaces, error) { d, err := os.Open(p.path("ns")) if err != nil { return nil, err } defer d.Close() names, err := d.Readdirnames(-1) if err != nil { return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) } ns := make(Namespaces, len(names)) for _, name := range names { target, err := os.Readlink(p.path("ns", name)) if err != nil { return nil, err } fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} } return ns, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/proc_stat.go000066400000000000000000000122261336557546600307610ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bytes" "fmt" "io/ioutil" "os" ) // Originally, this USER_HZ value was dynamically retrieved via a sysconf call // which required cgo. However, that caused a lot of problems regarding // cross-compilation. Alternatives such as running a binary to determine the // value, or trying to derive it in some other way were all problematic. After // much research it was determined that USER_HZ is actually hardcoded to 100 on // all Go-supported platforms as of the time of this writing. This is why we // decided to hardcode it here as well. It is not impossible that there could // be systems with exceptions, but they should be very exotic edge cases, and // in that case, the worst outcome will be two misreported metrics. // // See also the following discussions: // // - https://github.com/prometheus/node_exporter/issues/52 // - https://github.com/prometheus/procfs/pull/2 // - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue const userHZ = 100 // ProcStat provides status information about the process, // read from /proc/[pid]/stat. type ProcStat struct { // The process ID. PID int // The filename of the executable. Comm string // The process state. State string // The PID of the parent of this process. PPID int // The process group ID of the process. PGRP int // The session ID of the process. Session int // The controlling terminal of the process. TTY int // The ID of the foreground process group of the controlling terminal of // the process. TPGID int // The kernel flags word of the process. Flags uint // The number of minor faults the process has made which have not required // loading a memory page from disk. MinFlt uint // The number of minor faults that the process's waited-for children have // made. CMinFlt uint // The number of major faults the process has made which have required // loading a memory page from disk. MajFlt uint // The number of major faults that the process's waited-for children have // made. 
CMajFlt uint // Amount of time that this process has been scheduled in user mode, // measured in clock ticks. UTime uint // Amount of time that this process has been scheduled in kernel mode, // measured in clock ticks. STime uint // Amount of time that this process's waited-for children have been // scheduled in user mode, measured in clock ticks. CUTime uint // Amount of time that this process's waited-for children have been // scheduled in kernel mode, measured in clock ticks. CSTime uint // For processes running a real-time scheduling policy, this is the negated // scheduling priority, minus one. Priority int // The nice value, a value in the range 19 (low priority) to -20 (high // priority). Nice int // Number of threads in this process. NumThreads int // The time the process started after system boot, the value is expressed // in clock ticks. Starttime uint64 // Virtual memory size in bytes. VSize int // Resident set size in pages. RSS int fs FS } // NewStat returns the current status information of the process. func (p Proc) NewStat() (ProcStat, error) { f, err := os.Open(p.path("stat")) if err != nil { return ProcStat{}, err } defer f.Close() data, err := ioutil.ReadAll(f) if err != nil { return ProcStat{}, err } var ( ignore int s = ProcStat{PID: p.PID, fs: p.fs} l = bytes.Index(data, []byte("(")) r = bytes.LastIndex(data, []byte(")")) ) if l < 0 || r < 0 { return ProcStat{}, fmt.Errorf( "unexpected format, couldn't extract comm: %s", data, ) } s.Comm = string(data[l+1 : r]) _, err = fmt.Fscan( bytes.NewBuffer(data[r+2:]), &s.State, &s.PPID, &s.PGRP, &s.Session, &s.TTY, &s.TPGID, &s.Flags, &s.MinFlt, &s.CMinFlt, &s.MajFlt, &s.CMajFlt, &s.UTime, &s.STime, &s.CUTime, &s.CSTime, &s.Priority, &s.Nice, &s.NumThreads, &ignore, &s.Starttime, &s.VSize, &s.RSS, ) if err != nil { return ProcStat{}, err } return s, nil } // VirtualMemory returns the virtual memory size in bytes. func (s ProcStat) VirtualMemory() int { return s.VSize } // ResidentMemory returns the resident memory size in bytes. func (s ProcStat) ResidentMemory() int { return s.RSS * os.Getpagesize() } // StartTime returns the unix timestamp of the process start time in seconds. func (s ProcStat) StartTime() (float64, error) { stat, err := s.fs.NewStat() if err != nil { return 0, err } return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil } // CPUTime returns the total CPU user and system time in seconds. func (s ProcStat) CPUTime() float64 { return float64(s.UTime+s.STime) / userHZ } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/proc_status.go000066400000000000000000000147451336557546600313360ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "fmt" "io" "io/ioutil" "os" "strings" ) // ProcStatus provides status information about the process, // read from /proc/[pid]/status.
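// // A minimal usage sketch (the pid is illustrative and error handling is elided; it assumes the package-level NewProc constructor from proc.go): // // p, err := procfs.NewProc(1) // if err != nil { /* handle error */ } // status, err := p.NewStatus() // if err != nil { /* handle error */ } // fmt.Printf("VmRSS: %d kB\n", status.VmRSSKB)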
type ( ProcStatus struct { TID int TracerPid int UIDReal int UIDEffective int UIDSavedSet int UIDFileSystem int GIDReal int GIDEffective int GIDSavedSet int GIDFileSystem int FDSize int VmPeakKB int VmSizeKB int VmLckKB int VmHWMKB int VmRSSKB int VmDataKB int VmStkKB int VmExeKB int VmLibKB int VmPTEKB int VmSwapKB int VoluntaryCtxtSwitches int NonvoluntaryCtxtSwitches int } procStatusFiller func(*ProcStatus, string) error procStatusBuilder struct { scanners map[string]procStatusFiller } ) func (ps *ProcStatus) refTID() []interface{} { return []interface{}{&ps.TID} } func (ps *ProcStatus) refTracerPid() []interface{} { return []interface{}{&ps.TracerPid} } func (ps *ProcStatus) refUID() []interface{} { return []interface{}{&ps.UIDReal, &ps.UIDEffective, &ps.UIDSavedSet, &ps.UIDFileSystem} } func (ps *ProcStatus) refGID() []interface{} { return []interface{}{&ps.GIDReal, &ps.GIDEffective, &ps.GIDSavedSet, &ps.GIDFileSystem} } func (ps *ProcStatus) refFDSize() []interface{} { return []interface{}{&ps.FDSize} } func (ps *ProcStatus) refVmPeakKB() []interface{} { return []interface{}{&ps.VmPeakKB} } func (ps *ProcStatus) refVmSizeKB() []interface{} { return []interface{}{&ps.VmSizeKB} } func (ps *ProcStatus) refVmLckKB() []interface{} { return []interface{}{&ps.VmLckKB} } func (ps *ProcStatus) refVmHWMKB() []interface{} { return []interface{}{&ps.VmHWMKB} } func (ps *ProcStatus) refVmRSSKB() []interface{} { return []interface{}{&ps.VmRSSKB} } func (ps *ProcStatus) refVmDataKB() []interface{} { return []interface{}{&ps.VmDataKB} } func (ps *ProcStatus) refVmStkKB() []interface{} { return []interface{}{&ps.VmStkKB} } func (ps *ProcStatus) refVmExeKB() []interface{} { return []interface{}{&ps.VmExeKB} } func (ps *ProcStatus) refVmLibKB() []interface{} { return []interface{}{&ps.VmLibKB} } func (ps *ProcStatus) refVmPTEKB() []interface{} { return []interface{}{&ps.VmPTEKB} } func (ps *ProcStatus) refVmSwapKB() []interface{} { return []interface{}{&ps.VmSwapKB} } func (ps *ProcStatus) refVoluntaryCtxtSwitches() []interface{} { return []interface{}{&ps.VoluntaryCtxtSwitches} } func (ps *ProcStatus) refNonvoluntaryCtxtSwitches() []interface{} { return []interface{}{&ps.NonvoluntaryCtxtSwitches} } func newFiller(format string, ref func(ps *ProcStatus) []interface{}) procStatusFiller { return procStatusFiller(func(ps *ProcStatus, s string) error { _, err := fmt.Sscanf(s, format, ref(ps)...) 
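// Sscanf parses the value portion of the status line into the fields selected by ref; any scan error is returned to the caller, which may choose to tolerate it (see readStatus below).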
return err }) } func newProcStatusBuilder() *procStatusBuilder { return &procStatusBuilder{ scanners: map[string]procStatusFiller{ "Pid": newFiller("%d", (*ProcStatus).refTID), "TracerPid": newFiller("%d", (*ProcStatus).refTracerPid), "Uid": newFiller("%d %d %d %d", (*ProcStatus).refUID), "Gid": newFiller("%d %d %d %d", (*ProcStatus).refGID), "FDSize": newFiller("%d", (*ProcStatus).refFDSize), "VmPeak": newFiller("%d kB", (*ProcStatus).refVmPeakKB), "VmSize": newFiller("%d kB", (*ProcStatus).refVmSizeKB), "VmLck": newFiller("%d kB", (*ProcStatus).refVmLckKB), "VmHWM": newFiller("%d kB", (*ProcStatus).refVmHWMKB), "VmRSS": newFiller("%d kB", (*ProcStatus).refVmRSSKB), "VmData": newFiller("%d kB", (*ProcStatus).refVmDataKB), "VmStk": newFiller("%d kB", (*ProcStatus).refVmStkKB), "VmExe": newFiller("%d kB", (*ProcStatus).refVmExeKB), "VmLib": newFiller("%d kB", (*ProcStatus).refVmLibKB), "VmPTE": newFiller("%d kB", (*ProcStatus).refVmPTEKB), "VmSwap": newFiller("%d kB", (*ProcStatus).refVmSwapKB), "voluntary_ctxt_switches": newFiller("%d", (*ProcStatus).refVoluntaryCtxtSwitches), "nonvoluntary_ctxt_switches": newFiller("%d", (*ProcStatus).refNonvoluntaryCtxtSwitches), }, } } func (b *procStatusBuilder) readStatus(r io.Reader) (ProcStatus, error) { contents, err := ioutil.ReadAll(r) if err != nil { return ProcStatus{}, err } s := string(contents) var ps ProcStatus for lineno := 0; s != ""; lineno++ { crpos := strings.IndexByte(s, '\n') if crpos == -1 { return ProcStatus{}, fmt.Errorf("line %d from status file without newline: %s", lineno, s) } line := strings.TrimSpace(s[:crpos]) s = s[crpos+1:] if line == "" { if s == "" { break } continue } pos := strings.IndexByte(line, ':') if pos == -1 { return ProcStatus{}, fmt.Errorf("line %d from status file without ':': %s", lineno, line) } field := line[:pos] scanner, ok := b.scanners[field] if !ok { continue } err = scanner(&ps, line[pos+1:]) // TODO: flag parse errors with some kind of "warning" error. if err != nil { // Be lenient about parse errors, because otherwise we miss out on some interesting // procs. For example, my Ubuntu kernel (4.4.0-130-generic #156-Ubuntu) is showing // a Chromium status file with "VmLib: 18446744073709442944 kB". continue } } return ps, nil } var psb = newProcStatusBuilder() // NewStatus returns the current status information of the process. func (p Proc) NewStatus() (ProcStatus, error) { f, err := os.Open(p.path("status")) if err != nil { return ProcStatus{}, err } defer f.Close() return psb.readStatus(f) } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/stat.go000066400000000000000000000152451336557546600277420ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bufio" "fmt" "io" "os" "strconv" "strings" ) // CPUStat shows how much time the cpu has spent in various stages.
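// // All CPUStat fields are expressed in seconds: parseCPUStat below divides the raw USER_HZ tick counts read from /proc/stat before returning a CPUStat.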
type CPUStat struct { User float64 Nice float64 System float64 Idle float64 Iowait float64 IRQ float64 SoftIRQ float64 Steal float64 Guest float64 GuestNice float64 } // SoftIRQStat represents the softirq statistics as exported in the procfs stat file. // A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html // It is possible to get per-cpu stats by reading /proc/softirqs type SoftIRQStat struct { Hi uint64 Timer uint64 NetTx uint64 NetRx uint64 Block uint64 BlockIoPoll uint64 Tasklet uint64 Sched uint64 Hrtimer uint64 Rcu uint64 } // Stat represents kernel/system statistics. type Stat struct { // Boot time in seconds since the Epoch. BootTime uint64 // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. CPU []CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. IRQ []uint64 // Number of times a context switch happened. ContextSwitches uint64 // Number of times a process was created. ProcessCreated uint64 // Number of processes currently running. ProcessesRunning uint64 // Number of processes currently blocked (waiting for IO). ProcessesBlocked uint64 // Number of times a softirq was scheduled. SoftIRQTotal uint64 // Detailed softirq statistics. SoftIRQ SoftIRQStat } // NewStat returns kernel/system statistics read from /proc/stat. func NewStat() (Stat, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return Stat{}, err } return fs.NewStat() } // parseCPUStat parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). func parseCPUStat(line string) (CPUStat, int64, error) { cpuStat := CPUStat{} var cpu string count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", &cpu, &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) } if count == 0 { return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) } cpuStat.User /= userHZ cpuStat.Nice /= userHZ cpuStat.System /= userHZ cpuStat.Idle /= userHZ cpuStat.Iowait /= userHZ cpuStat.IRQ /= userHZ cpuStat.SoftIRQ /= userHZ cpuStat.Steal /= userHZ cpuStat.Guest /= userHZ cpuStat.GuestNice /= userHZ if cpu == "cpu" { return cpuStat, -1, nil } cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) } return cpuStat, cpuID, nil } // parseSoftIRQStat parses a softirq line. func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { softIRQStat := SoftIRQStat{} var total uint64 var prefix string _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", &prefix, &total, &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, &softIRQStat.Block, &softIRQStat.BlockIoPoll, &softIRQStat.Tasklet, &softIRQStat.Sched, &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) } return softIRQStat, total, nil } // NewStat returns information about the current kernel/system statistics.
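// // A minimal usage sketch (error handling elided; NewFS and DefaultMountPoint are defined elsewhere in this package): // // fs, err := procfs.NewFS(procfs.DefaultMountPoint) // if err != nil { /* handle error */ } // stat, err := fs.NewStat() // if err != nil { /* handle error */ } // fmt.Println("boot time (unix seconds):", stat.BootTime)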
func (fs FS) NewStat() (Stat, error) { // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt f, err := os.Open(fs.Path("stat")) if err != nil { return Stat{}, err } defer f.Close() stat := Stat{} scanner := bufio.NewScanner(f) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(line) // require at least <name> <value> if len(parts) < 2 { continue } switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) if err != nil { return Stat{}, err } stat.SoftIRQTotal = total stat.SoftIRQ = softIRQStats case strings.HasPrefix(parts[0], "cpu"): cpuStat, cpuID, err := parseCPUStat(line) if err != nil { return Stat{}, err } if cpuID == -1 { stat.CPUTotal = cpuStat } else { for int64(len(stat.CPU)) <= cpuID { stat.CPU = append(stat.CPU, CPUStat{}) } stat.CPU[cpuID] = cpuStat } } } if err := scanner.Err(); err != nil { return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) } return stat, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/ttar000077500000000000000000000254571336557546600273440ustar00rootroot00000000000000#!/usr/bin/env bash # Purpose: plain text tar format # Limitations: - only suitable for text files, directories, and symlinks # - stores only filename, content, and mode # - not designed for untrusted input # # Note: must work with bash version 3.2 (macOS) # Copyright 2017 Roger Luethi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
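# Example invocations (paths are illustrative; see also the usage function below): # ./ttar -C sysfs -c -f sysfs/fixtures.ttar fixtures/ (create archive) # ./ttar -t -f sysfs/fixtures.ttar (list contents) # ./ttar -C sysfs -x -f sysfs/fixtures.ttar (extract)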
set -o errexit -o nounset # Sanitize environment (for instance, standard sorting of glob matches) export LC_ALL=C path="" CMD="" ARG_STRING="$*" #------------------------------------------------------------------------------ # Not all sed implementations can work on null bytes. In order to make ttar # work out of the box on macOS, use Python as a stream editor. USE_PYTHON=0 PYTHON_CREATE_FILTER=$(cat << 'PCF' #!/usr/bin/env python import re import sys for line in sys.stdin: line = re.sub(r'EOF', r'\EOF', line) line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) line = re.sub('\x00', r'NULLBYTE', line) sys.stdout.write(line) PCF ) PYTHON_EXTRACT_FILTER=$(cat << 'PEF' #!/usr/bin/env python import re import sys for line in sys.stdin: line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line) line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line) line = re.sub(r'(?<!\\)EOF', '', line) line = re.sub(r'\\EOF', 'EOF', line) sys.stdout.write(line) PEF ) #------------------------------------------------------------------------------ function test_environment { if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then echo "WARNING sed unable to handle null bytes, using Python (slow)." if ! which python >/dev/null; then echo "ERROR Python not found. Aborting." exit 2 fi USE_PYTHON=1 fi } #------------------------------------------------------------------------------ function usage { bname=$(basename "$0") cat << USAGE Usage: $bname [-C <dir>] -c -f <archive> <files> (create archive) $bname -t -f <archive> (list archive contents) $bname [-C <dir>] -x -f <archive> (extract archive) Options: -C <dir> (change directory) -v (verbose) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ USAGE exit "$1" } function vecho { if [ "${VERBOSE:-}" == "yes" ]; then echo >&7 "$@" fi } function set_cmd { if [ -n "$CMD" ]; then echo "ERROR: more than one command given" echo usage 2 fi CMD=$1 } unset VERBOSE while getopts :cf:htxvC: opt; do case $opt in c) set_cmd "create" ;; f) ARCHIVE=$OPTARG ;; h) usage 0 ;; t) set_cmd "list" ;; x) set_cmd "extract" ;; v) VERBOSE=yes exec 7>&1 ;; C) CDIR=$OPTARG ;; *) echo >&2 "ERROR: invalid option -$OPTARG" echo usage 1 ;; esac done # Remove processed options from arguments shift $(( OPTIND - 1 )); if [ "${CMD:-}" == "" ]; then echo >&2 "ERROR: no command given" echo usage 1 elif [ "${ARCHIVE:-}" == "" ]; then echo >&2 "ERROR: no archive name given" echo usage 1 fi function list { local path="" local size=0 local line_no=0 local ttar_file=$1 if [ -n "${2:-}" ]; then echo >&2 "ERROR: too many arguments." echo usage 1 fi if [ ! -e "$ttar_file" ]; then echo >&2 "ERROR: file not found ($ttar_file)" echo usage 1 fi while read -r line; do line_no=$(( line_no + 1 )) if [ $size -gt 0 ]; then size=$(( size - 1 )) continue fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} elif [[ $line =~ ^Lines:\ (.*)$ ]]; then size=${BASH_REMATCH[1]} echo "$path" elif [[ $line =~ ^Directory:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} echo "$path/" elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then echo "$path -> ${BASH_REMATCH[1]}" fi done < "$ttar_file" } function extract { local path="" local size=0 local line_no=0 local ttar_file=$1 if [ -n "${2:-}" ]; then echo >&2 "ERROR: too many arguments." echo usage 1 fi if [ !
-e "$ttar_file" ]; then echo >&2 "ERROR: file not found ($ttar_file)" echo usage 1 fi while IFS= read -r line; do line_no=$(( line_no + 1 )) local eof_without_newline if [ "$size" -gt 0 ]; then if [[ "$line" =~ [^\\]EOF ]]; then # An EOF not preceeded by a backslash indicates that the line # does not end with a newline eof_without_newline=1 else eof_without_newline=0 fi # Replace NULLBYTE with null byte if at beginning of line # Replace NULLBYTE with null byte unless preceeded by backslash # Remove one backslash in front of NULLBYTE (if any) # Remove EOF unless preceeded by backslash # Remove one backslash in front of EOF if [ $USE_PYTHON -eq 1 ]; then echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" else # The repeated pattern makes up for sed's lack of negative # lookbehind assertions (for consecutive null bytes). echo -n "$line" | \ sed -e 's/^NULLBYTE/\x0/g; s/\([^\\]\)NULLBYTE/\1\x0/g; s/\([^\\]\)NULLBYTE/\1\x0/g; s/\\NULLBYTE/NULLBYTE/g; s/\([^\\]\)EOF/\1/g; s/\\EOF/EOF/g; ' >> "$path" fi if [[ "$eof_without_newline" -eq 0 ]]; then echo >> "$path" fi size=$(( size - 1 )) continue fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} if [ -e "$path" ] || [ -L "$path" ]; then rm "$path" fi elif [[ $line =~ ^Lines:\ (.*)$ ]]; then size=${BASH_REMATCH[1]} # Create file even if it is zero-length. touch "$path" vecho " $path" elif [[ $line =~ ^Mode:\ (.*)$ ]]; then mode=${BASH_REMATCH[1]} chmod "$mode" "$path" vecho "$mode" elif [[ $line =~ ^Directory:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} mkdir -p "$path" vecho " $path/" elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then ln -s "${BASH_REMATCH[1]}" "$path" vecho " $path -> ${BASH_REMATCH[1]}" elif [[ $line =~ ^# ]]; then # Ignore comments between files continue else echo >&2 "ERROR: Unknown keyword on line $line_no: $line" exit 1 fi done < "$ttar_file" } function div { echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ "- - - - - -" } function get_mode { local mfile=$1 if [ -z "${STAT_OPTION:-}" ]; then if stat -c '%a' "$mfile" >/dev/null 2>&1; then # GNU stat STAT_OPTION='-c' STAT_FORMAT='%a' else # BSD stat STAT_OPTION='-f' # Octal output, user/group/other (omit file type, sticky bit) STAT_FORMAT='%OLp' fi fi stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" } function _create { shopt -s nullglob local mode local eof_without_newline while (( "$#" )); do file=$1 if [ -L "$file" ]; then echo "Path: $file" symlinkTo=$(readlink "$file") echo "SymlinkTo: $symlinkTo" vecho " $file -> $symlinkTo" div elif [ -d "$file" ]; then # Strip trailing slash (if there is one) file=${file%/} echo "Directory: $file" mode=$(get_mode "$file") echo "Mode: $mode" vecho "$mode $file/" div # Find all files and dirs, including hidden/dot files for x in "$file/"{*,.[^.]*}; do _create "$x" done elif [ -f "$file" ]; then echo "Path: $file" lines=$(wc -l "$file"|awk '{print $1}') eof_without_newline=0 if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then eof_without_newline=1 lines=$((lines+1)) fi echo "Lines: $lines" # Add backslash in front of EOF # Add backslash in front of NULLBYTE # Replace null byte with NULLBYTE if [ $USE_PYTHON -eq 1 ]; then < "$file" python -c "$PYTHON_CREATE_FILTER" else < "$file" \ sed 's/EOF/\\EOF/g; s/NULLBYTE/\\NULLBYTE/g; s/\x0/NULLBYTE/g; ' fi if [[ "$eof_without_newline" -eq 1 ]]; then # Finish line with EOF to indicate that the original line did # not end with a linefeed echo "EOF" fi mode=$(get_mode "$file") echo "Mode: $mode" vecho 
"$mode $file" div else echo >&2 "ERROR: file not found ($file in $(pwd))" exit 2 fi shift done } function create { ttar_file=$1 shift if [ -z "${1:-}" ]; then echo >&2 "ERROR: missing arguments." echo usage 1 fi if [ -e "$ttar_file" ]; then rm "$ttar_file" fi exec > "$ttar_file" echo "# Archive created by ttar $ARG_STRING" _create "$@" } test_environment if [ -n "${CDIR:-}" ]; then if [[ "$ARCHIVE" != /* ]]; then # Relative path: preserve the archive's location before changing # directory ARCHIVE="$(pwd)/$ARCHIVE" fi cd "$CDIR" fi "$CMD" "$ARCHIVE" "$@" prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/xfrm.go000066400000000000000000000114051336557546600277350ustar00rootroot00000000000000// Copyright 2017 Prometheus Team // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package procfs import ( "bufio" "fmt" "os" "strconv" "strings" ) // XfrmStat models the contents of /proc/net/xfrm_stat. type XfrmStat struct { // All errors which are not matched by other XfrmInError int // No buffer is left XfrmInBufferError int // Header Error XfrmInHdrError int // No state found // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong XfrmInNoStates int // Transformation protocol specific error // e.g. SA Key is wrong XfrmInStateProtoError int // Transformation mode specific error XfrmInStateModeError int // Sequence error // e.g. sequence number is out of window XfrmInStateSeqError int // State is expired XfrmInStateExpired int // State has mismatch option // e.g. UDP encapsulation type is mismatched XfrmInStateMismatch int // State is invalid XfrmInStateInvalid int // No matching template for states // e.g. Inbound SAs are correct but SP rule is wrong XfrmInTmplMismatch int // No policy is found for states // e.g. Inbound SAs are correct but no SP is found XfrmInNoPols int // Policy discards XfrmInPolBlock int // Policy error XfrmInPolError int // All errors which are not matched by others XfrmOutError int // Bundle generation error XfrmOutBundleGenError int // Bundle check error XfrmOutBundleCheckError int // No state was found XfrmOutNoStates int // Transformation protocol specific error XfrmOutStateProtoError int // Transportation mode specific error XfrmOutStateModeError int // Sequence error // i.e sequence number overflow XfrmOutStateSeqError int // State is expired XfrmOutStateExpired int // Policy discads XfrmOutPolBlock int // Policy is dead XfrmOutPolDead int // Policy Error XfrmOutPolError int XfrmFwdHdrError int XfrmOutStateInvalid int XfrmAcquireError int } // NewXfrmStat reads the xfrm_stat statistics. func NewXfrmStat() (XfrmStat, error) { fs, err := NewFS(DefaultMountPoint) if err != nil { return XfrmStat{}, err } return fs.NewXfrmStat() } // NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
func (fs FS) NewXfrmStat() (XfrmStat, error) { file, err := os.Open(fs.Path("net/xfrm_stat")) if err != nil { return XfrmStat{}, err } defer file.Close() var ( x = XfrmStat{} s = bufio.NewScanner(file) ) for s.Scan() { fields := strings.Fields(s.Text()) if len(fields) != 2 { return XfrmStat{}, fmt.Errorf( "couldn't parse %s line %s", file.Name(), s.Text()) } name := fields[0] value, err := strconv.Atoi(fields[1]) if err != nil { return XfrmStat{}, err } switch name { case "XfrmInError": x.XfrmInError = value case "XfrmInBufferError": x.XfrmInBufferError = value case "XfrmInHdrError": x.XfrmInHdrError = value case "XfrmInNoStates": x.XfrmInNoStates = value case "XfrmInStateProtoError": x.XfrmInStateProtoError = value case "XfrmInStateModeError": x.XfrmInStateModeError = value case "XfrmInStateSeqError": x.XfrmInStateSeqError = value case "XfrmInStateExpired": x.XfrmInStateExpired = value case "XfrmInStateInvalid": x.XfrmInStateInvalid = value case "XfrmInTmplMismatch": x.XfrmInTmplMismatch = value case "XfrmInNoPols": x.XfrmInNoPols = value case "XfrmInPolBlock": x.XfrmInPolBlock = value case "XfrmInPolError": x.XfrmInPolError = value case "XfrmOutError": x.XfrmOutError = value case "XfrmInStateMismatch": x.XfrmInStateMismatch = value case "XfrmOutBundleGenError": x.XfrmOutBundleGenError = value case "XfrmOutBundleCheckError": x.XfrmOutBundleCheckError = value case "XfrmOutNoStates": x.XfrmOutNoStates = value case "XfrmOutStateProtoError": x.XfrmOutStateProtoError = value case "XfrmOutStateModeError": x.XfrmOutStateModeError = value case "XfrmOutStateSeqError": x.XfrmOutStateSeqError = value case "XfrmOutStateExpired": x.XfrmOutStateExpired = value case "XfrmOutPolBlock": x.XfrmOutPolBlock = value case "XfrmOutPolDead": x.XfrmOutPolDead = value case "XfrmOutPolError": x.XfrmOutPolError = value case "XfrmFwdHdrError": x.XfrmFwdHdrError = value case "XfrmOutStateInvalid": x.XfrmOutStateInvalid = value case "XfrmAcquireError": x.XfrmAcquireError = value } } return x, s.Err() } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/xfs/000077500000000000000000000000001336557546600272315ustar00rootroot00000000000000prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/xfs/parse.go000066400000000000000000000216141336557546600306760ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package xfs import ( "bufio" "fmt" "io" "strings" "github.com/ncabatoff/procfs/internal/util" ) // ParseStats parses a Stats from an input io.Reader, using the format // found in /proc/fs/xfs/stat. func ParseStats(r io.Reader) (*Stats, error) { const ( // Fields parsed into stats structures. fieldExtentAlloc = "extent_alloc" fieldAbt = "abt" fieldBlkMap = "blk_map" fieldBmbt = "bmbt" fieldDir = "dir" fieldTrans = "trans" fieldIg = "ig" fieldLog = "log" fieldRw = "rw" fieldAttr = "attr" fieldIcluster = "icluster" fieldVnodes = "vnodes" fieldBuf = "buf" fieldXpc = "xpc" // Unimplemented at this time due to lack of documentation. 
fieldPushAil = "push_ail" fieldXstrat = "xstrat" fieldAbtb2 = "abtb2" fieldAbtc2 = "abtc2" fieldBmbt2 = "bmbt2" fieldIbt2 = "ibt2" fieldFibt2 = "fibt2" fieldQm = "qm" fieldDebug = "debug" ) var xfss Stats s := bufio.NewScanner(r) for s.Scan() { // Expect at least a string label and a single integer value, ex: // - abt 0 // - rw 1 2 ss := strings.Fields(string(s.Bytes())) if len(ss) < 2 { continue } label := ss[0] // Extended precision counters are uint64 values. if label == fieldXpc { us, err := util.ParseUint64s(ss[1:]) if err != nil { return nil, err } xfss.ExtendedPrecision, err = extendedPrecisionStats(us) if err != nil { return nil, err } continue } // All other counters are uint32 values. us, err := util.ParseUint32s(ss[1:]) if err != nil { return nil, err } switch label { case fieldExtentAlloc: xfss.ExtentAllocation, err = extentAllocationStats(us) case fieldAbt: xfss.AllocationBTree, err = btreeStats(us) case fieldBlkMap: xfss.BlockMapping, err = blockMappingStats(us) case fieldBmbt: xfss.BlockMapBTree, err = btreeStats(us) case fieldDir: xfss.DirectoryOperation, err = directoryOperationStats(us) case fieldTrans: xfss.Transaction, err = transactionStats(us) case fieldIg: xfss.InodeOperation, err = inodeOperationStats(us) case fieldLog: xfss.LogOperation, err = logOperationStats(us) case fieldRw: xfss.ReadWrite, err = readWriteStats(us) case fieldAttr: xfss.AttributeOperation, err = attributeOperationStats(us) case fieldIcluster: xfss.InodeClustering, err = inodeClusteringStats(us) case fieldVnodes: xfss.Vnode, err = vnodeStats(us) case fieldBuf: xfss.Buffer, err = bufferStats(us) } if err != nil { return nil, err } } return &xfss, s.Err() } // extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { if l := len(us); l != 4 { return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) } return ExtentAllocationStats{ ExtentsAllocated: us[0], BlocksAllocated: us[1], ExtentsFreed: us[2], BlocksFreed: us[3], }, nil } // btreeStats builds a BTreeStats from a slice of uint32s. func btreeStats(us []uint32) (BTreeStats, error) { if l := len(us); l != 4 { return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) } return BTreeStats{ Lookups: us[0], Compares: us[1], RecordsInserted: us[2], RecordsDeleted: us[3], }, nil } // BlockMappingStat builds a BlockMappingStats from a slice of uint32s. func blockMappingStats(us []uint32) (BlockMappingStats, error) { if l := len(us); l != 7 { return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) } return BlockMappingStats{ Reads: us[0], Writes: us[1], Unmaps: us[2], ExtentListInsertions: us[3], ExtentListDeletions: us[4], ExtentListLookups: us[5], ExtentListCompares: us[6], }, nil } // DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { if l := len(us); l != 4 { return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) } return DirectoryOperationStats{ Lookups: us[0], Creates: us[1], Removes: us[2], Getdents: us[3], }, nil } // TransactionStats builds a TransactionStats from a slice of uint32s. 
func transactionStats(us []uint32) (TransactionStats, error) { if l := len(us); l != 3 { return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) } return TransactionStats{ Sync: us[0], Async: us[1], Empty: us[2], }, nil } // InodeOperationStats builds an InodeOperationStats from a slice of uint32s. func inodeOperationStats(us []uint32) (InodeOperationStats, error) { if l := len(us); l != 7 { return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) } return InodeOperationStats{ Attempts: us[0], Found: us[1], Recycle: us[2], Missed: us[3], Duplicate: us[4], Reclaims: us[5], AttributeChange: us[6], }, nil } // LogOperationStats builds a LogOperationStats from a slice of uint32s. func logOperationStats(us []uint32) (LogOperationStats, error) { if l := len(us); l != 5 { return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) } return LogOperationStats{ Writes: us[0], Blocks: us[1], NoInternalBuffers: us[2], Force: us[3], ForceSleep: us[4], }, nil } // ReadWriteStats builds a ReadWriteStats from a slice of uint32s. func readWriteStats(us []uint32) (ReadWriteStats, error) { if l := len(us); l != 2 { return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) } return ReadWriteStats{ Read: us[0], Write: us[1], }, nil } // AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { if l := len(us); l != 4 { return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) } return AttributeOperationStats{ Get: us[0], Set: us[1], Remove: us[2], List: us[3], }, nil } // InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { if l := len(us); l != 3 { return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) } return InodeClusteringStats{ Iflush: us[0], Flush: us[1], FlushInode: us[2], }, nil } // VnodeStats builds a VnodeStats from a slice of uint32s. func vnodeStats(us []uint32) (VnodeStats, error) { // The attribute "Free" appears to not be available on older XFS // stats versions. Therefore, 7 or 8 elements may appear in // this slice. l := len(us) if l != 7 && l != 8 { return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) } s := VnodeStats{ Active: us[0], Allocate: us[1], Get: us[2], Hold: us[3], Release: us[4], Reclaim: us[5], Remove: us[6], } // Skip adding free, unless it is present. The zero value will // be used in place of an actual count. if l == 7 { return s, nil } s.Free = us[7] return s, nil } // BufferStats builds a BufferStats from a slice of uint32s. func bufferStats(us []uint32) (BufferStats, error) { if l := len(us); l != 9 { return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) } return BufferStats{ Get: us[0], Create: us[1], GetLocked: us[2], GetLockedWaited: us[3], BusyLocked: us[4], MissLocked: us[5], PageRetries: us[6], PageFound: us[7], GetRead: us[8], }, nil } // ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { if l := len(us); l != 3 { return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) } return ExtendedPrecisionStats{ FlushBytes: us[0], WriteBytes: us[1], ReadBytes: us[2], }, nil } prometheus-process-exporter-0.4.0+ds/vendor/github.com/ncabatoff/procfs/xfs/xfs.go000066400000000000000000000111341336557546600303600ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package xfs provides access to statistics exposed by the XFS filesystem. package xfs // Stats contains XFS filesystem runtime statistics, parsed from // /proc/fs/xfs/stat. // // The names and meanings of each statistic were taken from // http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux // kernel source. Most counters are uint32s (same data types used in // xfs_stats.h), but some of the "extended precision stats" are uint64s. type Stats struct { // The name of the filesystem used to source these statistics. // If empty, this indicates aggregated statistics for all XFS // filesystems on the host. Name string ExtentAllocation ExtentAllocationStats AllocationBTree BTreeStats BlockMapping BlockMappingStats BlockMapBTree BTreeStats DirectoryOperation DirectoryOperationStats Transaction TransactionStats InodeOperation InodeOperationStats LogOperation LogOperationStats ReadWrite ReadWriteStats AttributeOperation AttributeOperationStats InodeClustering InodeClusteringStats Vnode VnodeStats Buffer BufferStats ExtendedPrecision ExtendedPrecisionStats } // ExtentAllocationStats contains statistics regarding XFS extent allocations. type ExtentAllocationStats struct { ExtentsAllocated uint32 BlocksAllocated uint32 ExtentsFreed uint32 BlocksFreed uint32 } // BTreeStats contains statistics regarding an XFS internal B-tree. type BTreeStats struct { Lookups uint32 Compares uint32 RecordsInserted uint32 RecordsDeleted uint32 } // BlockMappingStats contains statistics regarding XFS block maps. type BlockMappingStats struct { Reads uint32 Writes uint32 Unmaps uint32 ExtentListInsertions uint32 ExtentListDeletions uint32 ExtentListLookups uint32 ExtentListCompares uint32 } // DirectoryOperationStats contains statistics regarding XFS directory entries. type DirectoryOperationStats struct { Lookups uint32 Creates uint32 Removes uint32 Getdents uint32 } // TransactionStats contains statistics regarding XFS metadata transactions. type TransactionStats struct { Sync uint32 Async uint32 Empty uint32 } // InodeOperationStats contains statistics regarding XFS inode operations. type InodeOperationStats struct { Attempts uint32 Found uint32 Recycle uint32 Missed uint32 Duplicate uint32 Reclaims uint32 AttributeChange uint32 } // LogOperationStats contains statistics regarding the XFS log buffer. 
type LogOperationStats struct { Writes uint32 Blocks uint32 NoInternalBuffers uint32 Force uint32 ForceSleep uint32 } // ReadWriteStats contains statistics regarding the number of read and write // system calls for XFS filesystems. type ReadWriteStats struct { Read uint32 Write uint32 } // AttributeOperationStats contains statistics regarding manipulation of // XFS extended file attributes. type AttributeOperationStats struct { Get uint32 Set uint32 Remove uint32 List uint32 } // InodeClusteringStats contains statistics regarding XFS inode clustering // operations. type InodeClusteringStats struct { Iflush uint32 Flush uint32 FlushInode uint32 } // VnodeStats contains statistics regarding XFS vnode operations. type VnodeStats struct { Active uint32 Allocate uint32 Get uint32 Hold uint32 Release uint32 Reclaim uint32 Remove uint32 Free uint32 } // BufferStats contains statistics regarding XFS read/write I/O buffers. type BufferStats struct { Get uint32 Create uint32 GetLocked uint32 GetLockedWaited uint32 BusyLocked uint32 MissLocked uint32 PageRetries uint32 PageFound uint32 GetRead uint32 } // ExtendedPrecisionStats contains high precision counters used to track the // total number of bytes read, written, or flushed, during XFS operations. type ExtendedPrecisionStats struct { FlushBytes uint64 WriteBytes uint64 ReadBytes uint64 }
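// A minimal ParseStats usage sketch (the input line is illustrative; ParseStats is defined in parse.go of this package): // // stats, err := xfs.ParseStats(strings.NewReader("extent_alloc 1 2 3 4\n")) // if err != nil { /* handle error */ } // fmt.Println(stats.ExtentAllocation.ExtentsAllocated) // prints 1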