node_exporter-1.7.0/.circleci/config.yml --- version: 2.1 orbs: prometheus: prometheus/prometheus@0.17.1 executors: # Whenever the Go version is updated here, .promu.yml and .promu-cgo.yml # should also be updated. golang: docker: - image: cimg/go:1.21 arm: machine: image: ubuntu-2204:current resource_class: arm.medium jobs: test: executor: golang steps: - prometheus/setup_environment - run: go mod download - run: make - prometheus/store_artifact: file: node_exporter test-arm: executor: arm steps: - checkout - run: uname -a - run: make test-e2e test_mixins: executor: golang steps: - checkout - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest - run: make promtool - run: make -C docs/node-mixin clean - run: make -C docs/node-mixin jb_install - run: make -C docs/node-mixin - run: git diff --exit-code build: machine: image: ubuntu-2204:current parallelism: 3 steps: - prometheus/setup_environment - run: docker run --privileged linuxkit/binfmt:af88a591f9cc896a52ce596b9cf7ca26a061ef97 - run: promu crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX - run: promu --config .promu-cgo.yml crossbuild -v --parallelism $CIRCLE_NODE_TOTAL --parallelism-thread $CIRCLE_NODE_INDEX - persist_to_workspace: root: . paths: - .build - store_artifacts: path: .build destination: /build test_docker: machine: image: ubuntu-2204:current environment: DOCKER_TEST_IMAGE_NAME: quay.io/prometheus/golang-builder:1.18-base REPO_PATH: github.com/prometheus/node_exporter steps: - prometheus/setup_environment - attach_workspace: at: .
- run: command: | if [ -n "$CIRCLE_TAG" ]; then make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG else make docker fi - run: docker images - run: docker run --rm -t -v "$(pwd):/app" "${DOCKER_TEST_IMAGE_NAME}" -i "${REPO_PATH}" -T - run: command: | if [ -n "$CIRCLE_TAG" ]; then make test-docker DOCKER_IMAGE_TAG=$CIRCLE_TAG else make test-docker fi workflows: version: 2 node_exporter: jobs: - test: filters: tags: only: /.*/ - test-arm: filters: tags: only: /.*/ - build: filters: tags: only: /.*/ - test_docker: requires: - test - build filters: tags: only: /.*/ - test_mixins: filters: tags: only: /.*/ - prometheus/publish_master: context: org-context requires: - test - build filters: branches: only: master - prometheus/publish_release: context: org-context requires: - test - build filters: tags: only: /^v.*/ branches: ignore: /.*/ node_exporter-1.7.0/.dockerignore000066400000000000000000000001721452426057600170630ustar00rootroot00000000000000.build/ .tarballs/ !.build/linux-amd64 !.build/linux-armv7 !.build/linux-arm64 !.build/linux-ppc64le !.build/linux-s390x node_exporter-1.7.0/.github/000077500000000000000000000000001452426057600157475ustar00rootroot00000000000000node_exporter-1.7.0/.github/ISSUE_TEMPLATE.md000066400000000000000000000022271452426057600204570ustar00rootroot00000000000000 ### Host operating system: output of `uname -a` ### node_exporter version: output of `node_exporter --version` ### node_exporter command line flags ### node_exporter log output ### Are you running node_exporter in Docker? ### What did you do that produced an error? ### What did you expect to see? ### What did you see instead? node_exporter-1.7.0/.github/dependabot.yml000066400000000000000000000001561452426057600206010ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "gomod" directory: "/" schedule: interval: "monthly" node_exporter-1.7.0/.github/workflows/000077500000000000000000000000001452426057600200045ustar00rootroot00000000000000node_exporter-1.7.0/.github/workflows/golangci-lint.yml000066400000000000000000000017161452426057600232630ustar00rootroot00000000000000--- # This action is synced from https://github.com/prometheus/prometheus name: golangci-lint on: push: paths: - "go.sum" - "go.mod" - "**.go" - "scripts/errcheck_excludes.txt" - ".github/workflows/golangci-lint.yml" - ".golangci.yml" pull_request: jobs: golangci: name: lint runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: install Go uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0 with: go-version: 1.20.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 with: version: v1.54.2 node_exporter-1.7.0/.gitignore000066400000000000000000000006701452426057600164020ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe dependencies-stamp /node_exporter /.build /.deps /.release /.tarballs # Intellij /.idea *.iml # Test files extracted from ttar collector/fixtures/sys/ collector/fixtures/udev/ /vendor 
node_exporter-1.7.0/.golangci.yml000066400000000000000000000015741452426057600170020ustar00rootroot00000000000000linters: enable: - misspell - revive disable: # Disable soon to deprecated[1] linters that lead to false # positives when build tags disable certain files[2] # 1: https://github.com/golangci/golangci-lint/issues/1841 # 2: https://github.com/prometheus/node_exporter/issues/1545 - deadcode - unused - structcheck - varcheck issues: exclude-rules: - path: _test.go linters: - errcheck linters-settings: errcheck: exclude-functions: # Used in HTTP handlers, any error is handled by the server itself. - (net/http.ResponseWriter).Write # Never check for logger errors. - (github.com/go-kit/log.Logger).Log revive: rules: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter - name: unused-parameter severity: warning disabled: true node_exporter-1.7.0/.promu-cgo.yml000066400000000000000000000015371452426057600171260ustar00rootroot00000000000000go: # Whenever the Go version is updated here, .circle/config.yml and # .promu.yml should also be updated. version: 1.21 cgo: true repository: path: github.com/prometheus/node_exporter build: binaries: - name: node_exporter flags: -a -tags 'netgo osusergo static_build' ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Branch={{.Branch}} -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} tarball: files: - LICENSE - NOTICE crossbuild: platforms: - darwin/amd64 - darwin/arm64 - netbsd/amd64 - netbsd/386 node_exporter-1.7.0/.promu.yml000066400000000000000000000014431452426057600163540ustar00rootroot00000000000000go: # Whenever the Go version is updated here, .circle/config.yml and # .promu-cgo.yml should also be updated. 
version: 1.21 repository: path: github.com/prometheus/node_exporter build: binaries: - name: node_exporter flags: -a -tags 'netgo osusergo static_build' ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Branch={{.Branch}} -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} tarball: files: - LICENSE - NOTICE crossbuild: platforms: - linux - openbsd/amd64 node_exporter-1.7.0/.yamllint --- extends: default rules: braces: max-spaces-inside: 1 level: error brackets: max-spaces-inside: 1 level: error commas: disable comments: disable comments-indentation: disable document-start: disable indentation: spaces: consistent indent-sequences: consistent key-duplicates: ignore: | config/testdata/section_key_dup.bad.yml line-length: disable truthy: check-keys: false node_exporter-1.7.0/CHANGELOG.md ## master / unreleased * [CHANGE] * [FEATURE] * [ENHANCEMENT] * [BUGFIX] ## 1.7.0 / 2023-11-11 * [FEATURE] Add ZFS freebsd per dataset stats #2753 * [FEATURE] Add cpu vulnerabilities reporting from sysfs #2721 * [ENHANCEMENT] Parallelize stat calls in Linux filesystem collector #1772 * [ENHANCEMENT] Add missing linkspeeds to ethtool collector #2711 * [ENHANCEMENT] Add CPU MHz as the value for `node_cpu_info` metric #2778 * [ENHANCEMENT] Improve qdisc collector performance #2779 * [ENHANCEMENT] Add include and exclude filter for hwmon collector #2699 * [ENHANCEMENT] Optionally fetch ARP stats via rtnetlink instead of procfs #2777 * [BUGFIX] Fix ZFS arcstats on FreeBSD 14.0+ #2754 * [BUGFIX] Fallback to 32-bit stats in netdev #2757 * [BUGFIX] Close btrfs.FS handle after use #2780 * [BUGFIX] Move RO status before error return #2807 * [BUGFIX] Fix `promhttp_metric_handler_errors_total` being always active #2808 * [BUGFIX] Fix nfsd v4 index miss #2824 ## 1.6.1 / 2023-06-17 Rebuild with latest Go compiler bugfix release.
## 1.6.0 / 2023-05-27 * [CHANGE] Fix cpustat when some cpus are offline #2318 * [CHANGE] Remove metrics of offline CPUs in CPU collector #2605 * [CHANGE] Deprecate ntp collector #2603 * [CHANGE] Remove bcache `cache_readaheads_totals` metrics #2583 * [CHANGE] Deprecate supervisord collector #2685 * [FEATURE] Enable uname collector on NetBSD #2559 * [FEATURE] NetBSD support for the meminfo collector #2570 * [FEATURE] NetBSD support for CPU collector #2626 * [FEATURE] Add FreeBSD collector for netisr subsystem #2668 * [FEATURE] Add softirqs collector #2669 * [ENHANCEMENT] Add suspended as a `node_zfs_zpool_state` #2449 * [ENHANCEMENT] Add administrative state of Linux network interfaces #2515 * [ENHANCEMENT] Log current value of GOMAXPROCS #2537 * [ENHANCEMENT] Add profiler options for perf collector #2542 * [ENHANCEMENT] Allow root path as metrics path #2590 * [ENHANCEMENT] Add cpu frequency governor metrics #2569 * [ENHANCEMENT] Add new landing page #2622 * [ENHANCEMENT] Reduce privileges needed for btrfs device stats #2634 * [ENHANCEMENT] Add ZFS `memory_available_bytes` #2687 * [ENHANCEMENT] Use `SCSI_IDENT_SERIAL` as serial in diskstats #2612 * [ENHANCEMENT] Read missing from netlink netclass attributes from sysfs #2669 * [BUGFIX] perf: fixes for automatically detecting the correct tracefs mountpoints #2553 * [BUGFIX] Fix `thermal_zone` collector noise #2554 * [BUGFIX] Fix a problem fetching the user wire count on FreeBSD #2584 * [BUGFIX] interrupts: Fix fields on linux aarch64 #2631 * [BUGFIX] Remove metrics of offline CPUs in CPU collector #2605 * [BUGFIX] Fix OpenBSD filesystem collector string parsing #2637 * [BUGFIX] Fix bad reporting of `node_cpu_seconds_total` in OpenBSD #2663 ## 1.5.0 / 2022-11-29 NOTE: This changes the Go runtime "GOMAXPROCS" to 1. This is done to limit the concurrency of the exporter to 1 CPU thread at a time in order to avoid a race condition problem in the Linux kernel (#2500) and parallel IO issues on nodes with high numbers of CPUs/CPU threads (#1880). NOTE: A command line arg has been changed from `--web.config` to `--web.config.file`. 
* [CHANGE] Default GOMAXPROCS to 1 #2530 * [FEATURE] Add multiple listeners and systemd socket listener activation #2393 * [ENHANCEMENT] Add RTNL version of netclass collector #2492, #2528 * [BUGFIX] Fix diskstats exclude flags #2487 * [BUGFIX] Bump go/x/crypt and go/x/net #2488 * [BUGFIX] Fix hwmon label sanitizer #2504 * [BUGFIX] Use native endianness when encoding InetDiagMsg #2508 * [BUGFIX] Fix btrfs device stats always being zero #2516 * [BUGFIX] Security: Update exporter-toolkit (CVE-2022-46146) #2531 ## 1.4.1 / 2022-11-29 * [BUGFIX] Fix diskstats exclude flags #2487 * [BUGFIX] Security: Update go/x/crypto and go/x/net (CVE-2022-27191 CVE-2022-27664) #2488 * [BUGFIX] Security: Update exporter-toolkit (CVE-2022-46146) #2531 ## 1.4.0 / 2022-09-24 * [CHANGE] Merge metrics descriptions in textfile collector #2475 * [FEATURE] [node-mixin] Add darwin dashboard to mixin #2351 * [FEATURE] Add "isolated" metric on cpu collector on linux #2251 * [FEATURE] Add cgroup summary collector #2408 * [FEATURE] Add selinux collector #2205 * [FEATURE] Add slab info collector #2376 * [FEATURE] Add sysctl collector #2425 * [FEATURE] Also track the CPU Spin time for OpenBSD systems #1971 * [FEATURE] Add support for MacOS version #2471 * [ENHANCEMENT] [node-mixin] Add missing selectors #2426 * [ENHANCEMENT] [node-mixin] Change current datasource to grafana's default #2281 * [ENHANCEMENT] [node-mixin] Change disk graph to disk table #2364 * [ENHANCEMENT] [node-mixin] Change io time units to %util #2375 * [ENHANCEMENT] Ad user_wired_bytes and laundry_bytes on *bsd #2266 * [ENHANCEMENT] Add additional vm_stat memory metrics for darwin #2240 * [ENHANCEMENT] Add device filter flags to arp collector #2254 * [ENHANCEMENT] Add diskstats include and exclude device flags #2417 * [ENHANCEMENT] Add node_softirqs_total metric #2221 * [ENHANCEMENT] Add rapl zone name label option #2401 * [ENHANCEMENT] Add slabinfo collector #1799 * [ENHANCEMENT] Allow user to select port on NTP server to query #2270 * [ENHANCEMENT] collector/diskstats: Add labels and metrics from udev #2404 * [ENHANCEMENT] Enable builds against older macOS SDK #2327 * [ENHANCEMENT] qdisk-linux: Add exclude and include flags for interface name #2432 * [ENHANCEMENT] systemd: Expose systemd minor version #2282 * [ENHANCEMENT] Use netlink for tcpstat collector #2322 * [ENHANCEMENT] Use netlink to get netdev stats #2074 * [ENHANCEMENT] Add additional perf counters for stalled frontend/backend cycles #2191 * [ENHANCEMENT] Add btrfs device error stats #2193 * [BUGFIX] [node-mixin] Fix fsSpaceAvailableCriticalThreshold and fsSpaceAvailableWarning #2352 * [BUGFIX] Fix concurrency issue in ethtool collector #2289 * [BUGFIX] Fix concurrency issue in netdev collector #2267 * [BUGFIX] Fix diskstat reads and write metrics for disks with different sector sizes #2311 * [BUGFIX] Fix iostat on macos broken by deprecation warning #2292 * [BUGFIX] Fix NodeFileDescriptorLimit alerts #2340 * [BUGFIX] Sanitize rapl zone names #2299 * [BUGFIX] Add file descriptor close safely in test #2447 * [BUGFIX] Fix race condition in os_release.go #2454 * [BUGFIX] Skip ZFS IO metrics if their paths are missing #2451 ## 1.3.1 / 2021-12-01 * [BUGFIX] Handle nil CPU thermal power status on M1 #2218 * [BUGFIX] bsd: Ignore filesystems flagged as MNT_IGNORE. #2227 * [BUGFIX] Sanitize UTF-8 in dmi collector #2229 ## 1.3.0 / 2021-10-20 NOTE: In order to support globs in the textfile collector path, filenames exposed by `node_textfile_mtime_seconds` now contain the full path name. 
* [CHANGE] Add path label to rapl collector #2146 * [CHANGE] Exclude filesystems under /run/credentials #2157 * [CHANGE] Add TCPTimeouts to netstat default filter #2189 * [FEATURE] Add lnstat collector for metrics from /proc/net/stat/ #1771 * [FEATURE] Add darwin powersupply collector #1777 * [FEATURE] Add support for monitoring GPUs on Linux #1998 * [FEATURE] Add Darwin thermal collector #2032 * [FEATURE] Add os release collector #2094 * [FEATURE] Add netdev.address-info collector #2105 * [FEATURE] Add clocksource metrics to time collector #2197 * [ENHANCEMENT] Support glob textfile collector directories #1985 * [ENHANCEMENT] ethtool: Expose node_ethtool_info metric #2080 * [ENHANCEMENT] Use include/exclude flags for ethtool filtering #2165 * [ENHANCEMENT] Add flag to disable guest CPU metrics #2123 * [ENHANCEMENT] Add DMI collector #2131 * [ENHANCEMENT] Add threads metrics to processes collector #2164 * [ENHANCEMENT] Reduce timer GC delays in the Linux filesystem collector #2169 * [ENHANCEMENT] Add TCPTimeouts to netstat default filter #2189 * [ENHANCEMENT] Use SysctlTimeval for boottime collector on BSD #2208 * [BUGFIX] ethtool: Sanitize metric names #2093 * [BUGFIX] Fix ethtool collector for multiple interfaces #2126 * [BUGFIX] Fix possible panic on macOS #2133 * [BUGFIX] Collect flag_info and bug_info only for one core #2156 * [BUGFIX] Prevent duplicate ethtool metric names #2187 ## 1.2.2 / 2021-08-06 * [BUGFIX] Fix processes collector long int parsing #2112 ## 1.2.1 / 2021-07-23 * [BUGFIX] Fix zoneinfo parsing prometheus/procfs#386 * [BUGFIX] Fix nvme collector log noise #2091 * [BUGFIX] Fix rapl collector log noise #2092 ## 1.2.0 / 2021-07-15 NOTE: Ignoring invalid network speed will be the default in 2.x NOTE: Filesystem collector flags have been renamed. `--collector.filesystem.ignored-mount-points` is now `--collector.filesystem.mount-points-exclude` and `--collector.filesystem.ignored-fs-types` is now `--collector.filesystem.fs-types-exclude`. The old flags will be removed in 2.x. * [CHANGE] Rename filesystem collector flags to match other collectors #2012 * [CHANGE] Make node_exporter print usage to STDOUT #2039 * [FEATURE] Add conntrack statistics metrics #1155 * [FEATURE] Add ethtool stats collector #1832 * [FEATURE] Add flag to ignore network speed if it is unknown #1989 * [FEATURE] Add tapestats collector for Linux #2044 * [FEATURE] Add nvme collector #2062 * [ENHANCEMENT] Add ErrorLog plumbing to promhttp #1887 * [ENHANCEMENT] Add more Infiniband counters #2019 * [ENHANCEMENT] netclass: retrieve interface names and filter before parsing #2033 * [ENHANCEMENT] Add time zone offset metric #2060 * [BUGFIX] Handle errors from disabled PSI subsystem #1983 * [BUGFIX] Fix panic when using backwards compatible flags #2000 * [BUGFIX] Fix wrong value for OpenBSD memory buffer cache #2015 * [BUGFIX] Only initiate collectors once #2048 * [BUGFIX] Handle small backwards jumps in CPU idle #2067 ## 1.1.2 / 2021-03-05 * [BUGFIX] Handle errors from disabled PSI subsystem #1983 * [BUGFIX] Sanitize strings from /sys/class/power_supply #1984 * [BUGFIX] Silence missing netclass errors #1986 ## 1.1.1 / 2021-02-12 * [BUGFIX] Fix ineffassign issue #1957 * [BUGFIX] Fix some noisy log lines #1962 ## 1.1.0 / 2021-02-05 NOTE: We have improved some of the flag naming conventions (PR #1743). The old names are deprecated and will be removed in 2.0. They will continue to work for backwards compatibility. 
* [CHANGE] Improve filter flag names #1743 * [CHANGE] Add btrfs and powersupplyclass to list of exporters enabled by default #1897 * [FEATURE] Add fibre channel collector #1786 * [FEATURE] Expose cpu bugs and flags as info metrics. #1788 * [FEATURE] Add network_route collector #1811 * [FEATURE] Add zoneinfo collector #1922 * [ENHANCEMENT] Add more InfiniBand counters #1694 * [ENHANCEMENT] Add flag to aggr ipvs metrics to avoid high cardinality metrics #1709 * [ENHANCEMENT] Adding backlog/current queue length to qdisc collector #1732 * [ENHANCEMENT] Include TCP OutRsts in netstat metrics #1733 * [ENHANCEMENT] Add pool size to entropy collector #1753 * [ENHANCEMENT] Remove CGO dependencies for OpenBSD amd64 #1774 * [ENHANCEMENT] bcache: add writeback_rate_debug stats #1658 * [ENHANCEMENT] Add check state for mdadm arrays via node_md_state metric #1810 * [ENHANCEMENT] Expose XFS inode statistics #1870 * [ENHANCEMENT] Expose zfs zpool state #1878 * [ENHANCEMENT] Added an ability to pass collector.supervisord.url via SUPERVISORD_URL environment variable #1947 * [BUGFIX] filesystem_freebsd: Fix label values #1728 * [BUGFIX] Fix various procfs parsing errors #1735 * [BUGFIX] Handle no data from powersupplyclass #1747 * [BUGFIX] udp_queues_linux.go: change upd to udp in two error strings #1769 * [BUGFIX] Fix node_scrape_collector_success behaviour #1816 * [BUGFIX] Fix NodeRAIDDegraded to not use a string rule expressions #1827 * [BUGFIX] Fix node_md_disks state label from fail to failed #1862 * [BUGFIX] Handle EPERM for syscall in timex collector #1938 * [BUGFIX] bcache: fix typo in a metric name #1943 * [BUGFIX] Fix XFS read/write stats (https://github.com/prometheus/procfs/pull/343) ## 1.0.1 / 2020-06-15 * [BUGFIX] filesystem_freebsd: Fix label values #1728 * [BUGFIX] Update prometheus/procfs to fix log noise #1735 * [BUGFIX] Fix build tags for collectors #1745 * [BUGFIX] Handle no data from powersupplyclass #1747, #1749 ## 1.0.0 / 2020-05-25 ### **Breaking changes** * The netdev collector CLI argument `--collector.netdev.ignored-devices` was renamed to `--collector.netdev.device-blacklist` in order to conform with the systemd collector. #1279 * The label named `state` on `node_systemd_service_restart_total` metrics was changed to `name` to better describe the metric. #1393 * Refactoring of the mdadm collector changes several metrics - `node_md_disks_active` is removed - `node_md_disks` now has a `state` label for "failed", "spare", "active" disks. - `node_md_is_active` is replaced by `node_md_state` with a state set of "active", "inactive", "recovering", "resync". * Additional label `mountaddr` added to NFS device metrics to distinguish mounts from the same URL, but different IP addresses. #1417 * Metrics node_cpu_scaling_frequency_min_hrts and node_cpu_scaling_frequency_max_hrts of the cpufreq collector were renamed to node_cpu_scaling_frequency_min_hertz and node_cpu_scaling_frequency_max_hertz. #1510 * Collectors that are enabled, but are unable to find data to collect, now return 0 for `node_scrape_collector_success`. ### Changes * [CHANGE] Add `--collector.netdev.device-whitelist`. #1279 * [CHANGE] Ignore iso9600 filesystem on Linux #1355 * [CHANGE] Refactor mdadm collector #1403 * [CHANGE] Add `mountaddr` label to NFS metrics. #1417 * [CHANGE] Don't count empty collectors as success. 
#1613 * [FEATURE] New flag to disable default collectors #1276 * [FEATURE] Add experimental TLS support #1277, #1687, #1695 * [FEATURE] Add collector for Power Supply Class #1280 * [FEATURE] Add new schedstat collector #1389 * [FEATURE] Add FreeBSD zfs support #1394 * [FEATURE] Add uname support for Darwin and OpenBSD #1433 * [FEATURE] Add new metric node_cpu_info #1489 * [FEATURE] Add new thermal_zone collector #1425 * [FEATURE] Add new cooling_device metrics to thermal zone collector #1445 * [FEATURE] Add swap usage on darwin #1508 * [FEATURE] Add Btrfs collector #1512 * [FEATURE] Add RAPL collector #1523 * [FEATURE] Add new softnet collector #1576 * [FEATURE] Add new udp_queues collector #1503 * [FEATURE] Add basic authentication #1673 * [ENHANCEMENT] Log pid when there is a problem reading the process stats #1341 * [ENHANCEMENT] Collect InfiniBand port state and physical state #1357 * [ENHANCEMENT] Include additional XFS runtime statistics. #1423 * [ENHANCEMENT] Report non-fatal collection errors in the exporter metric. #1439 * [ENHANCEMENT] Expose IPVS firewall mark as a label #1455 * [ENHANCEMENT] Add check for systemd version before attempting to query certain metrics. #1413 * [ENHANCEMENT] Add a flag to adjust mount timeout #1486 * [ENHANCEMENT] Add new counters for flush requests in Linux 5.5 #1548 * [ENHANCEMENT] Add metrics and tests for UDP receive and send buffer errors #1534 * [ENHANCEMENT] The sockstat collector now exposes IPv6 statistics in addition to the existing IPv4 support. #1552 * [ENHANCEMENT] Add infiniband info metric #1563 * [ENHANCEMENT] Add unix socket support for supervisord collector #1592 * [ENHANCEMENT] Implement loadavg on all BSDs without cgo #1584 * [ENHANCEMENT] Add model_name and stepping to node_cpu_info metric #1617 * [ENHANCEMENT] Add `--collector.perf.cpus` to allow setting the CPU list for perf stats. #1561 * [ENHANCEMENT] Add metrics for IO errors and retires on Darwin. #1636 * [ENHANCEMENT] Add perf tracepoint collection flag #1664 * [ENHANCEMENT] ZFS: read contents of objset file #1632 * [ENHANCEMENT] Linux CPU: Cache CPU metrics to make them monotonically increasing #1711 * [BUGFIX] Read /proc/net files with a single read syscall #1380 * [BUGFIX] Renamed label `state` to `name` on `node_systemd_service_restart_total`. #1393 * [BUGFIX] Fix netdev nil reference on Darwin #1414 * [BUGFIX] Strip path.rootfs from mountpoint labels #1421 * [BUGFIX] Fix seconds reported by schedstat #1426 * [BUGFIX] Fix empty string in path.rootfs #1464 * [BUGFIX] Fix typo in cpufreq metric names #1510 * [BUGFIX] Read /proc/stat in one syscall #1538 * [BUGFIX] Fix OpenBSD cache memory information #1542 * [BUGFIX] Refactor textfile collector to avoid looping defer #1549 * [BUGFIX] Fix network speed math #1580 * [BUGFIX] collector/systemd: use regexp to extract systemd version #1647 * [BUGFIX] Fix initialization in perf collector when using multiple CPUs #1665 * [BUGFIX] Fix accidentally empty lines in meminfo_linux #1671 ## 0.18.1 / 2019-06-04 ### Changes * [BUGFIX] Fix incorrect sysctl call in BSD meminfo collector, resulting in broken swap metrics on FreeBSD #1345 * [BUGFIX] Fix rollover bug in mountstats collector #1364 ## 0.18.0 / 2019-05-09 ### **Breaking changes** * Renamed `interface` label to `device` in netclass collector for consistency with other network metrics #1224 * The cpufreq metrics now separate the `cpufreq` and `scaling` data based on what the driver provides. 
#1248 * The labels for the network_up metric have changed, see issue #1236 * Bonding collector now uses `mii_status` instead of `operstatus` #1124 * Several systemd metrics have been turned off by default to improve performance #1254 These include unit_tasks_current, unit_tasks_max, service_restart_total, and unit_start_time_seconds * The systemd collector blacklist now includes automount, device, mount, and slice units by default. #1255 ### Changes * [CHANGE] Bonding state uses mii_status #1124 * [CHANGE] Add a limit to the number of in-flight requests #1166 * [CHANGE] Renamed `interface` label to `device` in netclass collector #1224 * [CHANGE] Add separate cpufreq and scaling metrics #1248 * [CHANGE] Several systemd metrics have been turned off by default to improve performance #1254 * [CHANGE] Expand systemd collector blacklist #1255 * [CHANGE] Split cpufreq metrics into a separate collector #1253 * [FEATURE] Add a flag to disable exporter metrics #1148 * [FEATURE] Add kstat-based Solaris metrics for boottime, cpu and zfs collectors #1197 * [FEATURE] Add uname collector for FreeBSD #1239 * [FEATURE] Add diskstats collector for OpenBSD #1250 * [FEATURE] Add pressure collector exposing pressure stall information for Linux #1174 * [FEATURE] Add perf exporter for Linux #1274 * [ENHANCEMENT] Add Infiniband counters #1120 * [ENHANCEMENT] Add TCPSynRetrans to netstat default filter #1143 * [ENHANCEMENT] Move network_up labels into new metric network_info #1236 * [ENHANCEMENT] Use 64-bit counters for Darwin netstat * [BUGFIX] Add fallback for missing /proc/1/mounts #1172 * [BUGFIX] Fix node_textfile_mtime_seconds to work properly on symlinks #1326 ## 0.17.0 / 2018-11-30 Build note: Linux builds can now be built without CGO. ### **Breaking changes** supervisord collector reports `start_time_seconds` rather than `uptime` #952 The wifi collector is disabled by default due to suspected caching issues and goroutine leaks. * https://github.com/prometheus/node_exporter/issues/870 * https://github.com/prometheus/node_exporter/issues/1008 Darwin meminfo metrics have been renamed to match Prometheus conventions.
#1060 ### Changes * [CHANGE] Use /proc/mounts instead of statfs(2) for ro state #1002 * [CHANGE] Exclude only subdirectories of /var/lib/docker #1003 * [CHANGE] Filter out non-installed units when collecting all systemd units #1011 * [CHANGE] `service_restart_total` and `socket_refused_connections_total` will not be reported if you're running an older version of systemd * [CHANGE] collector/timex: remove cgo dependency #1079 * [CHANGE] filesystem: Ignore Docker netns mounts #1047 * [CHANGE] Ignore additional virtual filesystems #1104 * [FEATURE] Add netclass collector #851 * [FEATURE] Add processes collector #950 * [FEATURE] Collect start time for systemd units #952 * [FEATURE] Add socket unit stats to systemd collector #968 * [FEATURE] Collect NRestarts property for systemd service units #992 * [FEATURE] Collect NRefused property for systemd socket units (available as of systemd v239) #995 * [FEATURE] Allow removal of rootfs prefix for run in docker #1058 * [ENHANCEMENT] Support for octal characters in mountpoints #954 * [ENHANCEMENT] Update wifi stats to support multiple stations #980 * [ENHANCEMENT] Add transmit/receive bytes total for wifi stations #1150 * [ENHANCEMENT] Handle stuck NFS mounts #997 * [ENHANCEMENT] infiniband: Handle iWARP RDMA modules N/A #974 * [ENHANCEMENT] Update diskstats for linux kernel 4.19 #1109 * [ENHANCEMENT] Collect TasksCurrent, TasksMax per systemd unit #1098 * [BUGFIX] Fix FreeBSD CPU temp #965 * [BUGFIX] Fix goroutine leak in supervisord collector #978 * [BUGFIX] Fix mdadm collector issues #985 * [BUGFIX] Fix ntp collector thread safety #1014 * [BUGFIX] Systemd units will not be ignored if you're running older versions of systemd #1039 * [BUGFIX] Handle vanishing PIDs #1043 * [BUGFIX] Correctly cast Darwin memory info #1060 * [BUGFIX] Filter systemd units in Go for compatibility with older versions #1083 * [BUGFIX] Update cpu collector for OpenBSD 6.4 #1094 * [BUGFIX] Fix typo on HELP of `read_time_seconds_total` #1057 * [BUGFIX] collector/diskstats: don't fail if there are extra stats #1125 * [BUGFIX] collector/hwmon\_linux: handle temperature sensor file #1123 * [BUGFIX] collector/filesystem: add bounds check #1133 * [BUGFIX] Fix dragonfly's CPU counting frequency #1140 * [BUGFIX] Add fallback for missing /proc/1/mounts #1172 ## 0.16.0 / 2018-05-15 **Breaking changes** This release contains major breaking changes to metric names. Many metrics have new names, labels, and label values in order to conform to current naming conventions. * Linux node_cpu metrics now break out `guest` values into separate metrics. See Issue #737 * Many counter metrics have been renamed to include `_total`. * Many metrics have been renamed/modified to include base units, for example `node_cpu` is now `node_cpu_seconds_total`. In order to help with the transition we have an [upgrade guide](docs/V0_16_UPGRADE_GUIDE.md). Other breaking changes: * The megacli collector has been removed, is now replaced by the storcli.py textfile helper. * The gmond collector has been removed. * The textfile collector will now treat timestamps as errors. * [CHANGE] Split out guest cpu metrics on Linux. #744 * [CHANGE] Exclude Linux proc from filesystem type regexp #774 * [CHANGE] Ignore more virtual filesystems #775 * [CHANGE] Remove obsolete megacli collector. #798 * [CHANGE] Ignore /var/lib/docker by default. 
#814 * [CHANGE] Cleanup NFS metrics #834 * [CHANGE] Only report core throttles per core, not per cpu #836 * [CHANGE] Treat custom textfile metric timestamps as errors #769 * [CHANGE] Use lowercase cpu label name in interrupts #849 * [CHANGE] Enable bonding collector by default. #872 * [CHANGE] Greatly reduce the metrics vmstat returns by default. #874 * [CHANGE] Greatly trim what netstat collector exposes by default #876 * [CHANGE] Drop `exec_` prefix and move `node_boot_time_seconds` from `exec` to new `boottime` collector and enable for Darwin/Dragonfly/FreeBSD/NetBSD/OpenBSD. #839, #901 * [CHANGE] Remove deprecated gmond collector #852 * [CHANGE] align Darwin disk stat names with Linux #930 * [FEATURE] Add `collect[]` parameter #699 * [FEATURE] Add text collector conversion for ipmitool output. #746 * [FEATURE] Add openbsd meminfo #724 * [FEATURE] Add systemd summary metrics #765 * [FEATURE] Add OpenBSD CPU collector #805 * [FEATURE] Add NFS Server metrics collector. #803 * [FEATURE] add sample directory size exporter #789 * [ENHANCEMENT] added Wear_Leveling_Count attribute to smartmon.sh script #707 * [ENHANCEMENT] Simplify Utsname string conversion #716 * [ENHANCEMENT] apt.sh: handle multiple origins in apt-get output #757 * [ENHANCEMENT] Export systemd timers last trigger seconds. #807 * [ENHANCEMENT] updates for zfsonlinux 0.7.5 #779 * [BUGFIX] Fix smartmon.sh textfile script #700 * [BUGFIX] netdev: Change valueType to CounterValue #749 * [BUGFIX] textfile: fix duplicate metrics error #738 * [BUGFIX] Fix panic by updating github.com/ema/qdisc dependency #778 * [BUGFIX] Use uint64 in the ZFS collector #714 * [BUGFIX] multiply page size after float64 coercion to avoid signed integer overflow #780 * [BUGFIX] smartmon: Escape double quotes in device model family #772 * [BUGFIX] Fix log level regression in #533 #815 * [BUGFIX] Correct the ClocksPerSec scaling factor on Darwin #846 * [BUGFIX] Count core throttles per core and per package #871 * [BUGFIX] Fix netdev collector for linux #890 #910 * [BUGFIX] Fix memory corruption when number of filesystems > 16 on FreeBSD #900 * [BUGFIX] Fix parsing of interface aliases in netdev linux #904 ## 0.15.2 / 2017-12-06 * [BUGFIX] cpu: Support processor-less (memory-only) NUMA nodes #734 ## 0.15.1 / 2017-11-07 * [BUGFIX] xfs: expose correct fields, fix metric names #708 * [BUGFIX] Correct buffer_bytes > INT_MAX on BSD/amd64. #712 * [BUGFIX] netstat: return nothing when /proc/net/snmp6 not found #718 * [BUGFIX] Fix off by one in Linux interrupts collector #721 * [BUGFIX] Add and use sysReadFile in hwmon collector #728 ## 0.15.0 / 2017-10-06 **Breaking changes** This release contains major breaking changes to flag handling. * The flag library has been changed, all flags now require double-dashes. (`-foo` becomes `--foo`). * The collector selection flag has been replaced by individual boolean flags. * The `-collector.procfs` and `-collector.sysfs` flags have been renamed to `--path.procfs` and `--path.sysfs` respectively. The `ntp` collector has been replaced with a new NTP-based check that is designed to expose the state of a localhost NTP server rather than provide the offset of the node to a remote NTP server. By default the `ntp` collector is now locked to localhost. This is to avoid accidental spamming of public internet NTP pools. Windows support is now removed, the [wmi_exporter](https://github.com/martinlindhe/wmi_exporter) is recommended as a replacement. 
* [CHANGE] `node_cpu` metrics moved from `stats` to `cpu` collector on linux (enabled by default). #548 * [CHANGE] Blacklist systemd scope units #534 * [CHANGE] Remove netbsd/arm #551 * [CHANGE] Remove Windows support #549 * [CHANGE] Enable IPVS collector by default #623 * [CHANGE] Switch to kingpin flags #639 * [CHANGE] Replace --collectors.enabled with per-collector flags #640 * [FEATURE] Add ARP collector for Linux #540 * [FEATURE] Add XFS collector for Linux #568, #575 * [FEATURE] Add qdisc collector for Linux #580 * [FEATURE] Add cpufreq stats for Linux #548 * [FEATURE] Add diskstats for Darwin #593 * [FEATURE] Add bcache collector for Linux #597 * [FEATURE] Add parsing /proc/net/snmp6 file for Linux #615 * [FEATURE] Add timex collector for Linux #664 * [ENHANCEMENT] Include overall health status in smartmon.sh example script #546 * [ENHANCEMENT] Include `guest_nice` in CPU collector #554 * [ENHANCEMENT] Add exec_boot_time for freebsd, dragonfly #550 * [ENHANCEMENT] Get full resolution for node_time #555 * [ENHANCEMENT] infiniband: Multiply port data XMIT/RCV metrics by 4 #579 * [ENHANCEMENT] cpu: Metric 'package_throttles_total' is per package. #657 * [BUGFIX] Fix stale device error metrics #533 * [BUGFIX] edac: Fix typo in node_edac_csrow_uncorrectable_errors_total #564 * [BUGFIX] Use int64 throughout the ZFS collector #653 * [BUGFIX] Silently ignore nonexisting bonding_masters file #569 * [BUGFIX] Change raid0 status line regexp for mdadm collector (bug #618) #619 * [BUGFIX] Ignore wifi collector permission errors #646 * [BUGFIX] Always try to return smartmon_device_info metric #663 ## 0.14.0 / 2017-03-21 NOTE: We are deprecating several collectors in this release. * `gmond` - Out of scope. * `megacli` - Requires forking, to be moved to textfile collection. * `ntp` - Out of scope. Breaking changes: * Collector errors are now a separate metric, `node_scrape_collector_success`, not a label on `node_exporter_scrape_duration_seconds` (#516) * [CHANGE] Report collector success/failure as a bool metric, not a label. #516 * [FEATURE] Add loadavg collector for Solaris #311 * [FEATURE] Add StorCli text collector example script #320 * [FEATURE] Add collector for Linux EDAC #324 * [FEATURE] Add text file utility for SMART metrics #354 * [FEATURE] Add a collector for NFS client statistics. #360 * [FEATURE] Add mountstats collector for detailed NFS statistics #367 * [FEATURE] Add a collector for DRBD #365 * [FEATURE] Add cpu collector for darwin #391 * [FEATURE] Add netdev collector for darwin #393 * [FEATURE] Collect CPU temperatures on FreeBSD #397 * [FEATURE] Add ZFS collector #410 * [FEATURE] Add initial wifi collector #413 * [FEATURE] Add NFS event metrics to mountstats collector #415 * [FEATURE] Add an example rules file #422 * [FEATURE] infiniband: Add new collector for InfiniBand statistics #450 * [FEATURE] buddyinfo: Add support for /proc/buddyinfo for linux free memory fragmentation. 
#454 * [IMPROVEMENT] hwmon: Provide annotation metric to link chip sysfs paths to human-readable chip types #359 * [IMPROVEMENT] Add node_filesystem_device_errors_total metric #374 * [IMPROVEMENT] Add runit service dir flag #375 * [IMPROVEMENT] Improve Docker documentation #376 * [IMPROVEMENT] Ignore autofs filesystems on linux #384 * [IMPROVEMENT] Replace some FreeBSD collectors with pure Go versions #385 * [IMPROVEMENT] Use filename as label, move 'label' to own metric #411 (hwmon) * [BUGFIX] mips64 build fix #361 * [BUGFIX] Update vendoring #372 (to fix #242) * [BUGFIX] Convert remaining collectors to use ConstMetrics #389 * [BUGFIX] Check for errors in netdev scanner #398 * [BUGFIX] Don't leak or race in FreeBSD devstat collector #396 * [BUGFIX] Allow graceful failure in hwmon collector #427 * [BUGFIX] Fix the reporting of active+total disk metrics for inactive raids. #522 ## 0.13.0 / 2016-11-26 NOTE: We have disabled builds of linux/ppc64 and linux/ppc64le due to build bugs. * [FEATURE] Add flag to ignore certain filesystem types (Copy of #217) #241 * [FEATURE] Add NTP stratum to NTP collector. #247 * [FEATURE] Add ignored-units flag for systemd collector #286 * [FEATURE] Compile netdev on dragonfly #314 * [FEATURE] Compile meminfo for dfly #315 * [FEATURE] Add hwmon /sensors support #278 * [FEATURE] Add Linux NUMA "numastat" metrics #249 * [FEATURE] export DragonFlyBSD CPU time #310 * [FEATURE] Dragonfly devstat #323 * [IMPROVEMENT] Use the offset calculation that includes round trip time in the ntp collector #250 * [IMPROVEMENT] Enable `*bsd` collector on darwin #265 * [IMPROVEMENT] Use meminfo_freebsd on darwin as well #266 * [IMPROVEMENT] sockstat: add support for RHE4 #267 * [IMPROVEMENT] Compile fs stats for dfly #302 * [BUGFIX] Add support for raid0 devices in mdadm_linux collector. #253 * [BUGFIX] Close file handler in textfile #263 * [BUGFIX] Ignore partitions on NVME devices by default #268 * [BUGFIX] Fix mdstat tabs parsing #275 * [BUGFIX] Fix mdadm collector for resync=PENDING. #309 * [BUGFIX] mdstat: Fix parsing of RAID0 lines that contain additional attributes. #341 * [BUGFIX] Fix additional mdadm parsing cases #346 ## 0.12.0 / 2016-05-05 * [CHANGE] Remove lastlogin collector. * [CHANGE] Remove -debug.memprofile-file flag. * [CHANGE] Sync BSD filesystem collector labels with Linux. * [CHANGE] Remove HTTP Basic Auth support. * [FEATURE] Add -version flag. * [FEATURE] Add Linux logind collector. * [FEATURE] Add Linux ksmd collector. * [FEATURE] Add Linux memory NUMA collector. * [FEATURE] Add Linux entropy collector. * [FEATURE] Add Linux vmstat collector. * [FEATURE] Add Linux conntrack collector. * [FEATURE] Add systemd collector. * [FEATURE] Add OpenBSD support for filesystem, interrupt and netdev collectors. * [FEATURE] Add supervisord collector. * [FEATURE] Add Linux /proc/mdstat collector. * [FEATURE] Add Linux uname collector. * [FEATURE] Add Linux /proc/sys/fs/file-nr collector. * [FEATURE] Add Linux /proc/net/sockstat collector. * [IMPROVEMENT] Provide statically linked Linux binaries. * [IMPROVEMENT] Remove root requirement for FreeBSD CPU metrics. * [IMPROVEMENT] Add node_exporter build info metric. * [IMPROVEMENT] Add disk bytes read/written metrics on Linux. * [IMPROVEMENT] Add filesystem read-only metric. * [IMPROVEMENT] Use common Prometheus log formatting. * [IMPROVEMENT] Add option to specify NTP protocol version. * [IMPROVEMENT] Handle statfs errors gracefully for individual filesystems. * [IMPROVEMENT] Add load5 and load15 metrics to loadavg collector. 
* [IMPROVEMENT] Add end-to-end tests. * [IMPROVEMENT] Export FreeBSD CPU metrics to seconds. * [IMPROVEMENT] Add flag to configure sysfs mountpoint. * [IMPROVEMENT] Add flag to configure procfs mountpoint. * [IMPROVEMENT] Add metric for last service state change to runit collector. * [BUGFIX] Fix FreeBSD netdev metrics on 64 bit systems. * [BUGFIX] Fix mdstat for devices in delayed resync state. * [BUGFIX] Fix Linux stat metrics on parallel scrapes. * [BUGFIX] Remove unavailable collectors from defaults. * [BUGFIX] Fix build errors on FreeBSD, OpenBSD, Darwin and Windows. * [BUGFIX] Fix build errors on 386, arm, arm64, ppc64 and ppc64le architectures. * [BUGFIX] Fix export of stale metrics for removed filesystem and network devices. * [BUGFIX] textfile: Fix mtime reporting. * [BUGFIX] megacli: prevent crash when drive temperature is N/A ## 0.11.0 / 2015-07-27 * [FEATURE] Add stats from /proc/net/snmp. * [FEATURE] Add support for FreeBSD. * [FEATURE] Allow netdev devices to be ignored. * [MAINTENANCE] New Dockerfile for unified way to dockerize Prometheus exporters. * [FEATURE] Add device,fstype collection to the filesystem exporter. * [IMPROVEMENT] Make logging of collector executions less verbose. ## 0.10.0 / 2015-06-10 * [CHANGE] Change logging output format and flags. ## 0.9.0 / 2015-05-26 * [BUGFIX] Fix `/proc/net/dev` parsing. * [CLEANUP] Remove the `attributes` collector, use `textfile` instead. * [CLEANUP] Replace last uses of the configuration file with flags. * [IMPROVEMENT] Remove cgo dependency. * [IMPROVEMENT] Sort collector names when printing. * [FEATURE] IPVS stats collector. ## 0.8.1 / 2015-05-17 * [MAINTENANCE] Use the common Prometheus build infrastructure. * [MAINTENANCE] Update former Google Code imports. * [IMPROVEMENT] Log the version at startup. * [FEATURE] TCP stats collector ## 0.8.0 / 2015-03-09 * [CLEANUP] Introduced semantic versioning and changelog. From now on, changes will be reported in this file. node_exporter-1.7.0/CODE_OF_CONDUCT.md000066400000000000000000000002301452426057600172010ustar00rootroot00000000000000# Prometheus Community Code of Conduct Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). node_exporter-1.7.0/CONTRIBUTING.md000066400000000000000000000044661452426057600166520ustar00rootroot00000000000000# Contributing Prometheus uses GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) the maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). * Sign your work to certify that your changes were created by yourself or you have the right to submit it under our license. 
Read https://developercertificate.org/ for all details and append your sign-off to every commit message like this: Signed-off-by: Random J Developer ## Collector Implementation Guidelines The Node Exporter is not a general monitoring agent. Its sole purpose is to expose machine metrics, as opposed to service metrics, with the only exception being the textfile collector. The metrics should not get transformed in a way that is hardware specific and would require maintaining any form of vendor based mappings or conditions. If for example a proc file contains the magic number 42 as some identifier, the Node Exporter should expose it as it is and not keep a mapping in code to make this human readable. Instead, the textfile collector can be used to add a static metric which can be joined with the metrics exposed by the exporter to get a human-readable identifier. A Collector may only read `/proc` or `/sys` files, use system calls or local sockets to retrieve metrics. It may not require root privileges. Running external commands is not allowed for performance and reliability reasons. Use a dedicated exporter instead or gather the metrics via the textfile collector. The Node Exporter tries to support the most common machine metrics. For more exotic metrics, use the textfile collector or a dedicated Exporter. node_exporter-1.7.0/Dockerfile ARG ARCH="amd64" ARG OS="linux" FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest LABEL maintainer="The Prometheus Authors " ARG ARCH="amd64" ARG OS="linux" COPY .build/${OS}-${ARCH}/node_exporter /bin/node_exporter EXPOSE 9100 USER nobody ENTRYPOINT [ "/bin/node_exporter" ] node_exporter-1.7.0/LICENSE Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. node_exporter-1.7.0/MAINTAINERS.md000066400000000000000000000001501452426057600164770ustar00rootroot00000000000000* Ben Kochie @SuperQ * Johannes 'fish' Ziemke @discordianfish node_exporter-1.7.0/Makefile000066400000000000000000000105601452426057600160510ustar00rootroot00000000000000# Copyright 2015 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Ensure that 'all' is the default target otherwise it will be the first target from Makefile.common. 
all:: # Needs to be defined before including Makefile.common to auto-generate targets DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x include Makefile.common PROMTOOL_VERSION ?= 2.30.0 PROMTOOL_URL ?= https://github.com/prometheus/prometheus/releases/download/v$(PROMTOOL_VERSION)/prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM).tar.gz PROMTOOL ?= $(FIRST_GOPATH)/bin/promtool DOCKER_IMAGE_NAME ?= node-exporter MACH ?= $(shell uname -m) STATICCHECK_IGNORE = ifeq ($(GOHOSTOS), linux) test-e2e := test-e2e else test-e2e := skip-test-e2e endif # Use CGO for non-Linux builds. ifeq ($(GOOS), linux) PROMU_CONF ?= .promu.yml else ifndef GOOS ifeq ($(GOHOSTOS), linux) PROMU_CONF ?= .promu.yml else PROMU_CONF ?= .promu-cgo.yml endif else # Do not use CGO for openbsd/amd64 builds ifeq ($(GOOS), openbsd) ifeq ($(GOARCH), amd64) PROMU_CONF ?= .promu.yml else PROMU_CONF ?= .promu-cgo.yml endif else PROMU_CONF ?= .promu-cgo.yml endif endif endif PROMU := $(FIRST_GOPATH)/bin/promu --config $(PROMU_CONF) e2e-out-64k-page = collector/fixtures/e2e-64k-page-output.txt e2e-out = collector/fixtures/e2e-output.txt ifeq ($(MACH), ppc64le) e2e-out = $(e2e-out-64k-page) endif ifeq ($(MACH), aarch64) e2e-out = $(e2e-out-64k-page) endif # 64bit -> 32bit mapping for cross-checking. At least for amd64/386, the 64bit CPU can execute 32bit code but not the other way around, so we don't support cross-testing upwards. cross-test = skip-test-32bit define goarch_pair ifeq ($$(GOHOSTOS),linux) ifeq ($$(GOHOSTARCH),$1) GOARCH_CROSS = $2 cross-test = test-32bit endif endif endef # By default, "cross" test with ourselves to cover unknown pairings. $(eval $(call goarch_pair,amd64,386)) $(eval $(call goarch_pair,mips64,mips)) $(eval $(call goarch_pair,mips64el,mipsel)) all:: vet checkmetrics checkrules common-all $(cross-test) $(test-e2e) .PHONY: test test: collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked @echo ">> running tests" $(GO) test -short $(test-flags) $(pkgs) .PHONY: test-32bit test-32bit: collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked @echo ">> running tests in 32-bit mode" @env GOARCH=$(GOARCH_CROSS) $(GO) test $(pkgs) .PHONY: skip-test-32bit skip-test-32bit: @echo ">> SKIP running tests in 32-bit mode: not supported on $(GOHOSTOS)/$(GOHOSTARCH)" %/.unpacked: %.ttar @echo ">> extracting fixtures" if [ -d $(dir $@) ] ; then rm -rf $(dir $@) ; fi ./ttar -C $(dir $*) -x -f $*.ttar touch $@ update_fixtures: rm -vf collector/fixtures/sys/.unpacked ./ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys rm -vf collector/fixtures/udev/.unpacked ./ttar -C collector/fixtures -c -f collector/fixtures/udev.ttar udev .PHONY: test-e2e test-e2e: build collector/fixtures/sys/.unpacked collector/fixtures/udev/.unpacked @echo ">> running end-to-end tests" ./end-to-end-test.sh .PHONY: skip-test-e2e skip-test-e2e: @echo ">> SKIP running end-to-end tests on $(GOHOSTOS)" .PHONY: checkmetrics checkmetrics: $(PROMTOOL) @echo ">> checking metrics for correctness" ./checkmetrics.sh $(PROMTOOL) $(e2e-out) ./checkmetrics.sh $(PROMTOOL) $(e2e-out-64k-page) .PHONY: checkrules checkrules: $(PROMTOOL) @echo ">> checking rules for correctness" find . 
-name "*rules*.yml" | xargs -I {} $(PROMTOOL) check rules {} .PHONY: test-docker test-docker: @echo ">> testing docker image" ./test_image.sh "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-amd64:$(DOCKER_IMAGE_TAG)" 9100 .PHONY: promtool promtool: $(PROMTOOL) $(PROMTOOL): mkdir -p $(FIRST_GOPATH)/bin curl -fsS -L $(PROMTOOL_URL) | tar -xvzf - -C $(FIRST_GOPATH)/bin --strip 1 "prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM)/promtool" node_exporter-1.7.0/Makefile.common000066400000000000000000000216551452426057600173470ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A common Makefile that includes rules to be reused in different prometheus projects. # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! # Example usage : # Create the main Makefile in the root project directory. # include Makefile.common # customTarget: # @echo ">> Running customTarget" # # Ensure GOBIN is not set during build so that promu is installed to the correct path unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... ifeq (arm, $(GOHOSTARCH)) GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. 
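# Net effect of the conditionals below: setting SKIP_GOLANGCI_LINT disables linting entirely; otherwise golangci-lint is used locally, and in CircleCI only when no golangci-lint GitHub Actions workflow file is present.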
ifneq (,$(SKIP_GOLANGCI_LINT)) GOLANGCI_LINT := else ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint endif endif endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKERFILE_PATH ?= ./Dockerfile DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 test-flags := -race endif endif # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; .PHONY: common-all common-all: precheck style check_license lint yamllint unused build test .PHONY: common-style common-style: @echo ">> checking code style" @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ if [ -n "$${fmtRes}" ]; then \ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ echo "Please ensure you are using $$($(GO) version) for formatting code."; \ exit 1; \ fi .PHONY: common-check_license common-check_license: @echo ">> checking license header" @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi .PHONY: common-deps common-deps: @echo ">> getting dependencies" $(GO) mod download .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ $(GO) get -d $$m; \ done $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format common-format: @echo ">> formatting code" $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . endif # For backward-compatibility. 
.PHONY: common-staticcheck common-staticcheck: lint .PHONY: common-unused common-unused: @echo ">> running check for unused/missing packages in go.mod" $(GO) mod tidy @git diff --exit-code -- go.sum go.mod .PHONY: common-build common-build: promu @echo ">> building binaries" $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ $(DOCKERBUILD_CONTEXT) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) $(PROMU): $(eval PROMU_TMP := $(shell mktemp -d)) curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) mkdir -p $(FIRST_GOPATH)/bin cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh ifdef GOLANGCI_LINT $(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ | sed -e '/install -d/d' \ | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif .PHONY: precheck precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ exit 1; \ fi endef node_exporter-1.7.0/NOTICE000066400000000000000000000007171452426057600153200ustar00rootroot00000000000000Configurable modular Prometheus exporter for various node metrics. Copyright 2013-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). 
The following components are included in this product: wifi https://github.com/mdlayher/wifi Copyright 2016-2017 Matt Layher Licensed under the MIT License netlink https://github.com/mdlayher/netlink Copyright 2016-2017 Matt Layher Licensed under the MIT License node_exporter-1.7.0/README.md000066400000000000000000000454201452426057600156730ustar00rootroot00000000000000# Node exporter [![CircleCI](https://circleci.com/gh/prometheus/node_exporter/tree/master.svg?style=shield)][circleci] [![Buildkite status](https://badge.buildkite.com/94a0c1fb00b1f46883219c256efe9ce01d63b6505f3a942f9b.svg)](https://buildkite.com/prometheus/node-exporter) [![Docker Repository on Quay](https://quay.io/repository/prometheus/node-exporter/status)][quay] [![Docker Pulls](https://img.shields.io/docker/pulls/prom/node-exporter.svg?maxAge=604800)][hub] [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/node_exporter)][goreportcard] Prometheus exporter for hardware and OS metrics exposed by \*NIX kernels, written in Go with pluggable metric collectors. The [Windows exporter](https://github.com/prometheus-community/windows_exporter) is recommended for Windows users. To expose NVIDIA GPU metrics, [prometheus-dcgm](https://github.com/NVIDIA/dcgm-exporter) can be used. ## Installation and Usage If you are new to Prometheus and `node_exporter`, there is a [simple step-by-step guide](https://prometheus.io/docs/guides/node-exporter/). The `node_exporter` listens on HTTP port 9100 by default. See the `--help` output for more options. ### Ansible For automated installs with [Ansible](https://www.ansible.com/), there is the [Prometheus Community role](https://github.com/prometheus-community/ansible). ### Docker The `node_exporter` is designed to monitor the host system. It's not recommended to deploy it as a Docker container because it requires access to the host system. For situations where Docker deployment is needed, some extra flags must be used to allow the `node_exporter` access to the host namespaces. Be aware that any non-root mount points you want to monitor will need to be bind-mounted into the container. If you start the container for host monitoring, specify the `path.rootfs` argument. This argument must match the path in the bind-mount of the host root. The node\_exporter will use `path.rootfs` as the prefix to access the host filesystem. ```bash docker run -d \ --net="host" \ --pid="host" \ -v "/:/host:ro,rslave" \ quay.io/prometheus/node-exporter:latest \ --path.rootfs=/host ``` For Docker Compose, similar flag changes are needed. ```yaml --- version: '3.8' services: node_exporter: image: quay.io/prometheus/node-exporter:latest container_name: node_exporter command: - '--path.rootfs=/host' network_mode: host pid: host restart: unless-stopped volumes: - '/:/host:ro,rslave' ``` On some systems, the `timex` collector requires an additional Docker flag, `--cap-add=SYS_TIME`, in order to access the required syscalls. ## Collectors There is varying support for collectors on each operating system. The tables below list all existing collectors and the supported systems. Collectors are enabled by providing a `--collector.<name>` flag. Collectors that are enabled by default can be disabled by providing a `--no-collector.<name>` flag. To enable only some specific collector(s), use `--collector.disable-defaults --collector.<name> ...`. ### Include & Exclude flags A few collectors can be configured to include or exclude certain patterns using dedicated flags.
The exclude flags are used to indicate "all except", while the include flags are used to say "none except". Note that these flags are mutually exclusive on collectors that support both. Example: ```txt --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) ``` List: Collector | Scope | Include Flag | Exclude Flag --- | --- | --- | --- arp | device | --collector.arp.device-include | --collector.arp.device-exclude cpu | bugs | --collector.cpu.info.bugs-include | N/A cpu | flags | --collector.cpu.info.flags-include | N/A diskstats | device | --collector.diskstats.device-include | --collector.diskstats.device-exclude ethtool | device | --collector.ethtool.device-include | --collector.ethtool.device-exclude ethtool | metrics | --collector.ethtool.metrics-include | N/A filesystem | fs-types | N/A | --collector.filesystem.fs-types-exclude filesystem | mount-points | N/A | --collector.filesystem.mount-points-exclude hwmon | chip | --collector.hwmon.chip-include | --collector.hwmon.chip-exclude netdev | device | --collector.netdev.device-include | --collector.netdev.device-exclude qdisk | device | --collector.qdisk.device-include | --collector.qdisk.device-exclude sysctl | all | --collector.sysctl.include | N/A systemd | unit | --collector.systemd.unit-include | --collector.systemd.unit-exclude ### Enabled by default Name | Description | OS ---------|-------------|---- arp | Exposes ARP statistics from `/proc/net/arp`. | Linux bcache | Exposes bcache statistics from `/sys/fs/bcache/`. | Linux bonding | Exposes the number of configured and active slaves of Linux bonding interfaces. | Linux btrfs | Exposes btrfs statistics | Linux boottime | Exposes system boot time derived from the `kern.boottime` sysctl. | Darwin, Dragonfly, FreeBSD, NetBSD, OpenBSD, Solaris conntrack | Shows conntrack statistics (does nothing if no `/proc/sys/net/netfilter/` present). | Linux cpu | Exposes CPU statistics | Darwin, Dragonfly, FreeBSD, Linux, Solaris, OpenBSD cpufreq | Exposes CPU frequency statistics | Linux, Solaris diskstats | Exposes disk I/O statistics. | Darwin, Linux, OpenBSD dmi | Expose Desktop Management Interface (DMI) info from `/sys/class/dmi/id/` | Linux edac | Exposes error detection and correction statistics. | Linux entropy | Exposes available entropy. | Linux exec | Exposes execution statistics. | Dragonfly, FreeBSD fibrechannel | Exposes fibre channel information and statistics from `/sys/class/fc_host/`. | Linux filefd | Exposes file descriptor statistics from `/proc/sys/fs/file-nr`. | Linux filesystem | Exposes filesystem statistics, such as disk space used. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD hwmon | Expose hardware monitoring and sensor data from `/sys/class/hwmon/`. | Linux infiniband | Exposes network statistics specific to InfiniBand and Intel OmniPath configurations. | Linux ipvs | Exposes IPVS status from `/proc/net/ip_vs` and stats from `/proc/net/ip_vs_stats`. | Linux loadavg | Exposes load average. | Darwin, Dragonfly, FreeBSD, Linux, NetBSD, OpenBSD, Solaris mdadm | Exposes statistics about devices in `/proc/mdstat` (does nothing if no `/proc/mdstat` present). | Linux meminfo | Exposes memory statistics. | Darwin, Dragonfly, FreeBSD, Linux, OpenBSD netclass | Exposes network interface info from `/sys/class/net/` | Linux netdev | Exposes network interface statistics such as bytes transferred. 
| Darwin, Dragonfly, FreeBSD, Linux, OpenBSD netisr | Exposes netisr statistics | FreeBSD netstat | Exposes network statistics from `/proc/net/netstat`. This is the same information as `netstat -s`. | Linux nfs | Exposes NFS client statistics from `/proc/net/rpc/nfs`. This is the same information as `nfsstat -c`. | Linux nfsd | Exposes NFS kernel server statistics from `/proc/net/rpc/nfsd`. This is the same information as `nfsstat -s`. | Linux nvme | Exposes NVMe info from `/sys/class/nvme/` | Linux os | Expose OS release info from `/etc/os-release` or `/usr/lib/os-release` | _any_ powersupplyclass | Exposes Power Supply statistics from `/sys/class/power_supply` | Linux pressure | Exposes pressure stall statistics from `/proc/pressure/`. | Linux (kernel 4.20+ and/or [CONFIG\_PSI](https://www.kernel.org/doc/html/latest/accounting/psi.html)) rapl | Exposes various statistics from `/sys/class/powercap`. | Linux schedstat | Exposes task scheduler statistics from `/proc/schedstat`. | Linux selinux | Exposes SELinux statistics. | Linux sockstat | Exposes various statistics from `/proc/net/sockstat`. | Linux softnet | Exposes statistics from `/proc/net/softnet_stat`. | Linux stat | Exposes various statistics from `/proc/stat`. This includes boot time, forks and interrupts. | Linux tapestats | Exposes statistics from `/sys/class/scsi_tape`. | Linux textfile | Exposes statistics read from local disk. The `--collector.textfile.directory` flag must be set. | _any_ thermal | Exposes thermal statistics like `pmset -g therm`. | Darwin thermal\_zone | Exposes thermal zone & cooling device statistics from `/sys/class/thermal`. | Linux time | Exposes the current system time. | _any_ timex | Exposes selected adjtimex(2) system call stats. | Linux udp_queues | Exposes UDP total lengths of the rx_queue and tx_queue from `/proc/net/udp` and `/proc/net/udp6`. | Linux uname | Exposes system information as provided by the uname system call. | Darwin, FreeBSD, Linux, OpenBSD vmstat | Exposes statistics from `/proc/vmstat`. | Linux xfs | Exposes XFS runtime statistics. | Linux (kernel 4.4+) zfs | Exposes [ZFS](http://open-zfs.org/) performance statistics. | FreeBSD, [Linux](http://zfsonlinux.org/), Solaris ### Disabled by default `node_exporter` also implements a number of collectors that are disabled by default. Reasons for this vary by collector, and may include: * High cardinality * Prolonged runtime that exceeds the Prometheus `scrape_interval` or `scrape_timeout` * Significant resource demands on the host You can enable additional collectors as desired by adding them to your init system's or service supervisor's startup configuration for `node_exporter` but caution is advised. Enable at most one at a time, testing first on a non-production system, then by hand on a single production node. When enabling additional collectors, you should carefully monitor the change by observing the ` scrape_duration_seconds` metric to ensure that collection completes and does not time out. In addition, monitor the `scrape_samples_post_metric_relabeling` metric to see the changes in cardinality. Name | Description | OS ---------|-------------|---- buddyinfo | Exposes statistics of memory fragments as reported by /proc/buddyinfo. | Linux cgroups | A summary of the number of active and enabled cgroups | Linux cpu\_vulnerabilities | Exposes CPU vulnerability information from sysfs. 
| Linux devstat | Exposes device statistics | Dragonfly, FreeBSD drm | Expose GPU metrics using sysfs / DRM; `amdgpu` is the only driver which exposes this information through DRM | Linux drbd | Exposes Distributed Replicated Block Device statistics (to version 8.4) | Linux ethtool | Exposes network interface information and network driver statistics equivalent to `ethtool`, `ethtool -S`, and `ethtool -i`. | Linux interrupts | Exposes detailed interrupts statistics. | Linux, OpenBSD ksmd | Exposes kernel and system statistics from `/sys/kernel/mm/ksm`. | Linux lnstat | Exposes stats from `/proc/net/stat/`. | Linux logind | Exposes session counts from [logind](http://www.freedesktop.org/wiki/Software/systemd/logind/). | Linux meminfo\_numa | Exposes memory statistics from `/sys/devices/system/node/node[0-9]*/meminfo`, `/sys/devices/system/node/node[0-9]*/numastat`. | Linux mountstats | Exposes filesystem statistics from `/proc/self/mountstats`. Exposes detailed NFS client statistics. | Linux network_route | Exposes the routing table as metrics | Linux perf | Exposes perf-based metrics (Warning: Metrics are dependent on kernel configuration and settings). | Linux processes | Exposes aggregate process statistics from `/proc`. | Linux qdisc | Exposes [queuing discipline](https://en.wikipedia.org/wiki/Network_scheduler#Linux_kernel) statistics | Linux slabinfo | Exposes slab statistics from `/proc/slabinfo`. Note that permission of `/proc/slabinfo` is usually 0400, so set it appropriately. | Linux softirqs | Exposes detailed softirq statistics from `/proc/softirqs`. | Linux sysctl | Expose sysctl values from `/proc/sys`. Use `--collector.sysctl.include(-info)` to configure. | Linux systemd | Exposes service and system status from [systemd](http://www.freedesktop.org/wiki/Software/systemd/). | Linux tcpstat | Exposes TCP connection status information from `/proc/net/tcp` and `/proc/net/tcp6`. (Warning: the current version has potential performance issues in high load situations.) | Linux wifi | Exposes WiFi device and station statistics. | Linux zoneinfo | Exposes NUMA memory zone metrics. | Linux ### Deprecated These collectors are deprecated and will be removed in the next major release. Name | Description | OS ---------|-------------|---- ntp | Exposes local NTP daemon health to check [time](./docs/TIME.md) | _any_ runit | Exposes service status from [runit](http://smarden.org/runit/). | _any_ supervisord | Exposes service status from [supervisord](http://supervisord.org/). | _any_ ### Perf Collector The `perf` collector may not work out of the box on some Linux systems due to kernel configuration and security settings. To allow access, set the following `sysctl` parameter: ``` sysctl -w kernel.perf_event_paranoid=X ``` - 2 allow only user-space measurements (default since Linux 4.6). - 1 allow both kernel and user measurements (default before Linux 4.6). - 0 allow access to CPU-specific data but not raw tracepoint samples. - -1 no restrictions. Depending on the configured value, different metrics will be available; for most cases `0` will provide the most complete set. For more information see [`man 2 perf_event_open`](http://man7.org/linux/man-pages/man2/perf_event_open.2.html). By default, the `perf` collector will only collect metrics of the CPUs that `node_exporter` is running on (i.e. [`runtime.NumCPU`](https://golang.org/pkg/runtime/#NumCPU)). If this is insufficient (e.g.
if you run `node_exporter` with its CPU affinity set to specific CPUs), you can specify a list of alternate CPUs by using the `--collector.perf.cpus` flag. For example, to collect metrics on CPUs 2-6, you would specify: `--collector.perf --collector.perf.cpus=2-6`. The CPU configuration is zero-indexed and can also take a stride value; e.g. `--collector.perf --collector.perf.cpus=1-10:5` would collect on CPUs 1, 5, and 10. The `perf` collector is also able to collect [tracepoint](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html) counts when using the `--collector.perf.tracepoint` flag. Tracepoints can be found using [`perf list`](http://man7.org/linux/man-pages/man1/perf.1.html) or from debugfs. An example usage of this would be `--collector.perf.tracepoint="sched:sched_process_exec"`. ### Sysctl Collector The `sysctl` collector can be enabled with `--collector.sysctl`. It supports exposing numeric sysctl values as metrics using the `--collector.sysctl.include` flag and string values as info metrics by using the `--collector.sysctl.include-info` flag. The flags can be repeated. For sysctls with multiple numeric values, an optional mapping can be given to expose each value as its own metric. Otherwise an `index` label is used to identify the different fields. #### Examples ##### Numeric values ###### Single values Using `--collector.sysctl.include=vm.user_reserve_kbytes`: `vm.user_reserve_kbytes = 131072` -> `node_sysctl_vm_user_reserve_kbytes 131072` ###### Multiple values A sysctl can contain multiple values, for example: ``` net.ipv4.tcp_rmem = 4096 131072 6291456 ``` Using `--collector.sysctl.include=net.ipv4.tcp_rmem` the collector will expose: ``` node_sysctl_net_ipv4_tcp_rmem{index="0"} 4096 node_sysctl_net_ipv4_tcp_rmem{index="1"} 131072 node_sysctl_net_ipv4_tcp_rmem{index="2"} 6291456 ``` If the indexes have a defined meaning, as in this case, the values can be mapped to multiple metrics by appending the mapping to the `--collector.sysctl.include` flag: Using `--collector.sysctl.include=net.ipv4.tcp_rmem:min,default,max` the collector will expose: ``` node_sysctl_net_ipv4_tcp_rmem_min 4096 node_sysctl_net_ipv4_tcp_rmem_default 131072 node_sysctl_net_ipv4_tcp_rmem_max 6291456 ``` ##### String values String values need to be exposed as an info metric. The user selects them by using the `--collector.sysctl.include-info` flag. ###### Single values `kernel.core_pattern = core` -> `node_sysctl_info{key="kernel.core_pattern_info", value="core"} 1` ###### Multiple values Given the following sysctl: ``` kernel.seccomp.actions_avail = kill_process kill_thread trap errno trace log allow ``` Setting `--collector.sysctl.include-info=kernel.seccomp.actions_avail` will yield: ``` node_sysctl_info{key="kernel.seccomp.actions_avail", index="0", value="kill_process"} 1 node_sysctl_info{key="kernel.seccomp.actions_avail", index="1", value="kill_thread"} 1 ... ``` ### Textfile Collector The `textfile` collector is similar to the [Pushgateway](https://github.com/prometheus/pushgateway), in that it allows exporting of statistics from batch jobs. It can also be used to export static metrics, such as what role a machine has. The Pushgateway should be used for service-level metrics. The `textfile` module is for metrics that are tied to a machine. To use it, set the `--collector.textfile.directory` flag on the `node_exporter` commandline.
The collector will parse all files in that directory matching the glob `*.prom` using the [text format](http://prometheus.io/docs/instrumenting/exposition_formats/). **Note:** Timestamps are not supported. To atomically push completion time for a cron job: ``` echo my_batch_job_completion_time $(date +%s) > /path/to/directory/my_batch_job.prom.$$ mv /path/to/directory/my_batch_job.prom.$$ /path/to/directory/my_batch_job.prom ``` To statically set roles for a machine using labels: ``` echo 'role{role="application_server"} 1' > /path/to/directory/role.prom.$$ mv /path/to/directory/role.prom.$$ /path/to/directory/role.prom ``` ### Filtering enabled collectors The `node_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families. For advanced use the `node_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#). ``` params: collect[]: - foo - bar ``` This can be useful for having different Prometheus servers collect specific metrics from nodes. ## Development building and running Prerequisites: * [Go compiler](https://golang.org/dl/) * RHEL/CentOS: `glibc-static` package. Building: git clone https://github.com/prometheus/node_exporter.git cd node_exporter make build ./node_exporter To see all available configuration flags: ./node_exporter -h ## Running tests make test ## TLS endpoint ** EXPERIMENTAL ** The exporter supports TLS via a new web configuration file. ```console ./node_exporter --web.config.file=web-config.yml ``` See the [exporter-toolkit web-configuration](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md) for more details. [travis]: https://travis-ci.org/prometheus/node_exporter [hub]: https://hub.docker.com/r/prom/node-exporter/ [circleci]: https://circleci.com/gh/prometheus/node_exporter [quay]: https://quay.io/repository/prometheus/node-exporter [goreportcard]: https://goreportcard.com/report/github.com/prometheus/node_exporter node_exporter-1.7.0/SECURITY.md000066400000000000000000000002541452426057600162010ustar00rootroot00000000000000# Reporting a security issue The Prometheus security policy, including how to report vulnerabilities, can be found here: node_exporter-1.7.0/VERSION000066400000000000000000000000061452426057600154530ustar00rootroot000000000000001.7.0 node_exporter-1.7.0/checkmetrics.sh000077500000000000000000000006741452426057600174210ustar00rootroot00000000000000#!/usr/bin/env bash if [[ ( -z "$1" ) || ( -z "$2" ) ]]; then echo "usage: ./checkmetrics.sh /usr/bin/promtool e2e-output.txt" exit 1 fi # Ignore known issues in auto-generated and network specific collectors. lint=$($1 check metrics < "$2" 2>&1 | grep -v -E "^node_(entropy|memory|netstat|wifi_station)_") if [[ -n $lint ]]; then echo -e "Some Prometheus metrics do not follow best practices:\n" echo "$lint" exit 1 fi node_exporter-1.7.0/collector/000077500000000000000000000000001452426057600163755ustar00rootroot00000000000000node_exporter-1.7.0/collector/arp_linux.go000066400000000000000000000075701452426057600207360ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noarp // +build !noarp package collector import ( "errors" "fmt" "net" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/jsimonetti/rtnetlink" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" "golang.org/x/sys/unix" ) var ( arpDeviceInclude = kingpin.Flag("collector.arp.device-include", "Regexp of arp devices to include (mutually exclusive to device-exclude).").String() arpDeviceExclude = kingpin.Flag("collector.arp.device-exclude", "Regexp of arp devices to exclude (mutually exclusive to device-include).").String() arpNetlink = kingpin.Flag("collector.arp.netlink", "Use netlink to gather stats instead of /proc/net/arp.").Default("true").Bool() ) type arpCollector struct { fs procfs.FS deviceFilter deviceFilter entries *prometheus.Desc logger log.Logger } func init() { registerCollector("arp", defaultEnabled, NewARPCollector) } // NewARPCollector returns a new Collector exposing ARP stats. func NewARPCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &arpCollector{ fs: fs, deviceFilter: newDeviceFilter(*arpDeviceExclude, *arpDeviceInclude), entries: prometheus.NewDesc( prometheus.BuildFQName(namespace, "arp", "entries"), "ARP entries by device", []string{"device"}, nil, ), logger: logger, }, nil } func getTotalArpEntries(deviceEntries []procfs.ARPEntry) map[string]uint32 { entries := make(map[string]uint32) for _, device := range deviceEntries { entries[device.Device]++ } return entries } func getTotalArpEntriesRTNL() (map[string]uint32, error) { conn, err := rtnetlink.Dial(nil) if err != nil { return nil, err } defer conn.Close() neighbors, err := conn.Neigh.List() if err != nil { return nil, err } ifIndexEntries := make(map[uint32]uint32) for _, n := range neighbors { // Neighbors will also contain IPv6 neighbors, but since this is purely an ARP collector, // restrict to AF_INET. Also skip entries which have state NUD_NOARP to conform to output // of /proc/net/arp. if n.Family == unix.AF_INET && n.State&unix.NUD_NOARP == 0 { ifIndexEntries[n.Index]++ } } enumEntries := make(map[string]uint32) // Convert interface indexes to names. 
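// Interfaces can disappear between the netlink neighbor dump above and this lookup, so a missing interface is skipped rather than treated as a hard error.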
for ifIndex, entryCount := range ifIndexEntries { iface, err := net.InterfaceByIndex(int(ifIndex)) if err != nil { if errors.Unwrap(err).Error() == "no such network interface" { continue } return nil, err } enumEntries[iface.Name] = entryCount } return enumEntries, nil } func (c *arpCollector) Update(ch chan<- prometheus.Metric) error { var enumeratedEntry map[string]uint32 if *arpNetlink { var err error enumeratedEntry, err = getTotalArpEntriesRTNL() if err != nil { return fmt.Errorf("could not get ARP entries: %w", err) } } else { entries, err := c.fs.GatherARPEntries() if err != nil { return fmt.Errorf("could not get ARP entries: %w", err) } enumeratedEntry = getTotalArpEntries(entries) } for device, entryCount := range enumeratedEntry { if c.deviceFilter.ignored(device) { continue } ch <- prometheus.MustNewConstMetric( c.entries, prometheus.GaugeValue, float64(entryCount), device) } return nil } node_exporter-1.7.0/collector/bcache_linux.go000066400000000000000000000267421452426057600213630ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nobcache // +build !nobcache package collector import ( "fmt" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/bcache" ) var ( priorityStats = kingpin.Flag("collector.bcache.priorityStats", "Expose expensive priority stats.").Bool() ) func init() { registerCollector("bcache", defaultEnabled, NewBcacheCollector) } // A bcacheCollector is a Collector which gathers metrics from Linux bcache. type bcacheCollector struct { fs bcache.FS logger log.Logger } // NewBcacheCollector returns a newly allocated bcacheCollector. // It exposes a number of Linux bcache statistics. func NewBcacheCollector(logger log.Logger) (Collector, error) { fs, err := bcache.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &bcacheCollector{ fs: fs, logger: logger, }, nil } // Update reads and exposes bcache stats. // It implements the Collector interface. 
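// When the --collector.bcache.priorityStats flag is set, the expensive priority_stats files are read as well; otherwise they are skipped.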
func (c *bcacheCollector) Update(ch chan<- prometheus.Metric) error { var stats []*bcache.Stats var err error if *priorityStats { stats, err = c.fs.Stats() } else { stats, err = c.fs.StatsWithoutPriority() } if err != nil { return fmt.Errorf("failed to retrieve bcache stats: %w", err) } for _, s := range stats { c.updateBcacheStats(ch, s) } return nil } type bcacheMetric struct { name string desc string value float64 metricType prometheus.ValueType extraLabel []string extraLabelValue string } func bcachePeriodStatsToMetric(ps *bcache.PeriodStats, labelValue string) []bcacheMetric { label := []string{"backing_device"} metrics := []bcacheMetric{ { name: "bypassed_bytes_total", desc: "Amount of IO (both reads and writes) that has bypassed the cache.", value: float64(ps.Bypassed), metricType: prometheus.CounterValue, extraLabel: label, extraLabelValue: labelValue, }, { name: "cache_hits_total", desc: "Hits counted per individual IO as bcache sees them.", value: float64(ps.CacheHits), metricType: prometheus.CounterValue, extraLabel: label, extraLabelValue: labelValue, }, { name: "cache_misses_total", desc: "Misses counted per individual IO as bcache sees them.", value: float64(ps.CacheMisses), metricType: prometheus.CounterValue, extraLabel: label, extraLabelValue: labelValue, }, { name: "cache_bypass_hits_total", desc: "Hits for IO intended to skip the cache.", value: float64(ps.CacheBypassHits), metricType: prometheus.CounterValue, extraLabel: label, extraLabelValue: labelValue, }, { name: "cache_bypass_misses_total", desc: "Misses for IO intended to skip the cache.", value: float64(ps.CacheBypassMisses), metricType: prometheus.CounterValue, extraLabel: label, extraLabelValue: labelValue, }, { name: "cache_miss_collisions_total", desc: "Instances where data insertion from cache miss raced with write (data already present).", value: float64(ps.CacheMissCollisions), metricType: prometheus.CounterValue, extraLabel: label, extraLabelValue: labelValue, }, } if ps.CacheReadaheads != 0 { bcacheReadaheadMetrics := []bcacheMetric{ { name: "cache_readaheads_total", desc: "Count of times readahead occurred.", value: float64(ps.CacheReadaheads), metricType: prometheus.CounterValue, extraLabel: label, extraLabelValue: labelValue, }, } metrics = append(metrics, bcacheReadaheadMetrics...) } return metrics } // UpdateBcacheStats collects statistics for one bcache ID. 
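// All metrics carry the filesystem UUID label; per-device metrics additionally carry a backing_device or cache_device label.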
func (c *bcacheCollector) updateBcacheStats(ch chan<- prometheus.Metric, s *bcache.Stats) { const ( subsystem = "bcache" ) var ( devLabel = []string{"uuid"} allMetrics []bcacheMetric metrics []bcacheMetric ) allMetrics = []bcacheMetric{ // metrics in /sys/fs/bcache// { name: "average_key_size_sectors", desc: "Average data per key in the btree (sectors).", value: float64(s.Bcache.AverageKeySize), metricType: prometheus.GaugeValue, }, { name: "btree_cache_size_bytes", desc: "Amount of memory currently used by the btree cache.", value: float64(s.Bcache.BtreeCacheSize), metricType: prometheus.GaugeValue, }, { name: "cache_available_percent", desc: "Percentage of cache device without dirty data, usable for writeback (may contain clean cached data).", value: float64(s.Bcache.CacheAvailablePercent), metricType: prometheus.GaugeValue, }, { name: "congested", desc: "Congestion.", value: float64(s.Bcache.Congested), metricType: prometheus.GaugeValue, }, { name: "root_usage_percent", desc: "Percentage of the root btree node in use (tree depth increases if too high).", value: float64(s.Bcache.RootUsagePercent), metricType: prometheus.GaugeValue, }, { name: "tree_depth", desc: "Depth of the btree.", value: float64(s.Bcache.TreeDepth), metricType: prometheus.GaugeValue, }, // metrics in /sys/fs/bcache//internal/ { name: "active_journal_entries", desc: "Number of journal entries that are newer than the index.", value: float64(s.Bcache.Internal.ActiveJournalEntries), metricType: prometheus.GaugeValue, }, { name: "btree_nodes", desc: "Total nodes in the btree.", value: float64(s.Bcache.Internal.BtreeNodes), metricType: prometheus.GaugeValue, }, { name: "btree_read_average_duration_seconds", desc: "Average btree read duration.", value: float64(s.Bcache.Internal.BtreeReadAverageDurationNanoSeconds) * 1e-9, metricType: prometheus.GaugeValue, }, { name: "cache_read_races_total", desc: "Counts instances where while data was being read from the cache, the bucket was reused and invalidated - i.e. 
where the pointer was stale after the read completed.", value: float64(s.Bcache.Internal.CacheReadRaces), metricType: prometheus.CounterValue, }, } for _, bdev := range s.Bdevs { // metrics in /sys/fs/bcache/// metrics = []bcacheMetric{ { name: "dirty_data_bytes", desc: "Amount of dirty data for this backing device in the cache.", value: float64(bdev.DirtyData), metricType: prometheus.GaugeValue, extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, { name: "dirty_target_bytes", desc: "Current dirty data target threshold for this backing device in bytes.", value: float64(bdev.WritebackRateDebug.Target), metricType: prometheus.GaugeValue, extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, { name: "writeback_rate", desc: "Current writeback rate for this backing device in bytes.", value: float64(bdev.WritebackRateDebug.Rate), metricType: prometheus.GaugeValue, extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, { name: "writeback_rate_proportional_term", desc: "Current result of proportional controller, part of writeback rate", value: float64(bdev.WritebackRateDebug.Proportional), metricType: prometheus.GaugeValue, extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, { name: "writeback_rate_integral_term", desc: "Current result of integral controller, part of writeback rate", value: float64(bdev.WritebackRateDebug.Integral), metricType: prometheus.GaugeValue, extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, { name: "writeback_change", desc: "Last writeback rate change step for this backing device.", value: float64(bdev.WritebackRateDebug.Change), metricType: prometheus.GaugeValue, extraLabel: []string{"backing_device"}, extraLabelValue: bdev.Name, }, } allMetrics = append(allMetrics, metrics...) // metrics in /sys/fs/bcache///stats_total metrics := bcachePeriodStatsToMetric(&bdev.Total, bdev.Name) allMetrics = append(allMetrics, metrics...) } for _, cache := range s.Caches { metrics = []bcacheMetric{ // metrics in /sys/fs/bcache/// { name: "io_errors", desc: "Number of errors that have occurred, decayed by io_error_halflife.", value: float64(cache.IOErrors), metricType: prometheus.GaugeValue, extraLabel: []string{"cache_device"}, extraLabelValue: cache.Name, }, { name: "metadata_written_bytes_total", desc: "Sum of all non data writes (btree writes and all other metadata).", value: float64(cache.MetadataWritten), metricType: prometheus.CounterValue, extraLabel: []string{"cache_device"}, extraLabelValue: cache.Name, }, { name: "written_bytes_total", desc: "Sum of all data that has been written to the cache.", value: float64(cache.Written), metricType: prometheus.CounterValue, extraLabel: []string{"cache_device"}, extraLabelValue: cache.Name, }, } if *priorityStats { // metrics in /sys/fs/bcache///priority_stats priorityStatsMetrics := []bcacheMetric{ { name: "priority_stats_unused_percent", desc: "The percentage of the cache that doesn't contain any data.", value: float64(cache.Priority.UnusedPercent), metricType: prometheus.GaugeValue, extraLabel: []string{"cache_device"}, extraLabelValue: cache.Name, }, { name: "priority_stats_metadata_percent", desc: "Bcache's metadata overhead.", value: float64(cache.Priority.MetadataPercent), metricType: prometheus.GaugeValue, extraLabel: []string{"cache_device"}, extraLabelValue: cache.Name, }, } metrics = append(metrics, priorityStatsMetrics...) } allMetrics = append(allMetrics, metrics...) 
} for _, m := range allMetrics { labels := append(devLabel, m.extraLabel...) desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, m.name), m.desc, labels, nil, ) labelValues := []string{s.Name} if m.extraLabelValue != "" { labelValues = append(labelValues, m.extraLabelValue) } ch <- prometheus.MustNewConstMetric( desc, m.metricType, m.value, labelValues..., ) } } node_exporter-1.7.0/collector/bonding_linux.go000066400000000000000000000064101452426057600215640ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nobonding // +build !nobonding package collector import ( "errors" "fmt" "os" "path/filepath" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) type bondingCollector struct { slaves, active typedDesc logger log.Logger } func init() { registerCollector("bonding", defaultEnabled, NewBondingCollector) } // NewBondingCollector returns a newly allocated bondingCollector. // It exposes the number of configured and active slave of linux bonding interfaces. func NewBondingCollector(logger log.Logger) (Collector, error) { return &bondingCollector{ slaves: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "bonding", "slaves"), "Number of configured slaves per bonding interface.", []string{"master"}, nil, ), prometheus.GaugeValue}, active: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "bonding", "active"), "Number of active slaves per bonding interface.", []string{"master"}, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } // Update reads and exposes bonding states, implements Collector interface. Caution: This works only on linux. func (c *bondingCollector) Update(ch chan<- prometheus.Metric) error { statusfile := sysFilePath("class/net") bondingStats, err := readBondingStats(statusfile) if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "Not collecting bonding, file does not exist", "file", statusfile) return ErrNoData } return err } for master, status := range bondingStats { ch <- c.slaves.mustNewConstMetric(float64(status[0]), master) ch <- c.active.mustNewConstMetric(float64(status[1]), master) } return nil } func readBondingStats(root string) (status map[string][2]int, err error) { status = map[string][2]int{} masters, err := os.ReadFile(filepath.Join(root, "bonding_masters")) if err != nil { return nil, err } for _, master := range strings.Fields(string(masters)) { slaves, err := os.ReadFile(filepath.Join(root, master, "bonding", "slaves")) if err != nil { return nil, err } sstat := [2]int{0, 0} for _, slave := range strings.Fields(string(slaves)) { state, err := os.ReadFile(filepath.Join(root, master, fmt.Sprintf("lower_%s", slave), "bonding_slave", "mii_status")) if errors.Is(err, os.ErrNotExist) { // some older? 
kernels use slave_ prefix state, err = os.ReadFile(filepath.Join(root, master, fmt.Sprintf("slave_%s", slave), "bonding_slave", "mii_status")) } if err != nil { return nil, err } sstat[0]++ if strings.TrimSpace(string(state)) == "up" { sstat[1]++ } } status[master] = sstat } return status, err } node_exporter-1.7.0/collector/bonding_linux_test.go000066400000000000000000000021611452426057600226220ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nobonding // +build !nobonding package collector import ( "testing" ) func TestBonding(t *testing.T) { bondingStats, err := readBondingStats("fixtures/sys/class/net") if err != nil { t.Fatal(err) } if bondingStats["bond0"][0] != 0 || bondingStats["bond0"][1] != 0 { t.Fatal("bond0 in unexpected state") } if bondingStats["int"][0] != 2 || bondingStats["int"][1] != 1 { t.Fatal("int in unexpected state") } if bondingStats["dmz"][0] != 2 || bondingStats["dmz"][1] != 2 { t.Fatal("dmz in unexpected state") } } node_exporter-1.7.0/collector/boot_time_bsd.go000066400000000000000000000034171452426057600215420ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (freebsd || dragonfly || openbsd || netbsd || darwin) && !noboottime // +build freebsd dragonfly openbsd netbsd darwin // +build !noboottime package collector import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) type bootTimeCollector struct { logger log.Logger } func init() { registerCollector("boottime", defaultEnabled, newBootTimeCollector) } // newBootTimeCollector returns a new Collector exposing system boot time on BSD systems. func newBootTimeCollector(logger log.Logger) (Collector, error) { return &bootTimeCollector{ logger: logger, }, nil } // Update pushes boot time onto ch func (c *bootTimeCollector) Update(ch chan<- prometheus.Metric) error { tv, err := unix.SysctlTimeval("kern.boottime") if err != nil { return err } // This conversion maintains the usec precision. Using the time // package did not. 
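// For example, Sec=1257894000 and Usec=123456 yield 1257894000.123456.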
v := float64(tv.Sec) + (float64(tv.Usec) / float64(1000*1000)) ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "boot_time_seconds"), "Unix time of last boot, including microseconds.", nil, nil, ), prometheus.GaugeValue, v) return nil } node_exporter-1.7.0/collector/boot_time_solaris.go000066400000000000000000000033601452426057600224430ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noboottime // +build !noboottime package collector import ( "github.com/go-kit/log" "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" ) type bootTimeCollector struct { boottime typedDesc logger log.Logger } func init() { registerCollector("boottime", defaultEnabled, newBootTimeCollector) } func newBootTimeCollector(logger log.Logger) (Collector, error) { return &bootTimeCollector{ boottime: typedDesc{ prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "boot_time_seconds"), "Unix time of last boot, including microseconds.", nil, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } // newBootTimeCollector returns a new Collector exposing system boot time on Solaris systems. // Update pushes boot time onto ch func (c *bootTimeCollector) Update(ch chan<- prometheus.Metric) error { tok, err := kstat.Open() if err != nil { return err } defer tok.Close() ks, err := tok.Lookup("unix", 0, "system_misc") if err != nil { return err } v, err := ks.GetNamed("boot_time") if err != nil { return err } ch <- c.boottime.mustNewConstMetric(float64(v.UintVal)) return nil } node_exporter-1.7.0/collector/btrfs_linux.go000066400000000000000000000272251452426057600212730ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nobtrfs // +build !nobtrfs package collector import ( "fmt" "path" "strings" "syscall" dennwc "github.com/dennwc/btrfs" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/btrfs" ) // A btrfsCollector is a Collector which gathers metrics from Btrfs filesystems. type btrfsCollector struct { fs btrfs.FS logger log.Logger } func init() { registerCollector("btrfs", defaultEnabled, NewBtrfsCollector) } // NewBtrfsCollector returns a new Collector exposing Btrfs statistics. 
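// Statistics come from sysfs and, where permissions allow, are supplemented with per-device usage and error counters gathered via btrfs ioctls on mounted filesystems.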
func NewBtrfsCollector(logger log.Logger) (Collector, error) { fs, err := btrfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &btrfsCollector{ fs: fs, logger: logger, }, nil } // Update retrieves and exports Btrfs statistics. // It implements Collector. func (c *btrfsCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.Stats() if err != nil { return fmt.Errorf("failed to retrieve Btrfs stats from procfs: %w", err) } ioctlStatsMap, err := c.getIoctlStats() if err != nil { level.Debug(c.logger).Log( "msg", "Error querying btrfs device stats with ioctl", "err", err) ioctlStatsMap = make(map[string]*btrfsIoctlFsStats) } for _, s := range stats { // match up procfs and ioctl info by filesystem UUID (without dashes) var fsUUID = strings.Replace(s.UUID, "-", "", -1) ioctlStats := ioctlStatsMap[fsUUID] c.updateBtrfsStats(ch, s, ioctlStats) } return nil } type btrfsIoctlFsDevStats struct { path string uuid string bytesUsed uint64 totalBytes uint64 // The error stats below match the following upstream lists: // https://github.com/dennwc/btrfs/blob/b3db0b2dedac3bf580f412034d77e0bf4b420167/btrfs.go#L132-L140 // https://github.com/torvalds/linux/blob/70d605cbeecb408dd884b1f0cd3963eeeaac144c/include/uapi/linux/btrfs.h#L680-L692 writeErrs uint64 readErrs uint64 flushErrs uint64 corruptionErrs uint64 generationErrs uint64 } type btrfsIoctlFsStats struct { uuid string devices []btrfsIoctlFsDevStats } func (c *btrfsCollector) getIoctlStats() (map[string]*btrfsIoctlFsStats, error) { // Instead of introducing more ioctl calls to scan for all btrfs // filesystems re-use our mount point utils to find known mounts mountsList, err := mountPointDetails(c.logger) if err != nil { return nil, err } // Track devices we have successfully scanned, by device path. devicesDone := make(map[string]struct{}) // Filesystems scann results by UUID. fsStats := make(map[string]*btrfsIoctlFsStats) for _, mount := range mountsList { if mount.fsType != "btrfs" { continue } if _, found := devicesDone[mount.device]; found { // We already found this filesystem by another mount point. continue } mountPath := rootfsFilePath(mount.mountPoint) fs, err := dennwc.Open(mountPath, true) if err != nil { // Failed to open this mount point, maybe we didn't have permission // maybe we'll find another mount point for this FS later. 
level.Debug(c.logger).Log( "msg", "Error inspecting btrfs mountpoint", "mountPoint", mountPath, "err", err) continue } defer fs.Close() fsInfo, err := fs.Info() if err != nil { // Failed to get the FS info for some reason, // perhaps it'll work with a different mount point level.Debug(c.logger).Log( "msg", "Error querying btrfs filesystem", "mountPoint", mountPath, "err", err) continue } fsID := fsInfo.FSID.String() if _, found := fsStats[fsID]; found { // We already found this filesystem by another mount point continue } deviceStats, err := c.getIoctlDeviceStats(fs, &fsInfo) if err != nil { level.Debug(c.logger).Log( "msg", "Error querying btrfs device stats", "mountPoint", mountPath, "err", err) continue } devicesDone[mount.device] = struct{}{} fsStats[fsID] = &btrfsIoctlFsStats{ uuid: fsID, devices: deviceStats, } } return fsStats, nil } func (c *btrfsCollector) getIoctlDeviceStats(fs *dennwc.FS, fsInfo *dennwc.Info) ([]btrfsIoctlFsDevStats, error) { devices := make([]btrfsIoctlFsDevStats, 0, fsInfo.NumDevices) for i := uint64(0); i <= fsInfo.MaxID; i++ { deviceInfo, err := fs.GetDevInfo(i) if err != nil { if errno, ok := err.(syscall.Errno); ok && errno == syscall.ENODEV { // Device IDs do not consistently start at 0, nor are ranges contiguous, so we expect this. continue } return nil, err } deviceStats, err := fs.GetDevStats(i) if err != nil { return nil, err } devices = append(devices, btrfsIoctlFsDevStats{ path: deviceInfo.Path, uuid: deviceInfo.UUID.String(), bytesUsed: deviceInfo.BytesUsed, totalBytes: deviceInfo.TotalBytes, writeErrs: deviceStats.WriteErrs, readErrs: deviceStats.ReadErrs, flushErrs: deviceStats.FlushErrs, corruptionErrs: deviceStats.CorruptionErrs, generationErrs: deviceStats.GenerationErrs, }) if uint64(len(devices)) == fsInfo.NumDevices { break } } return devices, nil } // btrfsMetric represents a single Btrfs metric that is converted into a Prometheus Metric. type btrfsMetric struct { name string metricType prometheus.ValueType desc string value float64 extraLabel []string extraLabelValue []string } // updateBtrfsStats collects statistics for one Btrfs filesystem. func (c *btrfsCollector) updateBtrfsStats(ch chan<- prometheus.Metric, s *btrfs.Stats, ioctlStats *btrfsIoctlFsStats) { const subsystem = "btrfs" // Basic information about the filesystem. devLabels := []string{"uuid"} // Retrieve the metrics. metrics := c.getMetrics(s, ioctlStats) // Convert all gathered metrics to Prometheus Metrics and add to channel. for _, m := range metrics { labels := append(devLabels, m.extraLabel...) desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, m.name), m.desc, labels, nil, ) labelValues := []string{s.UUID} if len(m.extraLabelValue) > 0 { labelValues = append(labelValues, m.extraLabelValue...) } ch <- prometheus.MustNewConstMetric( desc, m.metricType, m.value, labelValues..., ) } } // getMetrics returns metrics for the given Btrfs statistics. func (c *btrfsCollector) getMetrics(s *btrfs.Stats, ioctlStats *btrfsIoctlFsStats) []btrfsMetric { metrics := []btrfsMetric{ { name: "info", desc: "Filesystem information", value: 1, metricType: prometheus.GaugeValue, extraLabel: []string{"label"}, extraLabelValue: []string{s.Label}, }, { name: "global_rsv_size_bytes", desc: "Size of global reserve.", metricType: prometheus.GaugeValue, value: float64(s.Allocation.GlobalRsvSize), }, } // Information about data, metadata and system data. metrics = append(metrics, c.getAllocationStats("data", s.Allocation.Data)...)
metrics = append(metrics, c.getAllocationStats("metadata", s.Allocation.Metadata)...) metrics = append(metrics, c.getAllocationStats("system", s.Allocation.System)...) // Information about devices. if ioctlStats == nil { for n, dev := range s.Devices { metrics = append(metrics, btrfsMetric{ name: "device_size_bytes", desc: "Size of a device that is part of the filesystem.", metricType: prometheus.GaugeValue, value: float64(dev.Size), extraLabel: []string{"device"}, extraLabelValue: []string{n}, }) } return metrics } for _, dev := range ioctlStats.devices { // trim the path prefix from the device name so the value should match // the value used in the fallback branch above. // e.g. /dev/sda -> sda, /rootfs/dev/md1 -> md1 _, device := path.Split(dev.path) extraLabels := []string{"device", "btrfs_dev_uuid"} extraLabelValues := []string{device, dev.uuid} metrics = append(metrics, btrfsMetric{ name: "device_size_bytes", desc: "Size of a device that is part of the filesystem.", metricType: prometheus.GaugeValue, value: float64(dev.totalBytes), extraLabel: extraLabels, extraLabelValue: extraLabelValues, }, // A bytes available metric is probably more useful than a // bytes used metric, because large numbers of bytes will // suffer from floating point representation issues // and we probably care more about the number when it's low anyway btrfsMetric{ name: "device_unused_bytes", desc: "Unused bytes on a device that is part of the filesystem.", metricType: prometheus.GaugeValue, value: float64(dev.totalBytes - dev.bytesUsed), extraLabel: extraLabels, extraLabelValue: extraLabelValues, }) errorLabels := append([]string{"type"}, extraLabels...) values := []uint64{ dev.writeErrs, dev.readErrs, dev.flushErrs, dev.corruptionErrs, dev.generationErrs, } btrfsErrorTypeNames := []string{ "write", "read", "flush", "corruption", "generation", } for i, errorType := range btrfsErrorTypeNames { metrics = append(metrics, btrfsMetric{ name: "device_errors_total", desc: "Errors reported for the device", metricType: prometheus.CounterValue, value: float64(values[i]), extraLabel: errorLabels, extraLabelValue: append([]string{errorType}, extraLabelValues...), }) } } return metrics } // getAllocationStats returns allocation metrics for the given Btrfs Allocation statistics. func (c *btrfsCollector) getAllocationStats(a string, s *btrfs.AllocationStats) []btrfsMetric { metrics := []btrfsMetric{ { name: "reserved_bytes", desc: "Amount of space reserved for a data type", metricType: prometheus.GaugeValue, value: float64(s.ReservedBytes), extraLabel: []string{"block_group_type"}, extraLabelValue: []string{a}, }, } // Add all layout statistics. for layout, stats := range s.Layouts { metrics = append(metrics, c.getLayoutStats(a, layout, stats)...) } return metrics } // getLayoutStats returns metrics for a data layout.
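// The allocation type ("data", "metadata", "system") is exported in the block_group_type label and the layout (for example "raid0" or "raid1") in the mode label.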
func (c *btrfsCollector) getLayoutStats(a, l string, s *btrfs.LayoutUsage) []btrfsMetric { return []btrfsMetric{ { name: "used_bytes", desc: "Amount of used space by a layout/data type", metricType: prometheus.GaugeValue, value: float64(s.UsedBytes), extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{a, l}, }, { name: "size_bytes", desc: "Amount of space allocated for a layout/data type", metricType: prometheus.GaugeValue, value: float64(s.TotalBytes), extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{a, l}, }, { name: "allocation_ratio", desc: "Data allocation ratio for a layout/data type", metricType: prometheus.GaugeValue, value: s.Ratio, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{a, l}, }, } } node_exporter-1.7.0/collector/btrfs_linux_test.go000066400000000000000000000145211452426057600223250ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nobtrfs // +build !nobtrfs package collector import ( "strings" "testing" "github.com/prometheus/procfs/btrfs" ) var expectedBtrfsMetrics = [][]btrfsMetric{ { {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{"fixture"}}, {name: "global_rsv_size_bytes", value: 1.6777216e+07}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, {name: "used_bytes", value: 8.08189952e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, {name: "size_bytes", value: 2.147483648e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, {name: "allocation_ratio", value: 1, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid0"}}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"metadata"}}, {name: "used_bytes", value: 933888, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid1"}}, {name: "size_bytes", value: 1.073741824e+09, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid1"}}, {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid1"}}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"system"}}, {name: "used_bytes", value: 16384, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, {name: "size_bytes", value: 8.388608e+06, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid1"}}, {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, {name: 
"device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop26"}}, }, { {name: "info", value: 1, extraLabel: []string{"label"}, extraLabelValue: []string{""}}, {name: "global_rsv_size_bytes", value: 1.6777216e+07}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"data"}}, {name: "used_bytes", value: 0, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, {name: "size_bytes", value: 6.44087808e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, {name: "allocation_ratio", value: 1.3333333333333333, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"data", "raid5"}}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"metadata"}}, {name: "used_bytes", value: 114688, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid6"}}, {name: "size_bytes", value: 4.29391872e+08, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid6"}}, {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"metadata", "raid6"}}, {name: "reserved_bytes", value: 0, extraLabel: []string{"block_group_type"}, extraLabelValue: []string{"system"}}, {name: "used_bytes", value: 16384, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, {name: "size_bytes", value: 1.6777216e+07, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, {name: "allocation_ratio", value: 2, extraLabel: []string{"block_group_type", "mode"}, extraLabelValue: []string{"system", "raid6"}}, {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop22"}}, {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop23"}}, {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop24"}}, {name: "device_size_bytes", value: 1.073741824e+10, extraLabel: []string{"device"}, extraLabelValue: []string{"loop25"}}, }, } func checkMetric(exp, got *btrfsMetric) bool { if exp.name != got.name || exp.value != got.value || len(exp.extraLabel) != len(got.extraLabel) || len(exp.extraLabelValue) != len(got.extraLabelValue) { return false } for i := range exp.extraLabel { if exp.extraLabel[i] != got.extraLabel[i] { return false } // Devices (loopXX) can appear in random order, so just check the first 4 characters. 
if strings.HasPrefix(got.extraLabelValue[i], "loop") && exp.extraLabelValue[i][:4] == got.extraLabelValue[i][:4] { continue } if exp.extraLabelValue[i] != got.extraLabelValue[i] { return false } } return true } func TestBtrfs(t *testing.T) { fs, _ := btrfs.NewFS("fixtures/sys") collector := &btrfsCollector{fs: fs} stats, err := collector.fs.Stats() if err != nil { t.Fatalf("Failed to retrieve Btrfs stats: %v", err) } if len(stats) != len(expectedBtrfsMetrics) { t.Fatalf("Unexpected number of Btrfs stats: expected %v, got %v", len(expectedBtrfsMetrics), len(stats)) } for i, s := range stats { metrics := collector.getMetrics(s, nil) if len(metrics) != len(expectedBtrfsMetrics[i]) { t.Fatalf("Unexpected number of Btrfs metrics: expected %v, got %v", len(expectedBtrfsMetrics[i]), len(metrics)) } for j, m := range metrics { exp := expectedBtrfsMetrics[i][j] if !checkMetric(&exp, &m) { t.Errorf("Incorrect btrfs metric: expected %#v, got: %#v", exp, m) } } } } node_exporter-1.7.0/collector/buddyinfo.go000066400000000000000000000041771452426057600207200ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nobuddyinfo && !netbsd // +build !nobuddyinfo,!netbsd package collector import ( "fmt" "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) const ( buddyInfoSubsystem = "buddyinfo" ) type buddyinfoCollector struct { fs procfs.FS desc *prometheus.Desc logger log.Logger } func init() { registerCollector("buddyinfo", defaultDisabled, NewBuddyinfoCollector) } // NewBuddyinfoCollector returns a new Collector exposing buddyinfo stats. func NewBuddyinfoCollector(logger log.Logger) (Collector, error) { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, buddyInfoSubsystem, "blocks"), "Count of free blocks according to size.", []string{"node", "zone", "size"}, nil, ) fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &buddyinfoCollector{fs, desc, logger}, nil } // Update calls (*buddyinfoCollector).getBuddyInfo to get the platform specific // buddyinfo metrics. func (c *buddyinfoCollector) Update(ch chan<- prometheus.Metric) error { buddyInfo, err := c.fs.BuddyInfo() if err != nil { return fmt.Errorf("couldn't get buddyinfo: %w", err) } level.Debug(c.logger).Log("msg", "Set node_buddy", "buddyInfo", buddyInfo) for _, entry := range buddyInfo { for size, value := range entry.Sizes { ch <- prometheus.MustNewConstMetric( c.desc, prometheus.GaugeValue, value, entry.Node, entry.Zone, strconv.Itoa(size), ) } } return nil } node_exporter-1.7.0/collector/cgroups_linux.go000066400000000000000000000043401452426057600216260ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nostat // +build !nostat package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) const cgroupsCollectorSubsystem = "cgroups" type cgroupSummaryCollector struct { fs procfs.FS cgroups *prometheus.Desc enabled *prometheus.Desc logger log.Logger } func init() { registerCollector(cgroupsCollectorSubsystem, defaultDisabled, NewCgroupSummaryCollector) } // NewCgroupSummaryCollector returns a new Collector exposing a summary of cgroups. func NewCgroupSummaryCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &cgroupSummaryCollector{ fs: fs, cgroups: prometheus.NewDesc( prometheus.BuildFQName(namespace, cgroupsCollectorSubsystem, "cgroups"), "Current cgroup number of the subsystem.", []string{"subsys_name"}, nil, ), enabled: prometheus.NewDesc( prometheus.BuildFQName(namespace, cgroupsCollectorSubsystem, "enabled"), "Current cgroup number of the subsystem.", []string{"subsys_name"}, nil, ), logger: logger, }, nil } // Update implements Collector and exposes cgroup statistics. func (c *cgroupSummaryCollector) Update(ch chan<- prometheus.Metric) error { cgroupSummarys, err := c.fs.CgroupSummarys() if err != nil { return err } for _, cs := range cgroupSummarys { ch <- prometheus.MustNewConstMetric(c.cgroups, prometheus.GaugeValue, float64(cs.Cgroups), cs.SubsysName) ch <- prometheus.MustNewConstMetric(c.enabled, prometheus.GaugeValue, float64(cs.Enabled), cs.SubsysName) } return nil } node_exporter-1.7.0/collector/collector.go000066400000000000000000000163211452426057600207150ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package collector includes all individual collectors to gather and export system metrics. package collector import ( "errors" "fmt" "sync" "time" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) // Namespace defines the common namespace to be used by all metrics. 
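// Collectors assemble metric names with prometheus.BuildFQName(namespace, subsystem, name), which joins the non-empty parts with underscores; for example, BuildFQName("node", "cpu", "seconds_total") yields "node_cpu_seconds_total".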
const namespace = "node" var ( scrapeDurationDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "scrape", "collector_duration_seconds"), "node_exporter: Duration of a collector scrape.", []string{"collector"}, nil, ) scrapeSuccessDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "scrape", "collector_success"), "node_exporter: Whether a collector succeeded.", []string{"collector"}, nil, ) ) const ( defaultEnabled = true defaultDisabled = false ) var ( factories = make(map[string]func(logger log.Logger) (Collector, error)) initiatedCollectorsMtx = sync.Mutex{} initiatedCollectors = make(map[string]Collector) collectorState = make(map[string]*bool) forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled ) func registerCollector(collector string, isDefaultEnabled bool, factory func(logger log.Logger) (Collector, error)) { var helpDefaultState string if isDefaultEnabled { helpDefaultState = "enabled" } else { helpDefaultState = "disabled" } flagName := fmt.Sprintf("collector.%s", collector) flagHelp := fmt.Sprintf("Enable the %s collector (default: %s).", collector, helpDefaultState) defaultValue := fmt.Sprintf("%v", isDefaultEnabled) flag := kingpin.Flag(flagName, flagHelp).Default(defaultValue).Action(collectorFlagAction(collector)).Bool() collectorState[collector] = flag factories[collector] = factory } // NodeCollector implements the prometheus.Collector interface. type NodeCollector struct { Collectors map[string]Collector logger log.Logger } // DisableDefaultCollectors sets the collector state to false for all collectors which // have not been explicitly enabled on the command line. func DisableDefaultCollectors() { for c := range collectorState { if _, ok := forcedCollectors[c]; !ok { *collectorState[c] = false } } } // collectorFlagAction generates a new action function for the given collector // to track whether it has been explicitly enabled or disabled from the command line. // A new action function is needed for each collector flag because the ParseContext // does not contain information about which flag called the action. // See: https://github.com/alecthomas/kingpin/issues/294 func collectorFlagAction(collector string) func(ctx *kingpin.ParseContext) error { return func(ctx *kingpin.ParseContext) error { forcedCollectors[collector] = true return nil } } // NewNodeCollector creates a new NodeCollector. func NewNodeCollector(logger log.Logger, filters ...string) (*NodeCollector, error) { f := make(map[string]bool) for _, filter := range filters { enabled, exist := collectorState[filter] if !exist { return nil, fmt.Errorf("missing collector: %s", filter) } if !*enabled { return nil, fmt.Errorf("disabled collector: %s", filter) } f[filter] = true } collectors := make(map[string]Collector) initiatedCollectorsMtx.Lock() defer initiatedCollectorsMtx.Unlock() for key, enabled := range collectorState { if !*enabled || (len(f) > 0 && !f[key]) { continue } if collector, ok := initiatedCollectors[key]; ok { collectors[key] = collector } else { collector, err := factories[key](log.With(logger, "collector", key)) if err != nil { return nil, err } collectors[key] = collector initiatedCollectors[key] = collector } } return &NodeCollector{Collectors: collectors, logger: logger}, nil } // Describe implements the prometheus.Collector interface. func (n NodeCollector) Describe(ch chan<- *prometheus.Desc) { ch <- scrapeDurationDesc ch <- scrapeSuccessDesc } // Collect implements the prometheus.Collector interface. 
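// Each enabled collector is scraped in its own goroutine; execute additionally reports node_scrape_collector_duration_seconds and node_scrape_collector_success for every collector alongside whatever the collector itself emits.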
func (n NodeCollector) Collect(ch chan<- prometheus.Metric) { wg := sync.WaitGroup{} wg.Add(len(n.Collectors)) for name, c := range n.Collectors { go func(name string, c Collector) { execute(name, c, ch, n.logger) wg.Done() }(name, c) } wg.Wait() } func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) { begin := time.Now() err := c.Update(ch) duration := time.Since(begin) var success float64 if err != nil { if IsNoDataError(err) { level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) } else { level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) } success = 0 } else { level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds()) success = 1 } ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name) ch <- prometheus.MustNewConstMetric(scrapeSuccessDesc, prometheus.GaugeValue, success, name) } // Collector is the interface a collector has to implement. type Collector interface { // Get new metrics and expose them via prometheus registry. Update(ch chan<- prometheus.Metric) error } type typedDesc struct { desc *prometheus.Desc valueType prometheus.ValueType } func (d *typedDesc) mustNewConstMetric(value float64, labels ...string) prometheus.Metric { return prometheus.MustNewConstMetric(d.desc, d.valueType, value, labels...) } // ErrNoData indicates the collector found no data to collect, but had no other error. var ErrNoData = errors.New("collector returned no data") func IsNoDataError(err error) bool { return err == ErrNoData } // pushMetric helps construct and convert a variety of value types into Prometheus float64 metrics. func pushMetric(ch chan<- prometheus.Metric, fieldDesc *prometheus.Desc, name string, value interface{}, valueType prometheus.ValueType, labelValues ...string) { var fVal float64 switch val := value.(type) { case uint8: fVal = float64(val) case uint16: fVal = float64(val) case uint32: fVal = float64(val) case uint64: fVal = float64(val) case int64: fVal = float64(val) case *uint8: if val == nil { return } fVal = float64(*val) case *uint16: if val == nil { return } fVal = float64(*val) case *uint32: if val == nil { return } fVal = float64(*val) case *uint64: if val == nil { return } fVal = float64(*val) case *int64: if val == nil { return } fVal = float64(*val) default: return } ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, fVal, labelValues...) } node_exporter-1.7.0/collector/conntrack_linux.go000066400000000000000000000147621452426057600221370ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !noconntrack // +build !noconntrack package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type conntrackCollector struct { current *prometheus.Desc limit *prometheus.Desc found *prometheus.Desc invalid *prometheus.Desc ignore *prometheus.Desc insert *prometheus.Desc insertFailed *prometheus.Desc drop *prometheus.Desc earlyDrop *prometheus.Desc searchRestart *prometheus.Desc logger log.Logger } type conntrackStatistics struct { found uint64 // Number of searched entries which were successful invalid uint64 // Number of packets seen which can not be tracked ignore uint64 // Number of packets seen which are already connected to a conntrack entry insert uint64 // Number of entries inserted into the list insertFailed uint64 // Number of entries for which list insertion was attempted but failed (happens if the same entry is already present) drop uint64 // Number of packets dropped due to conntrack failure. Either new conntrack entry allocation failed, or protocol helper dropped the packet earlyDrop uint64 // Number of dropped conntrack entries to make room for new ones, if maximum table size was reached searchRestart uint64 // Number of conntrack table lookups which had to be restarted due to hashtable resizes } func init() { registerCollector("conntrack", defaultEnabled, NewConntrackCollector) } // NewConntrackCollector returns a new Collector exposing conntrack stats. func NewConntrackCollector(logger log.Logger) (Collector, error) { return &conntrackCollector{ current: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_entries"), "Number of currently allocated flow entries for connection tracking.", nil, nil, ), limit: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_entries_limit"), "Maximum size of connection tracking table.", nil, nil, ), found: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_found"), "Number of searched entries which were successful.", nil, nil, ), invalid: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_invalid"), "Number of packets seen which can not be tracked.", nil, nil, ), ignore: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_ignore"), "Number of packets seen which are already connected to a conntrack entry.", nil, nil, ), insert: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_insert"), "Number of entries inserted into the list.", nil, nil, ), insertFailed: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_insert_failed"), "Number of entries for which list insertion was attempted but failed.", nil, nil, ), drop: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_drop"), "Number of packets dropped due to conntrack failure.", nil, nil, ), earlyDrop: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_early_drop"), "Number of dropped conntrack entries to make room for new ones, if maximum table size was reached.", nil, nil, ), searchRestart: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "nf_conntrack_stat_search_restart"), "Number of conntrack table lookups which had to be restarted due to hashtable resizes.", nil, nil, ), logger: logger, }, nil } func (c *conntrackCollector) Update(ch chan<- prometheus.Metric) error { value, err := 
readUintFromFile(procFilePath("sys/net/netfilter/nf_conntrack_count")) if err != nil { return c.handleErr(err) } ch <- prometheus.MustNewConstMetric( c.current, prometheus.GaugeValue, float64(value)) value, err = readUintFromFile(procFilePath("sys/net/netfilter/nf_conntrack_max")) if err != nil { return c.handleErr(err) } ch <- prometheus.MustNewConstMetric( c.limit, prometheus.GaugeValue, float64(value)) conntrackStats, err := getConntrackStatistics() if err != nil { return c.handleErr(err) } ch <- prometheus.MustNewConstMetric( c.found, prometheus.GaugeValue, float64(conntrackStats.found)) ch <- prometheus.MustNewConstMetric( c.invalid, prometheus.GaugeValue, float64(conntrackStats.invalid)) ch <- prometheus.MustNewConstMetric( c.ignore, prometheus.GaugeValue, float64(conntrackStats.ignore)) ch <- prometheus.MustNewConstMetric( c.insert, prometheus.GaugeValue, float64(conntrackStats.insert)) ch <- prometheus.MustNewConstMetric( c.insertFailed, prometheus.GaugeValue, float64(conntrackStats.insertFailed)) ch <- prometheus.MustNewConstMetric( c.drop, prometheus.GaugeValue, float64(conntrackStats.drop)) ch <- prometheus.MustNewConstMetric( c.earlyDrop, prometheus.GaugeValue, float64(conntrackStats.earlyDrop)) ch <- prometheus.MustNewConstMetric( c.searchRestart, prometheus.GaugeValue, float64(conntrackStats.searchRestart)) return nil } func (c *conntrackCollector) handleErr(err error) error { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "conntrack probably not loaded") return ErrNoData } return fmt.Errorf("failed to retrieve conntrack stats: %w", err) } func getConntrackStatistics() (*conntrackStatistics, error) { c := conntrackStatistics{} fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } connStats, err := fs.ConntrackStat() if err != nil { return nil, err } for _, connStat := range connStats { c.found += connStat.Found c.invalid += connStat.Invalid c.ignore += connStat.Ignore c.insert += connStat.Insert c.insertFailed += connStat.InsertFailed c.drop += connStat.Drop c.earlyDrop += connStat.EarlyDrop c.searchRestart += connStat.SearchRestart } return &c, nil } node_exporter-1.7.0/collector/cpu_common.go000066400000000000000000000016741452426057600210730ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "github.com/prometheus/client_golang/prometheus" ) const ( cpuCollectorSubsystem = "cpu" ) var ( nodeCPUSecondsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "seconds_total"), "Seconds the CPUs spent in each mode.", []string{"cpu", "mode"}, nil, ) ) node_exporter-1.7.0/collector/cpu_darwin.go000066400000000000000000000062411452426057600210620ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Based on gopsutil/cpu/cpu_darwin_cgo.go @ ae251eb which is licensed under // BSD. See https://github.com/shirou/gopsutil/blob/master/LICENSE for details. //go:build !nocpu // +build !nocpu package collector import ( "bytes" "encoding/binary" "fmt" "strconv" "unsafe" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) /* #cgo LDFLAGS: #include #include #include #include #include #include #include #include #if TARGET_OS_MAC #include #endif #include #include */ import "C" // ClocksPerSec default value. from time.h const ClocksPerSec = float64(C.CLK_TCK) type statCollector struct { cpu *prometheus.Desc logger log.Logger } func init() { registerCollector("cpu", defaultEnabled, NewCPUCollector) } // NewCPUCollector returns a new Collector exposing CPU stats. func NewCPUCollector(logger log.Logger) (Collector, error) { return &statCollector{ cpu: nodeCPUSecondsDesc, logger: logger, }, nil } func (c *statCollector) Update(ch chan<- prometheus.Metric) error { var ( count C.mach_msg_type_number_t cpuload *C.processor_cpu_load_info_data_t ncpu C.natural_t ) status := C.host_processor_info(C.host_t(C.mach_host_self()), C.PROCESSOR_CPU_LOAD_INFO, &ncpu, (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return fmt.Errorf("host_processor_info error=%d", status) } // jump through some cgo casting hoops and ensure we properly free // the memory that cpuload points to target := C.vm_map_t(C.mach_task_self_) address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) // the body of struct processor_cpu_load_info // aka processor_cpu_load_info_data_t var cpuTicks [C.CPU_STATE_MAX]uint32 // copy the cpuload array to a []byte buffer // where we can binary.Read the data size := int(ncpu) * binary.Size(cpuTicks) buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] bbuf := bytes.NewBuffer(buf) for i := 0; i < int(ncpu); i++ { err := binary.Read(bbuf, binary.LittleEndian, &cpuTicks) if err != nil { return err } for k, v := range map[string]int{ "user": C.CPU_STATE_USER, "system": C.CPU_STATE_SYSTEM, "nice": C.CPU_STATE_NICE, "idle": C.CPU_STATE_IDLE, } { ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(cpuTicks[v])/ClocksPerSec, strconv.Itoa(i), k) } } return nil } node_exporter-1.7.0/collector/cpu_dragonfly.go000066400000000000000000000066541452426057600215730ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nocpu // +build !nocpu package collector import ( "errors" "strconv" "unsafe" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) /* #cgo LDFLAGS: #include #include #include #include int getCPUTimes(uint64_t **cputime, size_t *cpu_times_len) { size_t len; // Get number of cpu cores. int mib[2]; int ncpu; mib[0] = CTL_HW; mib[1] = HW_NCPU; len = sizeof(ncpu); if (sysctl(mib, 2, &ncpu, &len, NULL, 0)) { return -1; } // Get the cpu times. struct kinfo_cputime cp_t[ncpu]; bzero(cp_t, sizeof(struct kinfo_cputime)*ncpu); len = sizeof(cp_t[0])*ncpu; if (sysctlbyname("kern.cputime", &cp_t, &len, NULL, 0)) { return -1; } *cpu_times_len = ncpu*CPUSTATES; uint64_t user, nice, sys, intr, idle; user = nice = sys = intr = idle = 0; *cputime = (uint64_t *) malloc(sizeof(uint64_t)*(*cpu_times_len)); for (int i = 0; i < ncpu; ++i) { int offset = CPUSTATES * i; (*cputime)[offset] = cp_t[i].cp_user; (*cputime)[offset+1] = cp_t[i].cp_nice; (*cputime)[offset+2] = cp_t[i].cp_sys; (*cputime)[offset+3] = cp_t[i].cp_intr; (*cputime)[offset+4] = cp_t[i].cp_idle; } return 0; } */ import "C" const maxCPUTimesLen = C.MAXCPU * C.CPUSTATES type statCollector struct { cpu *prometheus.Desc logger log.Logger } func init() { registerCollector("cpu", defaultEnabled, NewStatCollector) } // NewStatCollector returns a new Collector exposing CPU stats. func NewStatCollector(logger log.Logger) (Collector, error) { return &statCollector{ cpu: nodeCPUSecondsDesc, logger: logger, }, nil } func getDragonFlyCPUTimes() ([]float64, error) { // We want time spent per-CPU per CPUSTATE. // CPUSTATES (number of CPUSTATES) is defined as 5U. // States: CP_USER | CP_NICE | CP_SYS | CP_IDLE | CP_INTR // // Each value is in microseconds // // Look into sys/kern/kern_clock.c for details. var ( cpuTimesC *C.uint64_t cpuTimesLength C.size_t ) if C.getCPUTimes(&cpuTimesC, &cpuTimesLength) == -1 { return nil, errors.New("could not retrieve CPU times") } defer C.free(unsafe.Pointer(cpuTimesC)) cput := (*[maxCPUTimesLen]C.uint64_t)(unsafe.Pointer(cpuTimesC))[:cpuTimesLength:cpuTimesLength] cpuTimes := make([]float64, cpuTimesLength) for i, value := range cput { cpuTimes[i] = float64(value) / float64(1000000) } return cpuTimes, nil } // Expose CPU stats using sysctl. func (c *statCollector) Update(ch chan<- prometheus.Metric) error { var fieldsCount = 5 cpuTimes, err := getDragonFlyCPUTimes() if err != nil { return err } // Export order: user nice sys intr idle cpuFields := []string{"user", "nice", "sys", "interrupt", "idle"} for i, value := range cpuTimes { cpux := strconv.Itoa(i / fieldsCount) ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, value, cpux, cpuFields[i%fieldsCount]) } return nil } node_exporter-1.7.0/collector/cpu_dragonfly_test.go000066400000000000000000000020151452426057600226150ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nocpu // +build !nocpu package collector import ( "runtime" "testing" ) func TestCPU(t *testing.T) { var ( fieldsCount = 5 times, err = getDragonFlyCPUTimes() ) if err != nil { t.Fatalf("expected no error, got %v", err) } if len(times) == 0 { t.Fatalf("no cputimes found") } want := runtime.NumCPU() * fieldsCount if len(times) != want { t.Fatalf("should have %d cpuTimes: got %d", want, len(times)) } } node_exporter-1.7.0/collector/cpu_freebsd.go000066400000000000000000000110641452426057600212070ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "fmt" "math" "strconv" "unsafe" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) type clockinfo struct { hz int32 // clock frequency tick int32 // micro-seconds per hz tick spare int32 stathz int32 // statistics clock frequency profhz int32 // profiling clock frequency } type cputime struct { user float64 nice float64 sys float64 intr float64 idle float64 } func getCPUTimes() ([]cputime, error) { const states = 5 clockb, err := unix.SysctlRaw("kern.clockrate") if err != nil { return nil, err } clock := *(*clockinfo)(unsafe.Pointer(&clockb[0])) cpb, err := unix.SysctlRaw("kern.cp_times") if err != nil { return nil, err } var cpufreq float64 if clock.stathz > 0 { cpufreq = float64(clock.stathz) } else { cpufreq = float64(clock.hz) } var times []float64 for len(cpb) >= int(unsafe.Sizeof(int(0))) { t := *(*int)(unsafe.Pointer(&cpb[0])) times = append(times, float64(t)/cpufreq) cpb = cpb[unsafe.Sizeof(int(0)):] } cpus := make([]cputime, len(times)/states) for i := 0; i < len(times); i += states { cpu := &cpus[i/states] cpu.user = times[i] cpu.nice = times[i+1] cpu.sys = times[i+2] cpu.intr = times[i+3] cpu.idle = times[i+4] } return cpus, nil } type statCollector struct { cpu typedDesc temp typedDesc logger log.Logger } func init() { registerCollector("cpu", defaultEnabled, NewStatCollector) } // NewStatCollector returns a new Collector exposing CPU stats. func NewStatCollector(logger log.Logger) (Collector, error) { return &statCollector{ cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, temp: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "temperature_celsius"), "CPU temperature", []string{"cpu"}, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } // Expose CPU stats using sysctl. func (c *statCollector) Update(ch chan<- prometheus.Metric) error { // We want time spent per-cpu per CPUSTATE. // CPUSTATES (number of CPUSTATES) is defined as 5U. // Order: CP_USER | CP_NICE | CP_SYS | CP_IDLE | CP_INTR // sysctl kern.cp_times provides hw.ncpu * CPUSTATES long integers: // hw.ncpu * (space-separated list of the above variables) // // Each value is a counter incremented at frequency // kern.clockrate.(stathz | hz) // // Look into sys/kern/kern_clock.c for details. 
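// getCPUTimes divides each raw tick counter by the kern.clockrate frequency (stathz, or hz when stathz is 0), so, for example, 12700 accumulated user ticks at stathz=127 are exported as 100 seconds of user time.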
cpuTimes, err := getCPUTimes() if err != nil { return err } for cpu, t := range cpuTimes { lcpu := strconv.Itoa(cpu) ch <- c.cpu.mustNewConstMetric(float64(t.user), lcpu, "user") ch <- c.cpu.mustNewConstMetric(float64(t.nice), lcpu, "nice") ch <- c.cpu.mustNewConstMetric(float64(t.sys), lcpu, "system") ch <- c.cpu.mustNewConstMetric(float64(t.intr), lcpu, "interrupt") ch <- c.cpu.mustNewConstMetric(float64(t.idle), lcpu, "idle") temp, err := unix.SysctlUint32(fmt.Sprintf("dev.cpu.%d.temperature", cpu)) if err != nil { if err == unix.ENOENT { // No temperature information for this CPU level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu) } else { // Unexpected error ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu) level.Error(c.logger).Log("msg", "failed to query CPU temperature for CPU", "cpu", cpu, "err", err) } continue } // Temp is a signed integer in deci-degrees Kelvin. // Cast uint32 to int32 and convert to float64 degrees Celsius. // // 2732 is used as the conversion constant for deci-degrees // Kelvin, in multiple places in the kernel that feed into this // sysctl, so we want to maintain consistency: // // sys/dev/amdtemp/amdtemp.c // #define AMDTEMP_ZERO_C_TO_K 2732 // // sys/dev/acpica/acpi_thermal.c // #define TZ_ZEROC 2732 // // sys/dev/coretemp/coretemp.c // #define TZ_ZEROC 2732 ch <- c.temp.mustNewConstMetric(float64(int32(temp)-2732)/10, lcpu) } return err } node_exporter-1.7.0/collector/cpu_linux.go000066400000000000000000000372611452426057600207430ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "fmt" "os" "path/filepath" "regexp" "strconv" "sync" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" "github.com/prometheus/procfs/sysfs" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) type cpuCollector struct { fs procfs.FS cpu *prometheus.Desc cpuInfo *prometheus.Desc cpuFrequencyHz *prometheus.Desc cpuFlagsInfo *prometheus.Desc cpuBugsInfo *prometheus.Desc cpuGuest *prometheus.Desc cpuCoreThrottle *prometheus.Desc cpuPackageThrottle *prometheus.Desc cpuIsolated *prometheus.Desc logger log.Logger cpuStats map[int64]procfs.CPUStat cpuStatsMutex sync.Mutex isolatedCpus []uint16 cpuFlagsIncludeRegexp *regexp.Regexp cpuBugsIncludeRegexp *regexp.Regexp } // Idle jump back limit in seconds. 
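// If a CPU's idle counter moves backwards by at least this many seconds between scrapes, updateCPUStats treats it as a hotplug event and resets the cached stats for that CPU; smaller backwards jumps simply keep the previously cached value for the affected field.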
const jumpBackSeconds = 3.0 var ( enableCPUGuest = kingpin.Flag("collector.cpu.guest", "Enables metric node_cpu_guest_seconds_total").Default("true").Bool() enableCPUInfo = kingpin.Flag("collector.cpu.info", "Enables metric cpu_info").Bool() flagsInclude = kingpin.Flag("collector.cpu.info.flags-include", "Filter the `flags` field in cpuInfo with a value that must be a regular expression").String() bugsInclude = kingpin.Flag("collector.cpu.info.bugs-include", "Filter the `bugs` field in cpuInfo with a value that must be a regular expression").String() jumpBackDebugMessage = fmt.Sprintf("CPU Idle counter jumped backwards more than %f seconds, possible hotplug event, resetting CPU stats", jumpBackSeconds) ) func init() { registerCollector("cpu", defaultEnabled, NewCPUCollector) } // NewCPUCollector returns a new Collector exposing kernel/system statistics. func NewCPUCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } sysfs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } isolcpus, err := sysfs.IsolatedCPUs() if err != nil { if !os.IsNotExist(err) { return nil, fmt.Errorf("Unable to get isolated cpus: %w", err) } level.Debug(logger).Log("msg", "Could not open isolated file", "error", err) } c := &cpuCollector{ fs: fs, cpu: nodeCPUSecondsDesc, cpuInfo: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "info"), "CPU information from /proc/cpuinfo.", []string{"package", "core", "cpu", "vendor", "family", "model", "model_name", "microcode", "stepping", "cachesize"}, nil, ), cpuFrequencyHz: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), "CPU frequency in hertz from /proc/cpuinfo.", []string{"package", "core", "cpu"}, nil, ), cpuFlagsInfo: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "flag_info"), "The `flags` field of CPU information from /proc/cpuinfo taken from the first core.", []string{"flag"}, nil, ), cpuBugsInfo: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "bug_info"), "The `bugs` field of CPU information from /proc/cpuinfo taken from the first core.", []string{"bug"}, nil, ), cpuGuest: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "guest_seconds_total"), "Seconds the CPUs spent in guests (VMs) for each mode.", []string{"cpu", "mode"}, nil, ), cpuCoreThrottle: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "core_throttles_total"), "Number of times this CPU core has been throttled.", []string{"package", "core"}, nil, ), cpuPackageThrottle: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "package_throttles_total"), "Number of times this CPU package has been throttled.", []string{"package"}, nil, ), cpuIsolated: prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "isolated"), "Whether each core is isolated, information from /sys/devices/system/cpu/isolated.", []string{"cpu"}, nil, ), logger: logger, isolatedCpus: isolcpus, cpuStats: make(map[int64]procfs.CPUStat), } err = c.compileIncludeFlags(flagsInclude, bugsInclude) if err != nil { return nil, fmt.Errorf("fail to compile --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include, the values of them must be regular expressions: %w", err) } return c, nil } func (c *cpuCollector) compileIncludeFlags(flagsIncludeFlag, 
bugsIncludeFlag *string) error { if (*flagsIncludeFlag != "" || *bugsIncludeFlag != "") && !*enableCPUInfo { *enableCPUInfo = true level.Info(c.logger).Log("msg", "--collector.cpu.info has been set to `true` because you set the following flags, like --collector.cpu.info.flags-include and --collector.cpu.info.bugs-include") } var err error if *flagsIncludeFlag != "" { c.cpuFlagsIncludeRegexp, err = regexp.Compile(*flagsIncludeFlag) if err != nil { return err } } if *bugsIncludeFlag != "" { c.cpuBugsIncludeRegexp, err = regexp.Compile(*bugsIncludeFlag) if err != nil { return err } } return nil } // Update implements Collector and exposes cpu related metrics from /proc/stat and /sys/.../cpu/. func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error { if *enableCPUInfo { if err := c.updateInfo(ch); err != nil { return err } } if err := c.updateStat(ch); err != nil { return err } if c.isolatedCpus != nil { c.updateIsolated(ch) } return c.updateThermalThrottle(ch) } // updateInfo reads /proc/cpuinfo func (c *cpuCollector) updateInfo(ch chan<- prometheus.Metric) error { info, err := c.fs.CPUInfo() if err != nil { return err } for _, cpu := range info { ch <- prometheus.MustNewConstMetric(c.cpuInfo, prometheus.GaugeValue, 1, cpu.PhysicalID, cpu.CoreID, strconv.Itoa(int(cpu.Processor)), cpu.VendorID, cpu.CPUFamily, cpu.Model, cpu.ModelName, cpu.Microcode, cpu.Stepping, cpu.CacheSize) } cpuFreqEnabled, ok := collectorState["cpufreq"] if !ok || cpuFreqEnabled == nil { level.Debug(c.logger).Log("cpufreq key missing or nil value in collectorState map", err) } else if !*cpuFreqEnabled { for _, cpu := range info { ch <- prometheus.MustNewConstMetric(c.cpuFrequencyHz, prometheus.GaugeValue, cpu.CPUMHz*1e6, cpu.PhysicalID, cpu.CoreID, strconv.Itoa(int(cpu.Processor))) } } if len(info) != 0 { cpu := info[0] if err := updateFieldInfo(cpu.Flags, c.cpuFlagsIncludeRegexp, c.cpuFlagsInfo, ch); err != nil { return err } if err := updateFieldInfo(cpu.Bugs, c.cpuBugsIncludeRegexp, c.cpuBugsInfo, ch); err != nil { return err } } return nil } func updateFieldInfo(valueList []string, filter *regexp.Regexp, desc *prometheus.Desc, ch chan<- prometheus.Metric) error { if filter == nil { return nil } for _, val := range valueList { if !filter.MatchString(val) { continue } ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, val, ) } return nil } // updateThermalThrottle reads /sys/devices/system/cpu/cpu* and expose thermal throttle statistics. 
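// For every cpu* directory, topology/physical_package_id and topology/core_id identify the package and core; thermal_throttle/core_throttle_count and thermal_throttle/package_throttle_count are then read at most once per core and package, and CPUs missing any of these files are skipped with a debug log.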
func (c *cpuCollector) updateThermalThrottle(ch chan<- prometheus.Metric) error { cpus, err := filepath.Glob(sysFilePath("devices/system/cpu/cpu[0-9]*")) if err != nil { return err } packageThrottles := make(map[uint64]uint64) packageCoreThrottles := make(map[uint64]map[uint64]uint64) // cpu loop for _, cpu := range cpus { // See // https://www.kernel.org/doc/Documentation/x86/topology.txt // https://www.kernel.org/doc/Documentation/cputopology.txt // https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-devices-system-cpu var err error var physicalPackageID, coreID uint64 // topology/physical_package_id if physicalPackageID, err = readUintFromFile(filepath.Join(cpu, "topology", "physical_package_id")); err != nil { level.Debug(c.logger).Log("msg", "CPU is missing physical_package_id", "cpu", cpu) continue } // topology/core_id if coreID, err = readUintFromFile(filepath.Join(cpu, "topology", "core_id")); err != nil { level.Debug(c.logger).Log("msg", "CPU is missing core_id", "cpu", cpu) continue } // metric node_cpu_core_throttles_total // // We process this metric before the package throttles as there // are CPU+kernel combinations that only present core throttles // but no package throttles. // Seen e.g. on an Intel Xeon E5472 system with RHEL 6.9 kernel. if _, present := packageCoreThrottles[physicalPackageID]; !present { packageCoreThrottles[physicalPackageID] = make(map[uint64]uint64) } if _, present := packageCoreThrottles[physicalPackageID][coreID]; !present { // Read thermal_throttle/core_throttle_count only once if coreThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "core_throttle_count")); err == nil { packageCoreThrottles[physicalPackageID][coreID] = coreThrottleCount } else { level.Debug(c.logger).Log("msg", "CPU is missing core_throttle_count", "cpu", cpu) } } // metric node_cpu_package_throttles_total if _, present := packageThrottles[physicalPackageID]; !present { // Read thermal_throttle/package_throttle_count only once if packageThrottleCount, err := readUintFromFile(filepath.Join(cpu, "thermal_throttle", "package_throttle_count")); err == nil { packageThrottles[physicalPackageID] = packageThrottleCount } else { level.Debug(c.logger).Log("msg", "CPU is missing package_throttle_count", "cpu", cpu) } } } for physicalPackageID, packageThrottleCount := range packageThrottles { ch <- prometheus.MustNewConstMetric(c.cpuPackageThrottle, prometheus.CounterValue, float64(packageThrottleCount), strconv.FormatUint(physicalPackageID, 10)) } for physicalPackageID, coreMap := range packageCoreThrottles { for coreID, coreThrottleCount := range coreMap { ch <- prometheus.MustNewConstMetric(c.cpuCoreThrottle, prometheus.CounterValue, float64(coreThrottleCount), strconv.FormatUint(physicalPackageID, 10), strconv.FormatUint(coreID, 10)) } } return nil } // updateIsolated reads /sys/devices/system/cpu/isolated through sysfs and exports isolation level metrics. func (c *cpuCollector) updateIsolated(ch chan<- prometheus.Metric) { for _, cpu := range c.isolatedCpus { cpuNum := strconv.Itoa(int(cpu)) ch <- prometheus.MustNewConstMetric(c.cpuIsolated, prometheus.GaugeValue, 1.0, cpuNum) } } // updateStat reads /proc/stat through procfs and exports CPU-related metrics. func (c *cpuCollector) updateStat(ch chan<- prometheus.Metric) error { stats, err := c.fs.Stat() if err != nil { return err } c.updateCPUStats(stats.CPU) // Acquire a lock to read the stats. 
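// Metrics are emitted from the cached per-CPU values guarded by this mutex rather than from the raw procfs readings, so small backwards jumps in /proc/stat do not show up in the exported counters; only an idle regression of jumpBackSeconds or more resets a CPU's cached values.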
c.cpuStatsMutex.Lock() defer c.cpuStatsMutex.Unlock() for cpuID, cpuStat := range c.cpuStats { cpuNum := strconv.Itoa(int(cpuID)) ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.User, cpuNum, "user") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Nice, cpuNum, "nice") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.System, cpuNum, "system") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Idle, cpuNum, "idle") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Iowait, cpuNum, "iowait") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.IRQ, cpuNum, "irq") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.SoftIRQ, cpuNum, "softirq") ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, cpuStat.Steal, cpuNum, "steal") if *enableCPUGuest { // Guest CPU is also accounted for in cpuStat.User and cpuStat.Nice, expose these as separate metrics. ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.Guest, cpuNum, "user") ch <- prometheus.MustNewConstMetric(c.cpuGuest, prometheus.CounterValue, cpuStat.GuestNice, cpuNum, "nice") } } return nil } // updateCPUStats updates the internal cache of CPU stats. func (c *cpuCollector) updateCPUStats(newStats map[int64]procfs.CPUStat) { // Acquire a lock to update the stats. c.cpuStatsMutex.Lock() defer c.cpuStatsMutex.Unlock() // Reset the cache if the list of CPUs has changed. for i, n := range newStats { cpuStats := c.cpuStats[i] // If idle jumps backwards by more than X seconds, assume we had a hotplug event and reset the stats for this CPU. if (cpuStats.Idle - n.Idle) >= jumpBackSeconds { level.Debug(c.logger).Log("msg", jumpBackDebugMessage, "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle) cpuStats = procfs.CPUStat{} } if n.Idle >= cpuStats.Idle { cpuStats.Idle = n.Idle } else { level.Debug(c.logger).Log("msg", "CPU Idle counter jumped backwards", "cpu", i, "old_value", cpuStats.Idle, "new_value", n.Idle) } if n.User >= cpuStats.User { cpuStats.User = n.User } else { level.Debug(c.logger).Log("msg", "CPU User counter jumped backwards", "cpu", i, "old_value", cpuStats.User, "new_value", n.User) } if n.Nice >= cpuStats.Nice { cpuStats.Nice = n.Nice } else { level.Debug(c.logger).Log("msg", "CPU Nice counter jumped backwards", "cpu", i, "old_value", cpuStats.Nice, "new_value", n.Nice) } if n.System >= cpuStats.System { cpuStats.System = n.System } else { level.Debug(c.logger).Log("msg", "CPU System counter jumped backwards", "cpu", i, "old_value", cpuStats.System, "new_value", n.System) } if n.Iowait >= cpuStats.Iowait { cpuStats.Iowait = n.Iowait } else { level.Debug(c.logger).Log("msg", "CPU Iowait counter jumped backwards", "cpu", i, "old_value", cpuStats.Iowait, "new_value", n.Iowait) } if n.IRQ >= cpuStats.IRQ { cpuStats.IRQ = n.IRQ } else { level.Debug(c.logger).Log("msg", "CPU IRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.IRQ, "new_value", n.IRQ) } if n.SoftIRQ >= cpuStats.SoftIRQ { cpuStats.SoftIRQ = n.SoftIRQ } else { level.Debug(c.logger).Log("msg", "CPU SoftIRQ counter jumped backwards", "cpu", i, "old_value", cpuStats.SoftIRQ, "new_value", n.SoftIRQ) } if n.Steal >= cpuStats.Steal { cpuStats.Steal = n.Steal } else { level.Debug(c.logger).Log("msg", "CPU Steal counter jumped backwards", "cpu", i, "old_value", cpuStats.Steal, "new_value", n.Steal) } if n.Guest >= cpuStats.Guest { cpuStats.Guest 
= n.Guest } else { level.Debug(c.logger).Log("msg", "CPU Guest counter jumped backwards", "cpu", i, "old_value", cpuStats.Guest, "new_value", n.Guest) } if n.GuestNice >= cpuStats.GuestNice { cpuStats.GuestNice = n.GuestNice } else { level.Debug(c.logger).Log("msg", "CPU GuestNice counter jumped backwards", "cpu", i, "old_value", cpuStats.GuestNice, "new_value", n.GuestNice) } c.cpuStats[i] = cpuStats } // Remove offline CPUs. if len(newStats) != len(c.cpuStats) { onlineCPUIds := maps.Keys(newStats) maps.DeleteFunc(c.cpuStats, func(key int64, item procfs.CPUStat) bool { return !slices.Contains(onlineCPUIds, key) }) } } node_exporter-1.7.0/collector/cpu_linux_test.go000066400000000000000000000103501452426057600217700ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "reflect" "testing" "github.com/go-kit/log" "github.com/prometheus/procfs" ) func copyStats(d, s map[int64]procfs.CPUStat) { for k := range s { v := s[k] d[k] = v } } func makeTestCPUCollector(s map[int64]procfs.CPUStat) *cpuCollector { dup := make(map[int64]procfs.CPUStat, len(s)) copyStats(dup, s) return &cpuCollector{ logger: log.NewNopLogger(), cpuStats: dup, } } func TestCPU(t *testing.T) { firstCPUStat := map[int64]procfs.CPUStat{ 0: { User: 100.0, Nice: 100.0, System: 100.0, Idle: 100.0, Iowait: 100.0, IRQ: 100.0, SoftIRQ: 100.0, Steal: 100.0, Guest: 100.0, GuestNice: 100.0, }} c := makeTestCPUCollector(firstCPUStat) want := map[int64]procfs.CPUStat{ 0: { User: 101.0, Nice: 101.0, System: 101.0, Idle: 101.0, Iowait: 101.0, IRQ: 101.0, SoftIRQ: 101.0, Steal: 101.0, Guest: 101.0, GuestNice: 101.0, }} c.updateCPUStats(want) got := c.cpuStats if !reflect.DeepEqual(want, got) { t.Fatalf("should have %v CPU Stat: got %v", want, got) } c = makeTestCPUCollector(firstCPUStat) jumpBack := map[int64]procfs.CPUStat{ 0: { User: 99.9, Nice: 99.9, System: 99.9, Idle: 99.9, Iowait: 99.9, IRQ: 99.9, SoftIRQ: 99.9, Steal: 99.9, Guest: 99.9, GuestNice: 99.9, }} c.updateCPUStats(jumpBack) got = c.cpuStats if reflect.DeepEqual(jumpBack, got) { t.Fatalf("should have %v CPU Stat: got %v", firstCPUStat, got) } c = makeTestCPUCollector(firstCPUStat) resetIdle := map[int64]procfs.CPUStat{ 0: { User: 102.0, Nice: 102.0, System: 102.0, Idle: 1.0, Iowait: 102.0, IRQ: 102.0, SoftIRQ: 102.0, Steal: 102.0, Guest: 102.0, GuestNice: 102.0, }} c.updateCPUStats(resetIdle) got = c.cpuStats if !reflect.DeepEqual(resetIdle, got) { t.Fatalf("should have %v CPU Stat: got %v", resetIdle, got) } } func TestCPUOffline(t *testing.T) { // CPU 1 goes offline. 
firstCPUStat := map[int64]procfs.CPUStat{ 0: { User: 100.0, Nice: 100.0, System: 100.0, Idle: 100.0, Iowait: 100.0, IRQ: 100.0, SoftIRQ: 100.0, Steal: 100.0, Guest: 100.0, GuestNice: 100.0, }, 1: { User: 101.0, Nice: 101.0, System: 101.0, Idle: 101.0, Iowait: 101.0, IRQ: 101.0, SoftIRQ: 101.0, Steal: 101.0, Guest: 101.0, GuestNice: 101.0, }, } c := makeTestCPUCollector(firstCPUStat) want := map[int64]procfs.CPUStat{ 0: { User: 100.0, Nice: 100.0, System: 100.0, Idle: 100.0, Iowait: 100.0, IRQ: 100.0, SoftIRQ: 100.0, Steal: 100.0, Guest: 100.0, GuestNice: 100.0, }, } c.updateCPUStats(want) got := c.cpuStats if !reflect.DeepEqual(want, got) { t.Fatalf("should have %v CPU Stat: got %v", want, got) } // CPU 1 comes back online. want = map[int64]procfs.CPUStat{ 0: { User: 100.0, Nice: 100.0, System: 100.0, Idle: 100.0, Iowait: 100.0, IRQ: 100.0, SoftIRQ: 100.0, Steal: 100.0, Guest: 100.0, GuestNice: 100.0, }, 1: { User: 101.0, Nice: 101.0, System: 101.0, Idle: 101.0, Iowait: 101.0, IRQ: 101.0, SoftIRQ: 101.0, Steal: 101.0, Guest: 101.0, GuestNice: 101.0, }, } c.updateCPUStats(want) got = c.cpuStats if !reflect.DeepEqual(want, got) { t.Fatalf("should have %v CPU Stat: got %v", want, got) } } node_exporter-1.7.0/collector/cpu_netbsd.go000066400000000000000000000150671452426057600210630ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "errors" "math" "regexp" "sort" "strconv" "strings" "unsafe" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" "howett.net/plist" ) type clockinfo struct { hz int32 // clock frequency tick int32 // micro-seconds per hz tick spare int32 stathz int32 // statistics clock frequency profhz int32 // profiling clock frequency } type cputime struct { user float64 nice float64 sys float64 intr float64 idle float64 } type plistref struct { pref_plist unsafe.Pointer pref_len uint64 } type sysmonValues struct { CurValue int `plist:"cur-value"` Description string `plist:"description"` State string `plist:"state"` Type string `plist:"type"` } type sysmonProperty []sysmonValues type sysmonProperties map[string]sysmonProperty func readBytes(ptr unsafe.Pointer, length uint64) []byte { buf := make([]byte, length-1) var i uint64 for ; i < length-1; i++ { buf[i] = *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(i))) } return buf } func ioctl(fd int, nr int64, typ byte, size uintptr, retptr unsafe.Pointer) error { _, _, errno := unix.Syscall( unix.SYS_IOCTL, uintptr(fd), // Some magicks derived from sys/ioccom.h. 
uintptr((0x40000000|0x80000000)| ((int64(size)&(1<<13-1))<<16)| (int64(typ)<<8)| nr, ), uintptr(retptr), ) if errno != 0 { return errno } return nil } func readSysmonProperties() (sysmonProperties, error) { fd, err := unix.Open(rootfsFilePath("/dev/sysmon"), unix.O_RDONLY, 0777) if err != nil { return nil, err } defer unix.Close(fd) var retptr plistref if err = ioctl(fd, 0, 'E', unsafe.Sizeof(retptr), unsafe.Pointer(&retptr)); err != nil { return nil, err } bytes := readBytes(retptr.pref_plist, retptr.pref_len) var props sysmonProperties if _, err = plist.Unmarshal(bytes, &props); err != nil { return nil, err } return props, nil } func sortFilterSysmonProperties(props sysmonProperties, prefix string) []string { var keys []string for key := range props { if !strings.HasPrefix(key, prefix) { continue } keys = append(keys, key) } sort.Strings(keys) return keys } func convertTemperatures(prop sysmonProperty, res map[int]float64) error { for _, val := range prop { if val.State == "invalid" || val.State == "unknown" || val.State == "" { continue } re := regexp.MustCompile("^cpu([0-9]+) temperature$") core := re.FindStringSubmatch(val.Description)[1] ncore, _ := strconv.Atoi(core) temperature := ((float64(uint64(val.CurValue))) / 1000000) - 273.15 res[ncore] = temperature } return nil } func getCPUTemperatures() (map[int]float64, error) { res := make(map[int]float64) // Read all properties props, err := readSysmonProperties() if err != nil { return res, err } keys := sortFilterSysmonProperties(props, "coretemp") for idx, _ := range keys { convertTemperatures(props[keys[idx]], res) } return res, nil } func getCPUTimes() ([]cputime, error) { const states = 5 clockb, err := unix.SysctlRaw("kern.clockrate") if err != nil { return nil, err } clock := *(*clockinfo)(unsafe.Pointer(&clockb[0])) var cpufreq float64 if clock.stathz > 0 { cpufreq = float64(clock.stathz) } else { cpufreq = float64(clock.hz) } ncpusb, err := unix.SysctlRaw("hw.ncpu") if err != nil { return nil, err } ncpus := *(*int)(unsafe.Pointer(&ncpusb[0])) if ncpus < 1 { return nil, errors.New("Invalid cpu number") } var times []float64 for ncpu := 0; ncpu < ncpus; ncpu++ { cpb, err := unix.SysctlRaw("kern.cp_time", ncpu) if err != nil { return nil, err } for len(cpb) >= int(unsafe.Sizeof(int(0))) { t := *(*int)(unsafe.Pointer(&cpb[0])) times = append(times, float64(t)/cpufreq) cpb = cpb[unsafe.Sizeof(int(0)):] } } cpus := make([]cputime, len(times)/states) for i := 0; i < len(times); i += states { cpu := &cpus[i/states] cpu.user = times[i] cpu.nice = times[i+1] cpu.sys = times[i+2] cpu.intr = times[i+3] cpu.idle = times[i+4] } return cpus, nil } type statCollector struct { cpu typedDesc temp typedDesc logger log.Logger } func init() { registerCollector("cpu", defaultEnabled, NewStatCollector) } // NewStatCollector returns a new Collector exposing CPU stats. func NewStatCollector(logger log.Logger) (Collector, error) { return &statCollector{ cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, temp: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "temperature_celsius"), "CPU temperature", []string{"cpu"}, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } // Expose CPU stats using sysctl. func (c *statCollector) Update(ch chan<- prometheus.Metric) error { // We want time spent per-cpu per CPUSTATE. // CPUSTATES (number of CPUSTATES) is defined as 5U. 
// Order: CP_USER | CP_NICE | CP_SYS | CP_IDLE | CP_INTR // sysctl kern.cp_time.x provides CPUSTATES long integers: // (space-separated list of the above variables, where // x stands for the number of the CPU core) // // Each value is a counter incremented at frequency // kern.clockrate.(stathz | hz) // // Look into sys/kern/kern_clock.c for details. cpuTimes, err := getCPUTimes() if err != nil { return err } cpuTemperatures, err := getCPUTemperatures() if err != nil { return err } for cpu, t := range cpuTimes { lcpu := strconv.Itoa(cpu) ch <- c.cpu.mustNewConstMetric(float64(t.user), lcpu, "user") ch <- c.cpu.mustNewConstMetric(float64(t.nice), lcpu, "nice") ch <- c.cpu.mustNewConstMetric(float64(t.sys), lcpu, "system") ch <- c.cpu.mustNewConstMetric(float64(t.intr), lcpu, "interrupt") ch <- c.cpu.mustNewConstMetric(float64(t.idle), lcpu, "idle") if temp, ok := cpuTemperatures[cpu]; ok { ch <- c.temp.mustNewConstMetric(temp, lcpu) } else { level.Debug(c.logger).Log("msg", "no temperature information for CPU", "cpu", cpu) ch <- c.temp.mustNewConstMetric(math.NaN(), lcpu) } } return err } node_exporter-1.7.0/collector/cpu_netbsd_test.go000066400000000000000000000022111452426057600221050ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "runtime" "testing" ) func TestCPUTimes(t *testing.T) { times, err := getCPUTimes() if err != nil { t.Fatalf("getCPUTimes returned error: %v", err) } if len(times) == 0 { t.Fatalf("no CPU times found") } if got, want := len(times), runtime.NumCPU(); got != want { t.Fatalf("unexpected # of CPU times; got %d want %d", got, want) } } func TestCPUTemperatures(t *testing.T) { _, err := getCPUTemperatures() if err != nil { t.Fatalf("getCPUTemperatures returned error: %v", err) } } node_exporter-1.7.0/collector/cpu_openbsd.go000066400000000000000000000050621452426057600212300ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
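// A hedged aside on the tick-to-seconds conversion used by the NetBSD CPU
// collector above (the OpenBSD collector that follows does the same): the
// kern.cp_time values are counters incremented at the statistics clock
// frequency, so seconds are obtained by dividing by stathz, falling back to
// hz when stathz is 0. Minimal sketch only; the numbers in the usage note
// are invented for illustration.
func ticksToSeconds(ticks float64, stathz, hz int32) float64 {
	freq := float64(hz)
	if stathz > 0 {
		freq = float64(stathz) // prefer the statistics clock, as getCPUTimes does
	}
	return ticks / freq
}

// Usage: ticksToSeconds(1234500, 100, 1000) == 12345 CPU-seconds.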
//go:build !nocpu // +build !nocpu package collector import ( "strconv" "unsafe" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) const ( CP_USER = iota CP_NICE CP_SYS CP_SPIN CP_INTR CP_IDLE CPUSTATES ) const ( CP_USER_O63 = iota CP_NICE_O63 CP_SYS_O63 CP_INTR_O63 CP_IDLE_O63 CPUSTATES_O63 ) type cpuCollector struct { cpu typedDesc logger log.Logger } func init() { registerCollector("cpu", defaultEnabled, NewCPUCollector) } func NewCPUCollector(logger log.Logger) (Collector, error) { return &cpuCollector{ cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, logger: logger, }, nil } func (c *cpuCollector) Update(ch chan<- prometheus.Metric) (err error) { clockb, err := unix.SysctlRaw("kern.clockrate") if err != nil { return err } clock := *(*unix.Clockinfo)(unsafe.Pointer(&clockb[0])) hz := float64(clock.Stathz) ncpus, err := unix.SysctlUint32("hw.ncpu") if err != nil { return err } var cpTime [][CPUSTATES]uint64 for i := 0; i < int(ncpus); i++ { cpb, err := unix.SysctlRaw("kern.cp_time2", i) if err != nil && err != unix.ENODEV { return err } if err != unix.ENODEV { var times [CPUSTATES]uint64 for n := 0; n < len(cpb); n += 8 { times[n/8] = *(*uint64)(unsafe.Pointer(&cpb[n])) } if len(cpb)/8 == CPUSTATES_O63 { copy(times[CP_INTR:], times[CP_INTR_O63:]) times[CP_SPIN] = 0 } cpTime = append(cpTime, times) } } for cpu, time := range cpTime { lcpu := strconv.Itoa(cpu) ch <- c.cpu.mustNewConstMetric(float64(time[CP_USER])/hz, lcpu, "user") ch <- c.cpu.mustNewConstMetric(float64(time[CP_NICE])/hz, lcpu, "nice") ch <- c.cpu.mustNewConstMetric(float64(time[CP_SYS])/hz, lcpu, "system") ch <- c.cpu.mustNewConstMetric(float64(time[CP_SPIN])/hz, lcpu, "spin") ch <- c.cpu.mustNewConstMetric(float64(time[CP_INTR])/hz, lcpu, "interrupt") ch <- c.cpu.mustNewConstMetric(float64(time[CP_IDLE])/hz, lcpu, "idle") } return err } node_exporter-1.7.0/collector/cpu_solaris.go000066400000000000000000000034201452426057600212460ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
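// A hedged illustration of the pre-OpenBSD-6.4 compatibility shim in the
// collector above: kernels older than 6.4 report only five kern.cp_time2
// states (CPUSTATES_O63, no spin time), so the old values are widened into
// the six-state CPUSTATES layout with spin forced to zero. Sketch only, with
// plain literal sizes instead of the CP_* constants defined above.
func widenOldCpTime(old [5]uint64) [6]uint64 {
	return [6]uint64{
		old[0], // user
		old[1], // nice
		old[2], // sys
		0,      // spin: not reported before OpenBSD 6.4
		old[3], // interrupt
		old[4], // idle
	}
}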
//go:build !nocpu // +build !nocpu package collector import ( "strconv" "github.com/go-kit/log" "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" ) // #include import "C" type cpuCollector struct { cpu typedDesc logger log.Logger } func init() { registerCollector("cpu", defaultEnabled, NewCpuCollector) } func NewCpuCollector(logger log.Logger) (Collector, error) { return &cpuCollector{ cpu: typedDesc{nodeCPUSecondsDesc, prometheus.CounterValue}, logger: logger, }, nil } func (c *cpuCollector) Update(ch chan<- prometheus.Metric) error { ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) tok, err := kstat.Open() if err != nil { return err } defer tok.Close() for cpu := 0; cpu < int(ncpus); cpu++ { ksCPU, err := tok.Lookup("cpu", cpu, "sys") if err != nil { return err } for k, v := range map[string]string{ "idle": "cpu_ticks_idle", "kernel": "cpu_ticks_kernel", "user": "cpu_ticks_user", "wait": "cpu_ticks_wait", } { kstatValue, err := ksCPU.GetNamed(v) if err != nil { return err } ch <- c.cpu.mustNewConstMetric(float64(kstatValue.UintVal), strconv.Itoa(cpu), k) } } return nil } node_exporter-1.7.0/collector/cpu_vulnerabilities_linux.go000066400000000000000000000037231452426057600242200ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) const ( cpuVulerabilitiesCollector = "cpu_vulnerabilities" ) var ( vulnerabilityDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuVulerabilitiesCollector, "info"), "Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. The same state is stored as a string in the label", []string{"codename", "state"}, nil, ) ) type cpuVulnerabilitiesCollector struct{} func init() { registerCollector(cpuVulerabilitiesCollector, defaultDisabled, NewVulnerabilitySysfsCollector) } func NewVulnerabilitySysfsCollector(logger log.Logger) (Collector, error) { return &cpuVulnerabilitiesCollector{}, nil } func (v *cpuVulnerabilitiesCollector) Update(ch chan<- prometheus.Metric) error { fs, err := sysfs.NewFS(*sysPath) if err != nil { return fmt.Errorf("failed to open sysfs: %w", err) } vulnerabilities, err := fs.CPUVulnerabilities() if err != nil { return fmt.Errorf("failed to get vulnerabilities: %w", err) } for _, vulnerability := range vulnerabilities { ch <- prometheus.MustNewConstMetric( vulnerabilityDesc, prometheus.GaugeValue, 1.0, vulnerability.CodeName, sysfs.VulnerabilityHumanEncoding[vulnerability.State], ) } return nil } node_exporter-1.7.0/collector/cpufreq_common.go000066400000000000000000000041511452426057600217420ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "github.com/prometheus/client_golang/prometheus" ) var ( cpuFreqHertzDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_hertz"), "Current CPU thread frequency in hertz.", []string{"cpu"}, nil, ) cpuFreqMinDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_min_hertz"), "Minimum CPU thread frequency in hertz.", []string{"cpu"}, nil, ) cpuFreqMaxDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "frequency_max_hertz"), "Maximum CPU thread frequency in hertz.", []string{"cpu"}, nil, ) cpuFreqScalingFreqDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_hertz"), "Current scaled CPU thread frequency in hertz.", []string{"cpu"}, nil, ) cpuFreqScalingFreqMinDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_min_hertz"), "Minimum scaled CPU thread frequency in hertz.", []string{"cpu"}, nil, ) cpuFreqScalingFreqMaxDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_frequency_max_hertz"), "Maximum scaled CPU thread frequency in hertz.", []string{"cpu"}, nil, ) cpuFreqScalingGovernorDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, cpuCollectorSubsystem, "scaling_governor"), "Current enabled CPU frequency governor.", []string{"cpu", "governor"}, nil, ) ) node_exporter-1.7.0/collector/cpufreq_linux.go000066400000000000000000000064541452426057600216210ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nocpu // +build !nocpu package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" "strings" ) type cpuFreqCollector struct { fs sysfs.FS logger log.Logger } func init() { registerCollector("cpufreq", defaultEnabled, NewCPUFreqCollector) } // NewCPUFreqCollector returns a new Collector exposing kernel/system statistics. func NewCPUFreqCollector(logger log.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &cpuFreqCollector{ fs: fs, logger: logger, }, nil } // Update implements Collector and exposes cpu related metrics from /proc/stat and /sys/.../cpu/. 
func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { cpuFreqs, err := c.fs.SystemCpufreq() if err != nil { return err } // sysfs cpufreq values are kHz, thus multiply by 1000 to export base units (hz). // See https://www.kernel.org/doc/Documentation/cpu-freq/user-guide.txt for _, stats := range cpuFreqs { if stats.CpuinfoCurrentFrequency != nil { ch <- prometheus.MustNewConstMetric( cpuFreqHertzDesc, prometheus.GaugeValue, float64(*stats.CpuinfoCurrentFrequency)*1000.0, stats.Name, ) } if stats.CpuinfoMinimumFrequency != nil { ch <- prometheus.MustNewConstMetric( cpuFreqMinDesc, prometheus.GaugeValue, float64(*stats.CpuinfoMinimumFrequency)*1000.0, stats.Name, ) } if stats.CpuinfoMaximumFrequency != nil { ch <- prometheus.MustNewConstMetric( cpuFreqMaxDesc, prometheus.GaugeValue, float64(*stats.CpuinfoMaximumFrequency)*1000.0, stats.Name, ) } if stats.ScalingCurrentFrequency != nil { ch <- prometheus.MustNewConstMetric( cpuFreqScalingFreqDesc, prometheus.GaugeValue, float64(*stats.ScalingCurrentFrequency)*1000.0, stats.Name, ) } if stats.ScalingMinimumFrequency != nil { ch <- prometheus.MustNewConstMetric( cpuFreqScalingFreqMinDesc, prometheus.GaugeValue, float64(*stats.ScalingMinimumFrequency)*1000.0, stats.Name, ) } if stats.ScalingMaximumFrequency != nil { ch <- prometheus.MustNewConstMetric( cpuFreqScalingFreqMaxDesc, prometheus.GaugeValue, float64(*stats.ScalingMaximumFrequency)*1000.0, stats.Name, ) } if stats.Governor != "" { availableGovernors := strings.Split(stats.AvailableGovernors, " ") for _, g := range availableGovernors { state := 0 if g == stats.Governor { state = 1 } ch <- prometheus.MustNewConstMetric( cpuFreqScalingGovernorDesc, prometheus.GaugeValue, float64(state), stats.Name, g, ) } } } return nil } node_exporter-1.7.0/collector/cpufreq_solaris.go000066400000000000000000000036611452426057600221330ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
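// A hedged sketch of the scaling-governor encoding used by the Linux cpufreq
// collector above: every governor listed in scaling_available_governors gets
// a gauge that is 1 only for the currently active governor. It relies on the
// standard strings package; the governor names in the usage note are
// examples, not values read from a real system.
func governorStates(available, active string) map[string]float64 {
	states := map[string]float64{}
	for _, g := range strings.Split(available, " ") {
		if g == active {
			states[g] = 1
		} else {
			states[g] = 0
		}
	}
	return states
}

// Usage: governorStates("performance powersave", "powersave")
// returns map[string]float64{"performance": 0, "powersave": 1}.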
//go:build !nocpu // +build !nocpu package collector import ( "fmt" "strconv" "github.com/go-kit/log" "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" ) // #include import "C" type cpuFreqCollector struct { logger log.Logger } func init() { registerCollector("cpufreq", defaultEnabled, NewCpuFreqCollector) } func NewCpuFreqCollector(logger log.Logger) (Collector, error) { return &cpuFreqCollector{ logger: logger, }, nil } func (c *cpuFreqCollector) Update(ch chan<- prometheus.Metric) error { ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) tok, err := kstat.Open() if err != nil { return err } defer tok.Close() for cpu := 0; cpu < int(ncpus); cpu++ { ksCPUInfo, err := tok.Lookup("cpu_info", cpu, fmt.Sprintf("cpu_info%d", cpu)) if err != nil { return err } cpuFreqV, err := ksCPUInfo.GetNamed("current_clock_Hz") if err != nil { return err } cpuFreqMaxV, err := ksCPUInfo.GetNamed("clock_MHz") if err != nil { return err } lcpu := strconv.Itoa(cpu) ch <- prometheus.MustNewConstMetric( cpuFreqHertzDesc, prometheus.GaugeValue, float64(cpuFreqV.UintVal), lcpu, ) // Multiply by 1e+6 to convert MHz to Hz. ch <- prometheus.MustNewConstMetric( cpuFreqMaxDesc, prometheus.GaugeValue, float64(cpuFreqMaxV.IntVal)*1e+6, lcpu, ) } return nil } node_exporter-1.7.0/collector/device_filter.go000066400000000000000000000023161452426057600215320ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "regexp" ) type deviceFilter struct { ignorePattern *regexp.Regexp acceptPattern *regexp.Regexp } func newDeviceFilter(ignoredPattern, acceptPattern string) (f deviceFilter) { if ignoredPattern != "" { f.ignorePattern = regexp.MustCompile(ignoredPattern) } if acceptPattern != "" { f.acceptPattern = regexp.MustCompile(acceptPattern) } return } // ignored returns whether the device should be ignored func (f *deviceFilter) ignored(name string) bool { return (f.ignorePattern != nil && f.ignorePattern.MatchString(name)) || (f.acceptPattern != nil && !f.acceptPattern.MatchString(name)) } node_exporter-1.7.0/collector/device_filter_test.go000066400000000000000000000024331452426057600225710ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
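// A hedged usage sketch of the deviceFilter defined above (the tests that
// follow exercise the same behaviour): a device is ignored when it matches
// the ignore pattern, or when an accept pattern is set and the device does
// not match it. The device names and patterns here are examples only.
func exampleDeviceFilterUsage() {
	f := newDeviceFilter(`^(ram|loop)\d+$`, "") // exclude-only filter
	_ = f.ignored("loop0")                      // true: matches the ignore pattern
	_ = f.ignored("sda")                        // false: kept

	g := newDeviceFilter("", `^eth\d+$`) // include-only filter
	_ = g.ignored("eth0")                // false: matches the accept pattern
	_ = g.ignored("wlan0")               // true: accept pattern set but not matched
}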
package collector import ( "testing" ) func TestDeviceFilter(t *testing.T) { tests := []struct { ignore string accept string name string expectedResult bool }{ {"", "", "eth0", false}, {"", "^💩0$", "💩0", false}, {"", "^💩0$", "💩1", true}, {"", "^💩0$", "veth0", true}, {"^💩", "", "💩3", true}, {"^💩", "", "veth0", false}, } for _, test := range tests { filter := newDeviceFilter(test.ignore, test.accept) result := filter.ignored(test.name) if result != test.expectedResult { t.Errorf("ignorePattern=%v acceptPattern=%v ifname=%v expected=%v result=%v", test.ignore, test.accept, test.name, test.expectedResult, result) } } } node_exporter-1.7.0/collector/devstat_dragonfly.go000066400000000000000000000074721452426057600224550ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodevstat // +build !nodevstat package collector import ( "errors" "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) /* #cgo LDFLAGS: -ldevstat #include #include #include typedef struct { char device[DEVSTAT_NAME_LEN]; int unit; uint64_t bytes; uint64_t transfers; uint64_t blocks; } Stats; int _get_ndevs() { struct statinfo current; int num_devices; current.dinfo = (struct devinfo *)calloc(1, sizeof(struct devinfo)); if (current.dinfo == NULL) return -2; checkversion(); if (getdevs(¤t) == -1) return -1; return current.dinfo->numdevs; } Stats _get_stats(int i) { struct statinfo current; int num_devices; current.dinfo = (struct devinfo *)calloc(1, sizeof(struct devinfo)); getdevs(¤t); num_devices = current.dinfo->numdevs; Stats stats; uint64_t total_bytes, total_transfers, total_blocks; long double kb_per_transfer, transfers_per_second, mb_per_second, blocks_per_second, ms_per_transaction; strcpy(stats.device, current.dinfo->devices[i].device_name); stats.unit = current.dinfo->devices[i].unit_number; compute_stats(¤t.dinfo->devices[i], NULL, 1.0, &total_bytes, &total_transfers, &total_blocks, &kb_per_transfer, &transfers_per_second, &mb_per_second, &blocks_per_second, &ms_per_transaction); stats.bytes = total_bytes; stats.transfers = total_transfers; stats.blocks = total_blocks; return stats; } */ import "C" const ( devstatSubsystem = "devstat" ) type devstatCollector struct { bytesDesc *prometheus.Desc transfersDesc *prometheus.Desc blocksDesc *prometheus.Desc logger log.Logger } func init() { registerCollector("devstat", defaultDisabled, NewDevstatCollector) } // NewDevstatCollector returns a new Collector exposing Device stats. 
func NewDevstatCollector(logger log.Logger) (Collector, error) { return &devstatCollector{ bytesDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"), "The total number of bytes transferred for reads and writes on the device.", []string{"device"}, nil, ), transfersDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "transfers_total"), "The total number of transactions completed.", []string{"device"}, nil, ), blocksDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "blocks_total"), "The total number of bytes given in terms of the devices blocksize.", []string{"device"}, nil, ), logger: logger, }, nil } func (c *devstatCollector) Update(ch chan<- prometheus.Metric) error { count := C._get_ndevs() if count == -1 { return errors.New("getdevs() failed") } if count == -2 { return errors.New("calloc() failed") } for i := C.int(0); i < count; i++ { stats := C._get_stats(i) device := fmt.Sprintf("%s%d", C.GoString(&stats.device[0]), stats.unit) ch <- prometheus.MustNewConstMetric(c.bytesDesc, prometheus.CounterValue, float64(stats.bytes), device) ch <- prometheus.MustNewConstMetric(c.transfersDesc, prometheus.CounterValue, float64(stats.transfers), device) ch <- prometheus.MustNewConstMetric(c.blocksDesc, prometheus.CounterValue, float64(stats.blocks), device) } return nil } node_exporter-1.7.0/collector/devstat_freebsd.c000066400000000000000000000050621452426057600217100ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
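// A hedged note on the device label built by the DragonFly collector above:
// libdevstat reports the driver name and unit number separately, and Update
// joins them with fmt.Sprintf("%s%d", ...) into the familiar device name.
// Illustrative values only, e.g.:
//
//	device := fmt.Sprintf("%s%d", "ada", 0) // "ada0"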
// +build !nodevstat #include #include #include #include #include #include #include #include int _get_stats(struct devinfo *info, Stats **stats) { struct statinfo current; current.dinfo = info; if (devstat_getdevs(NULL, ¤t) == -1) { return -1; } Stats *p = (Stats*)calloc(current.dinfo->numdevs, sizeof(Stats)); for (int i = 0; i < current.dinfo->numdevs; i++) { uint64_t bytes_read, bytes_write, bytes_free; uint64_t transfers_other, transfers_read, transfers_write, transfers_free; long double duration_other, duration_read, duration_write, duration_free; long double busy_time; uint64_t blocks; strcpy(p[i].device, current.dinfo->devices[i].device_name); p[i].unit = current.dinfo->devices[i].unit_number; devstat_compute_statistics(¤t.dinfo->devices[i], NULL, 1.0, DSM_TOTAL_BYTES_READ, &bytes_read, DSM_TOTAL_BYTES_WRITE, &bytes_write, DSM_TOTAL_BYTES_FREE, &bytes_free, DSM_TOTAL_TRANSFERS_OTHER, &transfers_other, DSM_TOTAL_TRANSFERS_READ, &transfers_read, DSM_TOTAL_TRANSFERS_WRITE, &transfers_write, DSM_TOTAL_TRANSFERS_FREE, &transfers_free, DSM_TOTAL_DURATION_OTHER, &duration_other, DSM_TOTAL_DURATION_READ, &duration_read, DSM_TOTAL_DURATION_WRITE, &duration_write, DSM_TOTAL_DURATION_FREE, &duration_free, DSM_TOTAL_BUSY_TIME, &busy_time, DSM_TOTAL_BLOCKS, &blocks, DSM_NONE); p[i].bytes.read = bytes_read; p[i].bytes.write = bytes_write; p[i].bytes.free = bytes_free; p[i].transfers.other = transfers_other; p[i].transfers.read = transfers_read; p[i].transfers.write = transfers_write; p[i].transfers.free = transfers_free; p[i].duration.other = duration_other; p[i].duration.read = duration_read; p[i].duration.write = duration_write; p[i].duration.free = duration_free; p[i].busyTime = busy_time; p[i].blocks = blocks; } *stats = p; return current.dinfo->numdevs; } node_exporter-1.7.0/collector/devstat_freebsd.go000066400000000000000000000075361452426057600221030ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodevstat // +build !nodevstat package collector import ( "errors" "fmt" "sync" "unsafe" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) // #cgo LDFLAGS: -ldevstat -lkvm -lelf // #include "devstat_freebsd.h" import "C" const ( devstatSubsystem = "devstat" ) type devstatCollector struct { mu sync.Mutex devinfo *C.struct_devinfo bytes typedDesc transfers typedDesc duration typedDesc busyTime typedDesc blocks typedDesc logger log.Logger } func init() { registerCollector("devstat", defaultDisabled, NewDevstatCollector) } // NewDevstatCollector returns a new Collector exposing Device stats. 
func NewDevstatCollector(logger log.Logger) (Collector, error) { return &devstatCollector{ devinfo: &C.struct_devinfo{}, bytes: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "bytes_total"), "The total number of bytes in transactions.", []string{"device", "type"}, nil, ), prometheus.CounterValue}, transfers: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "transfers_total"), "The total number of transactions.", []string{"device", "type"}, nil, ), prometheus.CounterValue}, duration: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "duration_seconds_total"), "The total duration of transactions in seconds.", []string{"device", "type"}, nil, ), prometheus.CounterValue}, busyTime: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "busy_time_seconds_total"), "Total time the device had one or more transactions outstanding in seconds.", []string{"device"}, nil, ), prometheus.CounterValue}, blocks: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, devstatSubsystem, "blocks_transferred_total"), "The total number of blocks transferred.", []string{"device"}, nil, ), prometheus.CounterValue}, logger: logger, }, nil } func (c *devstatCollector) Update(ch chan<- prometheus.Metric) error { c.mu.Lock() defer c.mu.Unlock() var stats *C.Stats n := C._get_stats(c.devinfo, &stats) if n == -1 { return errors.New("devstat_getdevs failed") } base := unsafe.Pointer(stats) for i := C.int(0); i < n; i++ { offset := i * C.int(C.sizeof_Stats) stat := (*C.Stats)(unsafe.Pointer(uintptr(base) + uintptr(offset))) device := fmt.Sprintf("%s%d", C.GoString(&stat.device[0]), stat.unit) ch <- c.bytes.mustNewConstMetric(float64(stat.bytes.read), device, "read") ch <- c.bytes.mustNewConstMetric(float64(stat.bytes.write), device, "write") ch <- c.transfers.mustNewConstMetric(float64(stat.transfers.other), device, "other") ch <- c.transfers.mustNewConstMetric(float64(stat.transfers.read), device, "read") ch <- c.transfers.mustNewConstMetric(float64(stat.transfers.write), device, "write") ch <- c.duration.mustNewConstMetric(float64(stat.duration.other), device, "other") ch <- c.duration.mustNewConstMetric(float64(stat.duration.read), device, "read") ch <- c.duration.mustNewConstMetric(float64(stat.duration.write), device, "write") ch <- c.busyTime.mustNewConstMetric(float64(stat.busyTime), device) ch <- c.blocks.mustNewConstMetric(float64(stat.blocks), device) } C.free(unsafe.Pointer(stats)) return nil } node_exporter-1.7.0/collector/devstat_freebsd.h000066400000000000000000000023231452426057600217120ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
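// A hedged aside on the pointer walk in the FreeBSD collector's Update above:
// the C-allocated Stats array is indexed with manual uintptr arithmetic.
// Assuming Go 1.17+, an equivalent (illustrative, not upstream) formulation
// would view the same memory through unsafe.Slice:
//
//	all := unsafe.Slice(stats, int(n)) // []C.Stats backed by the C allocation
//	for i := range all {
//		device := fmt.Sprintf("%s%d", C.GoString(&all[i].device[0]), all[i].unit)
//		_ = device
//	}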
#include #include #include #include #include #include #include typedef struct { uint64_t read; uint64_t write; uint64_t free; } Bytes; typedef struct { uint64_t other; uint64_t read; uint64_t write; uint64_t free; } Transfers; typedef struct { double other; double read; double write; double free; } Duration; typedef struct { char device[DEVSTAT_NAME_LEN]; int unit; Bytes bytes; Transfers transfers; Duration duration; long busyTime; uint64_t blocks; } Stats; int _get_ndevs(); int _get_stats(struct devinfo *info, Stats **stats); node_exporter-1.7.0/collector/diskstats_common.go000066400000000000000000000100321452426057600223010ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodiskstats && (openbsd || linux || darwin) // +build !nodiskstats // +build openbsd linux darwin package collector import ( "errors" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const ( diskSubsystem = "disk" ) var ( diskLabelNames = []string{"device"} diskstatsDeviceExcludeSet bool diskstatsDeviceExclude = kingpin.Flag( "collector.diskstats.device-exclude", "Regexp of diskstats devices to exclude (mutually exclusive to device-include).", ).Default(diskstatsDefaultIgnoredDevices).PreAction(func(c *kingpin.ParseContext) error { diskstatsDeviceExcludeSet = true return nil }).String() oldDiskstatsDeviceExclude = kingpin.Flag( "collector.diskstats.ignored-devices", "DEPRECATED: Use collector.diskstats.device-exclude", ).Hidden().String() diskstatsDeviceInclude = kingpin.Flag("collector.diskstats.device-include", "Regexp of diskstats devices to include (mutually exclusive to device-exclude).").String() readsCompletedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "reads_completed_total"), "The total number of reads completed successfully.", diskLabelNames, nil, ) readBytesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "read_bytes_total"), "The total number of bytes read successfully.", diskLabelNames, nil, ) writesCompletedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "writes_completed_total"), "The total number of writes completed successfully.", diskLabelNames, nil, ) writtenBytesDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "written_bytes_total"), "The total number of bytes written successfully.", diskLabelNames, nil, ) ioTimeSecondsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "io_time_seconds_total"), "Total seconds spent doing I/Os.", diskLabelNames, nil, ) readTimeSecondsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "read_time_seconds_total"), "The total number of seconds spent by all reads.", diskLabelNames, nil, ) writeTimeSecondsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "write_time_seconds_total"), "This is the total number of seconds spent 
by all writes.", diskLabelNames, nil, ) ) func newDiskstatsDeviceFilter(logger log.Logger) (deviceFilter, error) { if *oldDiskstatsDeviceExclude != "" { if !diskstatsDeviceExcludeSet { level.Warn(logger).Log("msg", "--collector.diskstats.ignored-devices is DEPRECATED and will be removed in 2.0.0, use --collector.diskstats.device-exclude") *diskstatsDeviceExclude = *oldDiskstatsDeviceExclude } else { return deviceFilter{}, errors.New("--collector.diskstats.ignored-devices and --collector.diskstats.device-exclude are mutually exclusive") } } if *diskstatsDeviceExclude != "" && *diskstatsDeviceInclude != "" { return deviceFilter{}, errors.New("device-exclude & device-include are mutually exclusive") } if *diskstatsDeviceExclude != "" { level.Info(logger).Log("msg", "Parsed flag --collector.diskstats.device-exclude", "flag", *diskstatsDeviceExclude) } if *diskstatsDeviceInclude != "" { level.Info(logger).Log("msg", "Parsed Flag --collector.diskstats.device-include", "flag", *diskstatsDeviceInclude) } return newDeviceFilter(*diskstatsDeviceExclude, *diskstatsDeviceInclude), nil } node_exporter-1.7.0/collector/diskstats_darwin.go000066400000000000000000000127451452426057600223120ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodiskstats // +build !nodiskstats package collector import ( "fmt" "github.com/go-kit/log" "github.com/lufia/iostat" "github.com/prometheus/client_golang/prometheus" ) const diskstatsDefaultIgnoredDevices = "" type typedDescFunc struct { typedDesc value func(stat *iostat.DriveStats) float64 } type diskstatsCollector struct { descs []typedDescFunc deviceFilter deviceFilter logger log.Logger } func init() { registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) } // NewDiskstatsCollector returns a new Collector exposing disk device stats. 
func NewDiskstatsCollector(logger log.Logger) (Collector, error) { var diskLabelNames = []string{"device"} deviceFilter, err := newDiskstatsDeviceFilter(logger) if err != nil { return nil, fmt.Errorf("failed to parse device filter flags: %w", err) } return &diskstatsCollector{ descs: []typedDescFunc{ { typedDesc: typedDesc{ desc: readsCompletedDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.NumRead) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "read_sectors_total"), "The total number of sectors read successfully.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.NumRead) / float64(stat.BlockSize) }, }, { typedDesc: typedDesc{ desc: readTimeSecondsDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return stat.TotalReadTime.Seconds() }, }, { typedDesc: typedDesc{ desc: writesCompletedDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.NumWrite) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "written_sectors_total"), "The total number of sectors written successfully.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.NumWrite) / float64(stat.BlockSize) }, }, { typedDesc: typedDesc{ desc: writeTimeSecondsDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return stat.TotalWriteTime.Seconds() }, }, { typedDesc: typedDesc{ desc: readBytesDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.BytesRead) }, }, { typedDesc: typedDesc{ desc: writtenBytesDesc, valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.BytesWritten) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "read_errors_total"), "The total number of read errors.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.ReadErrors) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "write_errors_total"), "The total number of write errors.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.WriteErrors) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "read_retries_total"), "The total number of read retries.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.ReadRetries) }, }, { typedDesc: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "write_retries_total"), "The total number of write retries.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, value: func(stat *iostat.DriveStats) float64 { return float64(stat.WriteRetries) }, }, }, deviceFilter: deviceFilter, logger: logger, }, nil } func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { diskStats, err := iostat.ReadDriveStats() if err != nil { return fmt.Errorf("couldn't get diskstats: %w", err) } for _, stats := range diskStats { if 
c.deviceFilter.ignored(stats.Name) { continue } for _, desc := range c.descs { v := desc.value(stats) ch <- desc.mustNewConstMetric(v, stats.Name) } } return nil } node_exporter-1.7.0/collector/diskstats_linux.go000066400000000000000000000303741452426057600221630ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodiskstats // +build !nodiskstats package collector import ( "bufio" "fmt" "os" "strconv" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/blockdevice" ) const ( secondsPerTick = 1.0 / 1000.0 // Read sectors and write sectors are the "standard UNIX 512-byte sectors, not any device- or filesystem-specific block size." // See also https://www.kernel.org/doc/Documentation/block/stat.txt unixSectorSize = 512.0 diskstatsDefaultIgnoredDevices = "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$" // See udevadm(8). udevDevicePropertyPrefix = "E:" // Udev device properties. udevDMLVLayer = "DM_LV_LAYER" udevDMLVName = "DM_LV_NAME" udevDMName = "DM_NAME" udevDMUUID = "DM_UUID" udevDMVGName = "DM_VG_NAME" udevIDATA = "ID_ATA" udevIDATARotationRateRPM = "ID_ATA_ROTATION_RATE_RPM" udevIDATASATA = "ID_ATA_SATA" udevIDATASATASignalRateGen1 = "ID_ATA_SATA_SIGNAL_RATE_GEN1" udevIDATASATASignalRateGen2 = "ID_ATA_SATA_SIGNAL_RATE_GEN2" udevIDATAWriteCache = "ID_ATA_WRITE_CACHE" udevIDATAWriteCacheEnabled = "ID_ATA_WRITE_CACHE_ENABLED" udevIDFSType = "ID_FS_TYPE" udevIDFSUsage = "ID_FS_USAGE" udevIDFSUUID = "ID_FS_UUID" udevIDFSVersion = "ID_FS_VERSION" udevIDModel = "ID_MODEL" udevIDPath = "ID_PATH" udevIDRevision = "ID_REVISION" udevIDSerialShort = "ID_SERIAL_SHORT" udevIDWWN = "ID_WWN" udevSCSIIdentSerial = "SCSI_IDENT_SERIAL" ) type typedFactorDesc struct { desc *prometheus.Desc valueType prometheus.ValueType } type udevInfo map[string]string func (d *typedFactorDesc) mustNewConstMetric(value float64, labels ...string) prometheus.Metric { return prometheus.MustNewConstMetric(d.desc, d.valueType, value, labels...) } type diskstatsCollector struct { deviceFilter deviceFilter fs blockdevice.FS infoDesc typedFactorDesc descs []typedFactorDesc filesystemInfoDesc typedFactorDesc deviceMapperInfoDesc typedFactorDesc ataDescs map[string]typedFactorDesc logger log.Logger getUdevDeviceProperties func(uint32, uint32) (udevInfo, error) } func init() { registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) } // NewDiskstatsCollector returns a new Collector exposing disk device stats. 
// Docs from https://www.kernel.org/doc/Documentation/iostats.txt func NewDiskstatsCollector(logger log.Logger) (Collector, error) { var diskLabelNames = []string{"device"} fs, err := blockdevice.NewFS(*procPath, *sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } deviceFilter, err := newDiskstatsDeviceFilter(logger) if err != nil { return nil, fmt.Errorf("failed to parse device filter flags: %w", err) } collector := diskstatsCollector{ deviceFilter: deviceFilter, fs: fs, infoDesc: typedFactorDesc{ desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "info"), "Info of /sys/block/.", []string{"device", "major", "minor", "path", "wwn", "model", "serial", "revision"}, nil, ), valueType: prometheus.GaugeValue, }, descs: []typedFactorDesc{ { desc: readsCompletedDesc, valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "reads_merged_total"), "The total number of reads merged.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: readBytesDesc, valueType: prometheus.CounterValue, }, { desc: readTimeSecondsDesc, valueType: prometheus.CounterValue, }, { desc: writesCompletedDesc, valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "writes_merged_total"), "The number of writes merged.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: writtenBytesDesc, valueType: prometheus.CounterValue, }, { desc: writeTimeSecondsDesc, valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "io_now"), "The number of I/Os currently in progress.", diskLabelNames, nil, ), valueType: prometheus.GaugeValue, }, { desc: ioTimeSecondsDesc, valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "io_time_weighted_seconds_total"), "The weighted # of seconds spent doing I/Os.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "discards_completed_total"), "The total number of discards completed successfully.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "discards_merged_total"), "The total number of discards merged.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "discarded_sectors_total"), "The total number of sectors discarded successfully.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "discard_time_seconds_total"), "This is the total number of seconds spent by all discards.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "flush_requests_total"), "The total number of flush requests completed successfully", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, { desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, diskSubsystem, "flush_requests_time_seconds_total"), "This is the total number of seconds spent by all flush requests.", diskLabelNames, nil, ), valueType: prometheus.CounterValue, }, }, filesystemInfoDesc: typedFactorDesc{ desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, 
"filesystem_info"), "Info about disk filesystem.", []string{"device", "type", "usage", "uuid", "version"}, nil, ), valueType: prometheus.GaugeValue, }, deviceMapperInfoDesc: typedFactorDesc{ desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "device_mapper_info"), "Info about disk device mapper.", []string{"device", "name", "uuid", "vg_name", "lv_name", "lv_layer"}, nil, ), valueType: prometheus.GaugeValue, }, ataDescs: map[string]typedFactorDesc{ udevIDATAWriteCache: { desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "ata_write_cache"), "ATA disk has a write cache.", []string{"device"}, nil, ), valueType: prometheus.GaugeValue, }, udevIDATAWriteCacheEnabled: { desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "ata_write_cache_enabled"), "ATA disk has its write cache enabled.", []string{"device"}, nil, ), valueType: prometheus.GaugeValue, }, udevIDATARotationRateRPM: { desc: prometheus.NewDesc(prometheus.BuildFQName(namespace, diskSubsystem, "ata_rotation_rate_rpm"), "ATA disk rotation rate in RPMs (0 for SSDs).", []string{"device"}, nil, ), valueType: prometheus.GaugeValue, }, }, logger: logger, } // Only enable getting device properties from udev if the directory is readable. if stat, err := os.Stat(*udevDataPath); err != nil || !stat.IsDir() { level.Error(logger).Log("msg", "Failed to open directory, disabling udev device properties", "path", *udevDataPath) } else { collector.getUdevDeviceProperties = getUdevDeviceProperties } return &collector, nil } func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) error { diskStats, err := c.fs.ProcDiskstats() if err != nil { return fmt.Errorf("couldn't get diskstats: %w", err) } for _, stats := range diskStats { dev := stats.DeviceName if c.deviceFilter.ignored(dev) { continue } info, err := getUdevDeviceProperties(stats.MajorNumber, stats.MinorNumber) if err != nil { level.Debug(c.logger).Log("msg", "Failed to parse udev info", "err", err) } // This is usually the serial printed on the disk label. serial := info[udevSCSIIdentSerial] // If it's undefined, fallback to ID_SERIAL_SHORT instead. 
if serial == "" { serial = info[udevIDSerialShort] } ch <- c.infoDesc.mustNewConstMetric(1.0, dev, fmt.Sprint(stats.MajorNumber), fmt.Sprint(stats.MinorNumber), info[udevIDPath], info[udevIDWWN], info[udevIDModel], serial, info[udevIDRevision], ) statCount := stats.IoStatsCount - 3 // Total diskstats record count, less MajorNumber, MinorNumber and DeviceName for i, val := range []float64{ float64(stats.ReadIOs), float64(stats.ReadMerges), float64(stats.ReadSectors) * unixSectorSize, float64(stats.ReadTicks) * secondsPerTick, float64(stats.WriteIOs), float64(stats.WriteMerges), float64(stats.WriteSectors) * unixSectorSize, float64(stats.WriteTicks) * secondsPerTick, float64(stats.IOsInProgress), float64(stats.IOsTotalTicks) * secondsPerTick, float64(stats.WeightedIOTicks) * secondsPerTick, float64(stats.DiscardIOs), float64(stats.DiscardMerges), float64(stats.DiscardSectors), float64(stats.DiscardTicks) * secondsPerTick, float64(stats.FlushRequestsCompleted), float64(stats.TimeSpentFlushing) * secondsPerTick, } { if i >= statCount { break } ch <- c.descs[i].mustNewConstMetric(val, dev) } if fsType := info[udevIDFSType]; fsType != "" { ch <- c.filesystemInfoDesc.mustNewConstMetric(1.0, dev, fsType, info[udevIDFSUsage], info[udevIDFSUUID], info[udevIDFSVersion], ) } if name := info[udevDMName]; name != "" { ch <- c.deviceMapperInfoDesc.mustNewConstMetric(1.0, dev, name, info[udevDMUUID], info[udevDMVGName], info[udevDMLVName], info[udevDMLVLayer], ) } if ata := info[udevIDATA]; ata != "" { for attr, desc := range c.ataDescs { str, ok := info[attr] if !ok { level.Debug(c.logger).Log("msg", "Udev attribute does not exist", "attribute", attr) continue } if value, err := strconv.ParseFloat(str, 64); err == nil { ch <- desc.mustNewConstMetric(value, dev) } else { level.Error(c.logger).Log("msg", "Failed to parse ATA value", "err", err) } } } } return nil } func getUdevDeviceProperties(major, minor uint32) (udevInfo, error) { filename := udevDataFilePath(fmt.Sprintf("b%d:%d", major, minor)) data, err := os.Open(filename) if err != nil { return nil, err } defer data.Close() info := make(udevInfo) scanner := bufio.NewScanner(data) for scanner.Scan() { line := scanner.Text() // We're only interested in device properties. if !strings.HasPrefix(line, udevDevicePropertyPrefix) { continue } line = strings.TrimPrefix(line, udevDevicePropertyPrefix) /* TODO: After we drop support for Go 1.17, the condition below can be simplified to: if name, value, found := strings.Cut(line, "="); found { info[name] = value } */ if fields := strings.SplitN(line, "=", 2); len(fields) == 2 { info[fields[0]] = fields[1] } } return info, nil } node_exporter-1.7.0/collector/diskstats_linux_test.go000066400000000000000000000455571452426057600232330ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nodiskstats // +build !nodiskstats package collector import ( "fmt" "os" "strings" "testing" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" ) type testDiskStatsCollector struct { dsc Collector } func (c testDiskStatsCollector) Collect(ch chan<- prometheus.Metric) { c.dsc.Update(ch) } func (c testDiskStatsCollector) Describe(ch chan<- *prometheus.Desc) { prometheus.DescribeByCollect(c, ch) } func NewTestDiskStatsCollector(logger log.Logger) (prometheus.Collector, error) { dsc, err := NewDiskstatsCollector(logger) if err != nil { return testDiskStatsCollector{}, err } return testDiskStatsCollector{ dsc: dsc, }, err } func TestDiskStats(t *testing.T) { *sysPath = "fixtures/sys" *procPath = "fixtures/proc" *udevDataPath = "fixtures/udev/data" *diskstatsDeviceExclude = "^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$" testcase := `# HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs). # TYPE node_disk_ata_rotation_rate_rpm gauge node_disk_ata_rotation_rate_rpm{device="sda"} 7200 node_disk_ata_rotation_rate_rpm{device="sdb"} 0 node_disk_ata_rotation_rate_rpm{device="sdc"} 0 # HELP node_disk_ata_write_cache ATA disk has a write cache. # TYPE node_disk_ata_write_cache gauge node_disk_ata_write_cache{device="sda"} 1 node_disk_ata_write_cache{device="sdb"} 1 node_disk_ata_write_cache{device="sdc"} 1 # HELP node_disk_ata_write_cache_enabled ATA disk has its write cache enabled. # TYPE node_disk_ata_write_cache_enabled gauge node_disk_ata_write_cache_enabled{device="sda"} 0 node_disk_ata_write_cache_enabled{device="sdb"} 1 node_disk_ata_write_cache_enabled{device="sdc"} 0 # HELP node_disk_device_mapper_info Info about disk device mapper. # TYPE node_disk_device_mapper_info gauge node_disk_device_mapper_info{device="dm-0",lv_layer="",lv_name="",name="nvme0n1_crypt",uuid="CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt",vg_name=""} 1 node_disk_device_mapper_info{device="dm-1",lv_layer="",lv_name="swap_1",name="system-swap_1",uuid="LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-2",lv_layer="",lv_name="root",name="system-root",uuid="LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-3",lv_layer="",lv_name="var",name="system-var",uuid="LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-4",lv_layer="",lv_name="tmp",name="system-tmp",uuid="LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-5",lv_layer="",lv_name="home",name="system-home",uuid="LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf",vg_name="system"} 1 # HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. # TYPE node_disk_discard_time_seconds_total counter node_disk_discard_time_seconds_total{device="sdb"} 11.13 node_disk_discard_time_seconds_total{device="sdc"} 11.13 # HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. # TYPE node_disk_discarded_sectors_total counter node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 node_disk_discarded_sectors_total{device="sdc"} 1.25173784e+08 # HELP node_disk_discards_completed_total The total number of discards completed successfully. 
# TYPE node_disk_discards_completed_total counter node_disk_discards_completed_total{device="sdb"} 68851 node_disk_discards_completed_total{device="sdc"} 18851 # HELP node_disk_discards_merged_total The total number of discards merged. # TYPE node_disk_discards_merged_total counter node_disk_discards_merged_total{device="sdb"} 0 node_disk_discards_merged_total{device="sdc"} 0 # HELP node_disk_filesystem_info Info about disk filesystem. # TYPE node_disk_filesystem_info gauge node_disk_filesystem_info{device="dm-0",type="LVM2_member",usage="raid",uuid="c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB",version="LVM2 001"} 1 node_disk_filesystem_info{device="dm-1",type="swap",usage="other",uuid="5272bb60-04b5-49cd-b730-be57c7604450",version="1"} 1 node_disk_filesystem_info{device="dm-2",type="ext4",usage="filesystem",uuid="3deafd0d-faff-4695-8d15-51061ae1f51b",version="1.0"} 1 node_disk_filesystem_info{device="dm-3",type="ext4",usage="filesystem",uuid="5c772222-f7d4-4c8e-87e8-e97df6b7a45e",version="1.0"} 1 node_disk_filesystem_info{device="dm-4",type="ext4",usage="filesystem",uuid="a9479d44-60e1-4015-a1e5-bb065e6dd11b",version="1.0"} 1 node_disk_filesystem_info{device="dm-5",type="ext4",usage="filesystem",uuid="b05b726a-c718-4c4d-8641-7c73a7696d83",version="1.0"} 1 node_disk_filesystem_info{device="mmcblk0p1",type="vfat",usage="filesystem",uuid="6284-658D",version="FAT32"} 1 node_disk_filesystem_info{device="mmcblk0p2",type="ext4",usage="filesystem",uuid="83324ce8-a6f3-4e35-ad64-dbb3d6b87a32",version="1.0"} 1 node_disk_filesystem_info{device="sda",type="LVM2_member",usage="raid",uuid="cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw",version="LVM2 001"} 1 node_disk_filesystem_info{device="sdc",type="LVM2_member",usage="raid",uuid="QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb",version="LVM2 001"} 1 # HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. # TYPE node_disk_flush_requests_time_seconds_total counter node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 # HELP node_disk_flush_requests_total The total number of flush requests completed successfully # TYPE node_disk_flush_requests_total counter node_disk_flush_requests_total{device="sdc"} 1555 # HELP node_disk_info Info of /sys/block/. 
# TYPE node_disk_info gauge node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1 node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1 node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1 node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1 node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",serial="AAAABBBBCCCC1",wwn=""} 1 node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",serial="",wwn=""} 1 # HELP node_disk_io_now The number of I/Os currently in progress. # TYPE node_disk_io_now gauge node_disk_io_now{device="dm-0"} 0 node_disk_io_now{device="dm-1"} 0 node_disk_io_now{device="dm-2"} 0 node_disk_io_now{device="dm-3"} 0 node_disk_io_now{device="dm-4"} 0 node_disk_io_now{device="dm-5"} 0 node_disk_io_now{device="mmcblk0"} 0 node_disk_io_now{device="mmcblk0p1"} 0 node_disk_io_now{device="mmcblk0p2"} 0 node_disk_io_now{device="nvme0n1"} 0 node_disk_io_now{device="sda"} 0 node_disk_io_now{device="sdb"} 0 node_disk_io_now{device="sdc"} 0 node_disk_io_now{device="sr0"} 0 node_disk_io_now{device="vda"} 0 # HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. 
# TYPE node_disk_io_time_seconds_total counter node_disk_io_time_seconds_total{device="dm-0"} 11325.968 node_disk_io_time_seconds_total{device="dm-1"} 0.076 node_disk_io_time_seconds_total{device="dm-2"} 65.4 node_disk_io_time_seconds_total{device="dm-3"} 0.016 node_disk_io_time_seconds_total{device="dm-4"} 0.024 node_disk_io_time_seconds_total{device="dm-5"} 58.848 node_disk_io_time_seconds_total{device="mmcblk0"} 0.136 node_disk_io_time_seconds_total{device="mmcblk0p1"} 0.024 node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 node_disk_io_time_seconds_total{device="sdc"} 10.73 node_disk_io_time_seconds_total{device="sr0"} 0 node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 # HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. # TYPE node_disk_io_time_weighted_seconds_total counter node_disk_io_time_weighted_seconds_total{device="dm-0"} 1.206301256e+06 node_disk_io_time_weighted_seconds_total{device="dm-1"} 0.084 node_disk_io_time_weighted_seconds_total{device="dm-2"} 129.416 node_disk_io_time_weighted_seconds_total{device="dm-3"} 0.10400000000000001 node_disk_io_time_weighted_seconds_total{device="dm-4"} 0.044 node_disk_io_time_weighted_seconds_total{device="dm-5"} 105.632 node_disk_io_time_weighted_seconds_total{device="mmcblk0"} 0.156 node_disk_io_time_weighted_seconds_total{device="mmcblk0p1"} 0.024 node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 node_disk_io_time_weighted_seconds_total{device="sdc"} 17.07 node_disk_io_time_weighted_seconds_total{device="sr0"} 0 node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 # HELP node_disk_read_bytes_total The total number of bytes read successfully. # TYPE node_disk_read_bytes_total counter node_disk_read_bytes_total{device="dm-0"} 5.13708655616e+11 node_disk_read_bytes_total{device="dm-1"} 1.589248e+06 node_disk_read_bytes_total{device="dm-2"} 1.578752e+08 node_disk_read_bytes_total{device="dm-3"} 1.98144e+06 node_disk_read_bytes_total{device="dm-4"} 529408 node_disk_read_bytes_total{device="dm-5"} 4.3150848e+07 node_disk_read_bytes_total{device="mmcblk0"} 798720 node_disk_read_bytes_total{device="mmcblk0p1"} 81920 node_disk_read_bytes_total{device="mmcblk0p2"} 389120 node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 node_disk_read_bytes_total{device="sdc"} 8.48782848e+08 node_disk_read_bytes_total{device="sr0"} 0 node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 # HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. 
# TYPE node_disk_read_time_seconds_total counter node_disk_read_time_seconds_total{device="dm-0"} 46229.572 node_disk_read_time_seconds_total{device="dm-1"} 0.084 node_disk_read_time_seconds_total{device="dm-2"} 6.5360000000000005 node_disk_read_time_seconds_total{device="dm-3"} 0.10400000000000001 node_disk_read_time_seconds_total{device="dm-4"} 0.028 node_disk_read_time_seconds_total{device="dm-5"} 0.924 node_disk_read_time_seconds_total{device="mmcblk0"} 0.156 node_disk_read_time_seconds_total{device="mmcblk0p1"} 0.024 node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 node_disk_read_time_seconds_total{device="sda"} 18492.372 node_disk_read_time_seconds_total{device="sdb"} 0.084 node_disk_read_time_seconds_total{device="sdc"} 0.014 node_disk_read_time_seconds_total{device="sr0"} 0 node_disk_read_time_seconds_total{device="vda"} 8655.768 # HELP node_disk_reads_completed_total The total number of reads completed successfully. # TYPE node_disk_reads_completed_total counter node_disk_reads_completed_total{device="dm-0"} 5.9910002e+07 node_disk_reads_completed_total{device="dm-1"} 388 node_disk_reads_completed_total{device="dm-2"} 11571 node_disk_reads_completed_total{device="dm-3"} 3870 node_disk_reads_completed_total{device="dm-4"} 392 node_disk_reads_completed_total{device="dm-5"} 3729 node_disk_reads_completed_total{device="mmcblk0"} 192 node_disk_reads_completed_total{device="mmcblk0p1"} 17 node_disk_reads_completed_total{device="mmcblk0p2"} 95 node_disk_reads_completed_total{device="nvme0n1"} 47114 node_disk_reads_completed_total{device="sda"} 2.5354637e+07 node_disk_reads_completed_total{device="sdb"} 326552 node_disk_reads_completed_total{device="sdc"} 126552 node_disk_reads_completed_total{device="sr0"} 0 node_disk_reads_completed_total{device="vda"} 1.775784e+06 # HELP node_disk_reads_merged_total The total number of reads merged. # TYPE node_disk_reads_merged_total counter node_disk_reads_merged_total{device="dm-0"} 0 node_disk_reads_merged_total{device="dm-1"} 0 node_disk_reads_merged_total{device="dm-2"} 0 node_disk_reads_merged_total{device="dm-3"} 0 node_disk_reads_merged_total{device="dm-4"} 0 node_disk_reads_merged_total{device="dm-5"} 0 node_disk_reads_merged_total{device="mmcblk0"} 3 node_disk_reads_merged_total{device="mmcblk0p1"} 3 node_disk_reads_merged_total{device="mmcblk0p2"} 0 node_disk_reads_merged_total{device="nvme0n1"} 4 node_disk_reads_merged_total{device="sda"} 3.4367663e+07 node_disk_reads_merged_total{device="sdb"} 841 node_disk_reads_merged_total{device="sdc"} 141 node_disk_reads_merged_total{device="sr0"} 0 node_disk_reads_merged_total{device="vda"} 15386 # HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
# TYPE node_disk_write_time_seconds_total counter node_disk_write_time_seconds_total{device="dm-0"} 1.1585578e+06 node_disk_write_time_seconds_total{device="dm-1"} 0 node_disk_write_time_seconds_total{device="dm-2"} 122.884 node_disk_write_time_seconds_total{device="dm-3"} 0 node_disk_write_time_seconds_total{device="dm-4"} 0.016 node_disk_write_time_seconds_total{device="dm-5"} 104.684 node_disk_write_time_seconds_total{device="mmcblk0"} 0 node_disk_write_time_seconds_total{device="mmcblk0p1"} 0 node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 node_disk_write_time_seconds_total{device="sda"} 63877.96 node_disk_write_time_seconds_total{device="sdb"} 5.007 node_disk_write_time_seconds_total{device="sdc"} 1.0070000000000001 node_disk_write_time_seconds_total{device="sr0"} 0 node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 # HELP node_disk_writes_completed_total The total number of writes completed successfully. # TYPE node_disk_writes_completed_total counter node_disk_writes_completed_total{device="dm-0"} 3.9231014e+07 node_disk_writes_completed_total{device="dm-1"} 74 node_disk_writes_completed_total{device="dm-2"} 153522 node_disk_writes_completed_total{device="dm-3"} 0 node_disk_writes_completed_total{device="dm-4"} 38 node_disk_writes_completed_total{device="dm-5"} 98918 node_disk_writes_completed_total{device="mmcblk0"} 0 node_disk_writes_completed_total{device="mmcblk0p1"} 0 node_disk_writes_completed_total{device="mmcblk0p2"} 0 node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 node_disk_writes_completed_total{device="sda"} 2.8444756e+07 node_disk_writes_completed_total{device="sdb"} 41822 node_disk_writes_completed_total{device="sdc"} 11822 node_disk_writes_completed_total{device="sr0"} 0 node_disk_writes_completed_total{device="vda"} 6.038856e+06 # HELP node_disk_writes_merged_total The number of writes merged. # TYPE node_disk_writes_merged_total counter node_disk_writes_merged_total{device="dm-0"} 0 node_disk_writes_merged_total{device="dm-1"} 0 node_disk_writes_merged_total{device="dm-2"} 0 node_disk_writes_merged_total{device="dm-3"} 0 node_disk_writes_merged_total{device="dm-4"} 0 node_disk_writes_merged_total{device="dm-5"} 0 node_disk_writes_merged_total{device="mmcblk0"} 0 node_disk_writes_merged_total{device="mmcblk0p1"} 0 node_disk_writes_merged_total{device="mmcblk0p2"} 0 node_disk_writes_merged_total{device="nvme0n1"} 43950 node_disk_writes_merged_total{device="sda"} 1.1134226e+07 node_disk_writes_merged_total{device="sdb"} 2895 node_disk_writes_merged_total{device="sdc"} 1895 node_disk_writes_merged_total{device="sr0"} 0 node_disk_writes_merged_total{device="vda"} 2.0711856e+07 # HELP node_disk_written_bytes_total The total number of bytes written successfully. 
# TYPE node_disk_written_bytes_total counter node_disk_written_bytes_total{device="dm-0"} 2.5891680256e+11 node_disk_written_bytes_total{device="dm-1"} 303104 node_disk_written_bytes_total{device="dm-2"} 2.607828992e+09 node_disk_written_bytes_total{device="dm-3"} 0 node_disk_written_bytes_total{device="dm-4"} 70144 node_disk_written_bytes_total{device="dm-5"} 5.89664256e+08 node_disk_written_bytes_total{device="mmcblk0"} 0 node_disk_written_bytes_total{device="mmcblk0p1"} 0 node_disk_written_bytes_total{device="mmcblk0p2"} 0 node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 node_disk_written_bytes_total{device="sdc"} 8.852736e+07 node_disk_written_bytes_total{device="sr0"} 0 node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 ` logger := log.NewLogfmtLogger(os.Stderr) collector, err := NewDiskstatsCollector(logger) if err != nil { panic(err) } c, err := NewTestDiskStatsCollector(logger) if err != nil { t.Fatal(err) } reg := prometheus.NewRegistry() reg.MustRegister(c) sink := make(chan prometheus.Metric) go func() { err = collector.Update(sink) if err != nil { panic(fmt.Errorf("failed to update collector: %s", err)) } close(sink) }() err = testutil.GatherAndCompare(reg, strings.NewReader(testcase)) if err != nil { t.Fatal(err) } } node_exporter-1.7.0/collector/diskstats_openbsd.go000066400000000000000000000053501452426057600224520ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodiskstats && !amd64 // +build !nodiskstats,!amd64 package collector import ( "fmt" "unsafe" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) /* #include #include */ import "C" const diskstatsDefaultIgnoredDevices = "" type diskstatsCollector struct { rxfer typedDesc rbytes typedDesc wxfer typedDesc wbytes typedDesc time typedDesc deviceFilter deviceFilter logger log.Logger } func init() { registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) } // NewDiskstatsCollector returns a new Collector exposing disk device stats. 
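// On OpenBSD the per-device counters come from the hw.diskstats sysctl rather than procfs; the
// exported series reuse the shared descriptors, for example (a sketch only — "sd0" is a
// hypothetical device name and the values are illustrative):
//
//	node_disk_reads_completed_total{device="sd0"} 12345
//	node_disk_read_bytes_total{device="sd0"} 6.291456e+06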
func NewDiskstatsCollector(logger log.Logger) (Collector, error) { deviceFilter, err := newDiskstatsDeviceFilter(logger) if err != nil { return nil, fmt.Errorf("failed to parse device filter flags: %w", err) } return &diskstatsCollector{ rxfer: typedDesc{readsCompletedDesc, prometheus.CounterValue}, rbytes: typedDesc{readBytesDesc, prometheus.CounterValue}, wxfer: typedDesc{writesCompletedDesc, prometheus.CounterValue}, wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue}, time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue}, deviceFilter: deviceFilter, logger: logger, }, nil } func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) { diskstatsb, err := unix.SysctlRaw("hw.diskstats") if err != nil { return err } ndisks := len(diskstatsb) / C.sizeof_struct_diskstats diskstats := *(*[]C.struct_diskstats)(unsafe.Pointer(&diskstatsb)) for i := 0; i < ndisks; i++ { diskname := C.GoString(&diskstats[i].ds_name[0]) if c.deviceFilter.ignored(diskname) { continue } ch <- c.rxfer.mustNewConstMetric(float64(diskstats[i].ds_rxfer), diskname) ch <- c.rbytes.mustNewConstMetric(float64(diskstats[i].ds_rbytes), diskname) ch <- c.wxfer.mustNewConstMetric(float64(diskstats[i].ds_wxfer), diskname) ch <- c.wbytes.mustNewConstMetric(float64(diskstats[i].ds_wbytes), diskname) time := float64(diskstats[i].ds_time.tv_sec) + float64(diskstats[i].ds_time.tv_usec)/1000000 ch <- c.time.mustNewConstMetric(time, diskname) } return nil } node_exporter-1.7.0/collector/diskstats_openbsd_amd64.go000066400000000000000000000057071452426057600234530ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodiskstats // +build !nodiskstats package collector import ( "fmt" "unsafe" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) const ( DS_DISKNAMELEN = 16 diskstatsDefaultIgnoredDevices = "" ) type DiskStats struct { Name [DS_DISKNAMELEN]int8 Busy int32 Rxfer uint64 Wxfer uint64 Seek uint64 Rbytes uint64 Wbytes uint64 Attachtime unix.Timeval Timestamp unix.Timeval Time unix.Timeval } type diskstatsCollector struct { rxfer typedDesc rbytes typedDesc wxfer typedDesc wbytes typedDesc time typedDesc deviceFilter deviceFilter logger log.Logger } func init() { registerCollector("diskstats", defaultEnabled, NewDiskstatsCollector) } // NewDiskstatsCollector returns a new Collector exposing disk device stats. 
func NewDiskstatsCollector(logger log.Logger) (Collector, error) { deviceFilter, err := newDiskstatsDeviceFilter(logger) if err != nil { return nil, fmt.Errorf("failed to parse device filter flags: %w", err) } return &diskstatsCollector{ rxfer: typedDesc{readsCompletedDesc, prometheus.CounterValue}, rbytes: typedDesc{readBytesDesc, prometheus.CounterValue}, wxfer: typedDesc{writesCompletedDesc, prometheus.CounterValue}, wbytes: typedDesc{writtenBytesDesc, prometheus.CounterValue}, time: typedDesc{ioTimeSecondsDesc, prometheus.CounterValue}, deviceFilter: deviceFilter, logger: logger, }, nil } func (c *diskstatsCollector) Update(ch chan<- prometheus.Metric) (err error) { diskstatsb, err := unix.SysctlRaw("hw.diskstats") if err != nil { return err } ndisks := len(diskstatsb) / int(unsafe.Sizeof(DiskStats{})) diskstats := *(*[]DiskStats)(unsafe.Pointer(&diskstatsb)) for i := 0; i < ndisks; i++ { dn := *(*[DS_DISKNAMELEN]int8)(unsafe.Pointer(&diskstats[i].Name[0])) diskname := int8ToString(dn[:]) if c.deviceFilter.ignored(diskname) { continue } ch <- c.rxfer.mustNewConstMetric(float64(diskstats[i].Rxfer), diskname) ch <- c.rbytes.mustNewConstMetric(float64(diskstats[i].Rbytes), diskname) ch <- c.wxfer.mustNewConstMetric(float64(diskstats[i].Wxfer), diskname) ch <- c.wbytes.mustNewConstMetric(float64(diskstats[i].Wbytes), diskname) time := float64(diskstats[i].Time.Sec) + float64(diskstats[i].Time.Usec)/1000000 ch <- c.time.mustNewConstMetric(time, diskname) } return nil } node_exporter-1.7.0/collector/dmi.go000066400000000000000000000066171452426057600175070ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build linux && !nodmi // +build linux,!nodmi package collector import ( "errors" "fmt" "os" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) type dmiCollector struct { infoDesc *prometheus.Desc values []string } func init() { registerCollector("dmi", defaultEnabled, NewDMICollector) } // NewDMICollector returns a new Collector exposing DMI information. 
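// The collector emits a single constant gauge whose labels mirror whatever fields the host's DMI
// tables expose (only non-empty fields become labels). For example (a sketch; label values are
// host-specific placeholders, not real data):
//
//	node_dmi_info{bios_vendor="ExampleBIOS",board_name="ExampleBoard",product_name="ExampleProduct",system_vendor="ExampleVendor"} 1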
func NewDMICollector(logger log.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } dmi, err := fs.DMIClass() if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(logger).Log("msg", "Platform does not support Desktop Management Interface (DMI) information", "err", err) dmi = &sysfs.DMIClass{} } else { return nil, fmt.Errorf("failed to read Desktop Management Interface (DMI) information: %w", err) } } var labels, values []string for label, value := range map[string]*string{ "bios_date": dmi.BiosDate, "bios_release": dmi.BiosRelease, "bios_vendor": dmi.BiosVendor, "bios_version": dmi.BiosVersion, "board_asset_tag": dmi.BoardAssetTag, "board_name": dmi.BoardName, "board_serial": dmi.BoardSerial, "board_vendor": dmi.BoardVendor, "board_version": dmi.BoardVersion, "chassis_asset_tag": dmi.ChassisAssetTag, "chassis_serial": dmi.ChassisSerial, "chassis_vendor": dmi.ChassisVendor, "chassis_version": dmi.ChassisVersion, "product_family": dmi.ProductFamily, "product_name": dmi.ProductName, "product_serial": dmi.ProductSerial, "product_sku": dmi.ProductSKU, "product_uuid": dmi.ProductUUID, "product_version": dmi.ProductVersion, "system_vendor": dmi.SystemVendor, } { if value != nil { labels = append(labels, label) values = append(values, strings.ToValidUTF8(*value, "�")) } } // Construct DMI metric only once since it will not change until the next reboot. return &dmiCollector{ infoDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "dmi", "info"), "A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, "+ "board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, "+ "chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, "+ "product_sku, product_uuid, product_version, system_vendor if provided by DMI.", labels, nil, ), values: values, }, nil } func (c *dmiCollector) Update(ch chan<- prometheus.Metric) error { if len(c.values) == 0 { return ErrNoData } ch <- prometheus.MustNewConstMetric(c.infoDesc, prometheus.GaugeValue, 1.0, c.values...) return nil } node_exporter-1.7.0/collector/drbd_linux.go000066400000000000000000000147471452426057600210730ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nodrbd // +build !nodrbd package collector import ( "bufio" "errors" "fmt" "os" "strconv" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) // Numerical metric provided by /proc/drbd. 
type drbdNumericalMetric struct { desc *prometheus.Desc valueType prometheus.ValueType multiplier float64 } func newDRBDNumericalMetric(name, desc string, valueType prometheus.ValueType, multiplier float64) drbdNumericalMetric { return drbdNumericalMetric{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", name), desc, []string{"device"}, nil, ), valueType: valueType, multiplier: multiplier, } } // String pair metric provided by /proc/drbd. type drbdStringPairMetric struct { desc *prometheus.Desc valueOK string } func (m *drbdStringPairMetric) isOkay(v string) float64 { if v == m.valueOK { return 1 } return 0 } func newDRBDStringPairMetric(name, desc, valueOK string) drbdStringPairMetric { return drbdStringPairMetric{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", name), desc, []string{"device", "node"}, nil, ), valueOK: valueOK, } } type drbdCollector struct { numerical map[string]drbdNumericalMetric stringPair map[string]drbdStringPairMetric connected *prometheus.Desc logger log.Logger } func init() { registerCollector("drbd", defaultDisabled, newDRBDCollector) } func newDRBDCollector(logger log.Logger) (Collector, error) { return &drbdCollector{ numerical: map[string]drbdNumericalMetric{ "ns": newDRBDNumericalMetric( "network_sent_bytes_total", "Total number of bytes sent via the network.", prometheus.CounterValue, 1024, ), "nr": newDRBDNumericalMetric( "network_received_bytes_total", "Total number of bytes received via the network.", prometheus.CounterValue, 1, ), "dw": newDRBDNumericalMetric( "disk_written_bytes_total", "Net data written on local hard disk; in bytes.", prometheus.CounterValue, 1024, ), "dr": newDRBDNumericalMetric( "disk_read_bytes_total", "Net data read from local hard disk; in bytes.", prometheus.CounterValue, 1024, ), "al": newDRBDNumericalMetric( "activitylog_writes_total", "Number of updates of the activity log area of the meta data.", prometheus.CounterValue, 1, ), "bm": newDRBDNumericalMetric( "bitmap_writes_total", "Number of updates of the bitmap area of the meta data.", prometheus.CounterValue, 1, ), "lo": newDRBDNumericalMetric( "local_pending", "Number of open requests to the local I/O sub-system.", prometheus.GaugeValue, 1, ), "pe": newDRBDNumericalMetric( "remote_pending", "Number of requests sent to the peer, but that have not yet been answered by the latter.", prometheus.GaugeValue, 1, ), "ua": newDRBDNumericalMetric( "remote_unacknowledged", "Number of requests received by the peer via the network connection, but that have not yet been answered.", prometheus.GaugeValue, 1, ), "ap": newDRBDNumericalMetric( "application_pending", "Number of block I/O requests forwarded to DRBD, but not yet answered by DRBD.", prometheus.GaugeValue, 1, ), "ep": newDRBDNumericalMetric( "epochs", "Number of Epochs currently on the fly.", prometheus.GaugeValue, 1, ), "oos": newDRBDNumericalMetric( "out_of_sync_bytes", "Amount of data known to be out of sync; in bytes.", prometheus.GaugeValue, 1024, ), }, stringPair: map[string]drbdStringPairMetric{ "ro": newDRBDStringPairMetric( "node_role_is_primary", "Whether the role of the node is in the primary state.", "Primary", ), "ds": newDRBDStringPairMetric( "disk_state_is_up_to_date", "Whether the disk of the node is up to date.", "UpToDate", ), }, connected: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", "connected"), "Whether DRBD is connected to the peer.", []string{"device"}, nil, ), logger: logger, }, nil } func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error 
type drbdNumericalMetric struct { desc *prometheus.Desc valueType prometheus.ValueType multiplier float64 } func newDRBDNumericalMetric(name, desc string, valueType prometheus.ValueType, multiplier float64) drbdNumericalMetric { return drbdNumericalMetric{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", name), desc, []string{"device"}, nil, ), valueType: valueType, multiplier: multiplier, } } // String pair metric provided by /proc/drbd. type drbdStringPairMetric struct { desc *prometheus.Desc valueOK string } func (m *drbdStringPairMetric) isOkay(v string) float64 { if v == m.valueOK { return 1 } return 0 } func newDRBDStringPairMetric(name, desc, valueOK string) drbdStringPairMetric { return drbdStringPairMetric{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", name), desc, []string{"device", "node"}, nil, ), valueOK: valueOK, } } type drbdCollector struct { numerical map[string]drbdNumericalMetric stringPair map[string]drbdStringPairMetric connected *prometheus.Desc logger log.Logger } func init() { registerCollector("drbd", defaultDisabled, newDRBDCollector) } func newDRBDCollector(logger log.Logger) (Collector, error) { return &drbdCollector{ numerical: map[string]drbdNumericalMetric{ /* /proc/drbd reports byte counters in KiB, hence the 1024 multiplier on the *_bytes metrics. */ "ns": newDRBDNumericalMetric( "network_sent_bytes_total", "Total number of bytes sent via the network.", prometheus.CounterValue, 1024, ), "nr": newDRBDNumericalMetric( "network_received_bytes_total", "Total number of bytes received via the network.", prometheus.CounterValue, 1024, ), "dw": newDRBDNumericalMetric( "disk_written_bytes_total", "Net data written on local hard disk; in bytes.", prometheus.CounterValue, 1024, ), "dr": newDRBDNumericalMetric( "disk_read_bytes_total", "Net data read from local hard disk; in bytes.", prometheus.CounterValue, 1024, ), "al": newDRBDNumericalMetric( "activitylog_writes_total", "Number of updates of the activity log area of the meta data.", prometheus.CounterValue, 1, ), "bm": newDRBDNumericalMetric( "bitmap_writes_total", "Number of updates of the bitmap area of the meta data.", prometheus.CounterValue, 1, ), "lo": newDRBDNumericalMetric( "local_pending", "Number of open requests to the local I/O sub-system.", prometheus.GaugeValue, 1, ), "pe": newDRBDNumericalMetric( "remote_pending", "Number of requests sent to the peer, but that have not yet been answered by the latter.", prometheus.GaugeValue, 1, ), "ua": newDRBDNumericalMetric( "remote_unacknowledged", "Number of requests received by the peer via the network connection, but that have not yet been answered.", prometheus.GaugeValue, 1, ), "ap": newDRBDNumericalMetric( "application_pending", "Number of block I/O requests forwarded to DRBD, but not yet answered by DRBD.", prometheus.GaugeValue, 1, ), "ep": newDRBDNumericalMetric( "epochs", "Number of Epochs currently on the fly.", prometheus.GaugeValue, 1, ), "oos": newDRBDNumericalMetric( "out_of_sync_bytes", "Amount of data known to be out of sync; in bytes.", prometheus.GaugeValue, 1024, ), }, stringPair: map[string]drbdStringPairMetric{ "ro": newDRBDStringPairMetric( "node_role_is_primary", "Whether the role of the node is in the primary state.", "Primary", ), "ds": newDRBDStringPairMetric( "disk_state_is_up_to_date", "Whether the disk of the node is up to date.", "UpToDate", ), }, connected: prometheus.NewDesc( prometheus.BuildFQName(namespace, "drbd", "connected"), "Whether DRBD is connected to the peer.", []string{"device"}, nil, ), logger: logger, }, nil } func (c *drbdCollector) Update(ch chan<- prometheus.Metric) error
{ statsFile := procFilePath("drbd") file, err := os.Open(statsFile) if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "stats file does not exist, skipping", "file", statsFile, "err", err) return ErrNoData } return err } defer file.Close() scanner := bufio.NewScanner(file) scanner.Split(bufio.ScanWords) device := "unknown" for scanner.Scan() { field := scanner.Text() kv := strings.Split(field, ":") if len(kv) != 2 { level.Debug(c.logger).Log("msg", "skipping invalid key:value pair", "field", field) continue } if id, err := strconv.ParseUint(kv[0], 10, 64); err == nil && kv[1] == "" { // New DRBD device encountered. device = fmt.Sprintf("drbd%d", id) continue } if m, ok := c.numerical[kv[0]]; ok { // Numerical value. v, err := strconv.ParseFloat(kv[1], 64) if err != nil { return err } ch <- prometheus.MustNewConstMetric( m.desc, m.valueType, v*m.multiplier, device, ) continue } if m, ok := c.stringPair[kv[0]]; ok { // String pair value. values := strings.Split(kv[1], "/") ch <- prometheus.MustNewConstMetric( m.desc, prometheus.GaugeValue, m.isOkay(values[0]), device, "local", ) ch <- prometheus.MustNewConstMetric( m.desc, prometheus.GaugeValue, m.isOkay(values[1]), device, "remote", ) continue } if kv[0] == "cs" { // Connection state. var connected float64 if kv[1] == "Connected" { connected = 1 } ch <- prometheus.MustNewConstMetric( c.connected, prometheus.GaugeValue, connected, device, ) continue } level.Debug(c.logger).Log("msg", "unhandled key-value pair", "key", kv[0], "value", kv[1]) } return scanner.Err() } node_exporter-1.7.0/collector/drm_linux.go000066400000000000000000000110711452426057600207250ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nogpu // +build !nogpu package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) const ( drmCollectorSubsystem = "drm" ) type drmCollector struct { fs sysfs.FS logger log.Logger CardInfo *prometheus.Desc GPUBusyPercent *prometheus.Desc MemoryGTTSize *prometheus.Desc MemoryGTTUsed *prometheus.Desc MemoryVisibleVRAMSize *prometheus.Desc MemoryVisibleVRAMUsed *prometheus.Desc MemoryVRAMSize *prometheus.Desc MemoryVRAMUsed *prometheus.Desc } func init() { registerCollector("drm", defaultDisabled, NewDrmCollector) } // NewDrmCollector returns a new Collector exposing /sys/class/drm/card?/device stats. 
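// At present only AMD GPUs are covered (Update delegates to updateAMDCards below); each series
// carries the card name as a label, for example (a sketch; "card0" and the values are
// illustrative only):
//
//	node_drm_gpu_busy_percent{card="card0"} 4
//	node_drm_memory_vram_used_bytes{card="card0"} 2.9360128e+08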
func NewDrmCollector(logger log.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &drmCollector{ fs: fs, logger: logger, CardInfo: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "card_info"), "Card information", []string{"card", "memory_vendor", "power_performance_level", "unique_id", "vendor"}, nil, ), GPUBusyPercent: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "gpu_busy_percent"), "How busy the GPU is as a percentage.", []string{"card"}, nil, ), MemoryGTTSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_gtt_size_bytes"), "The size of the graphics translation table (GTT) block in bytes.", []string{"card"}, nil, ), MemoryGTTUsed: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_gtt_used_bytes"), "The used amount of the graphics translation table (GTT) block in bytes.", []string{"card"}, nil, ), MemoryVisibleVRAMSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vis_vram_size_bytes"), "The size of visible VRAM in bytes.", []string{"card"}, nil, ), MemoryVisibleVRAMUsed: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vis_vram_used_bytes"), "The used amount of visible VRAM in bytes.", []string{"card"}, nil, ), MemoryVRAMSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vram_size_bytes"), "The size of VRAM in bytes.", []string{"card"}, nil, ), MemoryVRAMUsed: prometheus.NewDesc( prometheus.BuildFQName(namespace, drmCollectorSubsystem, "memory_vram_used_bytes"), "The used amount of VRAM in bytes.", []string{"card"}, nil, ), }, nil } func (c *drmCollector) Update(ch chan<- prometheus.Metric) error { return c.updateAMDCards(ch) } func (c *drmCollector) updateAMDCards(ch chan<- prometheus.Metric) error { vendor := "amd" stats, err := c.fs.ClassDRMCardAMDGPUStats() if err != nil { return err } for _, s := range stats { ch <- prometheus.MustNewConstMetric( c.CardInfo, prometheus.GaugeValue, 1, s.Name, s.MemoryVRAMVendor, s.PowerDPMForcePerformanceLevel, s.UniqueID, vendor) ch <- prometheus.MustNewConstMetric( c.GPUBusyPercent, prometheus.GaugeValue, float64(s.GPUBusyPercent), s.Name) ch <- prometheus.MustNewConstMetric( c.MemoryGTTSize, prometheus.GaugeValue, float64(s.MemoryGTTSize), s.Name) ch <- prometheus.MustNewConstMetric( c.MemoryGTTUsed, prometheus.GaugeValue, float64(s.MemoryGTTUsed), s.Name) ch <- prometheus.MustNewConstMetric( c.MemoryVRAMSize, prometheus.GaugeValue, float64(s.MemoryVRAMSize), s.Name) ch <- prometheus.MustNewConstMetric( c.MemoryVRAMUsed, prometheus.GaugeValue, float64(s.MemoryVRAMUsed), s.Name) ch <- prometheus.MustNewConstMetric( c.MemoryVisibleVRAMSize, prometheus.GaugeValue, float64(s.MemoryVisibleVRAMSize), s.Name) ch <- prometheus.MustNewConstMetric( c.MemoryVisibleVRAMUsed, prometheus.GaugeValue, float64(s.MemoryVisibleVRAMUsed), s.Name) } return nil } node_exporter-1.7.0/collector/edac_linux.go000066400000000000000000000116541452426057600210460ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noedac // +build !noedac package collector import ( "fmt" "path/filepath" "regexp" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( edacSubsystem = "edac" ) var ( edacMemControllerRE = regexp.MustCompile(`.*devices/system/edac/mc/mc([0-9]*)`) edacMemCsrowRE = regexp.MustCompile(`.*devices/system/edac/mc/mc[0-9]*/csrow([0-9]*)`) ) type edacCollector struct { ceCount *prometheus.Desc ueCount *prometheus.Desc csRowCECount *prometheus.Desc csRowUECount *prometheus.Desc logger log.Logger } func init() { registerCollector("edac", defaultEnabled, NewEdacCollector) } // NewEdacCollector returns a new Collector exposing edac stats. func NewEdacCollector(logger log.Logger) (Collector, error) { return &edacCollector{ ceCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, edacSubsystem, "correctable_errors_total"), "Total correctable memory errors.", []string{"controller"}, nil, ), ueCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, edacSubsystem, "uncorrectable_errors_total"), "Total uncorrectable memory errors.", []string{"controller"}, nil, ), csRowCECount: prometheus.NewDesc( prometheus.BuildFQName(namespace, edacSubsystem, "csrow_correctable_errors_total"), "Total correctable memory errors for this csrow.", []string{"controller", "csrow"}, nil, ), csRowUECount: prometheus.NewDesc( prometheus.BuildFQName(namespace, edacSubsystem, "csrow_uncorrectable_errors_total"), "Total uncorrectable memory errors for this csrow.", []string{"controller", "csrow"}, nil, ), logger: logger, }, nil } func (c *edacCollector) Update(ch chan<- prometheus.Metric) error { memControllers, err := filepath.Glob(sysFilePath("devices/system/edac/mc/mc[0-9]*")) if err != nil { return err } for _, controller := range memControllers { controllerMatch := edacMemControllerRE.FindStringSubmatch(controller) if controllerMatch == nil { return fmt.Errorf("controller string didn't match regexp: %s", controller) } controllerNumber := controllerMatch[1] value, err := readUintFromFile(filepath.Join(controller, "ce_count")) if err != nil { return fmt.Errorf("couldn't get ce_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.ceCount, prometheus.CounterValue, float64(value), controllerNumber) value, err = readUintFromFile(filepath.Join(controller, "ce_noinfo_count")) if err != nil { return fmt.Errorf("couldn't get ce_noinfo_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, "unknown") value, err = readUintFromFile(filepath.Join(controller, "ue_count")) if err != nil { return fmt.Errorf("couldn't get ue_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.ueCount, prometheus.CounterValue, float64(value), controllerNumber) value, err = readUintFromFile(filepath.Join(controller, "ue_noinfo_count")) if err != nil { return fmt.Errorf("couldn't get ue_noinfo_count for controller %s: %w", controllerNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowUECount, 
prometheus.CounterValue, float64(value), controllerNumber, "unknown") // For each controller, walk the csrow directories. csrows, err := filepath.Glob(controller + "/csrow[0-9]*") if err != nil { return err } for _, csrow := range csrows { csrowMatch := edacMemCsrowRE.FindStringSubmatch(csrow) if csrowMatch == nil { return fmt.Errorf("csrow string didn't match regexp: %s", csrow) } csrowNumber := csrowMatch[1] value, err = readUintFromFile(filepath.Join(csrow, "ce_count")) if err != nil { return fmt.Errorf("couldn't get ce_count for controller/csrow %s/%s: %w", controllerNumber, csrowNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowCECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber) value, err = readUintFromFile(filepath.Join(csrow, "ue_count")) if err != nil { return fmt.Errorf("couldn't get ue_count for controller/csrow %s/%s: %w", controllerNumber, csrowNumber, err) } ch <- prometheus.MustNewConstMetric( c.csRowUECount, prometheus.CounterValue, float64(value), controllerNumber, csrowNumber) } } return err } node_exporter-1.7.0/collector/entropy_linux.go000066400000000000000000000043151452426057600216460ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noentropy // +build !noentropy package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type entropyCollector struct { fs procfs.FS entropyAvail *prometheus.Desc entropyPoolSize *prometheus.Desc logger log.Logger } func init() { registerCollector("entropy", defaultEnabled, NewEntropyCollector) } // NewEntropyCollector returns a new Collector exposing entropy stats. 
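// Both values are read through procfs.KernelRandom from the kernel's random subsystem; the
// resulting series look like (a sketch with illustrative values):
//
//	node_entropy_available_bits 3754
//	node_entropy_pool_size_bits 4096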
func NewEntropyCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &entropyCollector{ fs: fs, entropyAvail: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "entropy_available_bits"), "Bits of available entropy.", nil, nil, ), entropyPoolSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "entropy_pool_size_bits"), "Bits of entropy pool.", nil, nil, ), logger: logger, }, nil } func (c *entropyCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.KernelRandom() if err != nil { return fmt.Errorf("failed to get kernel random stats: %w", err) } if stats.EntropyAvaliable == nil { return fmt.Errorf("couldn't get entropy_avail") } ch <- prometheus.MustNewConstMetric( c.entropyAvail, prometheus.GaugeValue, float64(*stats.EntropyAvaliable)) if stats.PoolSize == nil { return fmt.Errorf("couldn't get entropy poolsize") } ch <- prometheus.MustNewConstMetric( c.entropyPoolSize, prometheus.GaugeValue, float64(*stats.PoolSize)) return nil } node_exporter-1.7.0/collector/ethtool_linux.go000066400000000000000000000531521452426057600216270ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noethtool // +build !noethtool // The hard work of collecting data from the kernel via the ethtool interfaces is done by // https://github.com/safchain/ethtool/ // by Sylvain Afchain. Used under the Apache license. 
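// A minimal usage sketch (not part of the upstream file) of the safchain/ethtool library as the
// collector below consumes it, restricted to the calls wrapped by its Ethtool interface;
// "eth0" is a hypothetical interface name:
//
//	e, err := ethtool.NewEthtool()
//	if err != nil {
//		return err
//	}
//	defer e.Close() // release the underlying socket
//	stats, _ := e.Stats("eth0")      // map[string]uint64 of NIC driver statistics
//	drv, _ := e.DriverInfo("eth0")   // driver name, version, firmware and bus info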
package collector import ( "errors" "fmt" "os" "regexp" "sort" "strings" "sync" "syscall" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" "github.com/safchain/ethtool" "golang.org/x/sys/unix" ) var ( ethtoolDeviceInclude = kingpin.Flag("collector.ethtool.device-include", "Regexp of ethtool devices to include (mutually exclusive to device-exclude).").String() ethtoolDeviceExclude = kingpin.Flag("collector.ethtool.device-exclude", "Regexp of ethtool devices to exclude (mutually exclusive to device-include).").String() ethtoolIncludedMetrics = kingpin.Flag("collector.ethtool.metrics-include", "Regexp of ethtool stats to include.").Default(".*").String() ethtoolReceivedRegex = regexp.MustCompile(`(^|_)rx(_|$)`) ethtoolTransmitRegex = regexp.MustCompile(`(^|_)tx(_|$)`) ) type Ethtool interface { DriverInfo(string) (ethtool.DrvInfo, error) Stats(string) (map[string]uint64, error) LinkInfo(string) (ethtool.EthtoolCmd, error) } type ethtoolLibrary struct { ethtool *ethtool.Ethtool } func (e *ethtoolLibrary) DriverInfo(intf string) (ethtool.DrvInfo, error) { return e.ethtool.DriverInfo(intf) } func (e *ethtoolLibrary) Stats(intf string) (map[string]uint64, error) { return e.ethtool.Stats(intf) } func (e *ethtoolLibrary) LinkInfo(intf string) (ethtool.EthtoolCmd, error) { var ethtoolCmd ethtool.EthtoolCmd _, err := ethtoolCmd.CmdGet(intf) return ethtoolCmd, err } type ethtoolCollector struct { fs sysfs.FS entries map[string]*prometheus.Desc entriesMutex sync.Mutex ethtool Ethtool deviceFilter deviceFilter infoDesc *prometheus.Desc metricsPattern *regexp.Regexp logger log.Logger } // makeEthtoolCollector is the internal constructor for EthtoolCollector. // This allows NewEthtoolTestCollector to override its .ethtool interface // for testing. func makeEthtoolCollector(logger log.Logger) (*ethtoolCollector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } e, err := ethtool.NewEthtool() if err != nil { return nil, fmt.Errorf("failed to initialize ethtool library: %w", err) } // Pre-populate some common ethtool metrics. 
return &ethtoolCollector{ fs: fs, ethtool: &ethtoolLibrary{e}, deviceFilter: newDeviceFilter(*ethtoolDeviceExclude, *ethtoolDeviceInclude), metricsPattern: regexp.MustCompile(*ethtoolIncludedMetrics), logger: logger, entries: map[string]*prometheus.Desc{ "rx_bytes": prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "received_bytes_total"), "Network interface bytes received", []string{"device"}, nil, ), "rx_dropped": prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "received_dropped_total"), "Number of received frames dropped", []string{"device"}, nil, ), "rx_errors": prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "received_errors_total"), "Number of received frames with errors", []string{"device"}, nil, ), "rx_packets": prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "received_packets_total"), "Network interface packets received", []string{"device"}, nil, ), "tx_bytes": prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "transmitted_bytes_total"), "Network interface bytes sent", []string{"device"}, nil, ), "tx_errors": prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "transmitted_errors_total"), "Number of sent frames with errors", []string{"device"}, nil, ), "tx_packets": prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "transmitted_packets_total"), "Network interface packets sent", []string{"device"}, nil, ), // link info "supported_port": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "supported_port_info"), "Type of ports or PHYs supported by network device", []string{"device", "type"}, nil, ), "supported_speed": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "supported_speed_bytes"), "Combination of speeds and features supported by network device", []string{"device", "duplex", "mode"}, nil, ), "supported_autonegotiate": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "autonegotiate_supported"), "If this port device supports autonegotiate", []string{"device"}, nil, ), "supported_pause": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "pause_supported"), "If this port device supports pause frames", []string{"device"}, nil, ), "supported_asymmetricpause": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "asymmetricpause_supported"), "If this port device supports asymmetric pause frames", []string{"device"}, nil, ), "advertised_speed": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "advertised_speed_bytes"), "Combination of speeds and features offered by network device", []string{"device", "duplex", "mode"}, nil, ), "advertised_autonegotiate": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "autonegotiate_advertised"), "If this port device offers autonegotiate", []string{"device"}, nil, ), "advertised_pause": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "pause_advertised"), "If this port device offers pause capability", []string{"device"}, nil, ), "advertised_asymmetricpause": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "asymmetricpause_advertised"), "If this port device offers asymmetric pause capability", []string{"device"}, nil, ), "autonegotiate": prometheus.NewDesc( prometheus.BuildFQName(namespace, "network", "autonegotiate"), "If this port is using autonegotiate", []string{"device"}, nil, ), }, infoDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "ethtool", "info"), "A metric with a constant '1' value
labeled by bus_info, device, driver, expansion_rom_version, firmware_version, version.", []string{"bus_info", "device", "driver", "expansion_rom_version", "firmware_version", "version"}, nil, ), }, nil } func init() { registerCollector("ethtool", defaultDisabled, NewEthtoolCollector) } // Generate the fully-qualified metric name for the ethool metric. func buildEthtoolFQName(metric string) string { metricName := strings.TrimLeft(strings.ToLower(SanitizeMetricName(metric)), "_") metricName = ethtoolReceivedRegex.ReplaceAllString(metricName, "${1}received${2}") metricName = ethtoolTransmitRegex.ReplaceAllString(metricName, "${1}transmitted${2}") return prometheus.BuildFQName(namespace, "ethtool", metricName) } // NewEthtoolCollector returns a new Collector exposing ethtool stats. func NewEthtoolCollector(logger log.Logger) (Collector, error) { return makeEthtoolCollector(logger) } // updatePortCapabilities generates metrics for autonegotiate, pause and asymmetricpause. // The bit offsets here correspond to ethtool_link_mode_bit_indices in linux/include/uapi/linux/ethtool.h // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/ethtool.h func (c *ethtoolCollector) updatePortCapabilities(ch chan<- prometheus.Metric, prefix string, device string, linkModes uint32) { var ( autonegotiate = 0.0 pause = 0.0 asymmetricPause = 0.0 ) if linkModes&(1<, value is always 1.", []string{"fc_host", "speed", "port_state", "port_type", "port_id", "port_name", "fabric_name", "symbolic_name", "supported_classes", "supported_speeds", "dev_loss_tmo"}, nil, ) infoValue := 1.0 // First push the Host values ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, host.Name, host.Speed, host.PortState, host.PortType, host.PortID, host.PortName, host.FabricName, host.SymbolicName, host.SupportedClasses, host.SupportedSpeeds, host.DevLossTMO) // Then the counters c.pushCounter(ch, "dumped_frames_total", host.Counters.DumpedFrames, host.Name) c.pushCounter(ch, "error_frames_total", host.Counters.ErrorFrames, host.Name) c.pushCounter(ch, "invalid_crc_total", host.Counters.InvalidCRCCount, host.Name) c.pushCounter(ch, "rx_frames_total", host.Counters.RXFrames, host.Name) c.pushCounter(ch, "rx_words_total", host.Counters.RXWords, host.Name) c.pushCounter(ch, "tx_frames_total", host.Counters.TXFrames, host.Name) c.pushCounter(ch, "tx_words_total", host.Counters.TXWords, host.Name) c.pushCounter(ch, "seconds_since_last_reset_total", host.Counters.SecondsSinceLastReset, host.Name) c.pushCounter(ch, "invalid_tx_words_total", host.Counters.InvalidTXWordCount, host.Name) c.pushCounter(ch, "link_failure_total", host.Counters.LinkFailureCount, host.Name) c.pushCounter(ch, "loss_of_sync_total", host.Counters.LossOfSyncCount, host.Name) c.pushCounter(ch, "loss_of_signal_total", host.Counters.LossOfSignalCount, host.Name) c.pushCounter(ch, "nos_total", host.Counters.NosCount, host.Name) c.pushCounter(ch, "fcp_packet_aborts_total", host.Counters.FCPPacketAborts, host.Name) } return nil } node_exporter-1.7.0/collector/filefd_linux.go000066400000000000000000000046601452426057600214020ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nofilefd // +build !nofilefd package collector import ( "bytes" "fmt" "io" "os" "strconv" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( fileFDStatSubsystem = "filefd" ) type fileFDStatCollector struct { logger log.Logger } func init() { registerCollector(fileFDStatSubsystem, defaultEnabled, NewFileFDStatCollector) } // NewFileFDStatCollector returns a new Collector exposing file-nr stats. func NewFileFDStatCollector(logger log.Logger) (Collector, error) { return &fileFDStatCollector{logger}, nil } func (c *fileFDStatCollector) Update(ch chan<- prometheus.Metric) error { fileFDStat, err := parseFileFDStats(procFilePath("sys/fs/file-nr")) if err != nil { return fmt.Errorf("couldn't get file-nr: %w", err) } for name, value := range fileFDStat { v, err := strconv.ParseFloat(value, 64) if err != nil { return fmt.Errorf("invalid value %s in file-nr: %w", value, err) } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, fileFDStatSubsystem, name), fmt.Sprintf("File descriptor statistics: %s.", name), nil, nil, ), prometheus.GaugeValue, v, ) } return nil } func parseFileFDStats(filename string) (map[string]string, error) { file, err := os.Open(filename) if err != nil { return nil, err } defer file.Close() content, err := io.ReadAll(file) if err != nil { return nil, err } parts := bytes.Split(bytes.TrimSpace(content), []byte("\u0009")) if len(parts) < 3 { return nil, fmt.Errorf("unexpected number of file stats in %q", filename) } var fileFDStat = map[string]string{} // The file-nr proc is only 1 line with 3 values. fileFDStat["allocated"] = string(parts[0]) // The second value is skipped as it will always be zero in linux 2.6. fileFDStat["maximum"] = string(parts[2]) return fileFDStat, nil } node_exporter-1.7.0/collector/filefd_linux_test.go000066400000000000000000000020551452426057600224350ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nofilefd // +build !nofilefd package collector import "testing" func TestFileFDStats(t *testing.T) { fileFDStats, err := parseFileFDStats("fixtures/proc/sys/fs/file-nr") if err != nil { t.Fatal(err) } if want, got := "1024", fileFDStats["allocated"]; want != got { t.Errorf("want filefd allocated %q, got %q", want, got) } if want, got := "1631329", fileFDStats["maximum"]; want != got { t.Errorf("want filefd maximum %q, got %q", want, got) } } node_exporter-1.7.0/collector/filesystem_bsd.go000066400000000000000000000045541452426057600217500ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (darwin || dragonfly) && !nofilesystem // +build darwin dragonfly // +build !nofilesystem package collector import ( "errors" "unsafe" "github.com/go-kit/log/level" ) /* #include #include #include #include */ import "C" const ( defMountPointsExcluded = "^/(dev)($|/)" defFSTypesExcluded = "^devfs$" readOnly = 0x1 // MNT_RDONLY ) // Expose filesystem fullness. func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { var mntbuf *C.struct_statfs count := C.getmntinfo(&mntbuf, C.MNT_NOWAIT) if count == 0 { return nil, errors.New("getmntinfo() failed") } mnt := (*[1 << 20]C.struct_statfs)(unsafe.Pointer(mntbuf)) stats = []filesystemStats{} for i := 0; i < int(count); i++ { mountpoint := C.GoString(&mnt[i].f_mntonname[0]) if c.excludedMountPointsPattern.MatchString(mountpoint) { level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) continue } device := C.GoString(&mnt[i].f_mntfromname[0]) fstype := C.GoString(&mnt[i].f_fstypename[0]) if c.excludedFSTypesPattern.MatchString(fstype) { level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) continue } var ro float64 if (mnt[i].f_flags & readOnly) != 0 { ro = 1 } stats = append(stats, filesystemStats{ labels: filesystemLabels{ device: device, mountPoint: rootfsStripPrefix(mountpoint), fsType: fstype, }, size: float64(mnt[i].f_blocks) * float64(mnt[i].f_bsize), free: float64(mnt[i].f_bfree) * float64(mnt[i].f_bsize), avail: float64(mnt[i].f_bavail) * float64(mnt[i].f_bsize), files: float64(mnt[i].f_files), filesFree: float64(mnt[i].f_ffree), ro: ro, }) } return stats, nil } node_exporter-1.7.0/collector/filesystem_common.go000066400000000000000000000162201452426057600224610ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nofilesystem && (linux || freebsd || openbsd || darwin || dragonfly) // +build !nofilesystem // +build linux freebsd openbsd darwin dragonfly package collector import ( "errors" "regexp" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) // Arch-dependent implementation must define: // * defMountPointsExcluded // * defFSTypesExcluded // * filesystemLabelNames // * filesystemCollector.GetStats var ( mountPointsExcludeSet bool mountPointsExclude = kingpin.Flag( "collector.filesystem.mount-points-exclude", "Regexp of mount points to exclude for filesystem collector.", ).Default(defMountPointsExcluded).PreAction(func(c *kingpin.ParseContext) error { mountPointsExcludeSet = true return nil }).String() oldMountPointsExcluded = kingpin.Flag( "collector.filesystem.ignored-mount-points", "Regexp of mount points to ignore for filesystem collector.", ).Hidden().String() fsTypesExcludeSet bool fsTypesExclude = kingpin.Flag( "collector.filesystem.fs-types-exclude", "Regexp of filesystem types to exclude for filesystem collector.", ).Default(defFSTypesExcluded).PreAction(func(c *kingpin.ParseContext) error { fsTypesExcludeSet = true return nil }).String() oldFSTypesExcluded = kingpin.Flag( "collector.filesystem.ignored-fs-types", "Regexp of filesystem types to ignore for filesystem collector.", ).Hidden().String() filesystemLabelNames = []string{"device", "mountpoint", "fstype"} ) type filesystemCollector struct { excludedMountPointsPattern *regexp.Regexp excludedFSTypesPattern *regexp.Regexp sizeDesc, freeDesc, availDesc *prometheus.Desc filesDesc, filesFreeDesc *prometheus.Desc roDesc, deviceErrorDesc *prometheus.Desc logger log.Logger } type filesystemLabels struct { device, mountPoint, fsType, options string } type filesystemStats struct { labels filesystemLabels size, free, avail float64 files, filesFree float64 ro, deviceError float64 } func init() { registerCollector("filesystem", defaultEnabled, NewFilesystemCollector) } // NewFilesystemCollector returns a new Collector exposing filesystems stats. 
func NewFilesystemCollector(logger log.Logger) (Collector, error) { if *oldMountPointsExcluded != "" { if !mountPointsExcludeSet { level.Warn(logger).Log("msg", "--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude") *mountPointsExclude = *oldMountPointsExcluded } else { return nil, errors.New("--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive") } } if *oldFSTypesExcluded != "" { if !fsTypesExcludeSet { level.Warn(logger).Log("msg", "--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude") *fsTypesExclude = *oldFSTypesExcluded } else { return nil, errors.New("--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive") } } subsystem := "filesystem" level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.mount-points-exclude", "flag", *mountPointsExclude) mountPointPattern := regexp.MustCompile(*mountPointsExclude) level.Info(logger).Log("msg", "Parsed flag --collector.filesystem.fs-types-exclude", "flag", *fsTypesExclude) filesystemsTypesPattern := regexp.MustCompile(*fsTypesExclude) sizeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "size_bytes"), "Filesystem size in bytes.", filesystemLabelNames, nil, ) freeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "free_bytes"), "Filesystem free space in bytes.", filesystemLabelNames, nil, ) availDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "avail_bytes"), "Filesystem space available to non-root users in bytes.", filesystemLabelNames, nil, ) filesDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "files"), "Filesystem total file nodes.", filesystemLabelNames, nil, ) filesFreeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "files_free"), "Filesystem total free file nodes.", filesystemLabelNames, nil, ) roDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "readonly"), "Filesystem read-only status.", filesystemLabelNames, nil, ) deviceErrorDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "device_error"), "Whether an error occurred while getting statistics for the given device.", filesystemLabelNames, nil, ) return &filesystemCollector{ excludedMountPointsPattern: mountPointPattern, excludedFSTypesPattern: filesystemsTypesPattern, sizeDesc: sizeDesc, freeDesc: freeDesc, availDesc: availDesc, filesDesc: filesDesc, filesFreeDesc: filesFreeDesc, roDesc: roDesc, deviceErrorDesc: deviceErrorDesc, logger: logger, }, nil } func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.GetStats() if err != nil { return err } // Make sure we expose a metric once, even if there are multiple mounts seen := map[filesystemLabels]bool{} for _, s := range stats { if seen[s.labels] { continue } seen[s.labels] = true ch <- prometheus.MustNewConstMetric( c.deviceErrorDesc, prometheus.GaugeValue, s.deviceError, s.labels.device, s.labels.mountPoint, s.labels.fsType, ) ch <- prometheus.MustNewConstMetric( c.roDesc, prometheus.GaugeValue, s.ro, s.labels.device, s.labels.mountPoint, s.labels.fsType, ) if s.deviceError > 0 { continue } ch <- prometheus.MustNewConstMetric( c.sizeDesc, prometheus.GaugeValue, s.size, s.labels.device, s.labels.mountPoint, s.labels.fsType, ) ch <- prometheus.MustNewConstMetric( 
c.freeDesc, prometheus.GaugeValue, s.free, s.labels.device, s.labels.mountPoint, s.labels.fsType, ) ch <- prometheus.MustNewConstMetric( c.availDesc, prometheus.GaugeValue, s.avail, s.labels.device, s.labels.mountPoint, s.labels.fsType, ) ch <- prometheus.MustNewConstMetric( c.filesDesc, prometheus.GaugeValue, s.files, s.labels.device, s.labels.mountPoint, s.labels.fsType, ) ch <- prometheus.MustNewConstMetric( c.filesFreeDesc, prometheus.GaugeValue, s.filesFree, s.labels.device, s.labels.mountPoint, s.labels.fsType, ) } return nil } node_exporter-1.7.0/collector/filesystem_freebsd.go000066400000000000000000000044201452426057600226020ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nofilesystem // +build !nofilesystem package collector import ( "github.com/go-kit/log/level" "golang.org/x/sys/unix" ) const ( defMountPointsExcluded = "^/(dev)($|/)" defFSTypesExcluded = "^devfs$" ) // Expose filesystem fullness. func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { n, err := unix.Getfsstat(nil, unix.MNT_NOWAIT) if err != nil { return nil, err } buf := make([]unix.Statfs_t, n) _, err = unix.Getfsstat(buf, unix.MNT_NOWAIT) if err != nil { return nil, err } stats := []filesystemStats{} for _, fs := range buf { mountpoint := unix.ByteSliceToString(fs.Mntonname[:]) if c.excludedMountPointsPattern.MatchString(mountpoint) { level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) continue } device := unix.ByteSliceToString(fs.Mntfromname[:]) fstype := unix.ByteSliceToString(fs.Fstypename[:]) if c.excludedFSTypesPattern.MatchString(fstype) { level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) continue } if (fs.Flags & unix.MNT_IGNORE) != 0 { level.Debug(c.logger).Log("msg", "Ignoring mount flagged as ignore", "mountpoint", mountpoint) continue } var ro float64 if (fs.Flags & unix.MNT_RDONLY) != 0 { ro = 1 } stats = append(stats, filesystemStats{ labels: filesystemLabels{ device: device, mountPoint: rootfsStripPrefix(mountpoint), fsType: fstype, }, size: float64(fs.Blocks) * float64(fs.Bsize), free: float64(fs.Bfree) * float64(fs.Bsize), avail: float64(fs.Bavail) * float64(fs.Bsize), files: float64(fs.Files), filesFree: float64(fs.Ffree), ro: ro, }) } return stats, nil } node_exporter-1.7.0/collector/filesystem_linux.go000066400000000000000000000144451452426057600223370ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. //go:build !nofilesystem // +build !nofilesystem package collector import ( "bufio" "errors" "fmt" "io" "os" "strings" "sync" "time" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "golang.org/x/sys/unix" ) const ( defMountPointsExcluded = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/)" defFSTypesExcluded = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$" ) var mountTimeout = kingpin.Flag("collector.filesystem.mount-timeout", "how long to wait for a mount to respond before marking it as stale"). Hidden().Default("5s").Duration() var statWorkerCount = kingpin.Flag("collector.filesystem.stat-workers", "how many stat calls to process simultaneously"). Hidden().Default("4").Int() var stuckMounts = make(map[string]struct{}) var stuckMountsMtx = &sync.Mutex{} // GetStats returns filesystem stats. func (c *filesystemCollector) GetStats() ([]filesystemStats, error) { mps, err := mountPointDetails(c.logger) if err != nil { return nil, err } stats := []filesystemStats{} labelChan := make(chan filesystemLabels) statChan := make(chan filesystemStats) wg := sync.WaitGroup{} workerCount := *statWorkerCount if workerCount < 1 { workerCount = 1 } for i := 0; i < workerCount; i++ { wg.Add(1) go func() { defer wg.Done() for labels := range labelChan { statChan <- c.processStat(labels) } }() } go func() { for _, labels := range mps { if c.excludedMountPointsPattern.MatchString(labels.mountPoint) { level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", labels.mountPoint) continue } if c.excludedFSTypesPattern.MatchString(labels.fsType) { level.Debug(c.logger).Log("msg", "Ignoring fs", "type", labels.fsType) continue } stuckMountsMtx.Lock() if _, ok := stuckMounts[labels.mountPoint]; ok { stats = append(stats, filesystemStats{ labels: labels, deviceError: 1, }) level.Debug(c.logger).Log("msg", "Mount point is in an unresponsive state", "mountpoint", labels.mountPoint) stuckMountsMtx.Unlock() continue } stuckMountsMtx.Unlock() labelChan <- labels } close(labelChan) wg.Wait() close(statChan) }() for stat := range statChan { stats = append(stats, stat) } return stats, nil } func (c *filesystemCollector) processStat(labels filesystemLabels) filesystemStats { var ro float64 for _, option := range strings.Split(labels.options, ",") { if option == "ro" { ro = 1 break } } success := make(chan struct{}) go stuckMountWatcher(labels.mountPoint, success, c.logger) buf := new(unix.Statfs_t) err := unix.Statfs(rootfsFilePath(labels.mountPoint), buf) stuckMountsMtx.Lock() close(success) // If the mount has been marked as stuck, unmark it and log it's recovery. 
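// Editorial note: success is closed while stuckMountsMtx is held, so stuckMountWatcher either observes the closed channel or has already marked the mount as stuck before the recovery check below runs; the two paths cannot interleave.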
if _, ok := stuckMounts[labels.mountPoint]; ok { level.Debug(c.logger).Log("msg", "Mount point has recovered, monitoring will resume", "mountpoint", labels.mountPoint) delete(stuckMounts, labels.mountPoint) } stuckMountsMtx.Unlock() if err != nil { level.Debug(c.logger).Log("msg", "Error on statfs() system call", "rootfs", rootfsFilePath(labels.mountPoint), "err", err) return filesystemStats{ labels: labels, deviceError: 1, ro: ro, } } return filesystemStats{ labels: labels, size: float64(buf.Blocks) * float64(buf.Bsize), free: float64(buf.Bfree) * float64(buf.Bsize), avail: float64(buf.Bavail) * float64(buf.Bsize), files: float64(buf.Files), filesFree: float64(buf.Ffree), ro: ro, } } // stuckMountWatcher listens on the given success channel and if the channel closes // then the watcher does nothing. If instead the timeout is reached, the // mount point that is being watched is marked as stuck. func stuckMountWatcher(mountPoint string, success chan struct{}, logger log.Logger) { mountCheckTimer := time.NewTimer(*mountTimeout) defer mountCheckTimer.Stop() select { case <-success: // Success case <-mountCheckTimer.C: // Timed out, mark mount as stuck stuckMountsMtx.Lock() select { case <-success: // Success came in just after the timeout was reached, don't label the mount as stuck default: level.Debug(logger).Log("msg", "Mount point timed out, it is being labeled as stuck and will not be monitored", "mountpoint", mountPoint) stuckMounts[mountPoint] = struct{}{} } stuckMountsMtx.Unlock() } } func mountPointDetails(logger log.Logger) ([]filesystemLabels, error) { file, err := os.Open(procFilePath("1/mounts")) if errors.Is(err, os.ErrNotExist) { // Fallback to `/proc/mounts` if `/proc/1/mounts` is missing due hidepid. level.Debug(logger).Log("msg", "Reading root mounts failed, falling back to system mounts", "err", err) file, err = os.Open(procFilePath("mounts")) } if err != nil { return nil, err } defer file.Close() return parseFilesystemLabels(file) } func parseFilesystemLabels(r io.Reader) ([]filesystemLabels, error) { var filesystems []filesystemLabels scanner := bufio.NewScanner(r) for scanner.Scan() { parts := strings.Fields(scanner.Text()) if len(parts) < 4 { return nil, fmt.Errorf("malformed mount point information: %q", scanner.Text()) } // Ensure we handle the translation of \040 and \011 // as per fstab(5). parts[1] = strings.Replace(parts[1], "\\040", " ", -1) parts[1] = strings.Replace(parts[1], "\\011", "\t", -1) filesystems = append(filesystems, filesystemLabels{ device: parts[0], mountPoint: rootfsStripPrefix(parts[1]), fsType: parts[2], options: parts[3], }) } return filesystems, scanner.Err() } node_exporter-1.7.0/collector/filesystem_linux_test.go000066400000000000000000000102351452426057600233670ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nofilesystem // +build !nofilesystem package collector import ( "strings" "testing" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" ) func Test_parseFilesystemLabelsError(t *testing.T) { tests := []struct { name string in string }{ { name: "too few fields", in: "hello world", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if _, err := parseFilesystemLabels(strings.NewReader(tt.in)); err == nil { t.Fatal("expected an error, but none occurred") } }) } } func TestMountPointDetails(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "./fixtures/proc"}); err != nil { t.Fatal(err) } expected := map[string]string{ "/": "", "/sys": "", "/proc": "", "/dev": "", "/dev/pts": "", "/run": "", "/sys/kernel/security": "", "/dev/shm": "", "/run/lock": "", "/sys/fs/cgroup": "", "/sys/fs/cgroup/systemd": "", "/sys/fs/pstore": "", "/sys/fs/cgroup/cpuset": "", "/sys/fs/cgroup/cpu,cpuacct": "", "/sys/fs/cgroup/devices": "", "/sys/fs/cgroup/freezer": "", "/sys/fs/cgroup/net_cls,net_prio": "", "/sys/fs/cgroup/blkio": "", "/sys/fs/cgroup/perf_event": "", "/proc/sys/fs/binfmt_misc": "", "/dev/mqueue": "", "/sys/kernel/debug": "", "/dev/hugepages": "", "/sys/fs/fuse/connections": "", "/boot": "", "/run/rpc_pipefs": "", "/run/user/1000": "", "/run/user/1000/gvfs": "", "/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "", "/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore] bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk": "", } filesystems, err := mountPointDetails(log.NewNopLogger()) if err != nil { t.Log(err) } for _, fs := range filesystems { if _, ok := expected[fs.mountPoint]; !ok { t.Errorf("Got unexpected %s", fs.mountPoint) } } } func TestMountsFallback(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "./fixtures_hidepid/proc"}); err != nil { t.Fatal(err) } expected := map[string]string{ "/": "", } filesystems, err := mountPointDetails(log.NewNopLogger()) if err != nil { t.Log(err) } for _, fs := range filesystems { if _, ok := expected[fs.mountPoint]; !ok { t.Errorf("Got unexpected %s", fs.mountPoint) } } } func TestPathRootfs(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "./fixtures_bindmount/proc", "--path.rootfs", "/host"}); err != nil { t.Fatal(err) } expected := map[string]string{ // should modify these mountpoints (removes /host, see fixture proc file) "/": "", "/media/volume1": "", "/media/volume2": "", // should not modify these mountpoints "/dev/shm": "", "/run/lock": "", "/sys/fs/cgroup": "", } filesystems, err := mountPointDetails(log.NewNopLogger()) if err != nil { t.Log(err) } for _, fs := range filesystems { if _, ok := expected[fs.mountPoint]; !ok { t.Errorf("Got unexpected %s", fs.mountPoint) } } } node_exporter-1.7.0/collector/filesystem_openbsd.go000066400000000000000000000042241452426057600226240ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nofilesystem // +build !nofilesystem package collector import ( "github.com/go-kit/log/level" "golang.org/x/sys/unix" ) const ( defMountPointsExcluded = "^/(dev)($|/)" defFSTypesExcluded = "^devfs$" ) // Expose filesystem fullness. func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) { var mnt []unix.Statfs_t size, err := unix.Getfsstat(mnt, unix.MNT_NOWAIT) if err != nil { return nil, err } mnt = make([]unix.Statfs_t, size) _, err = unix.Getfsstat(mnt, unix.MNT_NOWAIT) if err != nil { return nil, err } stats = []filesystemStats{} for _, v := range mnt { mountpoint := unix.ByteSliceToString(v.F_mntonname[:]) if c.excludedMountPointsPattern.MatchString(mountpoint) { level.Debug(c.logger).Log("msg", "Ignoring mount point", "mountpoint", mountpoint) continue } device := unix.ByteSliceToString(v.F_mntfromname[:]) fstype := unix.ByteSliceToString(v.F_fstypename[:]) if c.excludedFSTypesPattern.MatchString(fstype) { level.Debug(c.logger).Log("msg", "Ignoring fs type", "type", fstype) continue } var ro float64 if (v.F_flags & unix.MNT_RDONLY) != 0 { ro = 1 } stats = append(stats, filesystemStats{ labels: filesystemLabels{ device: device, mountPoint: mountpoint, fsType: fstype, }, size: float64(v.F_blocks) * float64(v.F_bsize), free: float64(v.F_bfree) * float64(v.F_bsize), avail: float64(v.F_bavail) * float64(v.F_bsize), files: float64(v.F_files), filesFree: float64(v.F_ffree), ro: ro, }) } return stats, nil } node_exporter-1.7.0/collector/fixtures/000077500000000000000000000000001452426057600202465ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/e2e-64k-page-output.txt000066400000000000000000010524741452426057600243510ustar00rootroot00000000000000# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge # HELP go_info Information about the Go environment. # TYPE go_info gauge # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. # TYPE go_memstats_alloc_bytes_total counter # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. # TYPE go_memstats_gc_sys_bytes gauge # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. # TYPE go_memstats_heap_idle_bytes gauge # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge # HELP go_memstats_heap_objects Number of allocated objects. 
# TYPE go_memstats_heap_objects gauge # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. # TYPE go_memstats_heap_sys_bytes gauge # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. # TYPE go_memstats_mspan_inuse_bytes gauge # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. # TYPE go_memstats_mspan_sys_bytes gauge # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. # TYPE go_memstats_next_gc_bytes gauge # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. # TYPE go_memstats_other_sys_bytes gauge # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge # HELP go_threads Number of OS threads created. # TYPE go_threads gauge # HELP node_arp_entries ARP entries by device # TYPE node_arp_entries gauge node_arp_entries{device="eth0"} 3 node_arp_entries{device="eth1"} 3 # HELP node_bcache_active_journal_entries Number of journal entries that are newer than the index. # TYPE node_bcache_active_journal_entries gauge node_bcache_active_journal_entries{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1 # HELP node_bcache_average_key_size_sectors Average data per key in the btree (sectors). # TYPE node_bcache_average_key_size_sectors gauge node_bcache_average_key_size_sectors{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_btree_cache_size_bytes Amount of memory currently used by the btree cache. # TYPE node_bcache_btree_cache_size_bytes gauge node_bcache_btree_cache_size_bytes{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_btree_nodes Total nodes in the btree. # TYPE node_bcache_btree_nodes gauge node_bcache_btree_nodes{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_btree_read_average_duration_seconds Average btree read duration. # TYPE node_bcache_btree_read_average_duration_seconds gauge node_bcache_btree_read_average_duration_seconds{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.305e-06 # HELP node_bcache_bypassed_bytes_total Amount of IO (both reads and writes) that has bypassed the cache. # TYPE node_bcache_bypassed_bytes_total counter node_bcache_bypassed_bytes_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_available_percent Percentage of cache device without dirty data, usable for writeback (may contain clean cached data). 
# TYPE node_bcache_cache_available_percent gauge node_bcache_cache_available_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 100 # HELP node_bcache_cache_bypass_hits_total Hits for IO intended to skip the cache. # TYPE node_bcache_cache_bypass_hits_total counter node_bcache_cache_bypass_hits_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_bypass_misses_total Misses for IO intended to skip the cache. # TYPE node_bcache_cache_bypass_misses_total counter node_bcache_cache_bypass_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_hits_total Hits counted per individual IO as bcache sees them. # TYPE node_bcache_cache_hits_total counter node_bcache_cache_hits_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 546 # HELP node_bcache_cache_miss_collisions_total Instances where data insertion from cache miss raced with write (data already present). # TYPE node_bcache_cache_miss_collisions_total counter node_bcache_cache_miss_collisions_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_misses_total Misses counted per individual IO as bcache sees them. # TYPE node_bcache_cache_misses_total counter node_bcache_cache_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_read_races_total Counts instances where while data was being read from the cache, the bucket was reused and invalidated - i.e. where the pointer was stale after the read completed. # TYPE node_bcache_cache_read_races_total counter node_bcache_cache_read_races_total{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_readaheads_total Count of times readahead occurred. # TYPE node_bcache_cache_readaheads_total counter node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 13 # HELP node_bcache_congested Congestion. # TYPE node_bcache_congested gauge node_bcache_congested{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_dirty_data_bytes Amount of dirty data for this backing device in the cache. # TYPE node_bcache_dirty_data_bytes gauge node_bcache_dirty_data_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_dirty_target_bytes Current dirty data target threshold for this backing device in bytes. # TYPE node_bcache_dirty_target_bytes gauge node_bcache_dirty_target_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 2.189426688e+10 # HELP node_bcache_io_errors Number of errors that have occurred, decayed by io_error_halflife. # TYPE node_bcache_io_errors gauge node_bcache_io_errors{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_metadata_written_bytes_total Sum of all non data writes (btree writes and all other metadata). # TYPE node_bcache_metadata_written_bytes_total counter node_bcache_metadata_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 512 # HELP node_bcache_priority_stats_metadata_percent Bcache's metadata overhead. # TYPE node_bcache_priority_stats_metadata_percent gauge node_bcache_priority_stats_metadata_percent{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_priority_stats_unused_percent The percentage of the cache that doesn't contain any data. 
# TYPE node_bcache_priority_stats_unused_percent gauge node_bcache_priority_stats_unused_percent{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 99 # HELP node_bcache_root_usage_percent Percentage of the root btree node in use (tree depth increases if too high). # TYPE node_bcache_root_usage_percent gauge node_bcache_root_usage_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_tree_depth Depth of the btree. # TYPE node_bcache_tree_depth gauge node_bcache_tree_depth{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_writeback_change Last writeback rate change step for this backing device. # TYPE node_bcache_writeback_change gauge node_bcache_writeback_change{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 329204 # HELP node_bcache_writeback_rate Current writeback rate for this backing device in bytes. # TYPE node_bcache_writeback_rate gauge node_bcache_writeback_rate{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.150976e+06 # HELP node_bcache_writeback_rate_integral_term Current result of integral controller, part of writeback rate # TYPE node_bcache_writeback_rate_integral_term gauge node_bcache_writeback_rate_integral_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 808960 # HELP node_bcache_writeback_rate_proportional_term Current result of proportional controller, part of writeback rate # TYPE node_bcache_writeback_rate_proportional_term gauge node_bcache_writeback_rate_proportional_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 437748 # HELP node_bcache_written_bytes_total Sum of all data that has been written to the cache. # TYPE node_bcache_written_bytes_total counter node_bcache_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bonding_active Number of active slaves per bonding interface. # TYPE node_bonding_active gauge node_bonding_active{master="bond0"} 0 node_bonding_active{master="dmz"} 2 node_bonding_active{master="int"} 1 # HELP node_bonding_slaves Number of configured slaves per bonding interface. # TYPE node_bonding_slaves gauge node_bonding_slaves{master="bond0"} 0 node_bonding_slaves{master="dmz"} 2 node_bonding_slaves{master="int"} 2 # HELP node_boot_time_seconds Node boot time, in unixtime. # TYPE node_boot_time_seconds gauge node_boot_time_seconds 1.418183276e+09 # HELP node_btrfs_allocation_ratio Data allocation ratio for a layout/data type # TYPE node_btrfs_allocation_ratio gauge node_btrfs_allocation_ratio{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 node_btrfs_allocation_ratio{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.3333333333333333 node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 node_btrfs_allocation_ratio{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 node_btrfs_allocation_ratio{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 # HELP node_btrfs_device_size_bytes Size of a device that is part of the filesystem. 
# TYPE node_btrfs_device_size_bytes gauge node_btrfs_device_size_bytes{device="loop22",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop23",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop24",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop25",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop25",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop26",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 # HELP node_btrfs_global_rsv_size_bytes Size of global reserve. # TYPE node_btrfs_global_rsv_size_bytes gauge node_btrfs_global_rsv_size_bytes{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.6777216e+07 node_btrfs_global_rsv_size_bytes{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 # HELP node_btrfs_info Filesystem information # TYPE node_btrfs_info gauge node_btrfs_info{label="",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1 node_btrfs_info{label="fixture",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 # HELP node_btrfs_reserved_bytes Amount of space reserved for a data type # TYPE node_btrfs_reserved_bytes gauge node_btrfs_reserved_bytes{block_group_type="data",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 node_btrfs_reserved_bytes{block_group_type="data",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 node_btrfs_reserved_bytes{block_group_type="metadata",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 node_btrfs_reserved_bytes{block_group_type="metadata",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 node_btrfs_reserved_bytes{block_group_type="system",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 node_btrfs_reserved_bytes{block_group_type="system",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 # HELP node_btrfs_size_bytes Amount of space allocated for a layout/data type # TYPE node_btrfs_size_bytes gauge node_btrfs_size_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2.147483648e+09 node_btrfs_size_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 6.44087808e+08 node_btrfs_size_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+09 node_btrfs_size_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 4.29391872e+08 node_btrfs_size_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.388608e+06 node_btrfs_size_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 # HELP node_btrfs_used_bytes Amount of used space by a layout/data type # TYPE node_btrfs_used_bytes gauge node_btrfs_used_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.08189952e+08 node_btrfs_used_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 node_btrfs_used_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 933888 node_btrfs_used_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 114688 node_btrfs_used_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 16384 node_btrfs_used_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 16384 # HELP 
node_buddyinfo_blocks Count of free blocks according to size. # TYPE node_buddyinfo_blocks gauge node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 # HELP node_cgroups_cgroups Current cgroup number of the subsystem. # TYPE node_cgroups_cgroups gauge node_cgroups_cgroups{subsys_name="blkio"} 170 node_cgroups_cgroups{subsys_name="cpu"} 172 node_cgroups_cgroups{subsys_name="cpuacct"} 172 node_cgroups_cgroups{subsys_name="cpuset"} 47 node_cgroups_cgroups{subsys_name="devices"} 170 node_cgroups_cgroups{subsys_name="freezer"} 47 node_cgroups_cgroups{subsys_name="hugetlb"} 47 node_cgroups_cgroups{subsys_name="memory"} 234 node_cgroups_cgroups{subsys_name="net_cls"} 47 node_cgroups_cgroups{subsys_name="perf_event"} 47 node_cgroups_cgroups{subsys_name="pids"} 170 node_cgroups_cgroups{subsys_name="rdma"} 1 # HELP node_cgroups_enabled Current cgroup number of the subsystem. # TYPE node_cgroups_enabled gauge node_cgroups_enabled{subsys_name="blkio"} 1 node_cgroups_enabled{subsys_name="cpu"} 1 node_cgroups_enabled{subsys_name="cpuacct"} 1 node_cgroups_enabled{subsys_name="cpuset"} 1 node_cgroups_enabled{subsys_name="devices"} 1 node_cgroups_enabled{subsys_name="freezer"} 1 node_cgroups_enabled{subsys_name="hugetlb"} 1 node_cgroups_enabled{subsys_name="memory"} 1 node_cgroups_enabled{subsys_name="net_cls"} 1 node_cgroups_enabled{subsys_name="perf_event"} 1 node_cgroups_enabled{subsys_name="pids"} 1 node_cgroups_enabled{subsys_name="rdma"} 1 # HELP node_context_switches_total Total number of context switches. 
# TYPE node_context_switches_total counter node_context_switches_total 3.8014093e+07 # HELP node_cooling_device_cur_state Current throttle state of the cooling device # TYPE node_cooling_device_cur_state gauge node_cooling_device_cur_state{name="0",type="Processor"} 0 # HELP node_cooling_device_max_state Maximum throttle state of the cooling device # TYPE node_cooling_device_max_state gauge node_cooling_device_max_state{name="0",type="Processor"} 3 # HELP node_cpu_core_throttles_total Number of times this CPU core has been throttled. # TYPE node_cpu_core_throttles_total counter node_cpu_core_throttles_total{core="0",package="0"} 5 node_cpu_core_throttles_total{core="0",package="1"} 0 node_cpu_core_throttles_total{core="1",package="0"} 0 node_cpu_core_throttles_total{core="1",package="1"} 9 # HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. # TYPE node_cpu_guest_seconds_total counter node_cpu_guest_seconds_total{cpu="0",mode="nice"} 0.01 node_cpu_guest_seconds_total{cpu="0",mode="user"} 0.02 node_cpu_guest_seconds_total{cpu="1",mode="nice"} 0.02 node_cpu_guest_seconds_total{cpu="1",mode="user"} 0.03 node_cpu_guest_seconds_total{cpu="2",mode="nice"} 0.03 node_cpu_guest_seconds_total{cpu="2",mode="user"} 0.04 node_cpu_guest_seconds_total{cpu="3",mode="nice"} 0.04 node_cpu_guest_seconds_total{cpu="3",mode="user"} 0.05 node_cpu_guest_seconds_total{cpu="4",mode="nice"} 0.05 node_cpu_guest_seconds_total{cpu="4",mode="user"} 0.06 node_cpu_guest_seconds_total{cpu="5",mode="nice"} 0.06 node_cpu_guest_seconds_total{cpu="5",mode="user"} 0.07 node_cpu_guest_seconds_total{cpu="6",mode="nice"} 0.07 node_cpu_guest_seconds_total{cpu="6",mode="user"} 0.08 node_cpu_guest_seconds_total{cpu="7",mode="nice"} 0.08 node_cpu_guest_seconds_total{cpu="7",mode="user"} 0.09 # HELP node_cpu_isolated Whether each core is isolated, information from /sys/devices/system/cpu/isolated. # TYPE node_cpu_isolated gauge node_cpu_isolated{cpu="1"} 1 node_cpu_isolated{cpu="3"} 1 node_cpu_isolated{cpu="4"} 1 node_cpu_isolated{cpu="5"} 1 node_cpu_isolated{cpu="9"} 1 # HELP node_cpu_package_throttles_total Number of times this CPU package has been throttled. # TYPE node_cpu_package_throttles_total counter node_cpu_package_throttles_total{package="0"} 30 node_cpu_package_throttles_total{package="1"} 6 # HELP node_cpu_scaling_frequency_hertz Current scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_hertz gauge node_cpu_scaling_frequency_hertz{cpu="0"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="1"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="2"} 8e+06 node_cpu_scaling_frequency_hertz{cpu="3"} 8e+06 # HELP node_cpu_scaling_frequency_max_hertz Maximum scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_max_hertz gauge node_cpu_scaling_frequency_max_hertz{cpu="0"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="1"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="2"} 4.2e+09 node_cpu_scaling_frequency_max_hertz{cpu="3"} 4.2e+09 # HELP node_cpu_scaling_frequency_min_hertz Minimum scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_min_hertz gauge node_cpu_scaling_frequency_min_hertz{cpu="0"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="1"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="2"} 1e+06 node_cpu_scaling_frequency_min_hertz{cpu="3"} 1e+06 # HELP node_cpu_scaling_governor Current enabled CPU frequency governor. 
# TYPE node_cpu_scaling_governor gauge node_cpu_scaling_governor{cpu="0",governor="performance"} 0 node_cpu_scaling_governor{cpu="0",governor="powersave"} 1 node_cpu_scaling_governor{cpu="1",governor="performance"} 0 node_cpu_scaling_governor{cpu="1",governor="powersave"} 1 node_cpu_scaling_governor{cpu="2",governor="performance"} 0 node_cpu_scaling_governor{cpu="2",governor="powersave"} 1 node_cpu_scaling_governor{cpu="3",governor="performance"} 0 node_cpu_scaling_governor{cpu="3",governor="powersave"} 1 # HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. # TYPE node_cpu_seconds_total counter node_cpu_seconds_total{cpu="0",mode="idle"} 10870.69 node_cpu_seconds_total{cpu="0",mode="iowait"} 2.2 node_cpu_seconds_total{cpu="0",mode="irq"} 0.01 node_cpu_seconds_total{cpu="0",mode="nice"} 0.19 node_cpu_seconds_total{cpu="0",mode="softirq"} 34.1 node_cpu_seconds_total{cpu="0",mode="steal"} 0 node_cpu_seconds_total{cpu="0",mode="system"} 210.45 node_cpu_seconds_total{cpu="0",mode="user"} 444.9 node_cpu_seconds_total{cpu="1",mode="idle"} 11107.87 node_cpu_seconds_total{cpu="1",mode="iowait"} 5.91 node_cpu_seconds_total{cpu="1",mode="irq"} 0 node_cpu_seconds_total{cpu="1",mode="nice"} 0.23 node_cpu_seconds_total{cpu="1",mode="softirq"} 0.46 node_cpu_seconds_total{cpu="1",mode="steal"} 0 node_cpu_seconds_total{cpu="1",mode="system"} 164.74 node_cpu_seconds_total{cpu="1",mode="user"} 478.69 node_cpu_seconds_total{cpu="2",mode="idle"} 11123.21 node_cpu_seconds_total{cpu="2",mode="iowait"} 4.41 node_cpu_seconds_total{cpu="2",mode="irq"} 0 node_cpu_seconds_total{cpu="2",mode="nice"} 0.36 node_cpu_seconds_total{cpu="2",mode="softirq"} 3.26 node_cpu_seconds_total{cpu="2",mode="steal"} 0 node_cpu_seconds_total{cpu="2",mode="system"} 159.16 node_cpu_seconds_total{cpu="2",mode="user"} 465.04 node_cpu_seconds_total{cpu="3",mode="idle"} 11132.3 node_cpu_seconds_total{cpu="3",mode="iowait"} 5.33 node_cpu_seconds_total{cpu="3",mode="irq"} 0 node_cpu_seconds_total{cpu="3",mode="nice"} 1.02 node_cpu_seconds_total{cpu="3",mode="softirq"} 0.6 node_cpu_seconds_total{cpu="3",mode="steal"} 0 node_cpu_seconds_total{cpu="3",mode="system"} 156.83 node_cpu_seconds_total{cpu="3",mode="user"} 470.54 node_cpu_seconds_total{cpu="4",mode="idle"} 11403.21 node_cpu_seconds_total{cpu="4",mode="iowait"} 2.17 node_cpu_seconds_total{cpu="4",mode="irq"} 0 node_cpu_seconds_total{cpu="4",mode="nice"} 0.25 node_cpu_seconds_total{cpu="4",mode="softirq"} 0.08 node_cpu_seconds_total{cpu="4",mode="steal"} 0 node_cpu_seconds_total{cpu="4",mode="system"} 107.76 node_cpu_seconds_total{cpu="4",mode="user"} 284.13 node_cpu_seconds_total{cpu="5",mode="idle"} 11362.7 node_cpu_seconds_total{cpu="5",mode="iowait"} 6.72 node_cpu_seconds_total{cpu="5",mode="irq"} 0 node_cpu_seconds_total{cpu="5",mode="nice"} 1.01 node_cpu_seconds_total{cpu="5",mode="softirq"} 0.3 node_cpu_seconds_total{cpu="5",mode="steal"} 0 node_cpu_seconds_total{cpu="5",mode="system"} 115.86 node_cpu_seconds_total{cpu="5",mode="user"} 292.71 node_cpu_seconds_total{cpu="6",mode="idle"} 11397.21 node_cpu_seconds_total{cpu="6",mode="iowait"} 3.19 node_cpu_seconds_total{cpu="6",mode="irq"} 0 node_cpu_seconds_total{cpu="6",mode="nice"} 0.36 node_cpu_seconds_total{cpu="6",mode="softirq"} 0.29 node_cpu_seconds_total{cpu="6",mode="steal"} 0 node_cpu_seconds_total{cpu="6",mode="system"} 102.76 node_cpu_seconds_total{cpu="6",mode="user"} 291.52 node_cpu_seconds_total{cpu="7",mode="idle"} 11392.82 node_cpu_seconds_total{cpu="7",mode="iowait"} 5.55 
node_cpu_seconds_total{cpu="7",mode="irq"} 0 node_cpu_seconds_total{cpu="7",mode="nice"} 2.68 node_cpu_seconds_total{cpu="7",mode="softirq"} 0.31 node_cpu_seconds_total{cpu="7",mode="steal"} 0 node_cpu_seconds_total{cpu="7",mode="system"} 101.64 node_cpu_seconds_total{cpu="7",mode="user"} 290.98 # HELP node_cpu_vulnerabilities_info Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. The same state is stored as a string in the label # TYPE node_cpu_vulnerabilities_info gauge node_cpu_vulnerabilities_info{codename="itlb_multihit",state="not affected"} 1 node_cpu_vulnerabilities_info{codename="mds",state="vulnerable"} 1 node_cpu_vulnerabilities_info{codename="retbleed",state="mitigation"} 1 node_cpu_vulnerabilities_info{codename="spectre_v1",state="mitigation"} 1 node_cpu_vulnerabilities_info{codename="spectre_v2",state="mitigation"} 1 # HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs). # TYPE node_disk_ata_rotation_rate_rpm gauge node_disk_ata_rotation_rate_rpm{device="sda"} 7200 node_disk_ata_rotation_rate_rpm{device="sdb"} 0 node_disk_ata_rotation_rate_rpm{device="sdc"} 0 # HELP node_disk_ata_write_cache ATA disk has a write cache. # TYPE node_disk_ata_write_cache gauge node_disk_ata_write_cache{device="sda"} 1 node_disk_ata_write_cache{device="sdb"} 1 node_disk_ata_write_cache{device="sdc"} 1 # HELP node_disk_ata_write_cache_enabled ATA disk has its write cache enabled. # TYPE node_disk_ata_write_cache_enabled gauge node_disk_ata_write_cache_enabled{device="sda"} 0 node_disk_ata_write_cache_enabled{device="sdb"} 1 node_disk_ata_write_cache_enabled{device="sdc"} 0 # HELP node_disk_device_mapper_info Info about disk device mapper. # TYPE node_disk_device_mapper_info gauge node_disk_device_mapper_info{device="dm-0",lv_layer="",lv_name="",name="nvme0n1_crypt",uuid="CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt",vg_name=""} 1 node_disk_device_mapper_info{device="dm-1",lv_layer="",lv_name="swap_1",name="system-swap_1",uuid="LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-2",lv_layer="",lv_name="root",name="system-root",uuid="LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-3",lv_layer="",lv_name="var",name="system-var",uuid="LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-4",lv_layer="",lv_name="tmp",name="system-tmp",uuid="LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-5",lv_layer="",lv_name="home",name="system-home",uuid="LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf",vg_name="system"} 1 # HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. # TYPE node_disk_discard_time_seconds_total counter node_disk_discard_time_seconds_total{device="sdb"} 11.13 node_disk_discard_time_seconds_total{device="sdc"} 11.13 # HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. # TYPE node_disk_discarded_sectors_total counter node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 node_disk_discarded_sectors_total{device="sdc"} 1.25173784e+08 # HELP node_disk_discards_completed_total The total number of discards completed successfully. 
# TYPE node_disk_discards_completed_total counter node_disk_discards_completed_total{device="sdb"} 68851 node_disk_discards_completed_total{device="sdc"} 18851 # HELP node_disk_discards_merged_total The total number of discards merged. # TYPE node_disk_discards_merged_total counter node_disk_discards_merged_total{device="sdb"} 0 node_disk_discards_merged_total{device="sdc"} 0 # HELP node_disk_filesystem_info Info about disk filesystem. # TYPE node_disk_filesystem_info gauge node_disk_filesystem_info{device="dm-0",type="LVM2_member",usage="raid",uuid="c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB",version="LVM2 001"} 1 node_disk_filesystem_info{device="dm-1",type="swap",usage="other",uuid="5272bb60-04b5-49cd-b730-be57c7604450",version="1"} 1 node_disk_filesystem_info{device="dm-2",type="ext4",usage="filesystem",uuid="3deafd0d-faff-4695-8d15-51061ae1f51b",version="1.0"} 1 node_disk_filesystem_info{device="dm-3",type="ext4",usage="filesystem",uuid="5c772222-f7d4-4c8e-87e8-e97df6b7a45e",version="1.0"} 1 node_disk_filesystem_info{device="dm-4",type="ext4",usage="filesystem",uuid="a9479d44-60e1-4015-a1e5-bb065e6dd11b",version="1.0"} 1 node_disk_filesystem_info{device="dm-5",type="ext4",usage="filesystem",uuid="b05b726a-c718-4c4d-8641-7c73a7696d83",version="1.0"} 1 node_disk_filesystem_info{device="mmcblk0p1",type="vfat",usage="filesystem",uuid="6284-658D",version="FAT32"} 1 node_disk_filesystem_info{device="mmcblk0p2",type="ext4",usage="filesystem",uuid="83324ce8-a6f3-4e35-ad64-dbb3d6b87a32",version="1.0"} 1 node_disk_filesystem_info{device="sda",type="LVM2_member",usage="raid",uuid="cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw",version="LVM2 001"} 1 node_disk_filesystem_info{device="sdc",type="LVM2_member",usage="raid",uuid="QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb",version="LVM2 001"} 1 # HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. # TYPE node_disk_flush_requests_time_seconds_total counter node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 # HELP node_disk_flush_requests_total The total number of flush requests completed successfully # TYPE node_disk_flush_requests_total counter node_disk_flush_requests_total{device="sdc"} 1555 # HELP node_disk_info Info of /sys/block/. 
# TYPE node_disk_info gauge node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1 node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1 node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1 node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1 node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",serial="AAAABBBBCCCC1",wwn=""} 1 node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",serial="",wwn=""} 1 # HELP node_disk_io_now The number of I/Os currently in progress. # TYPE node_disk_io_now gauge node_disk_io_now{device="dm-0"} 0 node_disk_io_now{device="dm-1"} 0 node_disk_io_now{device="dm-2"} 0 node_disk_io_now{device="dm-3"} 0 node_disk_io_now{device="dm-4"} 0 node_disk_io_now{device="dm-5"} 0 node_disk_io_now{device="mmcblk0"} 0 node_disk_io_now{device="mmcblk0p1"} 0 node_disk_io_now{device="mmcblk0p2"} 0 node_disk_io_now{device="nvme0n1"} 0 node_disk_io_now{device="sda"} 0 node_disk_io_now{device="sdb"} 0 node_disk_io_now{device="sdc"} 0 node_disk_io_now{device="sr0"} 0 node_disk_io_now{device="vda"} 0 # HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. 
# TYPE node_disk_io_time_seconds_total counter node_disk_io_time_seconds_total{device="dm-0"} 11325.968 node_disk_io_time_seconds_total{device="dm-1"} 0.076 node_disk_io_time_seconds_total{device="dm-2"} 65.4 node_disk_io_time_seconds_total{device="dm-3"} 0.016 node_disk_io_time_seconds_total{device="dm-4"} 0.024 node_disk_io_time_seconds_total{device="dm-5"} 58.848 node_disk_io_time_seconds_total{device="mmcblk0"} 0.136 node_disk_io_time_seconds_total{device="mmcblk0p1"} 0.024 node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 node_disk_io_time_seconds_total{device="sdc"} 10.73 node_disk_io_time_seconds_total{device="sr0"} 0 node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 # HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. # TYPE node_disk_io_time_weighted_seconds_total counter node_disk_io_time_weighted_seconds_total{device="dm-0"} 1.206301256e+06 node_disk_io_time_weighted_seconds_total{device="dm-1"} 0.084 node_disk_io_time_weighted_seconds_total{device="dm-2"} 129.416 node_disk_io_time_weighted_seconds_total{device="dm-3"} 0.10400000000000001 node_disk_io_time_weighted_seconds_total{device="dm-4"} 0.044 node_disk_io_time_weighted_seconds_total{device="dm-5"} 105.632 node_disk_io_time_weighted_seconds_total{device="mmcblk0"} 0.156 node_disk_io_time_weighted_seconds_total{device="mmcblk0p1"} 0.024 node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 node_disk_io_time_weighted_seconds_total{device="sdc"} 17.07 node_disk_io_time_weighted_seconds_total{device="sr0"} 0 node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 # HELP node_disk_read_bytes_total The total number of bytes read successfully. # TYPE node_disk_read_bytes_total counter node_disk_read_bytes_total{device="dm-0"} 5.13708655616e+11 node_disk_read_bytes_total{device="dm-1"} 1.589248e+06 node_disk_read_bytes_total{device="dm-2"} 1.578752e+08 node_disk_read_bytes_total{device="dm-3"} 1.98144e+06 node_disk_read_bytes_total{device="dm-4"} 529408 node_disk_read_bytes_total{device="dm-5"} 4.3150848e+07 node_disk_read_bytes_total{device="mmcblk0"} 798720 node_disk_read_bytes_total{device="mmcblk0p1"} 81920 node_disk_read_bytes_total{device="mmcblk0p2"} 389120 node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 node_disk_read_bytes_total{device="sdc"} 8.48782848e+08 node_disk_read_bytes_total{device="sr0"} 0 node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 # HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. 
# TYPE node_disk_read_time_seconds_total counter node_disk_read_time_seconds_total{device="dm-0"} 46229.572 node_disk_read_time_seconds_total{device="dm-1"} 0.084 node_disk_read_time_seconds_total{device="dm-2"} 6.5360000000000005 node_disk_read_time_seconds_total{device="dm-3"} 0.10400000000000001 node_disk_read_time_seconds_total{device="dm-4"} 0.028 node_disk_read_time_seconds_total{device="dm-5"} 0.924 node_disk_read_time_seconds_total{device="mmcblk0"} 0.156 node_disk_read_time_seconds_total{device="mmcblk0p1"} 0.024 node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 node_disk_read_time_seconds_total{device="sda"} 18492.372 node_disk_read_time_seconds_total{device="sdb"} 0.084 node_disk_read_time_seconds_total{device="sdc"} 0.014 node_disk_read_time_seconds_total{device="sr0"} 0 node_disk_read_time_seconds_total{device="vda"} 8655.768 # HELP node_disk_reads_completed_total The total number of reads completed successfully. # TYPE node_disk_reads_completed_total counter node_disk_reads_completed_total{device="dm-0"} 5.9910002e+07 node_disk_reads_completed_total{device="dm-1"} 388 node_disk_reads_completed_total{device="dm-2"} 11571 node_disk_reads_completed_total{device="dm-3"} 3870 node_disk_reads_completed_total{device="dm-4"} 392 node_disk_reads_completed_total{device="dm-5"} 3729 node_disk_reads_completed_total{device="mmcblk0"} 192 node_disk_reads_completed_total{device="mmcblk0p1"} 17 node_disk_reads_completed_total{device="mmcblk0p2"} 95 node_disk_reads_completed_total{device="nvme0n1"} 47114 node_disk_reads_completed_total{device="sda"} 2.5354637e+07 node_disk_reads_completed_total{device="sdb"} 326552 node_disk_reads_completed_total{device="sdc"} 126552 node_disk_reads_completed_total{device="sr0"} 0 node_disk_reads_completed_total{device="vda"} 1.775784e+06 # HELP node_disk_reads_merged_total The total number of reads merged. # TYPE node_disk_reads_merged_total counter node_disk_reads_merged_total{device="dm-0"} 0 node_disk_reads_merged_total{device="dm-1"} 0 node_disk_reads_merged_total{device="dm-2"} 0 node_disk_reads_merged_total{device="dm-3"} 0 node_disk_reads_merged_total{device="dm-4"} 0 node_disk_reads_merged_total{device="dm-5"} 0 node_disk_reads_merged_total{device="mmcblk0"} 3 node_disk_reads_merged_total{device="mmcblk0p1"} 3 node_disk_reads_merged_total{device="mmcblk0p2"} 0 node_disk_reads_merged_total{device="nvme0n1"} 4 node_disk_reads_merged_total{device="sda"} 3.4367663e+07 node_disk_reads_merged_total{device="sdb"} 841 node_disk_reads_merged_total{device="sdc"} 141 node_disk_reads_merged_total{device="sr0"} 0 node_disk_reads_merged_total{device="vda"} 15386 # HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
# TYPE node_disk_write_time_seconds_total counter node_disk_write_time_seconds_total{device="dm-0"} 1.1585578e+06 node_disk_write_time_seconds_total{device="dm-1"} 0 node_disk_write_time_seconds_total{device="dm-2"} 122.884 node_disk_write_time_seconds_total{device="dm-3"} 0 node_disk_write_time_seconds_total{device="dm-4"} 0.016 node_disk_write_time_seconds_total{device="dm-5"} 104.684 node_disk_write_time_seconds_total{device="mmcblk0"} 0 node_disk_write_time_seconds_total{device="mmcblk0p1"} 0 node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 node_disk_write_time_seconds_total{device="sda"} 63877.96 node_disk_write_time_seconds_total{device="sdb"} 5.007 node_disk_write_time_seconds_total{device="sdc"} 1.0070000000000001 node_disk_write_time_seconds_total{device="sr0"} 0 node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 # HELP node_disk_writes_completed_total The total number of writes completed successfully. # TYPE node_disk_writes_completed_total counter node_disk_writes_completed_total{device="dm-0"} 3.9231014e+07 node_disk_writes_completed_total{device="dm-1"} 74 node_disk_writes_completed_total{device="dm-2"} 153522 node_disk_writes_completed_total{device="dm-3"} 0 node_disk_writes_completed_total{device="dm-4"} 38 node_disk_writes_completed_total{device="dm-5"} 98918 node_disk_writes_completed_total{device="mmcblk0"} 0 node_disk_writes_completed_total{device="mmcblk0p1"} 0 node_disk_writes_completed_total{device="mmcblk0p2"} 0 node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 node_disk_writes_completed_total{device="sda"} 2.8444756e+07 node_disk_writes_completed_total{device="sdb"} 41822 node_disk_writes_completed_total{device="sdc"} 11822 node_disk_writes_completed_total{device="sr0"} 0 node_disk_writes_completed_total{device="vda"} 6.038856e+06 # HELP node_disk_writes_merged_total The number of writes merged. # TYPE node_disk_writes_merged_total counter node_disk_writes_merged_total{device="dm-0"} 0 node_disk_writes_merged_total{device="dm-1"} 0 node_disk_writes_merged_total{device="dm-2"} 0 node_disk_writes_merged_total{device="dm-3"} 0 node_disk_writes_merged_total{device="dm-4"} 0 node_disk_writes_merged_total{device="dm-5"} 0 node_disk_writes_merged_total{device="mmcblk0"} 0 node_disk_writes_merged_total{device="mmcblk0p1"} 0 node_disk_writes_merged_total{device="mmcblk0p2"} 0 node_disk_writes_merged_total{device="nvme0n1"} 43950 node_disk_writes_merged_total{device="sda"} 1.1134226e+07 node_disk_writes_merged_total{device="sdb"} 2895 node_disk_writes_merged_total{device="sdc"} 1895 node_disk_writes_merged_total{device="sr0"} 0 node_disk_writes_merged_total{device="vda"} 2.0711856e+07 # HELP node_disk_written_bytes_total The total number of bytes written successfully. 
# TYPE node_disk_written_bytes_total counter node_disk_written_bytes_total{device="dm-0"} 2.5891680256e+11 node_disk_written_bytes_total{device="dm-1"} 303104 node_disk_written_bytes_total{device="dm-2"} 2.607828992e+09 node_disk_written_bytes_total{device="dm-3"} 0 node_disk_written_bytes_total{device="dm-4"} 70144 node_disk_written_bytes_total{device="dm-5"} 5.89664256e+08 node_disk_written_bytes_total{device="mmcblk0"} 0 node_disk_written_bytes_total{device="mmcblk0p1"} 0 node_disk_written_bytes_total{device="mmcblk0p2"} 0 node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 node_disk_written_bytes_total{device="sdc"} 8.852736e+07 node_disk_written_bytes_total{device="sr0"} 0 node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 # HELP node_dmi_info A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, product_sku, product_uuid, product_version, system_vendor if provided by DMI. # TYPE node_dmi_info gauge node_dmi_info{bios_date="04/12/2021",bios_release="2.2",bios_vendor="Dell Inc.",bios_version="2.2.4",board_name="07PXPY",board_serial=".7N62AI2.GRTCL6944100GP.",board_vendor="Dell Inc.",board_version="A01",chassis_asset_tag="",chassis_serial="7N62AI2",chassis_vendor="Dell Inc.",chassis_version="",product_family="PowerEdge",product_name="PowerEdge R6515",product_serial="7N62AI2",product_sku="SKU=NotProvided;ModelName=PowerEdge R6515",product_uuid="83340ca8-cb49-4474-8c29-d2088ca84dd9",product_version="�[�",system_vendor="Dell Inc."} 1 # HELP node_drbd_activitylog_writes_total Number of updates of the activity log area of the meta data. # TYPE node_drbd_activitylog_writes_total counter node_drbd_activitylog_writes_total{device="drbd1"} 1100 # HELP node_drbd_application_pending Number of block I/O requests forwarded to DRBD, but not yet answered by DRBD. # TYPE node_drbd_application_pending gauge node_drbd_application_pending{device="drbd1"} 12348 # HELP node_drbd_bitmap_writes_total Number of updates of the bitmap area of the meta data. # TYPE node_drbd_bitmap_writes_total counter node_drbd_bitmap_writes_total{device="drbd1"} 221 # HELP node_drbd_connected Whether DRBD is connected to the peer. # TYPE node_drbd_connected gauge node_drbd_connected{device="drbd1"} 1 # HELP node_drbd_disk_read_bytes_total Net data read from local hard disk; in bytes. # TYPE node_drbd_disk_read_bytes_total counter node_drbd_disk_read_bytes_total{device="drbd1"} 1.2154539008e+11 # HELP node_drbd_disk_state_is_up_to_date Whether the disk of the node is up to date. # TYPE node_drbd_disk_state_is_up_to_date gauge node_drbd_disk_state_is_up_to_date{device="drbd1",node="local"} 1 node_drbd_disk_state_is_up_to_date{device="drbd1",node="remote"} 1 # HELP node_drbd_disk_written_bytes_total Net data written on local hard disk; in bytes. # TYPE node_drbd_disk_written_bytes_total counter node_drbd_disk_written_bytes_total{device="drbd1"} 2.8941845504e+10 # HELP node_drbd_epochs Number of Epochs currently on the fly. # TYPE node_drbd_epochs gauge node_drbd_epochs{device="drbd1"} 1 # HELP node_drbd_local_pending Number of open requests to the local I/O sub-system. 
# TYPE node_drbd_local_pending gauge node_drbd_local_pending{device="drbd1"} 12345 # HELP node_drbd_network_received_bytes_total Total number of bytes received via the network. # TYPE node_drbd_network_received_bytes_total counter node_drbd_network_received_bytes_total{device="drbd1"} 1.0961011e+07 # HELP node_drbd_network_sent_bytes_total Total number of bytes sent via the network. # TYPE node_drbd_network_sent_bytes_total counter node_drbd_network_sent_bytes_total{device="drbd1"} 1.7740228608e+10 # HELP node_drbd_node_role_is_primary Whether the role of the node is in the primary state. # TYPE node_drbd_node_role_is_primary gauge node_drbd_node_role_is_primary{device="drbd1",node="local"} 1 node_drbd_node_role_is_primary{device="drbd1",node="remote"} 1 # HELP node_drbd_out_of_sync_bytes Amount of data known to be out of sync; in bytes. # TYPE node_drbd_out_of_sync_bytes gauge node_drbd_out_of_sync_bytes{device="drbd1"} 1.2645376e+07 # HELP node_drbd_remote_pending Number of requests sent to the peer, but that have not yet been answered by the latter. # TYPE node_drbd_remote_pending gauge node_drbd_remote_pending{device="drbd1"} 12346 # HELP node_drbd_remote_unacknowledged Number of requests received by the peer via the network connection, but that have not yet been answered. # TYPE node_drbd_remote_unacknowledged gauge node_drbd_remote_unacknowledged{device="drbd1"} 12347 # HELP node_edac_correctable_errors_total Total correctable memory errors. # TYPE node_edac_correctable_errors_total counter node_edac_correctable_errors_total{controller="0"} 1 # HELP node_edac_csrow_correctable_errors_total Total correctable memory errors for this csrow. # TYPE node_edac_csrow_correctable_errors_total counter node_edac_csrow_correctable_errors_total{controller="0",csrow="0"} 3 node_edac_csrow_correctable_errors_total{controller="0",csrow="unknown"} 2 # HELP node_edac_csrow_uncorrectable_errors_total Total uncorrectable memory errors for this csrow. # TYPE node_edac_csrow_uncorrectable_errors_total counter node_edac_csrow_uncorrectable_errors_total{controller="0",csrow="0"} 4 node_edac_csrow_uncorrectable_errors_total{controller="0",csrow="unknown"} 6 # HELP node_edac_uncorrectable_errors_total Total uncorrectable memory errors. # TYPE node_edac_uncorrectable_errors_total counter node_edac_uncorrectable_errors_total{controller="0"} 5 # HELP node_entropy_available_bits Bits of available entropy. # TYPE node_entropy_available_bits gauge node_entropy_available_bits 1337 # HELP node_entropy_pool_size_bits Bits of entropy pool. # TYPE node_entropy_pool_size_bits gauge node_entropy_pool_size_bits 4096 # HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. # TYPE node_exporter_build_info gauge # HELP node_fibrechannel_error_frames_total Number of errors in frames # TYPE node_fibrechannel_error_frames_total counter node_fibrechannel_error_frames_total{fc_host="host0"} 0 # HELP node_fibrechannel_fcp_packet_aborts_total Number of aborted packets # TYPE node_fibrechannel_fcp_packet_aborts_total counter node_fibrechannel_fcp_packet_aborts_total{fc_host="host0"} 19 # HELP node_fibrechannel_info Non-numeric data from /sys/class/fc_host/, value is always 1. 
# TYPE node_fibrechannel_info gauge node_fibrechannel_info{dev_loss_tmo="30",fabric_name="0",fc_host="host0",port_id="000002",port_name="1000e0071bce95f2",port_state="Online",port_type="Point-To-Point (direct nport connection)",speed="16 Gbit",supported_classes="Class 3",supported_speeds="4 Gbit, 8 Gbit, 16 Gbit",symbolic_name="Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux"} 1 # HELP node_fibrechannel_invalid_crc_total Invalid Cyclic Redundancy Check count # TYPE node_fibrechannel_invalid_crc_total counter node_fibrechannel_invalid_crc_total{fc_host="host0"} 2 # HELP node_fibrechannel_invalid_tx_words_total Number of invalid words transmitted by host port # TYPE node_fibrechannel_invalid_tx_words_total counter node_fibrechannel_invalid_tx_words_total{fc_host="host0"} 8 # HELP node_fibrechannel_link_failure_total Number of times the host port link has failed # TYPE node_fibrechannel_link_failure_total counter node_fibrechannel_link_failure_total{fc_host="host0"} 9 # HELP node_fibrechannel_loss_of_signal_total Number of times signal has been lost # TYPE node_fibrechannel_loss_of_signal_total counter node_fibrechannel_loss_of_signal_total{fc_host="host0"} 17 # HELP node_fibrechannel_loss_of_sync_total Number of failures on either bit or transmission word boundaries # TYPE node_fibrechannel_loss_of_sync_total counter node_fibrechannel_loss_of_sync_total{fc_host="host0"} 16 # HELP node_fibrechannel_nos_total Number Not_Operational Primitive Sequence received by host port # TYPE node_fibrechannel_nos_total counter node_fibrechannel_nos_total{fc_host="host0"} 18 # HELP node_fibrechannel_rx_frames_total Number of frames received # TYPE node_fibrechannel_rx_frames_total counter node_fibrechannel_rx_frames_total{fc_host="host0"} 3 # HELP node_fibrechannel_rx_words_total Number of words received by host port # TYPE node_fibrechannel_rx_words_total counter node_fibrechannel_rx_words_total{fc_host="host0"} 4 # HELP node_fibrechannel_seconds_since_last_reset_total Number of seconds since last host port reset # TYPE node_fibrechannel_seconds_since_last_reset_total counter node_fibrechannel_seconds_since_last_reset_total{fc_host="host0"} 7 # HELP node_fibrechannel_tx_frames_total Number of frames transmitted by host port # TYPE node_fibrechannel_tx_frames_total counter node_fibrechannel_tx_frames_total{fc_host="host0"} 5 # HELP node_fibrechannel_tx_words_total Number of words transmitted by host port # TYPE node_fibrechannel_tx_words_total counter node_fibrechannel_tx_words_total{fc_host="host0"} 6 # HELP node_filefd_allocated File descriptor statistics: allocated. # TYPE node_filefd_allocated gauge node_filefd_allocated 1024 # HELP node_filefd_maximum File descriptor statistics: maximum. # TYPE node_filefd_maximum gauge node_filefd_maximum 1.631329e+06 # HELP node_forks_total Total number of forks. 
# TYPE node_forks_total counter node_forks_total 26442 # HELP node_hwmon_chip_names Annotation metric for human-readable chip names # TYPE node_hwmon_chip_names gauge node_hwmon_chip_names{chip="nct6779",chip_name="nct6779"} 1 node_hwmon_chip_names{chip="platform_coretemp_0",chip_name="coretemp"} 1 node_hwmon_chip_names{chip="platform_coretemp_1",chip_name="coretemp"} 1 # HELP node_hwmon_fan_alarm Hardware sensor alarm status (fan) # TYPE node_hwmon_fan_alarm gauge node_hwmon_fan_alarm{chip="nct6779",sensor="fan2"} 0 # HELP node_hwmon_fan_beep_enabled Hardware monitor sensor has beeping enabled # TYPE node_hwmon_fan_beep_enabled gauge node_hwmon_fan_beep_enabled{chip="nct6779",sensor="fan2"} 0 # HELP node_hwmon_fan_manual Hardware monitor fan element manual # TYPE node_hwmon_fan_manual gauge node_hwmon_fan_manual{chip="platform_applesmc_768",sensor="fan1"} 0 node_hwmon_fan_manual{chip="platform_applesmc_768",sensor="fan2"} 0 # HELP node_hwmon_fan_max_rpm Hardware monitor for fan revolutions per minute (max) # TYPE node_hwmon_fan_max_rpm gauge node_hwmon_fan_max_rpm{chip="platform_applesmc_768",sensor="fan1"} 6156 node_hwmon_fan_max_rpm{chip="platform_applesmc_768",sensor="fan2"} 5700 # HELP node_hwmon_fan_min_rpm Hardware monitor for fan revolutions per minute (min) # TYPE node_hwmon_fan_min_rpm gauge node_hwmon_fan_min_rpm{chip="nct6779",sensor="fan2"} 0 node_hwmon_fan_min_rpm{chip="platform_applesmc_768",sensor="fan1"} 2160 node_hwmon_fan_min_rpm{chip="platform_applesmc_768",sensor="fan2"} 2000 # HELP node_hwmon_fan_output Hardware monitor fan element output # TYPE node_hwmon_fan_output gauge node_hwmon_fan_output{chip="platform_applesmc_768",sensor="fan1"} 2160 node_hwmon_fan_output{chip="platform_applesmc_768",sensor="fan2"} 2000 # HELP node_hwmon_fan_pulses Hardware monitor fan element pulses # TYPE node_hwmon_fan_pulses gauge node_hwmon_fan_pulses{chip="nct6779",sensor="fan2"} 2 # HELP node_hwmon_fan_rpm Hardware monitor for fan revolutions per minute (input) # TYPE node_hwmon_fan_rpm gauge node_hwmon_fan_rpm{chip="nct6779",sensor="fan2"} 1098 node_hwmon_fan_rpm{chip="platform_applesmc_768",sensor="fan1"} 0 node_hwmon_fan_rpm{chip="platform_applesmc_768",sensor="fan2"} 1998 # HELP node_hwmon_fan_target_rpm Hardware monitor for fan revolutions per minute (target) # TYPE node_hwmon_fan_target_rpm gauge node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000 # HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance # TYPE node_hwmon_fan_tolerance gauge node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0 # HELP node_hwmon_in_alarm Hardware sensor alarm status (in) # TYPE node_hwmon_in_alarm gauge node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0 node_hwmon_in_alarm{chip="nct6779",sensor="in1"} 1 # HELP node_hwmon_in_beep_enabled Hardware monitor sensor has beeping enabled # TYPE node_hwmon_in_beep_enabled gauge node_hwmon_in_beep_enabled{chip="nct6779",sensor="in0"} 0 node_hwmon_in_beep_enabled{chip="nct6779",sensor="in1"} 0 # HELP node_hwmon_in_max_volts Hardware monitor for voltage (max) # TYPE node_hwmon_in_max_volts gauge node_hwmon_in_max_volts{chip="nct6779",sensor="in0"} 1.744 node_hwmon_in_max_volts{chip="nct6779",sensor="in1"} 0 # HELP node_hwmon_in_min_volts Hardware monitor for voltage (min) # TYPE node_hwmon_in_min_volts gauge node_hwmon_in_min_volts{chip="nct6779",sensor="in0"} 0 node_hwmon_in_min_volts{chip="nct6779",sensor="in1"} 0 # HELP node_hwmon_in_volts Hardware monitor for voltage (input) # TYPE node_hwmon_in_volts gauge 
node_hwmon_in_volts{chip="nct6779",sensor="in0"} 0.792 node_hwmon_in_volts{chip="nct6779",sensor="in1"} 1.024 # HELP node_hwmon_intrusion_alarm Hardware sensor alarm status (intrusion) # TYPE node_hwmon_intrusion_alarm gauge node_hwmon_intrusion_alarm{chip="nct6779",sensor="intrusion0"} 1 node_hwmon_intrusion_alarm{chip="nct6779",sensor="intrusion1"} 1 # HELP node_hwmon_intrusion_beep_enabled Hardware monitor sensor has beeping enabled # TYPE node_hwmon_intrusion_beep_enabled gauge node_hwmon_intrusion_beep_enabled{chip="nct6779",sensor="intrusion0"} 0 node_hwmon_intrusion_beep_enabled{chip="nct6779",sensor="intrusion1"} 0 # HELP node_hwmon_pwm_auto_point1_pwm Hardware monitor pwm element auto_point1_pwm # TYPE node_hwmon_pwm_auto_point1_pwm gauge node_hwmon_pwm_auto_point1_pwm{chip="nct6779",sensor="pwm1"} 153 # HELP node_hwmon_pwm_auto_point1_temp Hardware monitor pwm element auto_point1_temp # TYPE node_hwmon_pwm_auto_point1_temp gauge node_hwmon_pwm_auto_point1_temp{chip="nct6779",sensor="pwm1"} 30000 # HELP node_hwmon_pwm_auto_point2_pwm Hardware monitor pwm element auto_point2_pwm # TYPE node_hwmon_pwm_auto_point2_pwm gauge node_hwmon_pwm_auto_point2_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point2_temp Hardware monitor pwm element auto_point2_temp # TYPE node_hwmon_pwm_auto_point2_temp gauge node_hwmon_pwm_auto_point2_temp{chip="nct6779",sensor="pwm1"} 70000 # HELP node_hwmon_pwm_auto_point3_pwm Hardware monitor pwm element auto_point3_pwm # TYPE node_hwmon_pwm_auto_point3_pwm gauge node_hwmon_pwm_auto_point3_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point3_temp Hardware monitor pwm element auto_point3_temp # TYPE node_hwmon_pwm_auto_point3_temp gauge node_hwmon_pwm_auto_point3_temp{chip="nct6779",sensor="pwm1"} 70000 # HELP node_hwmon_pwm_auto_point4_pwm Hardware monitor pwm element auto_point4_pwm # TYPE node_hwmon_pwm_auto_point4_pwm gauge node_hwmon_pwm_auto_point4_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point4_temp Hardware monitor pwm element auto_point4_temp # TYPE node_hwmon_pwm_auto_point4_temp gauge node_hwmon_pwm_auto_point4_temp{chip="nct6779",sensor="pwm1"} 70000 # HELP node_hwmon_pwm_auto_point5_pwm Hardware monitor pwm element auto_point5_pwm # TYPE node_hwmon_pwm_auto_point5_pwm gauge node_hwmon_pwm_auto_point5_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point5_temp Hardware monitor pwm element auto_point5_temp # TYPE node_hwmon_pwm_auto_point5_temp gauge node_hwmon_pwm_auto_point5_temp{chip="nct6779",sensor="pwm1"} 75000 # HELP node_hwmon_pwm_crit_temp_tolerance Hardware monitor pwm element crit_temp_tolerance # TYPE node_hwmon_pwm_crit_temp_tolerance gauge node_hwmon_pwm_crit_temp_tolerance{chip="nct6779",sensor="pwm1"} 2000 # HELP node_hwmon_pwm_enable Hardware monitor pwm element enable # TYPE node_hwmon_pwm_enable gauge node_hwmon_pwm_enable{chip="nct6779",sensor="pwm1"} 5 # HELP node_hwmon_pwm_floor Hardware monitor pwm element floor # TYPE node_hwmon_pwm_floor gauge node_hwmon_pwm_floor{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_mode Hardware monitor pwm element mode # TYPE node_hwmon_pwm_mode gauge node_hwmon_pwm_mode{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_start Hardware monitor pwm element start # TYPE node_hwmon_pwm_start gauge node_hwmon_pwm_start{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_step_down_time Hardware monitor pwm element step_down_time # TYPE node_hwmon_pwm_step_down_time gauge 
node_hwmon_pwm_step_down_time{chip="nct6779",sensor="pwm1"} 100 # HELP node_hwmon_pwm_step_up_time Hardware monitor pwm element step_up_time # TYPE node_hwmon_pwm_step_up_time gauge node_hwmon_pwm_step_up_time{chip="nct6779",sensor="pwm1"} 100 # HELP node_hwmon_pwm_stop_time Hardware monitor pwm element stop_time # TYPE node_hwmon_pwm_stop_time gauge node_hwmon_pwm_stop_time{chip="nct6779",sensor="pwm1"} 6000 # HELP node_hwmon_pwm_target_temp Hardware monitor pwm element target_temp # TYPE node_hwmon_pwm_target_temp gauge node_hwmon_pwm_target_temp{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_temp_sel Hardware monitor pwm element temp_sel # TYPE node_hwmon_pwm_temp_sel gauge node_hwmon_pwm_temp_sel{chip="nct6779",sensor="pwm1"} 7 # HELP node_hwmon_pwm_temp_tolerance Hardware monitor pwm element temp_tolerance # TYPE node_hwmon_pwm_temp_tolerance gauge node_hwmon_pwm_temp_tolerance{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_duty_base Hardware monitor pwm element weight_duty_base # TYPE node_hwmon_pwm_weight_duty_base gauge node_hwmon_pwm_weight_duty_base{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_duty_step Hardware monitor pwm element weight_duty_step # TYPE node_hwmon_pwm_weight_duty_step gauge node_hwmon_pwm_weight_duty_step{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_temp_sel Hardware monitor pwm element weight_temp_sel # TYPE node_hwmon_pwm_weight_temp_sel gauge node_hwmon_pwm_weight_temp_sel{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_weight_temp_step Hardware monitor pwm element weight_temp_step # TYPE node_hwmon_pwm_weight_temp_step gauge node_hwmon_pwm_weight_temp_step{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_temp_step_base Hardware monitor pwm element weight_temp_step_base # TYPE node_hwmon_pwm_weight_temp_step_base gauge node_hwmon_pwm_weight_temp_step_base{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_temp_step_tol Hardware monitor pwm element weight_temp_step_tol # TYPE node_hwmon_pwm_weight_temp_step_tol gauge node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_sensor_label Label for given chip and sensor # TYPE node_hwmon_sensor_label gauge node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side ",sensor="fan1"} 1 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side ",sensor="fan2"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 1",sensor="temp3"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 2",sensor="temp4"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 3",sensor="temp5"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Physical id 0",sensor="temp1"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 0",sensor="temp2"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 1",sensor="temp3"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 2",sensor="temp4"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 3",sensor="temp5"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Physical id 0",sensor="temp1"} 1 # HELP node_hwmon_temp_celsius Hardware monitor for temperature (input) # TYPE node_hwmon_temp_celsius gauge 
node_hwmon_temp_celsius{chip="hwmon4",sensor="temp1"} 55 node_hwmon_temp_celsius{chip="hwmon4",sensor="temp2"} 54 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp1"} 55 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp2"} 54 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp3"} 52 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp4"} 53 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp5"} 50 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp1"} 55 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp2"} 54 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp3"} 52 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp4"} 53 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp5"} 50 # HELP node_hwmon_temp_crit_alarm_celsius Hardware monitor for temperature (crit_alarm) # TYPE node_hwmon_temp_crit_alarm_celsius gauge node_hwmon_temp_crit_alarm_celsius{chip="hwmon4",sensor="temp1"} 0 node_hwmon_temp_crit_alarm_celsius{chip="hwmon4",sensor="temp2"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp1"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp2"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp3"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp4"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp5"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp1"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp2"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp3"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp4"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp5"} 0 # HELP node_hwmon_temp_crit_celsius Hardware monitor for temperature (crit) # TYPE node_hwmon_temp_crit_celsius gauge node_hwmon_temp_crit_celsius{chip="hwmon4",sensor="temp1"} 100 node_hwmon_temp_crit_celsius{chip="hwmon4",sensor="temp2"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp1"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp2"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp3"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp4"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp5"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp1"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp2"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp3"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp4"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp5"} 100 # HELP node_hwmon_temp_max_celsius Hardware monitor for temperature (max) # TYPE node_hwmon_temp_max_celsius gauge node_hwmon_temp_max_celsius{chip="hwmon4",sensor="temp1"} 100 node_hwmon_temp_max_celsius{chip="hwmon4",sensor="temp2"} 100 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp1"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp2"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp3"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp4"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp5"} 84 
node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp1"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp2"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp3"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp4"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp5"} 84 # HELP node_infiniband_info Non-numeric data from /sys/class/infiniband/, value is always 1. # TYPE node_infiniband_info gauge node_infiniband_info{board_id="I40IW Board ID",device="i40iw0",firmware_version="0.2",hca_type="I40IW"} 1 node_infiniband_info{board_id="SM_1141000001000",device="mlx4_0",firmware_version="2.31.5050",hca_type="MT4099"} 1 # HELP node_infiniband_legacy_data_received_bytes_total Number of data octets received on all links # TYPE node_infiniband_legacy_data_received_bytes_total counter node_infiniband_legacy_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 node_infiniband_legacy_data_received_bytes_total{device="mlx4_0",port="2"} 1.8527668e+07 # HELP node_infiniband_legacy_data_transmitted_bytes_total Number of data octets transmitted on all links # TYPE node_infiniband_legacy_data_transmitted_bytes_total counter node_infiniband_legacy_data_transmitted_bytes_total{device="mlx4_0",port="1"} 1.493376e+07 node_infiniband_legacy_data_transmitted_bytes_total{device="mlx4_0",port="2"} 1.493376e+07 # HELP node_infiniband_legacy_multicast_packets_received_total Number of multicast packets received # TYPE node_infiniband_legacy_multicast_packets_received_total counter node_infiniband_legacy_multicast_packets_received_total{device="mlx4_0",port="1"} 93 node_infiniband_legacy_multicast_packets_received_total{device="mlx4_0",port="2"} 93 # HELP node_infiniband_legacy_multicast_packets_transmitted_total Number of multicast packets transmitted # TYPE node_infiniband_legacy_multicast_packets_transmitted_total counter node_infiniband_legacy_multicast_packets_transmitted_total{device="mlx4_0",port="1"} 16 node_infiniband_legacy_multicast_packets_transmitted_total{device="mlx4_0",port="2"} 16 # HELP node_infiniband_legacy_packets_received_total Number of data packets received on all links # TYPE node_infiniband_legacy_packets_received_total counter node_infiniband_legacy_packets_received_total{device="mlx4_0",port="1"} 0 node_infiniband_legacy_packets_received_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_legacy_packets_transmitted_total Number of data packets received on all links # TYPE node_infiniband_legacy_packets_transmitted_total counter node_infiniband_legacy_packets_transmitted_total{device="mlx4_0",port="1"} 0 node_infiniband_legacy_packets_transmitted_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_legacy_unicast_packets_received_total Number of unicast packets received # TYPE node_infiniband_legacy_unicast_packets_received_total counter node_infiniband_legacy_unicast_packets_received_total{device="mlx4_0",port="1"} 61148 node_infiniband_legacy_unicast_packets_received_total{device="mlx4_0",port="2"} 61148 # HELP node_infiniband_legacy_unicast_packets_transmitted_total Number of unicast packets transmitted # TYPE node_infiniband_legacy_unicast_packets_transmitted_total counter node_infiniband_legacy_unicast_packets_transmitted_total{device="mlx4_0",port="1"} 61239 node_infiniband_legacy_unicast_packets_transmitted_total{device="mlx4_0",port="2"} 61239 # HELP node_infiniband_link_downed_total Number of times the link failed to recover from an error state and went down # TYPE 
node_infiniband_link_downed_total counter node_infiniband_link_downed_total{device="mlx4_0",port="1"} 0 node_infiniband_link_downed_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_link_error_recovery_total Number of times the link successfully recovered from an error state # TYPE node_infiniband_link_error_recovery_total counter node_infiniband_link_error_recovery_total{device="mlx4_0",port="1"} 0 node_infiniband_link_error_recovery_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_multicast_packets_received_total Number of multicast packets received (including errors) # TYPE node_infiniband_multicast_packets_received_total counter node_infiniband_multicast_packets_received_total{device="mlx4_0",port="1"} 93 node_infiniband_multicast_packets_received_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_multicast_packets_transmitted_total Number of multicast packets transmitted (including errors) # TYPE node_infiniband_multicast_packets_transmitted_total counter node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="1"} 16 node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_physical_state_id Physical state of the InfiniBand port (0: no change, 1: sleep, 2: polling, 3: disable, 4: shift, 5: link up, 6: link error recover, 7: phytest) # TYPE node_infiniband_physical_state_id gauge node_infiniband_physical_state_id{device="i40iw0",port="1"} 5 node_infiniband_physical_state_id{device="mlx4_0",port="1"} 5 node_infiniband_physical_state_id{device="mlx4_0",port="2"} 5 # HELP node_infiniband_port_constraint_errors_received_total Number of packets received on the switch physical port that are discarded # TYPE node_infiniband_port_constraint_errors_received_total counter node_infiniband_port_constraint_errors_received_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_constraint_errors_transmitted_total Number of packets not transmitted from the switch physical port # TYPE node_infiniband_port_constraint_errors_transmitted_total counter node_infiniband_port_constraint_errors_transmitted_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_data_received_bytes_total Number of data octets received on all links # TYPE node_infiniband_port_data_received_bytes_total counter node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_port_data_transmitted_bytes_total Number of data octets transmitted on all links # TYPE node_infiniband_port_data_transmitted_bytes_total counter node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="1"} 1.493376e+07 node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_port_discards_received_total Number of inbound packets discarded by the port because the port is down or congested # TYPE node_infiniband_port_discards_received_total counter node_infiniband_port_discards_received_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_discards_transmitted_total Number of outbound packets discarded by the port because the port is down or congested # TYPE node_infiniband_port_discards_transmitted_total counter node_infiniband_port_discards_transmitted_total{device="mlx4_0",port="1"} 5 # HELP node_infiniband_port_errors_received_total Number of packets containing an error that were received on this port # TYPE node_infiniband_port_errors_received_total counter 
node_infiniband_port_errors_received_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_packets_received_total Number of packets received on all VLs by this port (including errors) # TYPE node_infiniband_port_packets_received_total counter node_infiniband_port_packets_received_total{device="mlx4_0",port="1"} 6.825908347e+09 # HELP node_infiniband_port_packets_transmitted_total Number of packets transmitted on all VLs from this port (including errors) # TYPE node_infiniband_port_packets_transmitted_total counter node_infiniband_port_packets_transmitted_total{device="mlx4_0",port="1"} 6.235865e+06 # HELP node_infiniband_port_transmit_wait_total Number of ticks during which the port had data to transmit but no data was sent during the entire tick # TYPE node_infiniband_port_transmit_wait_total counter node_infiniband_port_transmit_wait_total{device="mlx4_0",port="1"} 4.294967295e+09 # HELP node_infiniband_rate_bytes_per_second Maximum signal transfer rate # TYPE node_infiniband_rate_bytes_per_second gauge node_infiniband_rate_bytes_per_second{device="i40iw0",port="1"} 1.25e+09 node_infiniband_rate_bytes_per_second{device="mlx4_0",port="1"} 5e+09 node_infiniband_rate_bytes_per_second{device="mlx4_0",port="2"} 5e+09 # HELP node_infiniband_state_id State of the InfiniBand port (0: no change, 1: down, 2: init, 3: armed, 4: active, 5: act defer) # TYPE node_infiniband_state_id gauge node_infiniband_state_id{device="i40iw0",port="1"} 4 node_infiniband_state_id{device="mlx4_0",port="1"} 4 node_infiniband_state_id{device="mlx4_0",port="2"} 4 # HELP node_infiniband_unicast_packets_received_total Number of unicast packets received (including errors) # TYPE node_infiniband_unicast_packets_received_total counter node_infiniband_unicast_packets_received_total{device="mlx4_0",port="1"} 61148 node_infiniband_unicast_packets_received_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_unicast_packets_transmitted_total Number of unicast packets transmitted (including errors) # TYPE node_infiniband_unicast_packets_transmitted_total counter node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="1"} 61239 node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 # HELP node_interrupts_total Interrupt details. 
# TYPE node_interrupts_total counter node_interrupts_total{cpu="0",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="0",devices="",info="Function call interrupts",type="CAL"} 148554 node_interrupts_total{cpu="0",devices="",info="IRQ work interrupts",type="IWI"} 1.509379e+06 node_interrupts_total{cpu="0",devices="",info="Local timer interrupts",type="LOC"} 1.74326351e+08 node_interrupts_total{cpu="0",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="0",devices="",info="Machine check polls",type="MCP"} 2406 node_interrupts_total{cpu="0",devices="",info="Non-maskable interrupts",type="NMI"} 47 node_interrupts_total{cpu="0",devices="",info="Performance monitoring interrupts",type="PMI"} 47 node_interrupts_total{cpu="0",devices="",info="Rescheduling interrupts",type="RES"} 1.0847134e+07 node_interrupts_total{cpu="0",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="0",devices="",info="TLB shootdowns",type="TLB"} 1.0460334e+07 node_interrupts_total{cpu="0",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="0",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="0",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 398553 node_interrupts_total{cpu="0",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.434032e+06 node_interrupts_total{cpu="0",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="0",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="0",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 328511 node_interrupts_total{cpu="0",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 1.451445e+06 node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="1"} 17960 node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="12"} 380847 node_interrupts_total{cpu="0",devices="i915",info="IR-PCI-MSI-edge",type="44"} 140636 node_interrupts_total{cpu="0",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 4.3078464e+07 node_interrupts_total{cpu="0",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 4 node_interrupts_total{cpu="0",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 1 node_interrupts_total{cpu="0",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 350 node_interrupts_total{cpu="0",devices="timer",info="IR-IO-APIC-edge",type="0"} 18 node_interrupts_total{cpu="0",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 378324 node_interrupts_total{cpu="1",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="1",devices="",info="Function call interrupts",type="CAL"} 157441 node_interrupts_total{cpu="1",devices="",info="IRQ work interrupts",type="IWI"} 2.411776e+06 node_interrupts_total{cpu="1",devices="",info="Local timer interrupts",type="LOC"} 1.35776678e+08 node_interrupts_total{cpu="1",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="1",devices="",info="Machine check polls",type="MCP"} 2399 node_interrupts_total{cpu="1",devices="",info="Non-maskable interrupts",type="NMI"} 5031 node_interrupts_total{cpu="1",devices="",info="Performance monitoring interrupts",type="PMI"} 5031 node_interrupts_total{cpu="1",devices="",info="Rescheduling interrupts",type="RES"} 9.111507e+06 node_interrupts_total{cpu="1",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="1",devices="",info="TLB shootdowns",type="TLB"} 9.918429e+06 
node_interrupts_total{cpu="1",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="1",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="1",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 2320 node_interrupts_total{cpu="1",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 8.092205e+06 node_interrupts_total{cpu="1",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="1",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="1",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 322879 node_interrupts_total{cpu="1",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 3.333499e+06 node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="1"} 105 node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="12"} 1021 node_interrupts_total{cpu="1",devices="i915",info="IR-PCI-MSI-edge",type="44"} 226313 node_interrupts_total{cpu="1",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 130 node_interrupts_total{cpu="1",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 22 node_interrupts_total{cpu="1",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 node_interrupts_total{cpu="1",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 224 node_interrupts_total{cpu="1",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 node_interrupts_total{cpu="1",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 1.734637e+06 node_interrupts_total{cpu="2",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="2",devices="",info="Function call interrupts",type="CAL"} 142912 node_interrupts_total{cpu="2",devices="",info="IRQ work interrupts",type="IWI"} 1.512975e+06 node_interrupts_total{cpu="2",devices="",info="Local timer interrupts",type="LOC"} 1.68393257e+08 node_interrupts_total{cpu="2",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="2",devices="",info="Machine check polls",type="MCP"} 2399 node_interrupts_total{cpu="2",devices="",info="Non-maskable interrupts",type="NMI"} 6211 node_interrupts_total{cpu="2",devices="",info="Performance monitoring interrupts",type="PMI"} 6211 node_interrupts_total{cpu="2",devices="",info="Rescheduling interrupts",type="RES"} 1.5999335e+07 node_interrupts_total{cpu="2",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="2",devices="",info="TLB shootdowns",type="TLB"} 1.0494258e+07 node_interrupts_total{cpu="2",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="2",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="2",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 824 node_interrupts_total{cpu="2",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 6.478877e+06 node_interrupts_total{cpu="2",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="2",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="2",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 293782 node_interrupts_total{cpu="2",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 1.092032e+06 node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="12"} 240 node_interrupts_total{cpu="2",devices="i915",info="IR-PCI-MSI-edge",type="44"} 347 node_interrupts_total{cpu="2",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 460171 
node_interrupts_total{cpu="2",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 node_interrupts_total{cpu="2",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 node_interrupts_total{cpu="2",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 0 node_interrupts_total{cpu="2",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 node_interrupts_total{cpu="2",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 440240 node_interrupts_total{cpu="3",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="3",devices="",info="Function call interrupts",type="CAL"} 155528 node_interrupts_total{cpu="3",devices="",info="IRQ work interrupts",type="IWI"} 2.428828e+06 node_interrupts_total{cpu="3",devices="",info="Local timer interrupts",type="LOC"} 1.30980079e+08 node_interrupts_total{cpu="3",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="3",devices="",info="Machine check polls",type="MCP"} 2399 node_interrupts_total{cpu="3",devices="",info="Non-maskable interrupts",type="NMI"} 4968 node_interrupts_total{cpu="3",devices="",info="Performance monitoring interrupts",type="PMI"} 4968 node_interrupts_total{cpu="3",devices="",info="Rescheduling interrupts",type="RES"} 7.45726e+06 node_interrupts_total{cpu="3",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="3",devices="",info="TLB shootdowns",type="TLB"} 1.0345022e+07 node_interrupts_total{cpu="3",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="3",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="3",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 863 node_interrupts_total{cpu="3",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.492252e+06 node_interrupts_total{cpu="3",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="3",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="3",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 351412 node_interrupts_total{cpu="3",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 2.644609e+06 node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="12"} 198 node_interrupts_total{cpu="3",devices="i915",info="IR-PCI-MSI-edge",type="44"} 633 node_interrupts_total{cpu="3",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 290 node_interrupts_total{cpu="3",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 node_interrupts_total{cpu="3",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 node_interrupts_total{cpu="3",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 0 node_interrupts_total{cpu="3",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 node_interrupts_total{cpu="3",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 2.434308e+06 # HELP node_intr_total Total number of interrupts serviced. # TYPE node_intr_total counter node_intr_total 8.885917e+06 # HELP node_ipvs_backend_connections_active The current active connections by local and remote address. 
# TYPE node_ipvs_backend_connections_active gauge node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 321 node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 64 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. 
# TYPE node_ipvs_backend_connections_inactive gauge node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 5 node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 1 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. # TYPE node_ipvs_backend_weight gauge node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 20 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_connections_total The total number of connections made. 
# TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 # HELP node_ipvs_incoming_bytes_total The total amount of incoming data. # TYPE node_ipvs_incoming_bytes_total counter node_ipvs_incoming_bytes_total 8.9991519156915e+13 # HELP node_ipvs_incoming_packets_total The total number of incoming packets. # TYPE node_ipvs_incoming_packets_total counter node_ipvs_incoming_packets_total 3.811989221e+09 # HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. # TYPE node_ipvs_outgoing_bytes_total counter node_ipvs_outgoing_bytes_total 0 # HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. # TYPE node_ipvs_outgoing_packets_total counter node_ipvs_outgoing_packets_total 0 # HELP node_ksmd_full_scans_total ksmd 'full_scans' file. # TYPE node_ksmd_full_scans_total counter node_ksmd_full_scans_total 323 # HELP node_ksmd_merge_across_nodes ksmd 'merge_across_nodes' file. # TYPE node_ksmd_merge_across_nodes gauge node_ksmd_merge_across_nodes 1 # HELP node_ksmd_pages_shared ksmd 'pages_shared' file. # TYPE node_ksmd_pages_shared gauge node_ksmd_pages_shared 1 # HELP node_ksmd_pages_sharing ksmd 'pages_sharing' file. # TYPE node_ksmd_pages_sharing gauge node_ksmd_pages_sharing 255 # HELP node_ksmd_pages_to_scan ksmd 'pages_to_scan' file. # TYPE node_ksmd_pages_to_scan gauge node_ksmd_pages_to_scan 100 # HELP node_ksmd_pages_unshared ksmd 'pages_unshared' file. # TYPE node_ksmd_pages_unshared gauge node_ksmd_pages_unshared 0 # HELP node_ksmd_pages_volatile ksmd 'pages_volatile' file. # TYPE node_ksmd_pages_volatile gauge node_ksmd_pages_volatile 0 # HELP node_ksmd_run ksmd 'run' file. # TYPE node_ksmd_run gauge node_ksmd_run 1 # HELP node_ksmd_sleep_seconds ksmd 'sleep_millisecs' file. # TYPE node_ksmd_sleep_seconds gauge node_ksmd_sleep_seconds 0.02 # HELP node_lnstat_allocs_total linux network cache stats # TYPE node_lnstat_allocs_total counter node_lnstat_allocs_total{cpu="0",subsystem="arp_cache"} 1 node_lnstat_allocs_total{cpu="0",subsystem="ndisc_cache"} 240 node_lnstat_allocs_total{cpu="1",subsystem="arp_cache"} 13 node_lnstat_allocs_total{cpu="1",subsystem="ndisc_cache"} 252 # HELP node_lnstat_delete_list_total linux network cache stats # TYPE node_lnstat_delete_list_total counter node_lnstat_delete_list_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_delete_list_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_delete_list_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_delete_list_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_delete_total linux network cache stats # TYPE node_lnstat_delete_total counter node_lnstat_delete_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_delete_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_delete_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_delete_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_destroys_total linux network cache stats # TYPE node_lnstat_destroys_total counter node_lnstat_destroys_total{cpu="0",subsystem="arp_cache"} 2 node_lnstat_destroys_total{cpu="0",subsystem="ndisc_cache"} 241 node_lnstat_destroys_total{cpu="1",subsystem="arp_cache"} 14 node_lnstat_destroys_total{cpu="1",subsystem="ndisc_cache"} 253 # HELP node_lnstat_drop_total linux network cache stats # TYPE node_lnstat_drop_total counter node_lnstat_drop_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_drop_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_drop_total{cpu="2",subsystem="nf_conntrack"} 0 
node_lnstat_drop_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_early_drop_total linux network cache stats # TYPE node_lnstat_early_drop_total counter node_lnstat_early_drop_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_early_drop_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_early_drop_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_early_drop_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_entries_total linux network cache stats # TYPE node_lnstat_entries_total counter node_lnstat_entries_total{cpu="0",subsystem="arp_cache"} 20 node_lnstat_entries_total{cpu="0",subsystem="ndisc_cache"} 36 node_lnstat_entries_total{cpu="0",subsystem="nf_conntrack"} 33 node_lnstat_entries_total{cpu="1",subsystem="arp_cache"} 20 node_lnstat_entries_total{cpu="1",subsystem="ndisc_cache"} 36 node_lnstat_entries_total{cpu="1",subsystem="nf_conntrack"} 33 node_lnstat_entries_total{cpu="2",subsystem="nf_conntrack"} 33 node_lnstat_entries_total{cpu="3",subsystem="nf_conntrack"} 33 # HELP node_lnstat_expect_create_total linux network cache stats # TYPE node_lnstat_expect_create_total counter node_lnstat_expect_create_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_expect_create_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_expect_create_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_expect_create_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_expect_delete_total linux network cache stats # TYPE node_lnstat_expect_delete_total counter node_lnstat_expect_delete_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_expect_delete_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_expect_delete_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_expect_delete_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_expect_new_total linux network cache stats # TYPE node_lnstat_expect_new_total counter node_lnstat_expect_new_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_expect_new_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_expect_new_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_expect_new_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_forced_gc_runs_total linux network cache stats # TYPE node_lnstat_forced_gc_runs_total counter node_lnstat_forced_gc_runs_total{cpu="0",subsystem="arp_cache"} 10 node_lnstat_forced_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 249 node_lnstat_forced_gc_runs_total{cpu="1",subsystem="arp_cache"} 22 node_lnstat_forced_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 261 # HELP node_lnstat_found_total linux network cache stats # TYPE node_lnstat_found_total counter node_lnstat_found_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_found_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_found_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_found_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_hash_grows_total linux network cache stats # TYPE node_lnstat_hash_grows_total counter node_lnstat_hash_grows_total{cpu="0",subsystem="arp_cache"} 3 node_lnstat_hash_grows_total{cpu="0",subsystem="ndisc_cache"} 242 node_lnstat_hash_grows_total{cpu="1",subsystem="arp_cache"} 15 node_lnstat_hash_grows_total{cpu="1",subsystem="ndisc_cache"} 254 # HELP node_lnstat_hits_total linux network cache stats # TYPE node_lnstat_hits_total counter node_lnstat_hits_total{cpu="0",subsystem="arp_cache"} 5 node_lnstat_hits_total{cpu="0",subsystem="ndisc_cache"} 244 node_lnstat_hits_total{cpu="1",subsystem="arp_cache"} 17 
node_lnstat_hits_total{cpu="1",subsystem="ndisc_cache"} 256 # HELP node_lnstat_icmp_error_total linux network cache stats # TYPE node_lnstat_icmp_error_total counter node_lnstat_icmp_error_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_icmp_error_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_icmp_error_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_icmp_error_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_ignore_total linux network cache stats # TYPE node_lnstat_ignore_total counter node_lnstat_ignore_total{cpu="0",subsystem="nf_conntrack"} 22666 node_lnstat_ignore_total{cpu="1",subsystem="nf_conntrack"} 22180 node_lnstat_ignore_total{cpu="2",subsystem="nf_conntrack"} 22740 node_lnstat_ignore_total{cpu="3",subsystem="nf_conntrack"} 22152 # HELP node_lnstat_insert_failed_total linux network cache stats # TYPE node_lnstat_insert_failed_total counter node_lnstat_insert_failed_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_insert_failed_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_insert_failed_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_insert_failed_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_insert_total linux network cache stats # TYPE node_lnstat_insert_total counter node_lnstat_insert_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_insert_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_insert_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_insert_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_invalid_total linux network cache stats # TYPE node_lnstat_invalid_total counter node_lnstat_invalid_total{cpu="0",subsystem="nf_conntrack"} 3 node_lnstat_invalid_total{cpu="1",subsystem="nf_conntrack"} 2 node_lnstat_invalid_total{cpu="2",subsystem="nf_conntrack"} 1 node_lnstat_invalid_total{cpu="3",subsystem="nf_conntrack"} 47 # HELP node_lnstat_lookups_total linux network cache stats # TYPE node_lnstat_lookups_total counter node_lnstat_lookups_total{cpu="0",subsystem="arp_cache"} 4 node_lnstat_lookups_total{cpu="0",subsystem="ndisc_cache"} 243 node_lnstat_lookups_total{cpu="1",subsystem="arp_cache"} 16 node_lnstat_lookups_total{cpu="1",subsystem="ndisc_cache"} 255 # HELP node_lnstat_new_total linux network cache stats # TYPE node_lnstat_new_total counter node_lnstat_new_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_new_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_new_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_new_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_periodic_gc_runs_total linux network cache stats # TYPE node_lnstat_periodic_gc_runs_total counter node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="arp_cache"} 9 node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 248 node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="arp_cache"} 21 node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 260 # HELP node_lnstat_rcv_probes_mcast_total linux network cache stats # TYPE node_lnstat_rcv_probes_mcast_total counter node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="arp_cache"} 7 node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="ndisc_cache"} 246 node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="arp_cache"} 19 node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="ndisc_cache"} 258 # HELP node_lnstat_rcv_probes_ucast_total linux network cache stats # TYPE node_lnstat_rcv_probes_ucast_total counter node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="arp_cache"} 8 
node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="ndisc_cache"} 247 node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="arp_cache"} 20 node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="ndisc_cache"} 259 # HELP node_lnstat_res_failed_total linux network cache stats # TYPE node_lnstat_res_failed_total counter node_lnstat_res_failed_total{cpu="0",subsystem="arp_cache"} 6 node_lnstat_res_failed_total{cpu="0",subsystem="ndisc_cache"} 245 node_lnstat_res_failed_total{cpu="1",subsystem="arp_cache"} 18 node_lnstat_res_failed_total{cpu="1",subsystem="ndisc_cache"} 257 # HELP node_lnstat_search_restart_total linux network cache stats # TYPE node_lnstat_search_restart_total counter node_lnstat_search_restart_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_search_restart_total{cpu="1",subsystem="nf_conntrack"} 2 node_lnstat_search_restart_total{cpu="2",subsystem="nf_conntrack"} 1 node_lnstat_search_restart_total{cpu="3",subsystem="nf_conntrack"} 4 # HELP node_lnstat_searched_total linux network cache stats # TYPE node_lnstat_searched_total counter node_lnstat_searched_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_searched_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_searched_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_searched_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_table_fulls_total linux network cache stats # TYPE node_lnstat_table_fulls_total counter node_lnstat_table_fulls_total{cpu="0",subsystem="arp_cache"} 12 node_lnstat_table_fulls_total{cpu="0",subsystem="ndisc_cache"} 251 node_lnstat_table_fulls_total{cpu="1",subsystem="arp_cache"} 24 node_lnstat_table_fulls_total{cpu="1",subsystem="ndisc_cache"} 263 # HELP node_lnstat_unresolved_discards_total linux network cache stats # TYPE node_lnstat_unresolved_discards_total counter node_lnstat_unresolved_discards_total{cpu="0",subsystem="arp_cache"} 11 node_lnstat_unresolved_discards_total{cpu="0",subsystem="ndisc_cache"} 250 node_lnstat_unresolved_discards_total{cpu="1",subsystem="arp_cache"} 23 node_lnstat_unresolved_discards_total{cpu="1",subsystem="ndisc_cache"} 262 # HELP node_load1 1m load average. # TYPE node_load1 gauge node_load1 0.21 # HELP node_load15 15m load average. # TYPE node_load15 gauge node_load15 0.39 # HELP node_load5 5m load average. # TYPE node_load5 gauge node_load5 0.37 # HELP node_md_blocks Total number of blocks on device. # TYPE node_md_blocks gauge node_md_blocks{device="md0"} 248896 node_md_blocks{device="md00"} 4.186624e+06 node_md_blocks{device="md10"} 3.14159265e+08 node_md_blocks{device="md101"} 322560 node_md_blocks{device="md11"} 4.190208e+06 node_md_blocks{device="md12"} 3.886394368e+09 node_md_blocks{device="md120"} 2.095104e+06 node_md_blocks{device="md126"} 1.855870976e+09 node_md_blocks{device="md127"} 3.12319552e+08 node_md_blocks{device="md201"} 1.993728e+06 node_md_blocks{device="md219"} 7932 node_md_blocks{device="md3"} 5.853468288e+09 node_md_blocks{device="md4"} 4.883648e+06 node_md_blocks{device="md6"} 1.95310144e+08 node_md_blocks{device="md7"} 7.813735424e+09 node_md_blocks{device="md8"} 1.95310144e+08 node_md_blocks{device="md9"} 523968 # HELP node_md_blocks_synced Number of blocks synced on device. 
# TYPE node_md_blocks_synced gauge node_md_blocks_synced{device="md0"} 248896 node_md_blocks_synced{device="md00"} 4.186624e+06 node_md_blocks_synced{device="md10"} 3.14159265e+08 node_md_blocks_synced{device="md101"} 322560 node_md_blocks_synced{device="md11"} 0 node_md_blocks_synced{device="md12"} 3.886394368e+09 node_md_blocks_synced{device="md120"} 2.095104e+06 node_md_blocks_synced{device="md126"} 1.855870976e+09 node_md_blocks_synced{device="md127"} 3.12319552e+08 node_md_blocks_synced{device="md201"} 114176 node_md_blocks_synced{device="md219"} 7932 node_md_blocks_synced{device="md3"} 5.853468288e+09 node_md_blocks_synced{device="md4"} 4.883648e+06 node_md_blocks_synced{device="md6"} 1.6775552e+07 node_md_blocks_synced{device="md7"} 7.813735424e+09 node_md_blocks_synced{device="md8"} 1.6775552e+07 node_md_blocks_synced{device="md9"} 0 # HELP node_md_disks Number of active/failed/spare disks of device. # TYPE node_md_disks gauge node_md_disks{device="md0",state="active"} 2 node_md_disks{device="md0",state="failed"} 0 node_md_disks{device="md0",state="spare"} 0 node_md_disks{device="md00",state="active"} 1 node_md_disks{device="md00",state="failed"} 0 node_md_disks{device="md00",state="spare"} 0 node_md_disks{device="md10",state="active"} 2 node_md_disks{device="md10",state="failed"} 0 node_md_disks{device="md10",state="spare"} 0 node_md_disks{device="md101",state="active"} 3 node_md_disks{device="md101",state="failed"} 0 node_md_disks{device="md101",state="spare"} 0 node_md_disks{device="md11",state="active"} 2 node_md_disks{device="md11",state="failed"} 1 node_md_disks{device="md11",state="spare"} 2 node_md_disks{device="md12",state="active"} 2 node_md_disks{device="md12",state="failed"} 0 node_md_disks{device="md12",state="spare"} 0 node_md_disks{device="md120",state="active"} 2 node_md_disks{device="md120",state="failed"} 0 node_md_disks{device="md120",state="spare"} 0 node_md_disks{device="md126",state="active"} 2 node_md_disks{device="md126",state="failed"} 0 node_md_disks{device="md126",state="spare"} 0 node_md_disks{device="md127",state="active"} 2 node_md_disks{device="md127",state="failed"} 0 node_md_disks{device="md127",state="spare"} 0 node_md_disks{device="md201",state="active"} 2 node_md_disks{device="md201",state="failed"} 0 node_md_disks{device="md201",state="spare"} 0 node_md_disks{device="md219",state="active"} 0 node_md_disks{device="md219",state="failed"} 0 node_md_disks{device="md219",state="spare"} 3 node_md_disks{device="md3",state="active"} 8 node_md_disks{device="md3",state="failed"} 0 node_md_disks{device="md3",state="spare"} 2 node_md_disks{device="md4",state="active"} 0 node_md_disks{device="md4",state="failed"} 1 node_md_disks{device="md4",state="spare"} 1 node_md_disks{device="md6",state="active"} 1 node_md_disks{device="md6",state="failed"} 1 node_md_disks{device="md6",state="spare"} 1 node_md_disks{device="md7",state="active"} 3 node_md_disks{device="md7",state="failed"} 1 node_md_disks{device="md7",state="spare"} 0 node_md_disks{device="md8",state="active"} 2 node_md_disks{device="md8",state="failed"} 0 node_md_disks{device="md8",state="spare"} 2 node_md_disks{device="md9",state="active"} 4 node_md_disks{device="md9",state="failed"} 2 node_md_disks{device="md9",state="spare"} 1 # HELP node_md_disks_required Total number of disks of device. 
# TYPE node_md_disks_required gauge node_md_disks_required{device="md0"} 2 node_md_disks_required{device="md00"} 1 node_md_disks_required{device="md10"} 2 node_md_disks_required{device="md101"} 3 node_md_disks_required{device="md11"} 2 node_md_disks_required{device="md12"} 2 node_md_disks_required{device="md120"} 2 node_md_disks_required{device="md126"} 2 node_md_disks_required{device="md127"} 2 node_md_disks_required{device="md201"} 2 node_md_disks_required{device="md219"} 0 node_md_disks_required{device="md3"} 8 node_md_disks_required{device="md4"} 0 node_md_disks_required{device="md6"} 2 node_md_disks_required{device="md7"} 4 node_md_disks_required{device="md8"} 2 node_md_disks_required{device="md9"} 4 # HELP node_md_state Indicates the state of md-device. # TYPE node_md_state gauge node_md_state{device="md0",state="active"} 1 node_md_state{device="md0",state="check"} 0 node_md_state{device="md0",state="inactive"} 0 node_md_state{device="md0",state="recovering"} 0 node_md_state{device="md0",state="resync"} 0 node_md_state{device="md00",state="active"} 1 node_md_state{device="md00",state="check"} 0 node_md_state{device="md00",state="inactive"} 0 node_md_state{device="md00",state="recovering"} 0 node_md_state{device="md00",state="resync"} 0 node_md_state{device="md10",state="active"} 1 node_md_state{device="md10",state="check"} 0 node_md_state{device="md10",state="inactive"} 0 node_md_state{device="md10",state="recovering"} 0 node_md_state{device="md10",state="resync"} 0 node_md_state{device="md101",state="active"} 1 node_md_state{device="md101",state="check"} 0 node_md_state{device="md101",state="inactive"} 0 node_md_state{device="md101",state="recovering"} 0 node_md_state{device="md101",state="resync"} 0 node_md_state{device="md11",state="active"} 0 node_md_state{device="md11",state="check"} 0 node_md_state{device="md11",state="inactive"} 0 node_md_state{device="md11",state="recovering"} 0 node_md_state{device="md11",state="resync"} 1 node_md_state{device="md12",state="active"} 1 node_md_state{device="md12",state="check"} 0 node_md_state{device="md12",state="inactive"} 0 node_md_state{device="md12",state="recovering"} 0 node_md_state{device="md12",state="resync"} 0 node_md_state{device="md120",state="active"} 1 node_md_state{device="md120",state="check"} 0 node_md_state{device="md120",state="inactive"} 0 node_md_state{device="md120",state="recovering"} 0 node_md_state{device="md120",state="resync"} 0 node_md_state{device="md126",state="active"} 1 node_md_state{device="md126",state="check"} 0 node_md_state{device="md126",state="inactive"} 0 node_md_state{device="md126",state="recovering"} 0 node_md_state{device="md126",state="resync"} 0 node_md_state{device="md127",state="active"} 1 node_md_state{device="md127",state="check"} 0 node_md_state{device="md127",state="inactive"} 0 node_md_state{device="md127",state="recovering"} 0 node_md_state{device="md127",state="resync"} 0 node_md_state{device="md201",state="active"} 0 node_md_state{device="md201",state="check"} 1 node_md_state{device="md201",state="inactive"} 0 node_md_state{device="md201",state="recovering"} 0 node_md_state{device="md201",state="resync"} 0 node_md_state{device="md219",state="active"} 0 node_md_state{device="md219",state="check"} 0 node_md_state{device="md219",state="inactive"} 1 node_md_state{device="md219",state="recovering"} 0 node_md_state{device="md219",state="resync"} 0 node_md_state{device="md3",state="active"} 1 node_md_state{device="md3",state="check"} 0 node_md_state{device="md3",state="inactive"} 0 
node_md_state{device="md3",state="recovering"} 0 node_md_state{device="md3",state="resync"} 0 node_md_state{device="md4",state="active"} 0 node_md_state{device="md4",state="check"} 0 node_md_state{device="md4",state="inactive"} 1 node_md_state{device="md4",state="recovering"} 0 node_md_state{device="md4",state="resync"} 0 node_md_state{device="md6",state="active"} 0 node_md_state{device="md6",state="check"} 0 node_md_state{device="md6",state="inactive"} 0 node_md_state{device="md6",state="recovering"} 1 node_md_state{device="md6",state="resync"} 0 node_md_state{device="md7",state="active"} 1 node_md_state{device="md7",state="check"} 0 node_md_state{device="md7",state="inactive"} 0 node_md_state{device="md7",state="recovering"} 0 node_md_state{device="md7",state="resync"} 0 node_md_state{device="md8",state="active"} 0 node_md_state{device="md8",state="check"} 0 node_md_state{device="md8",state="inactive"} 0 node_md_state{device="md8",state="recovering"} 0 node_md_state{device="md8",state="resync"} 1 node_md_state{device="md9",state="active"} 0 node_md_state{device="md9",state="check"} 0 node_md_state{device="md9",state="inactive"} 0 node_md_state{device="md9",state="recovering"} 0 node_md_state{device="md9",state="resync"} 1 # HELP node_memory_Active_anon_bytes Memory information field Active_anon_bytes. # TYPE node_memory_Active_anon_bytes gauge node_memory_Active_anon_bytes 2.068484096e+09 # HELP node_memory_Active_bytes Memory information field Active_bytes. # TYPE node_memory_Active_bytes gauge node_memory_Active_bytes 2.287017984e+09 # HELP node_memory_Active_file_bytes Memory information field Active_file_bytes. # TYPE node_memory_Active_file_bytes gauge node_memory_Active_file_bytes 2.18533888e+08 # HELP node_memory_AnonHugePages_bytes Memory information field AnonHugePages_bytes. # TYPE node_memory_AnonHugePages_bytes gauge node_memory_AnonHugePages_bytes 0 # HELP node_memory_AnonPages_bytes Memory information field AnonPages_bytes. # TYPE node_memory_AnonPages_bytes gauge node_memory_AnonPages_bytes 2.298032128e+09 # HELP node_memory_Bounce_bytes Memory information field Bounce_bytes. # TYPE node_memory_Bounce_bytes gauge node_memory_Bounce_bytes 0 # HELP node_memory_Buffers_bytes Memory information field Buffers_bytes. # TYPE node_memory_Buffers_bytes gauge node_memory_Buffers_bytes 2.256896e+07 # HELP node_memory_Cached_bytes Memory information field Cached_bytes. # TYPE node_memory_Cached_bytes gauge node_memory_Cached_bytes 9.53229312e+08 # HELP node_memory_CommitLimit_bytes Memory information field CommitLimit_bytes. # TYPE node_memory_CommitLimit_bytes gauge node_memory_CommitLimit_bytes 6.210940928e+09 # HELP node_memory_Committed_AS_bytes Memory information field Committed_AS_bytes. # TYPE node_memory_Committed_AS_bytes gauge node_memory_Committed_AS_bytes 8.023486464e+09 # HELP node_memory_DirectMap2M_bytes Memory information field DirectMap2M_bytes. # TYPE node_memory_DirectMap2M_bytes gauge node_memory_DirectMap2M_bytes 3.787456512e+09 # HELP node_memory_DirectMap4k_bytes Memory information field DirectMap4k_bytes. # TYPE node_memory_DirectMap4k_bytes gauge node_memory_DirectMap4k_bytes 1.9011584e+08 # HELP node_memory_Dirty_bytes Memory information field Dirty_bytes. # TYPE node_memory_Dirty_bytes gauge node_memory_Dirty_bytes 1.077248e+06 # HELP node_memory_HardwareCorrupted_bytes Memory information field HardwareCorrupted_bytes. 
# TYPE node_memory_HardwareCorrupted_bytes gauge node_memory_HardwareCorrupted_bytes 0 # HELP node_memory_HugePages_Free Memory information field HugePages_Free. # TYPE node_memory_HugePages_Free gauge node_memory_HugePages_Free 0 # HELP node_memory_HugePages_Rsvd Memory information field HugePages_Rsvd. # TYPE node_memory_HugePages_Rsvd gauge node_memory_HugePages_Rsvd 0 # HELP node_memory_HugePages_Surp Memory information field HugePages_Surp. # TYPE node_memory_HugePages_Surp gauge node_memory_HugePages_Surp 0 # HELP node_memory_HugePages_Total Memory information field HugePages_Total. # TYPE node_memory_HugePages_Total gauge node_memory_HugePages_Total 0 # HELP node_memory_Hugepagesize_bytes Memory information field Hugepagesize_bytes. # TYPE node_memory_Hugepagesize_bytes gauge node_memory_Hugepagesize_bytes 2.097152e+06 # HELP node_memory_Inactive_anon_bytes Memory information field Inactive_anon_bytes. # TYPE node_memory_Inactive_anon_bytes gauge node_memory_Inactive_anon_bytes 9.04245248e+08 # HELP node_memory_Inactive_bytes Memory information field Inactive_bytes. # TYPE node_memory_Inactive_bytes gauge node_memory_Inactive_bytes 1.053417472e+09 # HELP node_memory_Inactive_file_bytes Memory information field Inactive_file_bytes. # TYPE node_memory_Inactive_file_bytes gauge node_memory_Inactive_file_bytes 1.49172224e+08 # HELP node_memory_KernelStack_bytes Memory information field KernelStack_bytes. # TYPE node_memory_KernelStack_bytes gauge node_memory_KernelStack_bytes 5.9392e+06 # HELP node_memory_Mapped_bytes Memory information field Mapped_bytes. # TYPE node_memory_Mapped_bytes gauge node_memory_Mapped_bytes 2.4496128e+08 # HELP node_memory_MemFree_bytes Memory information field MemFree_bytes. # TYPE node_memory_MemFree_bytes gauge node_memory_MemFree_bytes 2.30883328e+08 # HELP node_memory_MemTotal_bytes Memory information field MemTotal_bytes. # TYPE node_memory_MemTotal_bytes gauge node_memory_MemTotal_bytes 3.831959552e+09 # HELP node_memory_Mlocked_bytes Memory information field Mlocked_bytes. # TYPE node_memory_Mlocked_bytes gauge node_memory_Mlocked_bytes 32768 # HELP node_memory_NFS_Unstable_bytes Memory information field NFS_Unstable_bytes. # TYPE node_memory_NFS_Unstable_bytes gauge node_memory_NFS_Unstable_bytes 0 # HELP node_memory_PageTables_bytes Memory information field PageTables_bytes. # TYPE node_memory_PageTables_bytes gauge node_memory_PageTables_bytes 7.7017088e+07 # HELP node_memory_SReclaimable_bytes Memory information field SReclaimable_bytes. # TYPE node_memory_SReclaimable_bytes gauge node_memory_SReclaimable_bytes 4.5846528e+07 # HELP node_memory_SUnreclaim_bytes Memory information field SUnreclaim_bytes. # TYPE node_memory_SUnreclaim_bytes gauge node_memory_SUnreclaim_bytes 5.545984e+07 # HELP node_memory_Shmem_bytes Memory information field Shmem_bytes. # TYPE node_memory_Shmem_bytes gauge node_memory_Shmem_bytes 6.0809216e+08 # HELP node_memory_Slab_bytes Memory information field Slab_bytes. # TYPE node_memory_Slab_bytes gauge node_memory_Slab_bytes 1.01306368e+08 # HELP node_memory_SwapCached_bytes Memory information field SwapCached_bytes. # TYPE node_memory_SwapCached_bytes gauge node_memory_SwapCached_bytes 1.97124096e+08 # HELP node_memory_SwapFree_bytes Memory information field SwapFree_bytes. # TYPE node_memory_SwapFree_bytes gauge node_memory_SwapFree_bytes 3.23108864e+09 # HELP node_memory_SwapTotal_bytes Memory information field SwapTotal_bytes. 
# TYPE node_memory_SwapTotal_bytes gauge node_memory_SwapTotal_bytes 4.2949632e+09 # HELP node_memory_Unevictable_bytes Memory information field Unevictable_bytes. # TYPE node_memory_Unevictable_bytes gauge node_memory_Unevictable_bytes 32768 # HELP node_memory_VmallocChunk_bytes Memory information field VmallocChunk_bytes. # TYPE node_memory_VmallocChunk_bytes gauge node_memory_VmallocChunk_bytes 3.5183963009024e+13 # HELP node_memory_VmallocTotal_bytes Memory information field VmallocTotal_bytes. # TYPE node_memory_VmallocTotal_bytes gauge node_memory_VmallocTotal_bytes 3.5184372087808e+13 # HELP node_memory_VmallocUsed_bytes Memory information field VmallocUsed_bytes. # TYPE node_memory_VmallocUsed_bytes gauge node_memory_VmallocUsed_bytes 3.6130816e+08 # HELP node_memory_WritebackTmp_bytes Memory information field WritebackTmp_bytes. # TYPE node_memory_WritebackTmp_bytes gauge node_memory_WritebackTmp_bytes 0 # HELP node_memory_Writeback_bytes Memory information field Writeback_bytes. # TYPE node_memory_Writeback_bytes gauge node_memory_Writeback_bytes 0 # HELP node_memory_numa_Active Memory information field Active. # TYPE node_memory_numa_Active gauge node_memory_numa_Active{node="0"} 5.58733312e+09 node_memory_numa_Active{node="1"} 5.739003904e+09 node_memory_numa_Active{node="2"} 5.739003904e+09 # HELP node_memory_numa_Active_anon Memory information field Active_anon. # TYPE node_memory_numa_Active_anon gauge node_memory_numa_Active_anon{node="0"} 7.07915776e+08 node_memory_numa_Active_anon{node="1"} 6.04635136e+08 node_memory_numa_Active_anon{node="2"} 6.04635136e+08 # HELP node_memory_numa_Active_file Memory information field Active_file. # TYPE node_memory_numa_Active_file gauge node_memory_numa_Active_file{node="0"} 4.879417344e+09 node_memory_numa_Active_file{node="1"} 5.134368768e+09 node_memory_numa_Active_file{node="2"} 5.134368768e+09 # HELP node_memory_numa_AnonHugePages Memory information field AnonHugePages. # TYPE node_memory_numa_AnonHugePages gauge node_memory_numa_AnonHugePages{node="0"} 1.50994944e+08 node_memory_numa_AnonHugePages{node="1"} 9.2274688e+07 node_memory_numa_AnonHugePages{node="2"} 9.2274688e+07 # HELP node_memory_numa_AnonPages Memory information field AnonPages. # TYPE node_memory_numa_AnonPages gauge node_memory_numa_AnonPages{node="0"} 8.07112704e+08 node_memory_numa_AnonPages{node="1"} 6.88058368e+08 node_memory_numa_AnonPages{node="2"} 6.88058368e+08 # HELP node_memory_numa_Bounce Memory information field Bounce. # TYPE node_memory_numa_Bounce gauge node_memory_numa_Bounce{node="0"} 0 node_memory_numa_Bounce{node="1"} 0 node_memory_numa_Bounce{node="2"} 0 # HELP node_memory_numa_Dirty Memory information field Dirty. # TYPE node_memory_numa_Dirty gauge node_memory_numa_Dirty{node="0"} 20480 node_memory_numa_Dirty{node="1"} 122880 node_memory_numa_Dirty{node="2"} 122880 # HELP node_memory_numa_FilePages Memory information field FilePages. # TYPE node_memory_numa_FilePages gauge node_memory_numa_FilePages{node="0"} 7.1855017984e+10 node_memory_numa_FilePages{node="1"} 8.5585088512e+10 node_memory_numa_FilePages{node="2"} 8.5585088512e+10 # HELP node_memory_numa_HugePages_Free Memory information field HugePages_Free. # TYPE node_memory_numa_HugePages_Free gauge node_memory_numa_HugePages_Free{node="0"} 0 node_memory_numa_HugePages_Free{node="1"} 0 node_memory_numa_HugePages_Free{node="2"} 0 # HELP node_memory_numa_HugePages_Surp Memory information field HugePages_Surp. 
# TYPE node_memory_numa_HugePages_Surp gauge node_memory_numa_HugePages_Surp{node="0"} 0 node_memory_numa_HugePages_Surp{node="1"} 0 node_memory_numa_HugePages_Surp{node="2"} 0 # HELP node_memory_numa_HugePages_Total Memory information field HugePages_Total. # TYPE node_memory_numa_HugePages_Total gauge node_memory_numa_HugePages_Total{node="0"} 0 node_memory_numa_HugePages_Total{node="1"} 0 node_memory_numa_HugePages_Total{node="2"} 0 # HELP node_memory_numa_Inactive Memory information field Inactive. # TYPE node_memory_numa_Inactive gauge node_memory_numa_Inactive{node="0"} 6.0569788416e+10 node_memory_numa_Inactive{node="1"} 7.3165406208e+10 node_memory_numa_Inactive{node="2"} 7.3165406208e+10 # HELP node_memory_numa_Inactive_anon Memory information field Inactive_anon. # TYPE node_memory_numa_Inactive_anon gauge node_memory_numa_Inactive_anon{node="0"} 3.48626944e+08 node_memory_numa_Inactive_anon{node="1"} 2.91930112e+08 node_memory_numa_Inactive_anon{node="2"} 2.91930112e+08 # HELP node_memory_numa_Inactive_file Memory information field Inactive_file. # TYPE node_memory_numa_Inactive_file gauge node_memory_numa_Inactive_file{node="0"} 6.0221161472e+10 node_memory_numa_Inactive_file{node="1"} 7.2873476096e+10 node_memory_numa_Inactive_file{node="2"} 7.2873476096e+10 # HELP node_memory_numa_KernelStack Memory information field KernelStack. # TYPE node_memory_numa_KernelStack gauge node_memory_numa_KernelStack{node="0"} 3.4832384e+07 node_memory_numa_KernelStack{node="1"} 3.1850496e+07 node_memory_numa_KernelStack{node="2"} 3.1850496e+07 # HELP node_memory_numa_Mapped Memory information field Mapped. # TYPE node_memory_numa_Mapped gauge node_memory_numa_Mapped{node="0"} 9.1570176e+08 node_memory_numa_Mapped{node="1"} 8.84850688e+08 node_memory_numa_Mapped{node="2"} 8.84850688e+08 # HELP node_memory_numa_MemFree Memory information field MemFree. # TYPE node_memory_numa_MemFree gauge node_memory_numa_MemFree{node="0"} 5.4303100928e+10 node_memory_numa_MemFree{node="1"} 4.0586022912e+10 node_memory_numa_MemFree{node="2"} 4.0586022912e+10 # HELP node_memory_numa_MemTotal Memory information field MemTotal. # TYPE node_memory_numa_MemTotal gauge node_memory_numa_MemTotal{node="0"} 1.3740271616e+11 node_memory_numa_MemTotal{node="1"} 1.37438953472e+11 node_memory_numa_MemTotal{node="2"} 1.37438953472e+11 # HELP node_memory_numa_MemUsed Memory information field MemUsed. # TYPE node_memory_numa_MemUsed gauge node_memory_numa_MemUsed{node="0"} 8.3099615232e+10 node_memory_numa_MemUsed{node="1"} 9.685293056e+10 node_memory_numa_MemUsed{node="2"} 9.685293056e+10 # HELP node_memory_numa_Mlocked Memory information field Mlocked. # TYPE node_memory_numa_Mlocked gauge node_memory_numa_Mlocked{node="0"} 0 node_memory_numa_Mlocked{node="1"} 0 node_memory_numa_Mlocked{node="2"} 0 # HELP node_memory_numa_NFS_Unstable Memory information field NFS_Unstable. # TYPE node_memory_numa_NFS_Unstable gauge node_memory_numa_NFS_Unstable{node="0"} 0 node_memory_numa_NFS_Unstable{node="1"} 0 node_memory_numa_NFS_Unstable{node="2"} 0 # HELP node_memory_numa_PageTables Memory information field PageTables. # TYPE node_memory_numa_PageTables gauge node_memory_numa_PageTables{node="0"} 1.46743296e+08 node_memory_numa_PageTables{node="1"} 1.27254528e+08 node_memory_numa_PageTables{node="2"} 1.27254528e+08 # HELP node_memory_numa_SReclaimable Memory information field SReclaimable. 
# TYPE node_memory_numa_SReclaimable gauge node_memory_numa_SReclaimable{node="0"} 4.580478976e+09 node_memory_numa_SReclaimable{node="1"} 4.724822016e+09 node_memory_numa_SReclaimable{node="2"} 4.724822016e+09 # HELP node_memory_numa_SUnreclaim Memory information field SUnreclaim. # TYPE node_memory_numa_SUnreclaim gauge node_memory_numa_SUnreclaim{node="0"} 2.23352832e+09 node_memory_numa_SUnreclaim{node="1"} 2.464391168e+09 node_memory_numa_SUnreclaim{node="2"} 2.464391168e+09 # HELP node_memory_numa_Shmem Memory information field Shmem. # TYPE node_memory_numa_Shmem gauge node_memory_numa_Shmem{node="0"} 4.900864e+07 node_memory_numa_Shmem{node="1"} 8.968192e+07 node_memory_numa_Shmem{node="2"} 8.968192e+07 # HELP node_memory_numa_Slab Memory information field Slab. # TYPE node_memory_numa_Slab gauge node_memory_numa_Slab{node="0"} 6.814007296e+09 node_memory_numa_Slab{node="1"} 7.189213184e+09 node_memory_numa_Slab{node="2"} 7.189213184e+09 # HELP node_memory_numa_Unevictable Memory information field Unevictable. # TYPE node_memory_numa_Unevictable gauge node_memory_numa_Unevictable{node="0"} 0 node_memory_numa_Unevictable{node="1"} 0 node_memory_numa_Unevictable{node="2"} 0 # HELP node_memory_numa_Writeback Memory information field Writeback. # TYPE node_memory_numa_Writeback gauge node_memory_numa_Writeback{node="0"} 0 node_memory_numa_Writeback{node="1"} 0 node_memory_numa_Writeback{node="2"} 0 # HELP node_memory_numa_WritebackTmp Memory information field WritebackTmp. # TYPE node_memory_numa_WritebackTmp gauge node_memory_numa_WritebackTmp{node="0"} 0 node_memory_numa_WritebackTmp{node="1"} 0 node_memory_numa_WritebackTmp{node="2"} 0 # HELP node_memory_numa_interleave_hit_total Memory information field interleave_hit_total. # TYPE node_memory_numa_interleave_hit_total counter node_memory_numa_interleave_hit_total{node="0"} 57146 node_memory_numa_interleave_hit_total{node="1"} 57286 node_memory_numa_interleave_hit_total{node="2"} 7286 # HELP node_memory_numa_local_node_total Memory information field local_node_total. # TYPE node_memory_numa_local_node_total counter node_memory_numa_local_node_total{node="0"} 1.93454780853e+11 node_memory_numa_local_node_total{node="1"} 3.2671904655e+11 node_memory_numa_local_node_total{node="2"} 2.671904655e+10 # HELP node_memory_numa_numa_foreign_total Memory information field numa_foreign_total. # TYPE node_memory_numa_numa_foreign_total counter node_memory_numa_numa_foreign_total{node="0"} 5.98586233e+10 node_memory_numa_numa_foreign_total{node="1"} 1.2624528e+07 node_memory_numa_numa_foreign_total{node="2"} 2.624528e+06 # HELP node_memory_numa_numa_hit_total Memory information field numa_hit_total. # TYPE node_memory_numa_numa_hit_total counter node_memory_numa_numa_hit_total{node="0"} 1.93460335812e+11 node_memory_numa_numa_hit_total{node="1"} 3.26720946761e+11 node_memory_numa_numa_hit_total{node="2"} 2.6720946761e+10 # HELP node_memory_numa_numa_miss_total Memory information field numa_miss_total. # TYPE node_memory_numa_numa_miss_total counter node_memory_numa_numa_miss_total{node="0"} 1.2624528e+07 node_memory_numa_numa_miss_total{node="1"} 5.9858626709e+10 node_memory_numa_numa_miss_total{node="2"} 9.858626709e+09 # HELP node_memory_numa_other_node_total Memory information field other_node_total. 
# TYPE node_memory_numa_other_node_total counter node_memory_numa_other_node_total{node="0"} 1.8179487e+07 node_memory_numa_other_node_total{node="1"} 5.986052692e+10 node_memory_numa_other_node_total{node="2"} 9.86052692e+09 # HELP node_mountstats_nfs_age_seconds_total The age of the NFS mount in seconds. # TYPE node_mountstats_nfs_age_seconds_total counter node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13968 node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13968 # HELP node_mountstats_nfs_direct_read_bytes_total Number of bytes read using the read() syscall in O_DIRECT mode. # TYPE node_mountstats_nfs_direct_read_bytes_total counter node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_direct_write_bytes_total Number of bytes written using the write() syscall in O_DIRECT mode. # TYPE node_mountstats_nfs_direct_write_bytes_total counter node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_attribute_invalidate_total Number of times cached inode attributes are invalidated. # TYPE node_mountstats_nfs_event_attribute_invalidate_total counter node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_data_invalidate_total Number of times an inode cache is cleared. # TYPE node_mountstats_nfs_event_data_invalidate_total counter node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_dnode_revalidate_total Number of times cached dentry nodes are re-validated from the server. # TYPE node_mountstats_nfs_event_dnode_revalidate_total counter node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 226 node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 226 # HELP node_mountstats_nfs_event_inode_revalidate_total Number of times cached inode attributes are re-validated from the server. # TYPE node_mountstats_nfs_event_inode_revalidate_total counter node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 52 node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 52 # HELP node_mountstats_nfs_event_jukebox_delay_total Number of times the NFS server indicated EJUKEBOX; retrieving data from offline storage. 
# TYPE node_mountstats_nfs_event_jukebox_delay_total counter node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_pnfs_read_total Number of NFS v4.1+ pNFS reads. # TYPE node_mountstats_nfs_event_pnfs_read_total counter node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_pnfs_write_total Number of NFS v4.1+ pNFS writes. # TYPE node_mountstats_nfs_event_pnfs_write_total counter node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_short_read_total Number of times the NFS server gave less data than expected while reading. # TYPE node_mountstats_nfs_event_short_read_total counter node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_short_write_total Number of times the NFS server wrote less data than expected while writing. # TYPE node_mountstats_nfs_event_short_write_total counter node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_silly_rename_total Number of times a file was removed while still open by another process. # TYPE node_mountstats_nfs_event_silly_rename_total counter node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_truncation_total Number of times files have been truncated. # TYPE node_mountstats_nfs_event_truncation_total counter node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_access_total Number of times permissions have been checked. # TYPE node_mountstats_nfs_event_vfs_access_total counter node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 398 node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 398 # HELP node_mountstats_nfs_event_vfs_file_release_total Number of times files have been closed and released. 
# TYPE node_mountstats_nfs_event_vfs_file_release_total counter node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77 node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77 # HELP node_mountstats_nfs_event_vfs_flush_total Number of pending writes that have been forcefully flushed to the server. # TYPE node_mountstats_nfs_event_vfs_flush_total counter node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77 node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77 # HELP node_mountstats_nfs_event_vfs_fsync_total Number of times fsync() has been called on directories and files. # TYPE node_mountstats_nfs_event_vfs_fsync_total counter node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_getdents_total Number of times directory entries have been read with getdents(). # TYPE node_mountstats_nfs_event_vfs_getdents_total counter node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_lock_total Number of times locking has been attempted on a file. # TYPE node_mountstats_nfs_event_vfs_lock_total counter node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_lookup_total Number of times a directory lookup has occurred. # TYPE node_mountstats_nfs_event_vfs_lookup_total counter node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13 node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13 # HELP node_mountstats_nfs_event_vfs_open_total Number of times files have been opened. # TYPE node_mountstats_nfs_event_vfs_open_total counter node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1 node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1 # HELP node_mountstats_nfs_event_vfs_read_page_total Number of pages read directly via mmap()'d files. # TYPE node_mountstats_nfs_event_vfs_read_page_total counter node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_read_pages_total Number of times a group of pages have been read.
# TYPE node_mountstats_nfs_event_vfs_read_pages_total counter node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 331 node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 331 # HELP node_mountstats_nfs_event_vfs_setattr_total Number of times attributes have been set on inodes. # TYPE node_mountstats_nfs_event_vfs_setattr_total counter node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_update_page_total Number of updates (and potential writes) to pages. # TYPE node_mountstats_nfs_event_vfs_update_page_total counter node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_write_page_total Number of pages written directly via mmap()'d files. # TYPE node_mountstats_nfs_event_vfs_write_page_total counter node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_write_pages_total Number of times a group of pages have been written. # TYPE node_mountstats_nfs_event_vfs_write_pages_total counter node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 47 node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 47 # HELP node_mountstats_nfs_event_write_extension_total Number of times a file has been grown due to writes beyond its existing end. # TYPE node_mountstats_nfs_event_write_extension_total counter node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_major_timeouts_total Number of times a request has had a major timeout for a given operation.
# TYPE node_mountstats_nfs_operations_major_timeouts_total counter node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_queue_time_seconds_total Duration all requests spent queued for transmission for a given operation before they were sent, in seconds. # TYPE node_mountstats_nfs_operations_queue_time_seconds_total counter node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 9.007044786793922e+12 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0.006 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0.006 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_received_bytes_total Number of bytes received for a given operation, including RPC headers and payload. 
# TYPE node_mountstats_nfs_operations_received_bytes_total counter node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 3.62996810236e+11 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1.210292152e+09 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1.210292152e+09 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_request_time_seconds_total Duration all requests took from when a request was enqueued to when it was completely handled for a given operation, in seconds. # TYPE node_mountstats_nfs_operations_request_time_seconds_total counter node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.953587717e+06 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.407 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.407 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_requests_total Number of requests performed for a given operation. 
# TYPE node_mountstats_nfs_operations_requests_total counter node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927395007e+09 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_response_time_seconds_total Duration all requests took to get a reply back after a request for a given operation was transmitted, in seconds. # TYPE node_mountstats_nfs_operations_response_time_seconds_total counter node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.667369447e+06 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.386 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.386 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_sent_bytes_total Number of bytes sent for a given operation, including RPC headers and payload. 
# TYPE node_mountstats_nfs_operations_sent_bytes_total counter node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 5.26931094212e+11 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 207680 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 207680 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_transmissions_total Number of times an actual RPC request has been transmitted for a given operation. # TYPE node_mountstats_nfs_operations_transmissions_total counter node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927394995e+09 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_read_bytes_total Number of bytes read using the read() syscall. # TYPE node_mountstats_nfs_read_bytes_total counter node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.20764023e+09 node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.20764023e+09 # HELP node_mountstats_nfs_read_pages_total Number of pages read directly via mmap()'d files. # TYPE node_mountstats_nfs_read_pages_total counter node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 295483 node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 295483 # HELP node_mountstats_nfs_total_read_bytes_total Number of bytes read from the NFS server, in total. 
# TYPE node_mountstats_nfs_total_read_bytes_total counter node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.210214218e+09 node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.210214218e+09 # HELP node_mountstats_nfs_total_write_bytes_total Number of bytes written to the NFS server, in total. # TYPE node_mountstats_nfs_total_write_bytes_total counter node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_backlog_queue_total Total number of items added to the RPC backlog queue. # TYPE node_mountstats_nfs_transport_backlog_queue_total counter node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_bad_transaction_ids_total Number of times the NFS server sent a response with a transaction ID unknown to this client. # TYPE node_mountstats_nfs_transport_bad_transaction_ids_total counter node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_bind_total Number of times the client has had to establish a connection from scratch to the NFS server. # TYPE node_mountstats_nfs_transport_bind_total counter node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_connect_total Number of times the client has made a TCP connection to the NFS server. # TYPE node_mountstats_nfs_transport_connect_total counter node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1 node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_idle_time_seconds Duration since the NFS mount last saw any RPC traffic, in seconds. # TYPE node_mountstats_nfs_transport_idle_time_seconds gauge node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 11 node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_maximum_rpc_slots Maximum number of simultaneously active RPC requests ever used. # TYPE node_mountstats_nfs_transport_maximum_rpc_slots gauge node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 24 node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 24 # HELP node_mountstats_nfs_transport_pending_queue_total Total number of items added to the RPC transmission pending queue. 
# TYPE node_mountstats_nfs_transport_pending_queue_total counter node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 5726 node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 5726 # HELP node_mountstats_nfs_transport_receives_total Number of RPC responses for this mount received from the NFS server. # TYPE node_mountstats_nfs_transport_receives_total counter node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 # HELP node_mountstats_nfs_transport_sending_queue_total Total number of items added to the RPC transmission sending queue. # TYPE node_mountstats_nfs_transport_sending_queue_total counter node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 26 node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 26 # HELP node_mountstats_nfs_transport_sends_total Number of RPC requests for this mount sent to the NFS server. # TYPE node_mountstats_nfs_transport_sends_total counter node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 # HELP node_mountstats_nfs_write_bytes_total Number of bytes written using the write() syscall. # TYPE node_mountstats_nfs_write_bytes_total counter node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_write_pages_total Number of pages written directly via mmap()'d files. # TYPE node_mountstats_nfs_write_pages_total counter node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_netstat_Icmp6_InErrors Statistic Icmp6InErrors. # TYPE node_netstat_Icmp6_InErrors untyped node_netstat_Icmp6_InErrors 0 # HELP node_netstat_Icmp6_InMsgs Statistic Icmp6InMsgs. # TYPE node_netstat_Icmp6_InMsgs untyped node_netstat_Icmp6_InMsgs 0 # HELP node_netstat_Icmp6_OutMsgs Statistic Icmp6OutMsgs. # TYPE node_netstat_Icmp6_OutMsgs untyped node_netstat_Icmp6_OutMsgs 8 # HELP node_netstat_Icmp_InErrors Statistic IcmpInErrors. # TYPE node_netstat_Icmp_InErrors untyped node_netstat_Icmp_InErrors 0 # HELP node_netstat_Icmp_InMsgs Statistic IcmpInMsgs. # TYPE node_netstat_Icmp_InMsgs untyped node_netstat_Icmp_InMsgs 104 # HELP node_netstat_Icmp_OutMsgs Statistic IcmpOutMsgs. # TYPE node_netstat_Icmp_OutMsgs untyped node_netstat_Icmp_OutMsgs 120 # HELP node_netstat_Ip6_InOctets Statistic Ip6InOctets. # TYPE node_netstat_Ip6_InOctets untyped node_netstat_Ip6_InOctets 460 # HELP node_netstat_Ip6_OutOctets Statistic Ip6OutOctets. # TYPE node_netstat_Ip6_OutOctets untyped node_netstat_Ip6_OutOctets 536 # HELP node_netstat_IpExt_InOctets Statistic IpExtInOctets. 
# TYPE node_netstat_IpExt_InOctets untyped node_netstat_IpExt_InOctets 6.28639697e+09 # HELP node_netstat_IpExt_OutOctets Statistic IpExtOutOctets. # TYPE node_netstat_IpExt_OutOctets untyped node_netstat_IpExt_OutOctets 2.786264347e+09 # HELP node_netstat_Ip_Forwarding Statistic IpForwarding. # TYPE node_netstat_Ip_Forwarding untyped node_netstat_Ip_Forwarding 1 # HELP node_netstat_TcpExt_ListenDrops Statistic TcpExtListenDrops. # TYPE node_netstat_TcpExt_ListenDrops untyped node_netstat_TcpExt_ListenDrops 0 # HELP node_netstat_TcpExt_ListenOverflows Statistic TcpExtListenOverflows. # TYPE node_netstat_TcpExt_ListenOverflows untyped node_netstat_TcpExt_ListenOverflows 0 # HELP node_netstat_TcpExt_SyncookiesFailed Statistic TcpExtSyncookiesFailed. # TYPE node_netstat_TcpExt_SyncookiesFailed untyped node_netstat_TcpExt_SyncookiesFailed 2 # HELP node_netstat_TcpExt_SyncookiesRecv Statistic TcpExtSyncookiesRecv. # TYPE node_netstat_TcpExt_SyncookiesRecv untyped node_netstat_TcpExt_SyncookiesRecv 0 # HELP node_netstat_TcpExt_SyncookiesSent Statistic TcpExtSyncookiesSent. # TYPE node_netstat_TcpExt_SyncookiesSent untyped node_netstat_TcpExt_SyncookiesSent 0 # HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts. # TYPE node_netstat_TcpExt_TCPTimeouts untyped node_netstat_TcpExt_TCPTimeouts 115 # HELP node_netstat_Tcp_ActiveOpens Statistic TcpActiveOpens. # TYPE node_netstat_Tcp_ActiveOpens untyped node_netstat_Tcp_ActiveOpens 3556 # HELP node_netstat_Tcp_CurrEstab Statistic TcpCurrEstab. # TYPE node_netstat_Tcp_CurrEstab untyped node_netstat_Tcp_CurrEstab 0 # HELP node_netstat_Tcp_InErrs Statistic TcpInErrs. # TYPE node_netstat_Tcp_InErrs untyped node_netstat_Tcp_InErrs 5 # HELP node_netstat_Tcp_InSegs Statistic TcpInSegs. # TYPE node_netstat_Tcp_InSegs untyped node_netstat_Tcp_InSegs 5.7252008e+07 # HELP node_netstat_Tcp_OutRsts Statistic TcpOutRsts. # TYPE node_netstat_Tcp_OutRsts untyped node_netstat_Tcp_OutRsts 1003 # HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs. # TYPE node_netstat_Tcp_OutSegs untyped node_netstat_Tcp_OutSegs 5.4915039e+07 # HELP node_netstat_Tcp_PassiveOpens Statistic TcpPassiveOpens. # TYPE node_netstat_Tcp_PassiveOpens untyped node_netstat_Tcp_PassiveOpens 230 # HELP node_netstat_Tcp_RetransSegs Statistic TcpRetransSegs. # TYPE node_netstat_Tcp_RetransSegs untyped node_netstat_Tcp_RetransSegs 227 # HELP node_netstat_Udp6_InDatagrams Statistic Udp6InDatagrams. # TYPE node_netstat_Udp6_InDatagrams untyped node_netstat_Udp6_InDatagrams 0 # HELP node_netstat_Udp6_InErrors Statistic Udp6InErrors. # TYPE node_netstat_Udp6_InErrors untyped node_netstat_Udp6_InErrors 0 # HELP node_netstat_Udp6_NoPorts Statistic Udp6NoPorts. # TYPE node_netstat_Udp6_NoPorts untyped node_netstat_Udp6_NoPorts 0 # HELP node_netstat_Udp6_OutDatagrams Statistic Udp6OutDatagrams. # TYPE node_netstat_Udp6_OutDatagrams untyped node_netstat_Udp6_OutDatagrams 0 # HELP node_netstat_Udp6_RcvbufErrors Statistic Udp6RcvbufErrors. # TYPE node_netstat_Udp6_RcvbufErrors untyped node_netstat_Udp6_RcvbufErrors 9 # HELP node_netstat_Udp6_SndbufErrors Statistic Udp6SndbufErrors. # TYPE node_netstat_Udp6_SndbufErrors untyped node_netstat_Udp6_SndbufErrors 8 # HELP node_netstat_UdpLite6_InErrors Statistic UdpLite6InErrors. # TYPE node_netstat_UdpLite6_InErrors untyped node_netstat_UdpLite6_InErrors 0 # HELP node_netstat_UdpLite_InErrors Statistic UdpLiteInErrors. # TYPE node_netstat_UdpLite_InErrors untyped node_netstat_UdpLite_InErrors 0 # HELP node_netstat_Udp_InDatagrams Statistic UdpInDatagrams. 
# TYPE node_netstat_Udp_InDatagrams untyped node_netstat_Udp_InDatagrams 88542 # HELP node_netstat_Udp_InErrors Statistic UdpInErrors. # TYPE node_netstat_Udp_InErrors untyped node_netstat_Udp_InErrors 0 # HELP node_netstat_Udp_NoPorts Statistic UdpNoPorts. # TYPE node_netstat_Udp_NoPorts untyped node_netstat_Udp_NoPorts 120 # HELP node_netstat_Udp_OutDatagrams Statistic UdpOutDatagrams. # TYPE node_netstat_Udp_OutDatagrams untyped node_netstat_Udp_OutDatagrams 53028 # HELP node_netstat_Udp_RcvbufErrors Statistic UdpRcvbufErrors. # TYPE node_netstat_Udp_RcvbufErrors untyped node_netstat_Udp_RcvbufErrors 9 # HELP node_netstat_Udp_SndbufErrors Statistic UdpSndbufErrors. # TYPE node_netstat_Udp_SndbufErrors untyped node_netstat_Udp_SndbufErrors 8 # HELP node_network_address_assign_type Network device property: address_assign_type # TYPE node_network_address_assign_type gauge node_network_address_assign_type{device="bond0"} 3 node_network_address_assign_type{device="eth0"} 3 # HELP node_network_carrier Network device property: carrier # TYPE node_network_carrier gauge node_network_carrier{device="bond0"} 1 node_network_carrier{device="eth0"} 1 # HELP node_network_carrier_changes_total Network device property: carrier_changes_total # TYPE node_network_carrier_changes_total counter node_network_carrier_changes_total{device="bond0"} 2 node_network_carrier_changes_total{device="eth0"} 2 # HELP node_network_carrier_down_changes_total Network device property: carrier_down_changes_total # TYPE node_network_carrier_down_changes_total counter node_network_carrier_down_changes_total{device="bond0"} 1 node_network_carrier_down_changes_total{device="eth0"} 1 # HELP node_network_carrier_up_changes_total Network device property: carrier_up_changes_total # TYPE node_network_carrier_up_changes_total counter node_network_carrier_up_changes_total{device="bond0"} 1 node_network_carrier_up_changes_total{device="eth0"} 1 # HELP node_network_device_id Network device property: device_id # TYPE node_network_device_id gauge node_network_device_id{device="bond0"} 32 node_network_device_id{device="eth0"} 32 # HELP node_network_dormant Network device property: dormant # TYPE node_network_dormant gauge node_network_dormant{device="bond0"} 1 node_network_dormant{device="eth0"} 1 # HELP node_network_flags Network device property: flags # TYPE node_network_flags gauge node_network_flags{device="bond0"} 4867 node_network_flags{device="eth0"} 4867 # HELP node_network_iface_id Network device property: iface_id # TYPE node_network_iface_id gauge node_network_iface_id{device="bond0"} 2 node_network_iface_id{device="eth0"} 2 # HELP node_network_iface_link Network device property: iface_link # TYPE node_network_iface_link gauge node_network_iface_link{device="bond0"} 2 node_network_iface_link{device="eth0"} 2 # HELP node_network_iface_link_mode Network device property: iface_link_mode # TYPE node_network_iface_link_mode gauge node_network_iface_link_mode{device="bond0"} 1 node_network_iface_link_mode{device="eth0"} 1 # HELP node_network_info Non-numeric data from /sys/class/net/, value is always 1. 
# TYPE node_network_info gauge node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="bond0",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 # HELP node_network_mtu_bytes Network device property: mtu_bytes # TYPE node_network_mtu_bytes gauge node_network_mtu_bytes{device="bond0"} 1500 node_network_mtu_bytes{device="eth0"} 1500 # HELP node_network_name_assign_type Network device property: name_assign_type # TYPE node_network_name_assign_type gauge node_network_name_assign_type{device="bond0"} 2 node_network_name_assign_type{device="eth0"} 2 # HELP node_network_net_dev_group Network device property: net_dev_group # TYPE node_network_net_dev_group gauge node_network_net_dev_group{device="bond0"} 0 node_network_net_dev_group{device="eth0"} 0 # HELP node_network_protocol_type Network device property: protocol_type # TYPE node_network_protocol_type gauge node_network_protocol_type{device="bond0"} 1 node_network_protocol_type{device="eth0"} 1 # HELP node_network_receive_bytes_total Network device statistic receive_bytes. # TYPE node_network_receive_bytes_total counter # HELP node_network_receive_compressed_total Network device statistic receive_compressed. # TYPE node_network_receive_compressed_total counter node_network_receive_compressed_total{device="lo"} 0 # HELP node_network_receive_drop_total Network device statistic receive_drop. # TYPE node_network_receive_drop_total counter node_network_receive_drop_total{device="lo"} 0 # HELP node_network_receive_errs_total Network device statistic receive_errs. # TYPE node_network_receive_errs_total counter node_network_receive_errs_total{device="lo"} 0 # HELP node_network_receive_fifo_total Network device statistic receive_fifo. # TYPE node_network_receive_fifo_total counter node_network_receive_fifo_total{device="lo"} 0 # HELP node_network_receive_frame_total Network device statistic receive_frame. # TYPE node_network_receive_frame_total counter node_network_receive_frame_total{device="lo"} 0 # HELP node_network_receive_multicast_total Network device statistic receive_multicast. # TYPE node_network_receive_multicast_total counter node_network_receive_multicast_total{device="lo"} 0 # HELP node_network_receive_nohandler_total Network device statistic receive_nohandler. # TYPE node_network_receive_nohandler_total counter node_network_receive_nohandler_total{device="lo"} 0 # HELP node_network_receive_packets_total Network device statistic receive_packets. # TYPE node_network_receive_packets_total counter # HELP node_network_speed_bytes Network device property: speed_bytes # TYPE node_network_speed_bytes gauge node_network_speed_bytes{device="eth0"} 1.25e+08 # HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. # TYPE node_network_transmit_bytes_total counter # HELP node_network_transmit_carrier_total Network device statistic transmit_carrier. # TYPE node_network_transmit_carrier_total counter node_network_transmit_carrier_total{device="lo"} 0 # HELP node_network_transmit_colls_total Network device statistic transmit_colls. # TYPE node_network_transmit_colls_total counter node_network_transmit_colls_total{device="lo"} 0 # HELP node_network_transmit_compressed_total Network device statistic transmit_compressed. 
# TYPE node_network_transmit_compressed_total counter node_network_transmit_compressed_total{device="lo"} 0 # HELP node_network_transmit_drop_total Network device statistic transmit_drop. # TYPE node_network_transmit_drop_total counter node_network_transmit_drop_total{device="lo"} 0 # HELP node_network_transmit_errs_total Network device statistic transmit_errs. # TYPE node_network_transmit_errs_total counter node_network_transmit_errs_total{device="lo"} 0 # HELP node_network_transmit_fifo_total Network device statistic transmit_fifo. # TYPE node_network_transmit_fifo_total counter node_network_transmit_fifo_total{device="lo"} 0 # HELP node_network_transmit_packets_total Network device statistic transmit_packets. # TYPE node_network_transmit_packets_total counter # HELP node_network_transmit_queue_length Network device property: transmit_queue_length # TYPE node_network_transmit_queue_length gauge node_network_transmit_queue_length{device="bond0"} 1000 node_network_transmit_queue_length{device="eth0"} 1000 # HELP node_network_up Value is 1 if operstate is 'up', 0 otherwise. # TYPE node_network_up gauge node_network_up{device="bond0"} 1 node_network_up{device="eth0"} 1 # HELP node_nf_conntrack_entries Number of currently allocated flow entries for connection tracking. # TYPE node_nf_conntrack_entries gauge node_nf_conntrack_entries 123 # HELP node_nf_conntrack_entries_limit Maximum size of connection tracking table. # TYPE node_nf_conntrack_entries_limit gauge node_nf_conntrack_entries_limit 65536 # HELP node_nf_conntrack_stat_drop Number of packets dropped due to conntrack failure. # TYPE node_nf_conntrack_stat_drop gauge node_nf_conntrack_stat_drop 0 # HELP node_nf_conntrack_stat_early_drop Number of dropped conntrack entries to make room for new ones, if maximum table size was reached. # TYPE node_nf_conntrack_stat_early_drop gauge node_nf_conntrack_stat_early_drop 0 # HELP node_nf_conntrack_stat_found Number of searched entries which were successful. # TYPE node_nf_conntrack_stat_found gauge node_nf_conntrack_stat_found 0 # HELP node_nf_conntrack_stat_ignore Number of packets seen which are already connected to a conntrack entry. # TYPE node_nf_conntrack_stat_ignore gauge node_nf_conntrack_stat_ignore 89738 # HELP node_nf_conntrack_stat_insert Number of entries inserted into the list. # TYPE node_nf_conntrack_stat_insert gauge node_nf_conntrack_stat_insert 0 # HELP node_nf_conntrack_stat_insert_failed Number of entries for which list insertion was attempted but failed. # TYPE node_nf_conntrack_stat_insert_failed gauge node_nf_conntrack_stat_insert_failed 0 # HELP node_nf_conntrack_stat_invalid Number of packets seen which can not be tracked. # TYPE node_nf_conntrack_stat_invalid gauge node_nf_conntrack_stat_invalid 53 # HELP node_nf_conntrack_stat_search_restart Number of conntrack table lookups which had to be restarted due to hashtable resizes. # TYPE node_nf_conntrack_stat_search_restart gauge node_nf_conntrack_stat_search_restart 7 # HELP node_nfs_connections_total Total number of NFSd TCP connections. # TYPE node_nfs_connections_total counter node_nfs_connections_total 45 # HELP node_nfs_packets_total Total NFSd network packets (sent+received) by protocol type. # TYPE node_nfs_packets_total counter node_nfs_packets_total{protocol="tcp"} 69 node_nfs_packets_total{protocol="udp"} 70 # HELP node_nfs_requests_total Number of NFS procedures invoked. 
# TYPE node_nfs_requests_total counter node_nfs_requests_total{method="Access",proto="3"} 1.17661341e+08 node_nfs_requests_total{method="Access",proto="4"} 58 node_nfs_requests_total{method="Allocate",proto="4"} 0 node_nfs_requests_total{method="BindConnToSession",proto="4"} 0 node_nfs_requests_total{method="Clone",proto="4"} 0 node_nfs_requests_total{method="Close",proto="4"} 28 node_nfs_requests_total{method="Commit",proto="3"} 23729 node_nfs_requests_total{method="Commit",proto="4"} 83 node_nfs_requests_total{method="Create",proto="2"} 52 node_nfs_requests_total{method="Create",proto="3"} 2.993289e+06 node_nfs_requests_total{method="Create",proto="4"} 15 node_nfs_requests_total{method="CreateSession",proto="4"} 32 node_nfs_requests_total{method="DeAllocate",proto="4"} 0 node_nfs_requests_total{method="DelegReturn",proto="4"} 97 node_nfs_requests_total{method="DestroyClientID",proto="4"} 0 node_nfs_requests_total{method="DestroySession",proto="4"} 67 node_nfs_requests_total{method="ExchangeID",proto="4"} 58 node_nfs_requests_total{method="FreeStateID",proto="4"} 0 node_nfs_requests_total{method="FsInfo",proto="3"} 2 node_nfs_requests_total{method="FsInfo",proto="4"} 68 node_nfs_requests_total{method="FsLocations",proto="4"} 32 node_nfs_requests_total{method="FsStat",proto="2"} 82 node_nfs_requests_total{method="FsStat",proto="3"} 13332 node_nfs_requests_total{method="FsidPresent",proto="4"} 11 node_nfs_requests_total{method="GetACL",proto="4"} 36 node_nfs_requests_total{method="GetAttr",proto="2"} 57 node_nfs_requests_total{method="GetAttr",proto="3"} 1.061909262e+09 node_nfs_requests_total{method="GetDeviceInfo",proto="4"} 1 node_nfs_requests_total{method="GetDeviceList",proto="4"} 0 node_nfs_requests_total{method="GetLeaseTime",proto="4"} 28 node_nfs_requests_total{method="Getattr",proto="4"} 88 node_nfs_requests_total{method="LayoutCommit",proto="4"} 26 node_nfs_requests_total{method="LayoutGet",proto="4"} 90 node_nfs_requests_total{method="LayoutReturn",proto="4"} 0 node_nfs_requests_total{method="LayoutStats",proto="4"} 0 node_nfs_requests_total{method="Link",proto="2"} 17 node_nfs_requests_total{method="Link",proto="3"} 0 node_nfs_requests_total{method="Link",proto="4"} 21 node_nfs_requests_total{method="Lock",proto="4"} 39 node_nfs_requests_total{method="Lockt",proto="4"} 68 node_nfs_requests_total{method="Locku",proto="4"} 59 node_nfs_requests_total{method="Lookup",proto="2"} 71 node_nfs_requests_total{method="Lookup",proto="3"} 4.077635e+06 node_nfs_requests_total{method="Lookup",proto="4"} 29 node_nfs_requests_total{method="LookupRoot",proto="4"} 74 node_nfs_requests_total{method="MkDir",proto="2"} 50 node_nfs_requests_total{method="MkDir",proto="3"} 590 node_nfs_requests_total{method="MkNod",proto="3"} 0 node_nfs_requests_total{method="Null",proto="2"} 16 node_nfs_requests_total{method="Null",proto="3"} 0 node_nfs_requests_total{method="Null",proto="4"} 98 node_nfs_requests_total{method="Open",proto="4"} 85 node_nfs_requests_total{method="OpenConfirm",proto="4"} 23 node_nfs_requests_total{method="OpenDowngrade",proto="4"} 1 node_nfs_requests_total{method="OpenNoattr",proto="4"} 24 node_nfs_requests_total{method="PathConf",proto="3"} 1 node_nfs_requests_total{method="Pathconf",proto="4"} 53 node_nfs_requests_total{method="Read",proto="2"} 45 node_nfs_requests_total{method="Read",proto="3"} 2.9391916e+07 node_nfs_requests_total{method="Read",proto="4"} 51 node_nfs_requests_total{method="ReadDir",proto="2"} 70 node_nfs_requests_total{method="ReadDir",proto="3"} 3983 
node_nfs_requests_total{method="ReadDir",proto="4"} 66 node_nfs_requests_total{method="ReadDirPlus",proto="3"} 92385 node_nfs_requests_total{method="ReadLink",proto="2"} 73 node_nfs_requests_total{method="ReadLink",proto="3"} 5 node_nfs_requests_total{method="ReadLink",proto="4"} 54 node_nfs_requests_total{method="ReclaimComplete",proto="4"} 35 node_nfs_requests_total{method="ReleaseLockowner",proto="4"} 85 node_nfs_requests_total{method="Remove",proto="2"} 83 node_nfs_requests_total{method="Remove",proto="3"} 7815 node_nfs_requests_total{method="Remove",proto="4"} 69 node_nfs_requests_total{method="Rename",proto="2"} 61 node_nfs_requests_total{method="Rename",proto="3"} 1130 node_nfs_requests_total{method="Rename",proto="4"} 96 node_nfs_requests_total{method="Renew",proto="4"} 83 node_nfs_requests_total{method="RmDir",proto="2"} 23 node_nfs_requests_total{method="RmDir",proto="3"} 15 node_nfs_requests_total{method="Root",proto="2"} 52 node_nfs_requests_total{method="Secinfo",proto="4"} 81 node_nfs_requests_total{method="SecinfoNoName",proto="4"} 0 node_nfs_requests_total{method="Seek",proto="4"} 0 node_nfs_requests_total{method="Sequence",proto="4"} 13 node_nfs_requests_total{method="ServerCaps",proto="4"} 56 node_nfs_requests_total{method="SetACL",proto="4"} 49 node_nfs_requests_total{method="SetAttr",proto="2"} 74 node_nfs_requests_total{method="SetAttr",proto="3"} 48906 node_nfs_requests_total{method="SetClientID",proto="4"} 12 node_nfs_requests_total{method="SetClientIDConfirm",proto="4"} 84 node_nfs_requests_total{method="Setattr",proto="4"} 73 node_nfs_requests_total{method="StatFs",proto="4"} 86 node_nfs_requests_total{method="SymLink",proto="2"} 53 node_nfs_requests_total{method="SymLink",proto="3"} 0 node_nfs_requests_total{method="Symlink",proto="4"} 84 node_nfs_requests_total{method="TestStateID",proto="4"} 0 node_nfs_requests_total{method="WrCache",proto="2"} 86 node_nfs_requests_total{method="Write",proto="2"} 0 node_nfs_requests_total{method="Write",proto="3"} 2.570425e+06 node_nfs_requests_total{method="Write",proto="4"} 54 # HELP node_nfs_rpc_authentication_refreshes_total Number of RPC authentication refreshes performed. # TYPE node_nfs_rpc_authentication_refreshes_total counter node_nfs_rpc_authentication_refreshes_total 1.218815394e+09 # HELP node_nfs_rpc_retransmissions_total Number of RPC transmissions performed. # TYPE node_nfs_rpc_retransmissions_total counter node_nfs_rpc_retransmissions_total 374636 # HELP node_nfs_rpcs_total Total number of RPCs performed. # TYPE node_nfs_rpcs_total counter node_nfs_rpcs_total 1.218785755e+09 # HELP node_nfsd_connections_total Total number of NFSd TCP connections. # TYPE node_nfsd_connections_total counter node_nfsd_connections_total 1 # HELP node_nfsd_disk_bytes_read_total Total NFSd bytes read. # TYPE node_nfsd_disk_bytes_read_total counter node_nfsd_disk_bytes_read_total 1.572864e+08 # HELP node_nfsd_disk_bytes_written_total Total NFSd bytes written. # TYPE node_nfsd_disk_bytes_written_total counter node_nfsd_disk_bytes_written_total 72864 # HELP node_nfsd_file_handles_stale_total Total number of NFSd stale file handles # TYPE node_nfsd_file_handles_stale_total counter node_nfsd_file_handles_stale_total 0 # HELP node_nfsd_packets_total Total NFSd network packets (sent+received) by protocol type. # TYPE node_nfsd_packets_total counter node_nfsd_packets_total{proto="tcp"} 917 node_nfsd_packets_total{proto="udp"} 55 # HELP node_nfsd_read_ahead_cache_not_found_total Total number of NFSd read ahead cache not found. 
# TYPE node_nfsd_read_ahead_cache_not_found_total counter node_nfsd_read_ahead_cache_not_found_total 0 # HELP node_nfsd_read_ahead_cache_size_blocks How large the read ahead cache is in blocks. # TYPE node_nfsd_read_ahead_cache_size_blocks gauge node_nfsd_read_ahead_cache_size_blocks 32 # HELP node_nfsd_reply_cache_hits_total Total number of NFSd Reply Cache hits (client lost server response). # TYPE node_nfsd_reply_cache_hits_total counter node_nfsd_reply_cache_hits_total 0 # HELP node_nfsd_reply_cache_misses_total Total number of NFSd Reply Cache misses for operations that require caching (idempotent). # TYPE node_nfsd_reply_cache_misses_total counter node_nfsd_reply_cache_misses_total 6 # HELP node_nfsd_reply_cache_nocache_total Total number of NFSd Reply Cache non-idempotent operations (rename/delete/…). # TYPE node_nfsd_reply_cache_nocache_total counter node_nfsd_reply_cache_nocache_total 18622 # HELP node_nfsd_requests_total Total number of NFSd Requests by method and protocol. # TYPE node_nfsd_requests_total counter node_nfsd_requests_total{method="Access",proto="3"} 111 node_nfsd_requests_total{method="Access",proto="4"} 1098 node_nfsd_requests_total{method="Close",proto="4"} 2 node_nfsd_requests_total{method="Commit",proto="3"} 0 node_nfsd_requests_total{method="Commit",proto="4"} 0 node_nfsd_requests_total{method="Create",proto="2"} 0 node_nfsd_requests_total{method="Create",proto="3"} 0 node_nfsd_requests_total{method="Create",proto="4"} 0 node_nfsd_requests_total{method="DelegPurge",proto="4"} 0 node_nfsd_requests_total{method="DelegReturn",proto="4"} 0 node_nfsd_requests_total{method="FsInfo",proto="3"} 2 node_nfsd_requests_total{method="FsStat",proto="2"} 2 node_nfsd_requests_total{method="FsStat",proto="3"} 0 node_nfsd_requests_total{method="GetAttr",proto="2"} 69 node_nfsd_requests_total{method="GetAttr",proto="3"} 112 node_nfsd_requests_total{method="GetAttr",proto="4"} 8179 node_nfsd_requests_total{method="GetFH",proto="4"} 5896 node_nfsd_requests_total{method="Link",proto="2"} 0 node_nfsd_requests_total{method="Link",proto="3"} 0 node_nfsd_requests_total{method="Link",proto="4"} 0 node_nfsd_requests_total{method="Lock",proto="4"} 0 node_nfsd_requests_total{method="Lockt",proto="4"} 0 node_nfsd_requests_total{method="Locku",proto="4"} 0 node_nfsd_requests_total{method="Lookup",proto="2"} 4410 node_nfsd_requests_total{method="Lookup",proto="3"} 2719 node_nfsd_requests_total{method="Lookup",proto="4"} 5900 node_nfsd_requests_total{method="LookupRoot",proto="4"} 0 node_nfsd_requests_total{method="MkDir",proto="2"} 0 node_nfsd_requests_total{method="MkDir",proto="3"} 0 node_nfsd_requests_total{method="MkNod",proto="3"} 0 node_nfsd_requests_total{method="Nverify",proto="4"} 0 node_nfsd_requests_total{method="Open",proto="4"} 2 node_nfsd_requests_total{method="OpenAttr",proto="4"} 0 node_nfsd_requests_total{method="OpenConfirm",proto="4"} 2 node_nfsd_requests_total{method="OpenDgrd",proto="4"} 0 node_nfsd_requests_total{method="PathConf",proto="3"} 1 node_nfsd_requests_total{method="PutFH",proto="4"} 9609 node_nfsd_requests_total{method="Read",proto="2"} 0 node_nfsd_requests_total{method="Read",proto="3"} 0 node_nfsd_requests_total{method="Read",proto="4"} 150 node_nfsd_requests_total{method="ReadDir",proto="2"} 99 node_nfsd_requests_total{method="ReadDir",proto="3"} 27 node_nfsd_requests_total{method="ReadDir",proto="4"} 1272 node_nfsd_requests_total{method="ReadDirPlus",proto="3"} 216 node_nfsd_requests_total{method="ReadLink",proto="2"} 0
node_nfsd_requests_total{method="ReadLink",proto="3"} 0 node_nfsd_requests_total{method="ReadLink",proto="4"} 0 node_nfsd_requests_total{method="RelLockOwner",proto="4"} 0 node_nfsd_requests_total{method="Remove",proto="2"} 0 node_nfsd_requests_total{method="Remove",proto="3"} 0 node_nfsd_requests_total{method="Remove",proto="4"} 0 node_nfsd_requests_total{method="Rename",proto="2"} 0 node_nfsd_requests_total{method="Rename",proto="3"} 0 node_nfsd_requests_total{method="Rename",proto="4"} 0 node_nfsd_requests_total{method="Renew",proto="4"} 1236 node_nfsd_requests_total{method="RestoreFH",proto="4"} 0 node_nfsd_requests_total{method="RmDir",proto="2"} 0 node_nfsd_requests_total{method="RmDir",proto="3"} 0 node_nfsd_requests_total{method="Root",proto="2"} 0 node_nfsd_requests_total{method="SaveFH",proto="4"} 0 node_nfsd_requests_total{method="SecInfo",proto="4"} 0 node_nfsd_requests_total{method="SetAttr",proto="2"} 0 node_nfsd_requests_total{method="SetAttr",proto="3"} 0 node_nfsd_requests_total{method="SetAttr",proto="4"} 0 node_nfsd_requests_total{method="SetClientID",proto="4"} 3 node_nfsd_requests_total{method="SetClientIDConfirm",proto="4"} 3 node_nfsd_requests_total{method="SymLink",proto="2"} 0 node_nfsd_requests_total{method="SymLink",proto="3"} 0 node_nfsd_requests_total{method="Verify",proto="4"} 0 node_nfsd_requests_total{method="WrCache",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="3"} 0 node_nfsd_requests_total{method="Write",proto="4"} 0 # HELP node_nfsd_rpc_errors_total Total number of NFSd RPC errors by error type. # TYPE node_nfsd_rpc_errors_total counter node_nfsd_rpc_errors_total{error="auth"} 2 node_nfsd_rpc_errors_total{error="cInt"} 0 node_nfsd_rpc_errors_total{error="fmt"} 1 # HELP node_nfsd_server_rpcs_total Total number of NFSd RPCs. # TYPE node_nfsd_server_rpcs_total counter node_nfsd_server_rpcs_total 18628 # HELP node_nfsd_server_threads Total number of NFSd kernel threads that are running. # TYPE node_nfsd_server_threads gauge node_nfsd_server_threads 8 # HELP node_nvme_info Non-numeric data from /sys/class/nvme/, value is always 1. # TYPE node_nvme_info gauge node_nvme_info{device="nvme0",firmware_revision="1B2QEXP7",model="Samsung SSD 970 PRO 512GB",serial="S680HF8N190894I",state="live"} 1 # HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. # TYPE node_os_info gauge node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 # HELP node_os_version Metric containing the major.minor part of the OS version. # TYPE node_os_version gauge node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 # HELP node_power_supply_capacity capacity value of /sys/class/power_supply/. # TYPE node_power_supply_capacity gauge node_power_supply_capacity{power_supply="BAT0"} 81 # HELP node_power_supply_cyclecount cyclecount value of /sys/class/power_supply/. # TYPE node_power_supply_cyclecount gauge node_power_supply_cyclecount{power_supply="BAT0"} 0 # HELP node_power_supply_energy_full energy_full value of /sys/class/power_supply/. 
# TYPE node_power_supply_energy_full gauge node_power_supply_energy_full{power_supply="BAT0"} 45.07 # HELP node_power_supply_energy_full_design energy_full_design value of /sys/class/power_supply/. # TYPE node_power_supply_energy_full_design gauge node_power_supply_energy_full_design{power_supply="BAT0"} 47.52 # HELP node_power_supply_energy_watthour energy_watthour value of /sys/class/power_supply/. # TYPE node_power_supply_energy_watthour gauge node_power_supply_energy_watthour{power_supply="BAT0"} 36.58 # HELP node_power_supply_info info of /sys/class/power_supply/. # TYPE node_power_supply_info gauge node_power_supply_info{power_supply="AC",type="Mains"} 1 node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1��",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 # HELP node_power_supply_online online value of /sys/class/power_supply/. # TYPE node_power_supply_online gauge node_power_supply_online{power_supply="AC"} 0 # HELP node_power_supply_power_watt power_watt value of /sys/class/power_supply/. # TYPE node_power_supply_power_watt gauge node_power_supply_power_watt{power_supply="BAT0"} 5.002 # HELP node_power_supply_present present value of /sys/class/power_supply/. # TYPE node_power_supply_present gauge node_power_supply_present{power_supply="BAT0"} 1 # HELP node_power_supply_voltage_min_design voltage_min_design value of /sys/class/power_supply/. # TYPE node_power_supply_voltage_min_design gauge node_power_supply_voltage_min_design{power_supply="BAT0"} 10.8 # HELP node_power_supply_voltage_volt voltage_volt value of /sys/class/power_supply/. # TYPE node_power_supply_voltage_volt gauge node_power_supply_voltage_volt{power_supply="BAT0"} 11.66 # HELP node_pressure_cpu_waiting_seconds_total Total time in seconds that processes have waited for CPU time # TYPE node_pressure_cpu_waiting_seconds_total counter node_pressure_cpu_waiting_seconds_total 14.036781000000001 # HELP node_pressure_io_stalled_seconds_total Total time in seconds no process could make progress due to IO congestion # TYPE node_pressure_io_stalled_seconds_total counter node_pressure_io_stalled_seconds_total 159.229614 # HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion # TYPE node_pressure_io_waiting_seconds_total counter node_pressure_io_waiting_seconds_total 159.886802 # HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion # TYPE node_pressure_memory_stalled_seconds_total counter node_pressure_memory_stalled_seconds_total 0 # HELP node_pressure_memory_waiting_seconds_total Total time in seconds that processes have waited for memory # TYPE node_pressure_memory_waiting_seconds_total counter node_pressure_memory_waiting_seconds_total 0 # HELP node_processes_max_processes Number of max PIDs limit # TYPE node_processes_max_processes gauge node_processes_max_processes 123 # HELP node_processes_max_threads Limit of threads in the system # TYPE node_processes_max_threads gauge node_processes_max_threads 7801 # HELP node_processes_pids Number of PIDs # TYPE node_processes_pids gauge node_processes_pids 3 # HELP node_processes_state Number of processes in each state. 
# TYPE node_processes_state gauge node_processes_state{state="I"} 1 node_processes_state{state="S"} 2 # HELP node_processes_threads Allocated threads in system # TYPE node_processes_threads gauge node_processes_threads 3 # HELP node_procs_blocked Number of processes blocked waiting for I/O to complete. # TYPE node_procs_blocked gauge node_procs_blocked 0 # HELP node_procs_running Number of processes in runnable state. # TYPE node_procs_running gauge node_procs_running 2 # HELP node_qdisc_backlog Number of bytes currently in queue to be sent. # TYPE node_qdisc_backlog gauge node_qdisc_backlog{device="eth0",kind="pfifo_fast"} 0 node_qdisc_backlog{device="wlan0",kind="fq"} 0 # HELP node_qdisc_bytes_total Number of bytes sent. # TYPE node_qdisc_bytes_total counter node_qdisc_bytes_total{device="eth0",kind="pfifo_fast"} 83 node_qdisc_bytes_total{device="wlan0",kind="fq"} 42 # HELP node_qdisc_current_queue_length Number of packets currently in queue to be sent. # TYPE node_qdisc_current_queue_length gauge node_qdisc_current_queue_length{device="eth0",kind="pfifo_fast"} 0 node_qdisc_current_queue_length{device="wlan0",kind="fq"} 0 # HELP node_qdisc_drops_total Number of packets dropped. # TYPE node_qdisc_drops_total counter node_qdisc_drops_total{device="eth0",kind="pfifo_fast"} 0 node_qdisc_drops_total{device="wlan0",kind="fq"} 1 # HELP node_qdisc_overlimits_total Number of overlimit packets. # TYPE node_qdisc_overlimits_total counter node_qdisc_overlimits_total{device="eth0",kind="pfifo_fast"} 0 node_qdisc_overlimits_total{device="wlan0",kind="fq"} 0 # HELP node_qdisc_packets_total Number of packets sent. # TYPE node_qdisc_packets_total counter node_qdisc_packets_total{device="eth0",kind="pfifo_fast"} 83 node_qdisc_packets_total{device="wlan0",kind="fq"} 42 # HELP node_qdisc_requeues_total Number of packets dequeued, not transmitted, and requeued. # TYPE node_qdisc_requeues_total counter node_qdisc_requeues_total{device="eth0",kind="pfifo_fast"} 2 node_qdisc_requeues_total{device="wlan0",kind="fq"} 1 # HELP node_rapl_core_joules_total Current RAPL core value in joules # TYPE node_rapl_core_joules_total counter node_rapl_core_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0:0"} 118821.284256 # HELP node_rapl_package_joules_total Current RAPL package value in joules # TYPE node_rapl_package_joules_total counter node_rapl_package_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0"} 240422.366267 # HELP node_schedstat_running_seconds_total Number of seconds CPU spent running a process. # TYPE node_schedstat_running_seconds_total counter node_schedstat_running_seconds_total{cpu="0"} 2.045936778163039e+06 node_schedstat_running_seconds_total{cpu="1"} 1.904686152592476e+06 # HELP node_schedstat_timeslices_total Number of timeslices executed by CPU. # TYPE node_schedstat_timeslices_total counter node_schedstat_timeslices_total{cpu="0"} 4.767485306e+09 node_schedstat_timeslices_total{cpu="1"} 5.145567945e+09 # HELP node_schedstat_waiting_seconds_total Number of seconds spent by processes waiting for this CPU. # TYPE node_schedstat_waiting_seconds_total counter node_schedstat_waiting_seconds_total{cpu="0"} 343796.328169361 node_schedstat_waiting_seconds_total{cpu="1"} 364107.263788241 # HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. # TYPE node_scrape_collector_duration_seconds gauge # HELP node_scrape_collector_success node_exporter: Whether a collector succeeded.
# TYPE node_scrape_collector_success gauge node_scrape_collector_success{collector="arp"} 1 node_scrape_collector_success{collector="bcache"} 1 node_scrape_collector_success{collector="bonding"} 1 node_scrape_collector_success{collector="btrfs"} 1 node_scrape_collector_success{collector="buddyinfo"} 1 node_scrape_collector_success{collector="cgroups"} 1 node_scrape_collector_success{collector="conntrack"} 1 node_scrape_collector_success{collector="cpu"} 1 node_scrape_collector_success{collector="cpu_vulnerabilities"} 1 node_scrape_collector_success{collector="cpufreq"} 1 node_scrape_collector_success{collector="diskstats"} 1 node_scrape_collector_success{collector="dmi"} 1 node_scrape_collector_success{collector="drbd"} 1 node_scrape_collector_success{collector="edac"} 1 node_scrape_collector_success{collector="entropy"} 1 node_scrape_collector_success{collector="fibrechannel"} 1 node_scrape_collector_success{collector="filefd"} 1 node_scrape_collector_success{collector="hwmon"} 1 node_scrape_collector_success{collector="infiniband"} 1 node_scrape_collector_success{collector="interrupts"} 1 node_scrape_collector_success{collector="ipvs"} 1 node_scrape_collector_success{collector="ksmd"} 1 node_scrape_collector_success{collector="lnstat"} 1 node_scrape_collector_success{collector="loadavg"} 1 node_scrape_collector_success{collector="mdadm"} 1 node_scrape_collector_success{collector="meminfo"} 1 node_scrape_collector_success{collector="meminfo_numa"} 1 node_scrape_collector_success{collector="mountstats"} 1 node_scrape_collector_success{collector="netclass"} 1 node_scrape_collector_success{collector="netdev"} 1 node_scrape_collector_success{collector="netstat"} 1 node_scrape_collector_success{collector="nfs"} 1 node_scrape_collector_success{collector="nfsd"} 1 node_scrape_collector_success{collector="nvme"} 1 node_scrape_collector_success{collector="os"} 1 node_scrape_collector_success{collector="powersupplyclass"} 1 node_scrape_collector_success{collector="pressure"} 1 node_scrape_collector_success{collector="processes"} 1 node_scrape_collector_success{collector="qdisc"} 1 node_scrape_collector_success{collector="rapl"} 1 node_scrape_collector_success{collector="schedstat"} 1 node_scrape_collector_success{collector="selinux"} 1 node_scrape_collector_success{collector="slabinfo"} 1 node_scrape_collector_success{collector="sockstat"} 1 node_scrape_collector_success{collector="softirqs"} 1 node_scrape_collector_success{collector="softnet"} 1 node_scrape_collector_success{collector="stat"} 1 node_scrape_collector_success{collector="sysctl"} 1 node_scrape_collector_success{collector="tapestats"} 1 node_scrape_collector_success{collector="textfile"} 1 node_scrape_collector_success{collector="thermal_zone"} 1 node_scrape_collector_success{collector="time"} 1 node_scrape_collector_success{collector="udp_queues"} 1 node_scrape_collector_success{collector="vmstat"} 1 node_scrape_collector_success{collector="wifi"} 1 node_scrape_collector_success{collector="xfs"} 1 node_scrape_collector_success{collector="zfs"} 1 node_scrape_collector_success{collector="zoneinfo"} 1 # HELP node_selinux_enabled SELinux is enabled, 1 is true, 0 is false # TYPE node_selinux_enabled gauge node_selinux_enabled 0 # HELP node_slabinfo_active_objects The number of objects that are currently active (i.e., in use). 
# TYPE node_slabinfo_active_objects gauge node_slabinfo_active_objects{slab="dmaengine-unmap-128"} 1206 node_slabinfo_active_objects{slab="kmalloc-8192"} 132 node_slabinfo_active_objects{slab="kmem_cache"} 320 node_slabinfo_active_objects{slab="tw_sock_TCP"} 704 # HELP node_slabinfo_object_size_bytes The size of objects in this slab, in bytes. # TYPE node_slabinfo_object_size_bytes gauge node_slabinfo_object_size_bytes{slab="dmaengine-unmap-128"} 1088 node_slabinfo_object_size_bytes{slab="kmalloc-8192"} 8192 node_slabinfo_object_size_bytes{slab="kmem_cache"} 256 node_slabinfo_object_size_bytes{slab="tw_sock_TCP"} 256 # HELP node_slabinfo_objects The total number of allocated objects (i.e., objects that are both in use and not in use). # TYPE node_slabinfo_objects gauge node_slabinfo_objects{slab="dmaengine-unmap-128"} 1320 node_slabinfo_objects{slab="kmalloc-8192"} 148 node_slabinfo_objects{slab="kmem_cache"} 320 node_slabinfo_objects{slab="tw_sock_TCP"} 864 # HELP node_slabinfo_objects_per_slab The number of objects stored in each slab. # TYPE node_slabinfo_objects_per_slab gauge node_slabinfo_objects_per_slab{slab="dmaengine-unmap-128"} 30 node_slabinfo_objects_per_slab{slab="kmalloc-8192"} 4 node_slabinfo_objects_per_slab{slab="kmem_cache"} 32 node_slabinfo_objects_per_slab{slab="tw_sock_TCP"} 32 # HELP node_slabinfo_pages_per_slab The number of pages allocated for each slab. # TYPE node_slabinfo_pages_per_slab gauge node_slabinfo_pages_per_slab{slab="dmaengine-unmap-128"} 8 node_slabinfo_pages_per_slab{slab="kmalloc-8192"} 8 node_slabinfo_pages_per_slab{slab="kmem_cache"} 2 node_slabinfo_pages_per_slab{slab="tw_sock_TCP"} 2 # HELP node_sockstat_FRAG6_inuse Number of FRAG6 sockets in state inuse. # TYPE node_sockstat_FRAG6_inuse gauge node_sockstat_FRAG6_inuse 0 # HELP node_sockstat_FRAG6_memory Number of FRAG6 sockets in state memory. # TYPE node_sockstat_FRAG6_memory gauge node_sockstat_FRAG6_memory 0 # HELP node_sockstat_FRAG_inuse Number of FRAG sockets in state inuse. # TYPE node_sockstat_FRAG_inuse gauge node_sockstat_FRAG_inuse 0 # HELP node_sockstat_FRAG_memory Number of FRAG sockets in state memory. # TYPE node_sockstat_FRAG_memory gauge node_sockstat_FRAG_memory 0 # HELP node_sockstat_RAW6_inuse Number of RAW6 sockets in state inuse. # TYPE node_sockstat_RAW6_inuse gauge node_sockstat_RAW6_inuse 1 # HELP node_sockstat_RAW_inuse Number of RAW sockets in state inuse. # TYPE node_sockstat_RAW_inuse gauge node_sockstat_RAW_inuse 0 # HELP node_sockstat_TCP6_inuse Number of TCP6 sockets in state inuse. # TYPE node_sockstat_TCP6_inuse gauge node_sockstat_TCP6_inuse 17 # HELP node_sockstat_TCP_alloc Number of TCP sockets in state alloc. # TYPE node_sockstat_TCP_alloc gauge node_sockstat_TCP_alloc 17 # HELP node_sockstat_TCP_inuse Number of TCP sockets in state inuse. # TYPE node_sockstat_TCP_inuse gauge node_sockstat_TCP_inuse 4 # HELP node_sockstat_TCP_mem Number of TCP sockets in state mem. # TYPE node_sockstat_TCP_mem gauge node_sockstat_TCP_mem 1 # HELP node_sockstat_TCP_mem_bytes Number of TCP sockets in state mem_bytes. # TYPE node_sockstat_TCP_mem_bytes gauge node_sockstat_TCP_mem_bytes 4096 # HELP node_sockstat_TCP_orphan Number of TCP sockets in state orphan. # TYPE node_sockstat_TCP_orphan gauge node_sockstat_TCP_orphan 0 # HELP node_sockstat_TCP_tw Number of TCP sockets in state tw. # TYPE node_sockstat_TCP_tw gauge node_sockstat_TCP_tw 4 # HELP node_sockstat_UDP6_inuse Number of UDP6 sockets in state inuse. 
# TYPE node_sockstat_UDP6_inuse gauge node_sockstat_UDP6_inuse 9 # HELP node_sockstat_UDPLITE6_inuse Number of UDPLITE6 sockets in state inuse. # TYPE node_sockstat_UDPLITE6_inuse gauge node_sockstat_UDPLITE6_inuse 0 # HELP node_sockstat_UDPLITE_inuse Number of UDPLITE sockets in state inuse. # TYPE node_sockstat_UDPLITE_inuse gauge node_sockstat_UDPLITE_inuse 0 # HELP node_sockstat_UDP_inuse Number of UDP sockets in state inuse. # TYPE node_sockstat_UDP_inuse gauge node_sockstat_UDP_inuse 0 # HELP node_sockstat_UDP_mem Number of UDP sockets in state mem. # TYPE node_sockstat_UDP_mem gauge node_sockstat_UDP_mem 0 # HELP node_sockstat_UDP_mem_bytes Number of UDP sockets in state mem_bytes. # TYPE node_sockstat_UDP_mem_bytes gauge node_sockstat_UDP_mem_bytes 0 # HELP node_sockstat_sockets_used Number of IPv4 sockets in use. # TYPE node_sockstat_sockets_used gauge node_sockstat_sockets_used 229 # HELP node_softirqs_functions_total Softirq counts per CPU. # TYPE node_softirqs_functions_total counter node_softirqs_functions_total{cpu="0",type="BLOCK"} 23776 node_softirqs_functions_total{cpu="0",type="HI"} 7 node_softirqs_functions_total{cpu="0",type="HRTIMER"} 40 node_softirqs_functions_total{cpu="0",type="IRQ_POLL"} 0 node_softirqs_functions_total{cpu="0",type="NET_RX"} 43066 node_softirqs_functions_total{cpu="0",type="NET_TX"} 2301 node_softirqs_functions_total{cpu="0",type="RCU"} 155929 node_softirqs_functions_total{cpu="0",type="SCHED"} 378895 node_softirqs_functions_total{cpu="0",type="TASKLET"} 372 node_softirqs_functions_total{cpu="0",type="TIMER"} 424191 node_softirqs_functions_total{cpu="1",type="BLOCK"} 24115 node_softirqs_functions_total{cpu="1",type="HI"} 1 node_softirqs_functions_total{cpu="1",type="HRTIMER"} 346 node_softirqs_functions_total{cpu="1",type="IRQ_POLL"} 0 node_softirqs_functions_total{cpu="1",type="NET_RX"} 104508 node_softirqs_functions_total{cpu="1",type="NET_TX"} 2430 node_softirqs_functions_total{cpu="1",type="RCU"} 146631 node_softirqs_functions_total{cpu="1",type="SCHED"} 152852 node_softirqs_functions_total{cpu="1",type="TASKLET"} 1899 node_softirqs_functions_total{cpu="1",type="TIMER"} 108342 # HELP node_softirqs_total Number of softirq calls. 
# TYPE node_softirqs_total counter node_softirqs_total{vector="block"} 186066 node_softirqs_total{vector="block_iopoll"} 0 node_softirqs_total{vector="hi"} 250191 node_softirqs_total{vector="hrtimer"} 12499 node_softirqs_total{vector="net_rx"} 211099 node_softirqs_total{vector="net_tx"} 1647 node_softirqs_total{vector="rcu"} 508444 node_softirqs_total{vector="sched"} 622196 node_softirqs_total{vector="tasklet"} 1.783454e+06 node_softirqs_total{vector="timer"} 1.481983e+06 # HELP node_softnet_backlog_len Softnet backlog status # TYPE node_softnet_backlog_len gauge node_softnet_backlog_len{cpu="0"} 0 node_softnet_backlog_len{cpu="1"} 0 node_softnet_backlog_len{cpu="2"} 0 node_softnet_backlog_len{cpu="3"} 0 # HELP node_softnet_cpu_collision_total Number of collisions that occurred while obtaining the device lock while transmitting # TYPE node_softnet_cpu_collision_total counter node_softnet_cpu_collision_total{cpu="0"} 0 node_softnet_cpu_collision_total{cpu="1"} 0 node_softnet_cpu_collision_total{cpu="2"} 0 node_softnet_cpu_collision_total{cpu="3"} 0 # HELP node_softnet_dropped_total Number of dropped packets # TYPE node_softnet_dropped_total counter node_softnet_dropped_total{cpu="0"} 0 node_softnet_dropped_total{cpu="1"} 41 node_softnet_dropped_total{cpu="2"} 0 node_softnet_dropped_total{cpu="3"} 0 # HELP node_softnet_flow_limit_count_total Number of times flow limit has been reached # TYPE node_softnet_flow_limit_count_total counter node_softnet_flow_limit_count_total{cpu="0"} 0 node_softnet_flow_limit_count_total{cpu="1"} 0 node_softnet_flow_limit_count_total{cpu="2"} 0 node_softnet_flow_limit_count_total{cpu="3"} 0 # HELP node_softnet_processed_total Number of processed packets # TYPE node_softnet_processed_total counter node_softnet_processed_total{cpu="0"} 299641 node_softnet_processed_total{cpu="1"} 916354 node_softnet_processed_total{cpu="2"} 5.577791e+06 node_softnet_processed_total{cpu="3"} 3.113785e+06 # HELP node_softnet_received_rps_total Number of times the CPU has been woken up to process packets via RPS (received_rps) # TYPE node_softnet_received_rps_total counter node_softnet_received_rps_total{cpu="0"} 0 node_softnet_received_rps_total{cpu="1"} 0 node_softnet_received_rps_total{cpu="2"} 0 node_softnet_received_rps_total{cpu="3"} 0 # HELP node_softnet_times_squeezed_total Number of times packet processing ran out of quota # TYPE node_softnet_times_squeezed_total counter node_softnet_times_squeezed_total{cpu="0"} 1 node_softnet_times_squeezed_total{cpu="1"} 10 node_softnet_times_squeezed_total{cpu="2"} 85 node_softnet_times_squeezed_total{cpu="3"} 50 # HELP node_sysctl_fs_file_nr sysctl fs.file-nr # TYPE node_sysctl_fs_file_nr untyped node_sysctl_fs_file_nr{index="0"} 1024 node_sysctl_fs_file_nr{index="1"} 0 node_sysctl_fs_file_nr{index="2"} 1.631329e+06 # HELP node_sysctl_fs_file_nr_current sysctl fs.file-nr, field 1 # TYPE node_sysctl_fs_file_nr_current untyped node_sysctl_fs_file_nr_current 0 # HELP node_sysctl_fs_file_nr_max sysctl fs.file-nr, field 2 # TYPE node_sysctl_fs_file_nr_max untyped node_sysctl_fs_file_nr_max 1.631329e+06 # HELP node_sysctl_fs_file_nr_total sysctl fs.file-nr, field 0 # TYPE node_sysctl_fs_file_nr_total untyped node_sysctl_fs_file_nr_total 1024 # HELP node_sysctl_info sysctl info # TYPE node_sysctl_info gauge node_sysctl_info{index="0",name="kernel.seccomp.actions_avail",value="kill_process"} 1 node_sysctl_info{index="1",name="kernel.seccomp.actions_avail",value="kill_thread"} 1 node_sysctl_info{index="2",name="kernel.seccomp.actions_avail",value="trap"} 1
node_sysctl_info{index="3",name="kernel.seccomp.actions_avail",value="errno"} 1 node_sysctl_info{index="4",name="kernel.seccomp.actions_avail",value="user_notif"} 1 node_sysctl_info{index="5",name="kernel.seccomp.actions_avail",value="trace"} 1 node_sysctl_info{index="6",name="kernel.seccomp.actions_avail",value="log"} 1 node_sysctl_info{index="7",name="kernel.seccomp.actions_avail",value="allow"} 1 # HELP node_sysctl_kernel_threads_max sysctl kernel.threads-max # TYPE node_sysctl_kernel_threads_max untyped node_sysctl_kernel_threads_max 7801 # HELP node_tape_io_now The number of I/Os currently outstanding to this device. # TYPE node_tape_io_now gauge node_tape_io_now{device="st0"} 1 # HELP node_tape_io_others_total The number of I/Os issued to the tape drive other than read or write commands. The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total # TYPE node_tape_io_others_total counter node_tape_io_others_total{device="st0"} 1409 # HELP node_tape_io_time_seconds_total The amount of time spent waiting for all I/O to complete (including read and write). This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used. # TYPE node_tape_io_time_seconds_total counter node_tape_io_time_seconds_total{device="st0"} 9247.01108772 # HELP node_tape_read_bytes_total The number of bytes read from the tape drive. # TYPE node_tape_read_bytes_total counter node_tape_read_bytes_total{device="st0"} 9.79383912e+08 # HELP node_tape_read_time_seconds_total The amount of time spent waiting for read requests to complete. # TYPE node_tape_read_time_seconds_total counter node_tape_read_time_seconds_total{device="st0"} 33.788355744 # HELP node_tape_reads_completed_total The number of read requests issued to the tape drive. # TYPE node_tape_reads_completed_total counter node_tape_reads_completed_total{device="st0"} 3741 # HELP node_tape_residual_total The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger thean the block size on tape. For write not all data made it to tape. # TYPE node_tape_residual_total counter node_tape_residual_total{device="st0"} 19 # HELP node_tape_write_time_seconds_total The amount of time spent waiting for write requests to complete. # TYPE node_tape_write_time_seconds_total counter node_tape_write_time_seconds_total{device="st0"} 5233.597394395 # HELP node_tape_writes_completed_total The number of write requests issued to the tape drive. # TYPE node_tape_writes_completed_total counter node_tape_writes_completed_total{device="st0"} 5.3772916e+07 # HELP node_tape_written_bytes_total The number of bytes written to the tape drive. # TYPE node_tape_written_bytes_total counter node_tape_written_bytes_total{device="st0"} 1.496246784e+12 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 # HELP node_thermal_zone_temp Zone temperature in Celsius # TYPE node_thermal_zone_temp gauge node_thermal_zone_temp{type="cpu-thermal",zone="0"} 12.376 # HELP node_time_clocksource_available_info Available clocksources read from '/sys/devices/system/clocksource'. 
# TYPE node_time_clocksource_available_info gauge node_time_clocksource_available_info{clocksource="acpi_pm",device="0"} 1 node_time_clocksource_available_info{clocksource="hpet",device="0"} 1 node_time_clocksource_available_info{clocksource="tsc",device="0"} 1 # HELP node_time_clocksource_current_info Current clocksource read from '/sys/devices/system/clocksource'. # TYPE node_time_clocksource_current_info gauge node_time_clocksource_current_info{clocksource="tsc",device="0"} 1 # HELP node_time_seconds System time in seconds since epoch (1970). # TYPE node_time_seconds gauge # HELP node_time_zone_offset_seconds System time zone offset in seconds. # TYPE node_time_zone_offset_seconds gauge # HELP node_udp_queues Number of allocated memory in the kernel for UDP datagrams in bytes. # TYPE node_udp_queues gauge node_udp_queues{ip="v4",queue="rx"} 0 node_udp_queues{ip="v4",queue="tx"} 21 # HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill. # TYPE node_vmstat_oom_kill untyped node_vmstat_oom_kill 0 # HELP node_vmstat_pgfault /proc/vmstat information field pgfault. # TYPE node_vmstat_pgfault untyped node_vmstat_pgfault 2.320168809e+09 # HELP node_vmstat_pgmajfault /proc/vmstat information field pgmajfault. # TYPE node_vmstat_pgmajfault untyped node_vmstat_pgmajfault 507162 # HELP node_vmstat_pgpgin /proc/vmstat information field pgpgin. # TYPE node_vmstat_pgpgin untyped node_vmstat_pgpgin 7.344136e+06 # HELP node_vmstat_pgpgout /proc/vmstat information field pgpgout. # TYPE node_vmstat_pgpgout untyped node_vmstat_pgpgout 1.541180581e+09 # HELP node_vmstat_pswpin /proc/vmstat information field pswpin. # TYPE node_vmstat_pswpin untyped node_vmstat_pswpin 1476 # HELP node_vmstat_pswpout /proc/vmstat information field pswpout. # TYPE node_vmstat_pswpout untyped node_vmstat_pswpout 35045 # HELP node_wifi_interface_frequency_hertz The current frequency a WiFi interface is operating at, in hertz. # TYPE node_wifi_interface_frequency_hertz gauge node_wifi_interface_frequency_hertz{device="wlan0"} 2.412e+09 node_wifi_interface_frequency_hertz{device="wlan1"} 2.412e+09 # HELP node_wifi_station_beacon_loss_total The total number of times a station has detected a beacon loss. # TYPE node_wifi_station_beacon_loss_total counter node_wifi_station_beacon_loss_total{device="wlan0",mac_address="01:02:03:04:05:06"} 2 node_wifi_station_beacon_loss_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1 # HELP node_wifi_station_connected_seconds_total The total number of seconds a station has been connected to an access point. # TYPE node_wifi_station_connected_seconds_total counter node_wifi_station_connected_seconds_total{device="wlan0",mac_address="01:02:03:04:05:06"} 60 node_wifi_station_connected_seconds_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 30 # HELP node_wifi_station_inactive_seconds The number of seconds since any wireless activity has occurred on a station. # TYPE node_wifi_station_inactive_seconds gauge node_wifi_station_inactive_seconds{device="wlan0",mac_address="01:02:03:04:05:06"} 0.8 node_wifi_station_inactive_seconds{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0.4 # HELP node_wifi_station_info Labeled WiFi interface station information as provided by the operating system. # TYPE node_wifi_station_info gauge node_wifi_station_info{bssid="00:11:22:33:44:55",device="wlan0",mode="client",ssid="Example"} 1 # HELP node_wifi_station_receive_bits_per_second The current WiFi receive bitrate of a station, in bits per second. 
# TYPE node_wifi_station_receive_bits_per_second gauge node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 2.56e+08 node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.28e+08 # HELP node_wifi_station_receive_bytes_total The total number of bytes received by a WiFi station. # TYPE node_wifi_station_receive_bytes_total counter node_wifi_station_receive_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 node_wifi_station_receive_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_signal_dbm The current WiFi signal strength, in decibel-milliwatts (dBm). # TYPE node_wifi_station_signal_dbm gauge node_wifi_station_signal_dbm{device="wlan0",mac_address="01:02:03:04:05:06"} -26 node_wifi_station_signal_dbm{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} -52 # HELP node_wifi_station_transmit_bits_per_second The current WiFi transmit bitrate of a station, in bits per second. # TYPE node_wifi_station_transmit_bits_per_second gauge node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 3.28e+08 node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.64e+08 # HELP node_wifi_station_transmit_bytes_total The total number of bytes transmitted by a WiFi station. # TYPE node_wifi_station_transmit_bytes_total counter node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_transmit_failed_total The total number of times a station has failed to send a packet. # TYPE node_wifi_station_transmit_failed_total counter node_wifi_station_transmit_failed_total{device="wlan0",mac_address="01:02:03:04:05:06"} 4 node_wifi_station_transmit_failed_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 2 # HELP node_wifi_station_transmit_retries_total The total number of times a station has had to retry while sending a packet. # TYPE node_wifi_station_transmit_retries_total counter node_wifi_station_transmit_retries_total{device="wlan0",mac_address="01:02:03:04:05:06"} 20 node_wifi_station_transmit_retries_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 10 # HELP node_xfs_allocation_btree_compares_total Number of allocation B-tree compares for a filesystem. # TYPE node_xfs_allocation_btree_compares_total counter node_xfs_allocation_btree_compares_total{device="sda1"} 0 # HELP node_xfs_allocation_btree_lookups_total Number of allocation B-tree lookups for a filesystem. # TYPE node_xfs_allocation_btree_lookups_total counter node_xfs_allocation_btree_lookups_total{device="sda1"} 0 # HELP node_xfs_allocation_btree_records_deleted_total Number of allocation B-tree records deleted for a filesystem. # TYPE node_xfs_allocation_btree_records_deleted_total counter node_xfs_allocation_btree_records_deleted_total{device="sda1"} 0 # HELP node_xfs_allocation_btree_records_inserted_total Number of allocation B-tree records inserted for a filesystem. # TYPE node_xfs_allocation_btree_records_inserted_total counter node_xfs_allocation_btree_records_inserted_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_compares_total Number of block map B-tree compares for a filesystem. # TYPE node_xfs_block_map_btree_compares_total counter node_xfs_block_map_btree_compares_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_lookups_total Number of block map B-tree lookups for a filesystem. 
# TYPE node_xfs_block_map_btree_lookups_total counter node_xfs_block_map_btree_lookups_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_records_deleted_total Number of block map B-tree records deleted for a filesystem. # TYPE node_xfs_block_map_btree_records_deleted_total counter node_xfs_block_map_btree_records_deleted_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_records_inserted_total Number of block map B-tree records inserted for a filesystem. # TYPE node_xfs_block_map_btree_records_inserted_total counter node_xfs_block_map_btree_records_inserted_total{device="sda1"} 0 # HELP node_xfs_block_mapping_extent_list_compares_total Number of extent list compares for a filesystem. # TYPE node_xfs_block_mapping_extent_list_compares_total counter node_xfs_block_mapping_extent_list_compares_total{device="sda1"} 0 # HELP node_xfs_block_mapping_extent_list_deletions_total Number of extent list deletions for a filesystem. # TYPE node_xfs_block_mapping_extent_list_deletions_total counter node_xfs_block_mapping_extent_list_deletions_total{device="sda1"} 1 # HELP node_xfs_block_mapping_extent_list_insertions_total Number of extent list insertions for a filesystem. # TYPE node_xfs_block_mapping_extent_list_insertions_total counter node_xfs_block_mapping_extent_list_insertions_total{device="sda1"} 1 # HELP node_xfs_block_mapping_extent_list_lookups_total Number of extent list lookups for a filesystem. # TYPE node_xfs_block_mapping_extent_list_lookups_total counter node_xfs_block_mapping_extent_list_lookups_total{device="sda1"} 91 # HELP node_xfs_block_mapping_reads_total Number of block map for read operations for a filesystem. # TYPE node_xfs_block_mapping_reads_total counter node_xfs_block_mapping_reads_total{device="sda1"} 61 # HELP node_xfs_block_mapping_unmaps_total Number of block unmaps (deletes) for a filesystem. # TYPE node_xfs_block_mapping_unmaps_total counter node_xfs_block_mapping_unmaps_total{device="sda1"} 1 # HELP node_xfs_block_mapping_writes_total Number of block map for write operations for a filesystem. # TYPE node_xfs_block_mapping_writes_total counter node_xfs_block_mapping_writes_total{device="sda1"} 29 # HELP node_xfs_directory_operation_create_total Number of times a new directory entry was created for a filesystem. # TYPE node_xfs_directory_operation_create_total counter node_xfs_directory_operation_create_total{device="sda1"} 2 # HELP node_xfs_directory_operation_getdents_total Number of times the directory getdents operation was performed for a filesystem. # TYPE node_xfs_directory_operation_getdents_total counter node_xfs_directory_operation_getdents_total{device="sda1"} 52 # HELP node_xfs_directory_operation_lookup_total Number of file name directory lookups which miss the operating system's directory name lookup cache. # TYPE node_xfs_directory_operation_lookup_total counter node_xfs_directory_operation_lookup_total{device="sda1"} 3 # HELP node_xfs_directory_operation_remove_total Number of times an existing directory entry was removed for a filesystem. # TYPE node_xfs_directory_operation_remove_total counter node_xfs_directory_operation_remove_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_blocks_allocated_total Number of blocks allocated for a filesystem. # TYPE node_xfs_extent_allocation_blocks_allocated_total counter node_xfs_extent_allocation_blocks_allocated_total{device="sda1"} 872 # HELP node_xfs_extent_allocation_blocks_freed_total Number of blocks freed for a filesystem. 
# TYPE node_xfs_extent_allocation_blocks_freed_total counter node_xfs_extent_allocation_blocks_freed_total{device="sda1"} 0 # HELP node_xfs_extent_allocation_extents_allocated_total Number of extents allocated for a filesystem. # TYPE node_xfs_extent_allocation_extents_allocated_total counter node_xfs_extent_allocation_extents_allocated_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_extents_freed_total Number of extents freed for a filesystem. # TYPE node_xfs_extent_allocation_extents_freed_total counter node_xfs_extent_allocation_extents_freed_total{device="sda1"} 0 # HELP node_xfs_inode_operation_attempts_total Number of times the OS looked for an XFS inode in the inode cache. # TYPE node_xfs_inode_operation_attempts_total counter node_xfs_inode_operation_attempts_total{device="sda1"} 5 # HELP node_xfs_inode_operation_attribute_changes_total Number of times the OS explicitly changed the attributes of an XFS inode. # TYPE node_xfs_inode_operation_attribute_changes_total counter node_xfs_inode_operation_attribute_changes_total{device="sda1"} 1 # HELP node_xfs_inode_operation_duplicates_total Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process. # TYPE node_xfs_inode_operation_duplicates_total counter node_xfs_inode_operation_duplicates_total{device="sda1"} 0 # HELP node_xfs_inode_operation_found_total Number of times the OS looked for and found an XFS inode in the inode cache. # TYPE node_xfs_inode_operation_found_total counter node_xfs_inode_operation_found_total{device="sda1"} 1 # HELP node_xfs_inode_operation_missed_total Number of times the OS looked for an XFS inode in the cache, but did not find it. # TYPE node_xfs_inode_operation_missed_total counter node_xfs_inode_operation_missed_total{device="sda1"} 4 # HELP node_xfs_inode_operation_reclaims_total Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose. # TYPE node_xfs_inode_operation_reclaims_total counter node_xfs_inode_operation_reclaims_total{device="sda1"} 0 # HELP node_xfs_inode_operation_recycled_total Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled. # TYPE node_xfs_inode_operation_recycled_total counter node_xfs_inode_operation_recycled_total{device="sda1"} 0 # HELP node_xfs_read_calls_total Number of read(2) system calls made to files in a filesystem. # TYPE node_xfs_read_calls_total counter node_xfs_read_calls_total{device="sda1"} 0 # HELP node_xfs_vnode_active_total Number of vnodes not on free lists for a filesystem. # TYPE node_xfs_vnode_active_total counter node_xfs_vnode_active_total{device="sda1"} 4 # HELP node_xfs_vnode_allocate_total Number of times vn_alloc called for a filesystem. # TYPE node_xfs_vnode_allocate_total counter node_xfs_vnode_allocate_total{device="sda1"} 0 # HELP node_xfs_vnode_get_total Number of times vn_get called for a filesystem. # TYPE node_xfs_vnode_get_total counter node_xfs_vnode_get_total{device="sda1"} 0 # HELP node_xfs_vnode_hold_total Number of times vn_hold called for a filesystem. # TYPE node_xfs_vnode_hold_total counter node_xfs_vnode_hold_total{device="sda1"} 0 # HELP node_xfs_vnode_reclaim_total Number of times vn_reclaim called for a filesystem. # TYPE node_xfs_vnode_reclaim_total counter node_xfs_vnode_reclaim_total{device="sda1"} 1 # HELP node_xfs_vnode_release_total Number of times vn_rele called for a filesystem. 
# TYPE node_xfs_vnode_release_total counter node_xfs_vnode_release_total{device="sda1"} 1 # HELP node_xfs_vnode_remove_total Number of times vn_remove called for a filesystem. # TYPE node_xfs_vnode_remove_total counter node_xfs_vnode_remove_total{device="sda1"} 1 # HELP node_xfs_write_calls_total Number of write(2) system calls made to files in a filesystem. # TYPE node_xfs_write_calls_total counter node_xfs_write_calls_total{device="sda1"} 28 # HELP node_zfs_abd_linear_cnt kstat.zfs.misc.abdstats.linear_cnt # TYPE node_zfs_abd_linear_cnt untyped node_zfs_abd_linear_cnt 62 # HELP node_zfs_abd_linear_data_size kstat.zfs.misc.abdstats.linear_data_size # TYPE node_zfs_abd_linear_data_size untyped node_zfs_abd_linear_data_size 223232 # HELP node_zfs_abd_scatter_chunk_waste kstat.zfs.misc.abdstats.scatter_chunk_waste # TYPE node_zfs_abd_scatter_chunk_waste untyped node_zfs_abd_scatter_chunk_waste 0 # HELP node_zfs_abd_scatter_cnt kstat.zfs.misc.abdstats.scatter_cnt # TYPE node_zfs_abd_scatter_cnt untyped node_zfs_abd_scatter_cnt 1 # HELP node_zfs_abd_scatter_data_size kstat.zfs.misc.abdstats.scatter_data_size # TYPE node_zfs_abd_scatter_data_size untyped node_zfs_abd_scatter_data_size 16384 # HELP node_zfs_abd_scatter_order_0 kstat.zfs.misc.abdstats.scatter_order_0 # TYPE node_zfs_abd_scatter_order_0 untyped node_zfs_abd_scatter_order_0 0 # HELP node_zfs_abd_scatter_order_1 kstat.zfs.misc.abdstats.scatter_order_1 # TYPE node_zfs_abd_scatter_order_1 untyped node_zfs_abd_scatter_order_1 0 # HELP node_zfs_abd_scatter_order_10 kstat.zfs.misc.abdstats.scatter_order_10 # TYPE node_zfs_abd_scatter_order_10 untyped node_zfs_abd_scatter_order_10 0 # HELP node_zfs_abd_scatter_order_2 kstat.zfs.misc.abdstats.scatter_order_2 # TYPE node_zfs_abd_scatter_order_2 untyped node_zfs_abd_scatter_order_2 1 # HELP node_zfs_abd_scatter_order_3 kstat.zfs.misc.abdstats.scatter_order_3 # TYPE node_zfs_abd_scatter_order_3 untyped node_zfs_abd_scatter_order_3 0 # HELP node_zfs_abd_scatter_order_4 kstat.zfs.misc.abdstats.scatter_order_4 # TYPE node_zfs_abd_scatter_order_4 untyped node_zfs_abd_scatter_order_4 0 # HELP node_zfs_abd_scatter_order_5 kstat.zfs.misc.abdstats.scatter_order_5 # TYPE node_zfs_abd_scatter_order_5 untyped node_zfs_abd_scatter_order_5 0 # HELP node_zfs_abd_scatter_order_6 kstat.zfs.misc.abdstats.scatter_order_6 # TYPE node_zfs_abd_scatter_order_6 untyped node_zfs_abd_scatter_order_6 0 # HELP node_zfs_abd_scatter_order_7 kstat.zfs.misc.abdstats.scatter_order_7 # TYPE node_zfs_abd_scatter_order_7 untyped node_zfs_abd_scatter_order_7 0 # HELP node_zfs_abd_scatter_order_8 kstat.zfs.misc.abdstats.scatter_order_8 # TYPE node_zfs_abd_scatter_order_8 untyped node_zfs_abd_scatter_order_8 0 # HELP node_zfs_abd_scatter_order_9 kstat.zfs.misc.abdstats.scatter_order_9 # TYPE node_zfs_abd_scatter_order_9 untyped node_zfs_abd_scatter_order_9 0 # HELP node_zfs_abd_scatter_page_alloc_retry kstat.zfs.misc.abdstats.scatter_page_alloc_retry # TYPE node_zfs_abd_scatter_page_alloc_retry untyped node_zfs_abd_scatter_page_alloc_retry 0 # HELP node_zfs_abd_scatter_page_multi_chunk kstat.zfs.misc.abdstats.scatter_page_multi_chunk # TYPE node_zfs_abd_scatter_page_multi_chunk untyped node_zfs_abd_scatter_page_multi_chunk 0 # HELP node_zfs_abd_scatter_page_multi_zone kstat.zfs.misc.abdstats.scatter_page_multi_zone # TYPE node_zfs_abd_scatter_page_multi_zone untyped node_zfs_abd_scatter_page_multi_zone 0 # HELP node_zfs_abd_scatter_sg_table_retry kstat.zfs.misc.abdstats.scatter_sg_table_retry # TYPE 
node_zfs_abd_scatter_sg_table_retry untyped node_zfs_abd_scatter_sg_table_retry 0 # HELP node_zfs_abd_struct_size kstat.zfs.misc.abdstats.struct_size # TYPE node_zfs_abd_struct_size untyped node_zfs_abd_struct_size 2520 # HELP node_zfs_arc_anon_evictable_data kstat.zfs.misc.arcstats.anon_evictable_data # TYPE node_zfs_arc_anon_evictable_data untyped node_zfs_arc_anon_evictable_data 0 # HELP node_zfs_arc_anon_evictable_metadata kstat.zfs.misc.arcstats.anon_evictable_metadata # TYPE node_zfs_arc_anon_evictable_metadata untyped node_zfs_arc_anon_evictable_metadata 0 # HELP node_zfs_arc_anon_size kstat.zfs.misc.arcstats.anon_size # TYPE node_zfs_arc_anon_size untyped node_zfs_arc_anon_size 1.91744e+06 # HELP node_zfs_arc_arc_loaned_bytes kstat.zfs.misc.arcstats.arc_loaned_bytes # TYPE node_zfs_arc_arc_loaned_bytes untyped node_zfs_arc_arc_loaned_bytes 0 # HELP node_zfs_arc_arc_meta_limit kstat.zfs.misc.arcstats.arc_meta_limit # TYPE node_zfs_arc_arc_meta_limit untyped node_zfs_arc_arc_meta_limit 6.275982336e+09 # HELP node_zfs_arc_arc_meta_max kstat.zfs.misc.arcstats.arc_meta_max # TYPE node_zfs_arc_arc_meta_max untyped node_zfs_arc_arc_meta_max 4.49286096e+08 # HELP node_zfs_arc_arc_meta_min kstat.zfs.misc.arcstats.arc_meta_min # TYPE node_zfs_arc_arc_meta_min untyped node_zfs_arc_arc_meta_min 1.6777216e+07 # HELP node_zfs_arc_arc_meta_used kstat.zfs.misc.arcstats.arc_meta_used # TYPE node_zfs_arc_arc_meta_used untyped node_zfs_arc_arc_meta_used 3.08103632e+08 # HELP node_zfs_arc_arc_need_free kstat.zfs.misc.arcstats.arc_need_free # TYPE node_zfs_arc_arc_need_free untyped node_zfs_arc_arc_need_free 0 # HELP node_zfs_arc_arc_no_grow kstat.zfs.misc.arcstats.arc_no_grow # TYPE node_zfs_arc_arc_no_grow untyped node_zfs_arc_arc_no_grow 0 # HELP node_zfs_arc_arc_prune kstat.zfs.misc.arcstats.arc_prune # TYPE node_zfs_arc_arc_prune untyped node_zfs_arc_arc_prune 0 # HELP node_zfs_arc_arc_sys_free kstat.zfs.misc.arcstats.arc_sys_free # TYPE node_zfs_arc_arc_sys_free untyped node_zfs_arc_arc_sys_free 2.61496832e+08 # HELP node_zfs_arc_arc_tempreserve kstat.zfs.misc.arcstats.arc_tempreserve # TYPE node_zfs_arc_arc_tempreserve untyped node_zfs_arc_arc_tempreserve 0 # HELP node_zfs_arc_c kstat.zfs.misc.arcstats.c # TYPE node_zfs_arc_c untyped node_zfs_arc_c 1.643208777e+09 # HELP node_zfs_arc_c_max kstat.zfs.misc.arcstats.c_max # TYPE node_zfs_arc_c_max untyped node_zfs_arc_c_max 8.367976448e+09 # HELP node_zfs_arc_c_min kstat.zfs.misc.arcstats.c_min # TYPE node_zfs_arc_c_min untyped node_zfs_arc_c_min 3.3554432e+07 # HELP node_zfs_arc_data_size kstat.zfs.misc.arcstats.data_size # TYPE node_zfs_arc_data_size untyped node_zfs_arc_data_size 1.29583616e+09 # HELP node_zfs_arc_deleted kstat.zfs.misc.arcstats.deleted # TYPE node_zfs_arc_deleted untyped node_zfs_arc_deleted 60403 # HELP node_zfs_arc_demand_data_hits kstat.zfs.misc.arcstats.demand_data_hits # TYPE node_zfs_arc_demand_data_hits untyped node_zfs_arc_demand_data_hits 7.221032e+06 # HELP node_zfs_arc_demand_data_misses kstat.zfs.misc.arcstats.demand_data_misses # TYPE node_zfs_arc_demand_data_misses untyped node_zfs_arc_demand_data_misses 73300 # HELP node_zfs_arc_demand_metadata_hits kstat.zfs.misc.arcstats.demand_metadata_hits # TYPE node_zfs_arc_demand_metadata_hits untyped node_zfs_arc_demand_metadata_hits 1.464353e+06 # HELP node_zfs_arc_demand_metadata_misses kstat.zfs.misc.arcstats.demand_metadata_misses # TYPE node_zfs_arc_demand_metadata_misses untyped node_zfs_arc_demand_metadata_misses 498170 # HELP node_zfs_arc_duplicate_buffers 
kstat.zfs.misc.arcstats.duplicate_buffers # TYPE node_zfs_arc_duplicate_buffers untyped node_zfs_arc_duplicate_buffers 0 # HELP node_zfs_arc_duplicate_buffers_size kstat.zfs.misc.arcstats.duplicate_buffers_size # TYPE node_zfs_arc_duplicate_buffers_size untyped node_zfs_arc_duplicate_buffers_size 0 # HELP node_zfs_arc_duplicate_reads kstat.zfs.misc.arcstats.duplicate_reads # TYPE node_zfs_arc_duplicate_reads untyped node_zfs_arc_duplicate_reads 0 # HELP node_zfs_arc_evict_l2_cached kstat.zfs.misc.arcstats.evict_l2_cached # TYPE node_zfs_arc_evict_l2_cached untyped node_zfs_arc_evict_l2_cached 0 # HELP node_zfs_arc_evict_l2_eligible kstat.zfs.misc.arcstats.evict_l2_eligible # TYPE node_zfs_arc_evict_l2_eligible untyped node_zfs_arc_evict_l2_eligible 8.99251456e+09 # HELP node_zfs_arc_evict_l2_ineligible kstat.zfs.misc.arcstats.evict_l2_ineligible # TYPE node_zfs_arc_evict_l2_ineligible untyped node_zfs_arc_evict_l2_ineligible 9.92552448e+08 # HELP node_zfs_arc_evict_l2_skip kstat.zfs.misc.arcstats.evict_l2_skip # TYPE node_zfs_arc_evict_l2_skip untyped node_zfs_arc_evict_l2_skip 0 # HELP node_zfs_arc_evict_not_enough kstat.zfs.misc.arcstats.evict_not_enough # TYPE node_zfs_arc_evict_not_enough untyped node_zfs_arc_evict_not_enough 680 # HELP node_zfs_arc_evict_skip kstat.zfs.misc.arcstats.evict_skip # TYPE node_zfs_arc_evict_skip untyped node_zfs_arc_evict_skip 2.265729e+06 # HELP node_zfs_arc_hash_chain_max kstat.zfs.misc.arcstats.hash_chain_max # TYPE node_zfs_arc_hash_chain_max untyped node_zfs_arc_hash_chain_max 3 # HELP node_zfs_arc_hash_chains kstat.zfs.misc.arcstats.hash_chains # TYPE node_zfs_arc_hash_chains untyped node_zfs_arc_hash_chains 412 # HELP node_zfs_arc_hash_collisions kstat.zfs.misc.arcstats.hash_collisions # TYPE node_zfs_arc_hash_collisions untyped node_zfs_arc_hash_collisions 50564 # HELP node_zfs_arc_hash_elements kstat.zfs.misc.arcstats.hash_elements # TYPE node_zfs_arc_hash_elements untyped node_zfs_arc_hash_elements 42359 # HELP node_zfs_arc_hash_elements_max kstat.zfs.misc.arcstats.hash_elements_max # TYPE node_zfs_arc_hash_elements_max untyped node_zfs_arc_hash_elements_max 88245 # HELP node_zfs_arc_hdr_size kstat.zfs.misc.arcstats.hdr_size # TYPE node_zfs_arc_hdr_size untyped node_zfs_arc_hdr_size 1.636108e+07 # HELP node_zfs_arc_hits kstat.zfs.misc.arcstats.hits # TYPE node_zfs_arc_hits untyped node_zfs_arc_hits 8.772612e+06 # HELP node_zfs_arc_l2_abort_lowmem kstat.zfs.misc.arcstats.l2_abort_lowmem # TYPE node_zfs_arc_l2_abort_lowmem untyped node_zfs_arc_l2_abort_lowmem 0 # HELP node_zfs_arc_l2_asize kstat.zfs.misc.arcstats.l2_asize # TYPE node_zfs_arc_l2_asize untyped node_zfs_arc_l2_asize 0 # HELP node_zfs_arc_l2_cdata_free_on_write kstat.zfs.misc.arcstats.l2_cdata_free_on_write # TYPE node_zfs_arc_l2_cdata_free_on_write untyped node_zfs_arc_l2_cdata_free_on_write 0 # HELP node_zfs_arc_l2_cksum_bad kstat.zfs.misc.arcstats.l2_cksum_bad # TYPE node_zfs_arc_l2_cksum_bad untyped node_zfs_arc_l2_cksum_bad 0 # HELP node_zfs_arc_l2_compress_failures kstat.zfs.misc.arcstats.l2_compress_failures # TYPE node_zfs_arc_l2_compress_failures untyped node_zfs_arc_l2_compress_failures 0 # HELP node_zfs_arc_l2_compress_successes kstat.zfs.misc.arcstats.l2_compress_successes # TYPE node_zfs_arc_l2_compress_successes untyped node_zfs_arc_l2_compress_successes 0 # HELP node_zfs_arc_l2_compress_zeros kstat.zfs.misc.arcstats.l2_compress_zeros # TYPE node_zfs_arc_l2_compress_zeros untyped node_zfs_arc_l2_compress_zeros 0 # HELP node_zfs_arc_l2_evict_l1cached 
kstat.zfs.misc.arcstats.l2_evict_l1cached # TYPE node_zfs_arc_l2_evict_l1cached untyped node_zfs_arc_l2_evict_l1cached 0 # HELP node_zfs_arc_l2_evict_lock_retry kstat.zfs.misc.arcstats.l2_evict_lock_retry # TYPE node_zfs_arc_l2_evict_lock_retry untyped node_zfs_arc_l2_evict_lock_retry 0 # HELP node_zfs_arc_l2_evict_reading kstat.zfs.misc.arcstats.l2_evict_reading # TYPE node_zfs_arc_l2_evict_reading untyped node_zfs_arc_l2_evict_reading 0 # HELP node_zfs_arc_l2_feeds kstat.zfs.misc.arcstats.l2_feeds # TYPE node_zfs_arc_l2_feeds untyped node_zfs_arc_l2_feeds 0 # HELP node_zfs_arc_l2_free_on_write kstat.zfs.misc.arcstats.l2_free_on_write # TYPE node_zfs_arc_l2_free_on_write untyped node_zfs_arc_l2_free_on_write 0 # HELP node_zfs_arc_l2_hdr_size kstat.zfs.misc.arcstats.l2_hdr_size # TYPE node_zfs_arc_l2_hdr_size untyped node_zfs_arc_l2_hdr_size 0 # HELP node_zfs_arc_l2_hits kstat.zfs.misc.arcstats.l2_hits # TYPE node_zfs_arc_l2_hits untyped node_zfs_arc_l2_hits 0 # HELP node_zfs_arc_l2_io_error kstat.zfs.misc.arcstats.l2_io_error # TYPE node_zfs_arc_l2_io_error untyped node_zfs_arc_l2_io_error 0 # HELP node_zfs_arc_l2_misses kstat.zfs.misc.arcstats.l2_misses # TYPE node_zfs_arc_l2_misses untyped node_zfs_arc_l2_misses 0 # HELP node_zfs_arc_l2_read_bytes kstat.zfs.misc.arcstats.l2_read_bytes # TYPE node_zfs_arc_l2_read_bytes untyped node_zfs_arc_l2_read_bytes 0 # HELP node_zfs_arc_l2_rw_clash kstat.zfs.misc.arcstats.l2_rw_clash # TYPE node_zfs_arc_l2_rw_clash untyped node_zfs_arc_l2_rw_clash 0 # HELP node_zfs_arc_l2_size kstat.zfs.misc.arcstats.l2_size # TYPE node_zfs_arc_l2_size untyped node_zfs_arc_l2_size 0 # HELP node_zfs_arc_l2_write_bytes kstat.zfs.misc.arcstats.l2_write_bytes # TYPE node_zfs_arc_l2_write_bytes untyped node_zfs_arc_l2_write_bytes 0 # HELP node_zfs_arc_l2_writes_done kstat.zfs.misc.arcstats.l2_writes_done # TYPE node_zfs_arc_l2_writes_done untyped node_zfs_arc_l2_writes_done 0 # HELP node_zfs_arc_l2_writes_error kstat.zfs.misc.arcstats.l2_writes_error # TYPE node_zfs_arc_l2_writes_error untyped node_zfs_arc_l2_writes_error 0 # HELP node_zfs_arc_l2_writes_lock_retry kstat.zfs.misc.arcstats.l2_writes_lock_retry # TYPE node_zfs_arc_l2_writes_lock_retry untyped node_zfs_arc_l2_writes_lock_retry 0 # HELP node_zfs_arc_l2_writes_sent kstat.zfs.misc.arcstats.l2_writes_sent # TYPE node_zfs_arc_l2_writes_sent untyped node_zfs_arc_l2_writes_sent 0 # HELP node_zfs_arc_memory_direct_count kstat.zfs.misc.arcstats.memory_direct_count # TYPE node_zfs_arc_memory_direct_count untyped node_zfs_arc_memory_direct_count 542 # HELP node_zfs_arc_memory_indirect_count kstat.zfs.misc.arcstats.memory_indirect_count # TYPE node_zfs_arc_memory_indirect_count untyped node_zfs_arc_memory_indirect_count 3006 # HELP node_zfs_arc_memory_throttle_count kstat.zfs.misc.arcstats.memory_throttle_count # TYPE node_zfs_arc_memory_throttle_count untyped node_zfs_arc_memory_throttle_count 0 # HELP node_zfs_arc_metadata_size kstat.zfs.misc.arcstats.metadata_size # TYPE node_zfs_arc_metadata_size untyped node_zfs_arc_metadata_size 1.7529856e+08 # HELP node_zfs_arc_mfu_evictable_data kstat.zfs.misc.arcstats.mfu_evictable_data # TYPE node_zfs_arc_mfu_evictable_data untyped node_zfs_arc_mfu_evictable_data 1.017613824e+09 # HELP node_zfs_arc_mfu_evictable_metadata kstat.zfs.misc.arcstats.mfu_evictable_metadata # TYPE node_zfs_arc_mfu_evictable_metadata untyped node_zfs_arc_mfu_evictable_metadata 9.163776e+06 # HELP node_zfs_arc_mfu_ghost_evictable_data kstat.zfs.misc.arcstats.mfu_ghost_evictable_data # TYPE 
node_zfs_arc_mfu_ghost_evictable_data untyped node_zfs_arc_mfu_ghost_evictable_data 9.6731136e+07 # HELP node_zfs_arc_mfu_ghost_evictable_metadata kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata # TYPE node_zfs_arc_mfu_ghost_evictable_metadata untyped node_zfs_arc_mfu_ghost_evictable_metadata 8.205312e+06 # HELP node_zfs_arc_mfu_ghost_hits kstat.zfs.misc.arcstats.mfu_ghost_hits # TYPE node_zfs_arc_mfu_ghost_hits untyped node_zfs_arc_mfu_ghost_hits 821 # HELP node_zfs_arc_mfu_ghost_size kstat.zfs.misc.arcstats.mfu_ghost_size # TYPE node_zfs_arc_mfu_ghost_size untyped node_zfs_arc_mfu_ghost_size 1.04936448e+08 # HELP node_zfs_arc_mfu_hits kstat.zfs.misc.arcstats.mfu_hits # TYPE node_zfs_arc_mfu_hits untyped node_zfs_arc_mfu_hits 7.829854e+06 # HELP node_zfs_arc_mfu_size kstat.zfs.misc.arcstats.mfu_size # TYPE node_zfs_arc_mfu_size untyped node_zfs_arc_mfu_size 1.066623488e+09 # HELP node_zfs_arc_misses kstat.zfs.misc.arcstats.misses # TYPE node_zfs_arc_misses untyped node_zfs_arc_misses 604635 # HELP node_zfs_arc_mru_evictable_data kstat.zfs.misc.arcstats.mru_evictable_data # TYPE node_zfs_arc_mru_evictable_data untyped node_zfs_arc_mru_evictable_data 2.78091264e+08 # HELP node_zfs_arc_mru_evictable_metadata kstat.zfs.misc.arcstats.mru_evictable_metadata # TYPE node_zfs_arc_mru_evictable_metadata untyped node_zfs_arc_mru_evictable_metadata 1.8606592e+07 # HELP node_zfs_arc_mru_ghost_evictable_data kstat.zfs.misc.arcstats.mru_ghost_evictable_data # TYPE node_zfs_arc_mru_ghost_evictable_data untyped node_zfs_arc_mru_ghost_evictable_data 8.83765248e+08 # HELP node_zfs_arc_mru_ghost_evictable_metadata kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata # TYPE node_zfs_arc_mru_ghost_evictable_metadata untyped node_zfs_arc_mru_ghost_evictable_metadata 1.1596288e+08 # HELP node_zfs_arc_mru_ghost_hits kstat.zfs.misc.arcstats.mru_ghost_hits # TYPE node_zfs_arc_mru_ghost_hits untyped node_zfs_arc_mru_ghost_hits 21100 # HELP node_zfs_arc_mru_ghost_size kstat.zfs.misc.arcstats.mru_ghost_size # TYPE node_zfs_arc_mru_ghost_size untyped node_zfs_arc_mru_ghost_size 9.99728128e+08 # HELP node_zfs_arc_mru_hits kstat.zfs.misc.arcstats.mru_hits # TYPE node_zfs_arc_mru_hits untyped node_zfs_arc_mru_hits 855535 # HELP node_zfs_arc_mru_size kstat.zfs.misc.arcstats.mru_size # TYPE node_zfs_arc_mru_size untyped node_zfs_arc_mru_size 4.02593792e+08 # HELP node_zfs_arc_mutex_miss kstat.zfs.misc.arcstats.mutex_miss # TYPE node_zfs_arc_mutex_miss untyped node_zfs_arc_mutex_miss 2 # HELP node_zfs_arc_other_size kstat.zfs.misc.arcstats.other_size # TYPE node_zfs_arc_other_size untyped node_zfs_arc_other_size 1.16443992e+08 # HELP node_zfs_arc_p kstat.zfs.misc.arcstats.p # TYPE node_zfs_arc_p untyped node_zfs_arc_p 5.16395305e+08 # HELP node_zfs_arc_prefetch_data_hits kstat.zfs.misc.arcstats.prefetch_data_hits # TYPE node_zfs_arc_prefetch_data_hits untyped node_zfs_arc_prefetch_data_hits 3615 # HELP node_zfs_arc_prefetch_data_misses kstat.zfs.misc.arcstats.prefetch_data_misses # TYPE node_zfs_arc_prefetch_data_misses untyped node_zfs_arc_prefetch_data_misses 17094 # HELP node_zfs_arc_prefetch_metadata_hits kstat.zfs.misc.arcstats.prefetch_metadata_hits # TYPE node_zfs_arc_prefetch_metadata_hits untyped node_zfs_arc_prefetch_metadata_hits 83612 # HELP node_zfs_arc_prefetch_metadata_misses kstat.zfs.misc.arcstats.prefetch_metadata_misses # TYPE node_zfs_arc_prefetch_metadata_misses untyped node_zfs_arc_prefetch_metadata_misses 16071 # HELP node_zfs_arc_size kstat.zfs.misc.arcstats.size # TYPE node_zfs_arc_size untyped 
node_zfs_arc_size 1.603939792e+09 # HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbufstats.dbuf_cache_count # TYPE node_zfs_dbuf_dbuf_cache_count untyped node_zfs_dbuf_dbuf_cache_count 27 # HELP node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_hiwater_bytes # TYPE node_zfs_dbuf_dbuf_cache_hiwater_bytes untyped node_zfs_dbuf_dbuf_cache_hiwater_bytes 6.9117804e+07 # HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbufstats.dbuf_cache_level_0 # TYPE node_zfs_dbuf_dbuf_cache_level_0 untyped node_zfs_dbuf_dbuf_cache_level_0 27 # HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_0_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_0_bytes untyped node_zfs_dbuf_dbuf_cache_level_0_bytes 302080 # HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbufstats.dbuf_cache_level_1 # TYPE node_zfs_dbuf_dbuf_cache_level_1 untyped node_zfs_dbuf_dbuf_cache_level_1 0 # HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbufstats.dbuf_cache_level_10 # TYPE node_zfs_dbuf_dbuf_cache_level_10 untyped node_zfs_dbuf_dbuf_cache_level_10 0 # HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_10_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_10_bytes untyped node_zfs_dbuf_dbuf_cache_level_10_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbufstats.dbuf_cache_level_11 # TYPE node_zfs_dbuf_dbuf_cache_level_11 untyped node_zfs_dbuf_dbuf_cache_level_11 0 # HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_11_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_11_bytes untyped node_zfs_dbuf_dbuf_cache_level_11_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_1_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_1_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_1_bytes untyped node_zfs_dbuf_dbuf_cache_level_1_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbufstats.dbuf_cache_level_2 # TYPE node_zfs_dbuf_dbuf_cache_level_2 untyped node_zfs_dbuf_dbuf_cache_level_2 0 # HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_2_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_2_bytes untyped node_zfs_dbuf_dbuf_cache_level_2_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbufstats.dbuf_cache_level_3 # TYPE node_zfs_dbuf_dbuf_cache_level_3 untyped node_zfs_dbuf_dbuf_cache_level_3 0 # HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_3_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_3_bytes untyped node_zfs_dbuf_dbuf_cache_level_3_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbufstats.dbuf_cache_level_4 # TYPE node_zfs_dbuf_dbuf_cache_level_4 untyped node_zfs_dbuf_dbuf_cache_level_4 0 # HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_4_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_4_bytes untyped node_zfs_dbuf_dbuf_cache_level_4_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbufstats.dbuf_cache_level_5 # TYPE node_zfs_dbuf_dbuf_cache_level_5 untyped node_zfs_dbuf_dbuf_cache_level_5 0 # HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_5_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_5_bytes untyped node_zfs_dbuf_dbuf_cache_level_5_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbufstats.dbuf_cache_level_6 # TYPE node_zfs_dbuf_dbuf_cache_level_6 untyped node_zfs_dbuf_dbuf_cache_level_6 0 # HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_6_bytes # TYPE 
node_zfs_dbuf_dbuf_cache_level_6_bytes untyped node_zfs_dbuf_dbuf_cache_level_6_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbufstats.dbuf_cache_level_7 # TYPE node_zfs_dbuf_dbuf_cache_level_7 untyped node_zfs_dbuf_dbuf_cache_level_7 0 # HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_7_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_7_bytes untyped node_zfs_dbuf_dbuf_cache_level_7_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_8 kstat.zfs.misc.dbufstats.dbuf_cache_level_8 # TYPE node_zfs_dbuf_dbuf_cache_level_8 untyped node_zfs_dbuf_dbuf_cache_level_8 0 # HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_8_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_8_bytes untyped node_zfs_dbuf_dbuf_cache_level_8_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbufstats.dbuf_cache_level_9 # TYPE node_zfs_dbuf_dbuf_cache_level_9 untyped node_zfs_dbuf_dbuf_cache_level_9 0 # HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_9_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_9_bytes untyped node_zfs_dbuf_dbuf_cache_level_9_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_lowater_bytes # TYPE node_zfs_dbuf_dbuf_cache_lowater_bytes untyped node_zfs_dbuf_dbuf_cache_lowater_bytes 5.6550932e+07 # HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbufstats.dbuf_cache_max_bytes # TYPE node_zfs_dbuf_dbuf_cache_max_bytes untyped node_zfs_dbuf_dbuf_cache_max_bytes 6.2834368e+07 # HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbufstats.dbuf_cache_size # TYPE node_zfs_dbuf_dbuf_cache_size untyped node_zfs_dbuf_dbuf_cache_size 302080 # HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbufstats.dbuf_cache_size_max # TYPE node_zfs_dbuf_dbuf_cache_size_max untyped node_zfs_dbuf_dbuf_cache_size_max 394240 # HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbufstats.dbuf_cache_total_evicts # TYPE node_zfs_dbuf_dbuf_cache_total_evicts untyped node_zfs_dbuf_dbuf_cache_total_evicts 0 # HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbufstats.hash_chain_max # TYPE node_zfs_dbuf_hash_chain_max untyped node_zfs_dbuf_hash_chain_max 0 # HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbufstats.hash_chains # TYPE node_zfs_dbuf_hash_chains untyped node_zfs_dbuf_hash_chains 0 # HELP node_zfs_dbuf_hash_collisions kstat.zfs.misc.dbufstats.hash_collisions # TYPE node_zfs_dbuf_hash_collisions untyped node_zfs_dbuf_hash_collisions 0 # HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbufstats.hash_dbuf_level_0 # TYPE node_zfs_dbuf_hash_dbuf_level_0 untyped node_zfs_dbuf_hash_dbuf_level_0 37 # HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_0_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_0_bytes untyped node_zfs_dbuf_hash_dbuf_level_0_bytes 465920 # HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbufstats.hash_dbuf_level_1 # TYPE node_zfs_dbuf_hash_dbuf_level_1 untyped node_zfs_dbuf_hash_dbuf_level_1 10 # HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbufstats.hash_dbuf_level_10 # TYPE node_zfs_dbuf_hash_dbuf_level_10 untyped node_zfs_dbuf_hash_dbuf_level_10 0 # HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_10_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_10_bytes untyped node_zfs_dbuf_hash_dbuf_level_10_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbufstats.hash_dbuf_level_11 # TYPE node_zfs_dbuf_hash_dbuf_level_11 untyped 
node_zfs_dbuf_hash_dbuf_level_11 0 # HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_11_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_11_bytes untyped node_zfs_dbuf_hash_dbuf_level_11_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_1_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_1_bytes untyped node_zfs_dbuf_hash_dbuf_level_1_bytes 1.31072e+06 # HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbufstats.hash_dbuf_level_2 # TYPE node_zfs_dbuf_hash_dbuf_level_2 untyped node_zfs_dbuf_hash_dbuf_level_2 2 # HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_2_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_2_bytes untyped node_zfs_dbuf_hash_dbuf_level_2_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_3 kstat.zfs.misc.dbufstats.hash_dbuf_level_3 # TYPE node_zfs_dbuf_hash_dbuf_level_3 untyped node_zfs_dbuf_hash_dbuf_level_3 2 # HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_3_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_3_bytes untyped node_zfs_dbuf_hash_dbuf_level_3_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbufstats.hash_dbuf_level_4 # TYPE node_zfs_dbuf_hash_dbuf_level_4 untyped node_zfs_dbuf_hash_dbuf_level_4 2 # HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_4_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_4_bytes untyped node_zfs_dbuf_hash_dbuf_level_4_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbufstats.hash_dbuf_level_5 # TYPE node_zfs_dbuf_hash_dbuf_level_5 untyped node_zfs_dbuf_hash_dbuf_level_5 2 # HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_5_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_5_bytes untyped node_zfs_dbuf_hash_dbuf_level_5_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbufstats.hash_dbuf_level_6 # TYPE node_zfs_dbuf_hash_dbuf_level_6 untyped node_zfs_dbuf_hash_dbuf_level_6 0 # HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_6_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_6_bytes untyped node_zfs_dbuf_hash_dbuf_level_6_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbufstats.hash_dbuf_level_7 # TYPE node_zfs_dbuf_hash_dbuf_level_7 untyped node_zfs_dbuf_hash_dbuf_level_7 0 # HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_7_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_7_bytes untyped node_zfs_dbuf_hash_dbuf_level_7_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbufstats.hash_dbuf_level_8 # TYPE node_zfs_dbuf_hash_dbuf_level_8 untyped node_zfs_dbuf_hash_dbuf_level_8 0 # HELP node_zfs_dbuf_hash_dbuf_level_8_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_8_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_8_bytes untyped node_zfs_dbuf_hash_dbuf_level_8_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbufstats.hash_dbuf_level_9 # TYPE node_zfs_dbuf_hash_dbuf_level_9 untyped node_zfs_dbuf_hash_dbuf_level_9 0 # HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_9_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_9_bytes untyped node_zfs_dbuf_hash_dbuf_level_9_bytes 0 # HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbufstats.hash_elements # TYPE node_zfs_dbuf_hash_elements untyped node_zfs_dbuf_hash_elements 55 # HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbufstats.hash_elements_max # TYPE node_zfs_dbuf_hash_elements_max untyped node_zfs_dbuf_hash_elements_max 55 
# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbufstats.hash_hits # TYPE node_zfs_dbuf_hash_hits untyped node_zfs_dbuf_hash_hits 108807 # HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbufstats.hash_insert_race # TYPE node_zfs_dbuf_hash_insert_race untyped node_zfs_dbuf_hash_insert_race 0 # HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbufstats.hash_misses # TYPE node_zfs_dbuf_hash_misses untyped node_zfs_dbuf_hash_misses 1851 # HELP node_zfs_dmu_tx_dmu_tx_assigned kstat.zfs.misc.dmu_tx.dmu_tx_assigned # TYPE node_zfs_dmu_tx_dmu_tx_assigned untyped node_zfs_dmu_tx_dmu_tx_assigned 3.532844e+06 # HELP node_zfs_dmu_tx_dmu_tx_delay kstat.zfs.misc.dmu_tx.dmu_tx_delay # TYPE node_zfs_dmu_tx_dmu_tx_delay untyped node_zfs_dmu_tx_dmu_tx_delay 0 # HELP node_zfs_dmu_tx_dmu_tx_dirty_delay kstat.zfs.misc.dmu_tx.dmu_tx_dirty_delay # TYPE node_zfs_dmu_tx_dmu_tx_dirty_delay untyped node_zfs_dmu_tx_dmu_tx_dirty_delay 0 # HELP node_zfs_dmu_tx_dmu_tx_dirty_over_max kstat.zfs.misc.dmu_tx.dmu_tx_dirty_over_max # TYPE node_zfs_dmu_tx_dmu_tx_dirty_over_max untyped node_zfs_dmu_tx_dmu_tx_dirty_over_max 0 # HELP node_zfs_dmu_tx_dmu_tx_dirty_throttle kstat.zfs.misc.dmu_tx.dmu_tx_dirty_throttle # TYPE node_zfs_dmu_tx_dmu_tx_dirty_throttle untyped node_zfs_dmu_tx_dmu_tx_dirty_throttle 0 # HELP node_zfs_dmu_tx_dmu_tx_error kstat.zfs.misc.dmu_tx.dmu_tx_error # TYPE node_zfs_dmu_tx_dmu_tx_error untyped node_zfs_dmu_tx_dmu_tx_error 0 # HELP node_zfs_dmu_tx_dmu_tx_group kstat.zfs.misc.dmu_tx.dmu_tx_group # TYPE node_zfs_dmu_tx_dmu_tx_group untyped node_zfs_dmu_tx_dmu_tx_group 0 # HELP node_zfs_dmu_tx_dmu_tx_memory_reclaim kstat.zfs.misc.dmu_tx.dmu_tx_memory_reclaim # TYPE node_zfs_dmu_tx_dmu_tx_memory_reclaim untyped node_zfs_dmu_tx_dmu_tx_memory_reclaim 0 # HELP node_zfs_dmu_tx_dmu_tx_memory_reserve kstat.zfs.misc.dmu_tx.dmu_tx_memory_reserve # TYPE node_zfs_dmu_tx_dmu_tx_memory_reserve untyped node_zfs_dmu_tx_dmu_tx_memory_reserve 0 # HELP node_zfs_dmu_tx_dmu_tx_quota kstat.zfs.misc.dmu_tx.dmu_tx_quota # TYPE node_zfs_dmu_tx_dmu_tx_quota untyped node_zfs_dmu_tx_dmu_tx_quota 0 # HELP node_zfs_dmu_tx_dmu_tx_suspended kstat.zfs.misc.dmu_tx.dmu_tx_suspended # TYPE node_zfs_dmu_tx_dmu_tx_suspended untyped node_zfs_dmu_tx_dmu_tx_suspended 0 # HELP node_zfs_dnode_dnode_alloc_next_block kstat.zfs.misc.dnodestats.dnode_alloc_next_block # TYPE node_zfs_dnode_dnode_alloc_next_block untyped node_zfs_dnode_dnode_alloc_next_block 0 # HELP node_zfs_dnode_dnode_alloc_next_chunk kstat.zfs.misc.dnodestats.dnode_alloc_next_chunk # TYPE node_zfs_dnode_dnode_alloc_next_chunk untyped node_zfs_dnode_dnode_alloc_next_chunk 0 # HELP node_zfs_dnode_dnode_alloc_race kstat.zfs.misc.dnodestats.dnode_alloc_race # TYPE node_zfs_dnode_dnode_alloc_race untyped node_zfs_dnode_dnode_alloc_race 0 # HELP node_zfs_dnode_dnode_allocate kstat.zfs.misc.dnodestats.dnode_allocate # TYPE node_zfs_dnode_dnode_allocate untyped node_zfs_dnode_dnode_allocate 0 # HELP node_zfs_dnode_dnode_buf_evict kstat.zfs.misc.dnodestats.dnode_buf_evict # TYPE node_zfs_dnode_dnode_buf_evict untyped node_zfs_dnode_dnode_buf_evict 17 # HELP node_zfs_dnode_dnode_hold_alloc_hits kstat.zfs.misc.dnodestats.dnode_hold_alloc_hits # TYPE node_zfs_dnode_dnode_hold_alloc_hits untyped node_zfs_dnode_dnode_hold_alloc_hits 37617 # HELP node_zfs_dnode_dnode_hold_alloc_interior kstat.zfs.misc.dnodestats.dnode_hold_alloc_interior # TYPE node_zfs_dnode_dnode_hold_alloc_interior untyped node_zfs_dnode_dnode_hold_alloc_interior 0 # HELP node_zfs_dnode_dnode_hold_alloc_lock_misses 
kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_misses # TYPE node_zfs_dnode_dnode_hold_alloc_lock_misses untyped node_zfs_dnode_dnode_hold_alloc_lock_misses 0 # HELP node_zfs_dnode_dnode_hold_alloc_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_retry # TYPE node_zfs_dnode_dnode_hold_alloc_lock_retry untyped node_zfs_dnode_dnode_hold_alloc_lock_retry 0 # HELP node_zfs_dnode_dnode_hold_alloc_misses kstat.zfs.misc.dnodestats.dnode_hold_alloc_misses # TYPE node_zfs_dnode_dnode_hold_alloc_misses untyped node_zfs_dnode_dnode_hold_alloc_misses 0 # HELP node_zfs_dnode_dnode_hold_alloc_type_none kstat.zfs.misc.dnodestats.dnode_hold_alloc_type_none # TYPE node_zfs_dnode_dnode_hold_alloc_type_none untyped node_zfs_dnode_dnode_hold_alloc_type_none 0 # HELP node_zfs_dnode_dnode_hold_dbuf_hold kstat.zfs.misc.dnodestats.dnode_hold_dbuf_hold # TYPE node_zfs_dnode_dnode_hold_dbuf_hold untyped node_zfs_dnode_dnode_hold_dbuf_hold 0 # HELP node_zfs_dnode_dnode_hold_dbuf_read kstat.zfs.misc.dnodestats.dnode_hold_dbuf_read # TYPE node_zfs_dnode_dnode_hold_dbuf_read untyped node_zfs_dnode_dnode_hold_dbuf_read 0 # HELP node_zfs_dnode_dnode_hold_free_hits kstat.zfs.misc.dnodestats.dnode_hold_free_hits # TYPE node_zfs_dnode_dnode_hold_free_hits untyped node_zfs_dnode_dnode_hold_free_hits 0 # HELP node_zfs_dnode_dnode_hold_free_lock_misses kstat.zfs.misc.dnodestats.dnode_hold_free_lock_misses # TYPE node_zfs_dnode_dnode_hold_free_lock_misses untyped node_zfs_dnode_dnode_hold_free_lock_misses 0 # HELP node_zfs_dnode_dnode_hold_free_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_free_lock_retry # TYPE node_zfs_dnode_dnode_hold_free_lock_retry untyped node_zfs_dnode_dnode_hold_free_lock_retry 0 # HELP node_zfs_dnode_dnode_hold_free_misses kstat.zfs.misc.dnodestats.dnode_hold_free_misses # TYPE node_zfs_dnode_dnode_hold_free_misses untyped node_zfs_dnode_dnode_hold_free_misses 0 # HELP node_zfs_dnode_dnode_hold_free_overflow kstat.zfs.misc.dnodestats.dnode_hold_free_overflow # TYPE node_zfs_dnode_dnode_hold_free_overflow untyped node_zfs_dnode_dnode_hold_free_overflow 0 # HELP node_zfs_dnode_dnode_hold_free_refcount kstat.zfs.misc.dnodestats.dnode_hold_free_refcount # TYPE node_zfs_dnode_dnode_hold_free_refcount untyped node_zfs_dnode_dnode_hold_free_refcount 0 # HELP node_zfs_dnode_dnode_hold_free_txg kstat.zfs.misc.dnodestats.dnode_hold_free_txg # TYPE node_zfs_dnode_dnode_hold_free_txg untyped node_zfs_dnode_dnode_hold_free_txg 0 # HELP node_zfs_dnode_dnode_move_active kstat.zfs.misc.dnodestats.dnode_move_active # TYPE node_zfs_dnode_dnode_move_active untyped node_zfs_dnode_dnode_move_active 0 # HELP node_zfs_dnode_dnode_move_handle kstat.zfs.misc.dnodestats.dnode_move_handle # TYPE node_zfs_dnode_dnode_move_handle untyped node_zfs_dnode_dnode_move_handle 0 # HELP node_zfs_dnode_dnode_move_invalid kstat.zfs.misc.dnodestats.dnode_move_invalid # TYPE node_zfs_dnode_dnode_move_invalid untyped node_zfs_dnode_dnode_move_invalid 0 # HELP node_zfs_dnode_dnode_move_recheck1 kstat.zfs.misc.dnodestats.dnode_move_recheck1 # TYPE node_zfs_dnode_dnode_move_recheck1 untyped node_zfs_dnode_dnode_move_recheck1 0 # HELP node_zfs_dnode_dnode_move_recheck2 kstat.zfs.misc.dnodestats.dnode_move_recheck2 # TYPE node_zfs_dnode_dnode_move_recheck2 untyped node_zfs_dnode_dnode_move_recheck2 0 # HELP node_zfs_dnode_dnode_move_rwlock kstat.zfs.misc.dnodestats.dnode_move_rwlock # TYPE node_zfs_dnode_dnode_move_rwlock untyped node_zfs_dnode_dnode_move_rwlock 0 # HELP node_zfs_dnode_dnode_move_special 
kstat.zfs.misc.dnodestats.dnode_move_special # TYPE node_zfs_dnode_dnode_move_special untyped node_zfs_dnode_dnode_move_special 0 # HELP node_zfs_dnode_dnode_reallocate kstat.zfs.misc.dnodestats.dnode_reallocate # TYPE node_zfs_dnode_dnode_reallocate untyped node_zfs_dnode_dnode_reallocate 0 # HELP node_zfs_fm_erpt_dropped kstat.zfs.misc.fm.erpt-dropped # TYPE node_zfs_fm_erpt_dropped untyped node_zfs_fm_erpt_dropped 18 # HELP node_zfs_fm_erpt_set_failed kstat.zfs.misc.fm.erpt-set-failed # TYPE node_zfs_fm_erpt_set_failed untyped node_zfs_fm_erpt_set_failed 0 # HELP node_zfs_fm_fmri_set_failed kstat.zfs.misc.fm.fmri-set-failed # TYPE node_zfs_fm_fmri_set_failed untyped node_zfs_fm_fmri_set_failed 0 # HELP node_zfs_fm_payload_set_failed kstat.zfs.misc.fm.payload-set-failed # TYPE node_zfs_fm_payload_set_failed untyped node_zfs_fm_payload_set_failed 0 # HELP node_zfs_vdev_cache_delegations kstat.zfs.misc.vdev_cache_stats.delegations # TYPE node_zfs_vdev_cache_delegations untyped node_zfs_vdev_cache_delegations 40 # HELP node_zfs_vdev_cache_hits kstat.zfs.misc.vdev_cache_stats.hits # TYPE node_zfs_vdev_cache_hits untyped node_zfs_vdev_cache_hits 0 # HELP node_zfs_vdev_cache_misses kstat.zfs.misc.vdev_cache_stats.misses # TYPE node_zfs_vdev_cache_misses untyped node_zfs_vdev_cache_misses 0 # HELP node_zfs_vdev_mirror_non_rotating_linear kstat.zfs.misc.vdev_mirror_stats.non_rotating_linear # TYPE node_zfs_vdev_mirror_non_rotating_linear untyped node_zfs_vdev_mirror_non_rotating_linear 0 # HELP node_zfs_vdev_mirror_non_rotating_seek kstat.zfs.misc.vdev_mirror_stats.non_rotating_seek # TYPE node_zfs_vdev_mirror_non_rotating_seek untyped node_zfs_vdev_mirror_non_rotating_seek 0 # HELP node_zfs_vdev_mirror_preferred_found kstat.zfs.misc.vdev_mirror_stats.preferred_found # TYPE node_zfs_vdev_mirror_preferred_found untyped node_zfs_vdev_mirror_preferred_found 0 # HELP node_zfs_vdev_mirror_preferred_not_found kstat.zfs.misc.vdev_mirror_stats.preferred_not_found # TYPE node_zfs_vdev_mirror_preferred_not_found untyped node_zfs_vdev_mirror_preferred_not_found 94 # HELP node_zfs_vdev_mirror_rotating_linear kstat.zfs.misc.vdev_mirror_stats.rotating_linear # TYPE node_zfs_vdev_mirror_rotating_linear untyped node_zfs_vdev_mirror_rotating_linear 0 # HELP node_zfs_vdev_mirror_rotating_offset kstat.zfs.misc.vdev_mirror_stats.rotating_offset # TYPE node_zfs_vdev_mirror_rotating_offset untyped node_zfs_vdev_mirror_rotating_offset 0 # HELP node_zfs_vdev_mirror_rotating_seek kstat.zfs.misc.vdev_mirror_stats.rotating_seek # TYPE node_zfs_vdev_mirror_rotating_seek untyped node_zfs_vdev_mirror_rotating_seek 0 # HELP node_zfs_xuio_onloan_read_buf kstat.zfs.misc.xuio_stats.onloan_read_buf # TYPE node_zfs_xuio_onloan_read_buf untyped node_zfs_xuio_onloan_read_buf 32 # HELP node_zfs_xuio_onloan_write_buf kstat.zfs.misc.xuio_stats.onloan_write_buf # TYPE node_zfs_xuio_onloan_write_buf untyped node_zfs_xuio_onloan_write_buf 0 # HELP node_zfs_xuio_read_buf_copied kstat.zfs.misc.xuio_stats.read_buf_copied # TYPE node_zfs_xuio_read_buf_copied untyped node_zfs_xuio_read_buf_copied 0 # HELP node_zfs_xuio_read_buf_nocopy kstat.zfs.misc.xuio_stats.read_buf_nocopy # TYPE node_zfs_xuio_read_buf_nocopy untyped node_zfs_xuio_read_buf_nocopy 0 # HELP node_zfs_xuio_write_buf_copied kstat.zfs.misc.xuio_stats.write_buf_copied # TYPE node_zfs_xuio_write_buf_copied untyped node_zfs_xuio_write_buf_copied 0 # HELP node_zfs_xuio_write_buf_nocopy kstat.zfs.misc.xuio_stats.write_buf_nocopy # TYPE node_zfs_xuio_write_buf_nocopy untyped 
node_zfs_xuio_write_buf_nocopy 0 # HELP node_zfs_zfetch_bogus_streams kstat.zfs.misc.zfetchstats.bogus_streams # TYPE node_zfs_zfetch_bogus_streams untyped node_zfs_zfetch_bogus_streams 0 # HELP node_zfs_zfetch_colinear_hits kstat.zfs.misc.zfetchstats.colinear_hits # TYPE node_zfs_zfetch_colinear_hits untyped node_zfs_zfetch_colinear_hits 0 # HELP node_zfs_zfetch_colinear_misses kstat.zfs.misc.zfetchstats.colinear_misses # TYPE node_zfs_zfetch_colinear_misses untyped node_zfs_zfetch_colinear_misses 11 # HELP node_zfs_zfetch_hits kstat.zfs.misc.zfetchstats.hits # TYPE node_zfs_zfetch_hits untyped node_zfs_zfetch_hits 7.067992e+06 # HELP node_zfs_zfetch_misses kstat.zfs.misc.zfetchstats.misses # TYPE node_zfs_zfetch_misses untyped node_zfs_zfetch_misses 11 # HELP node_zfs_zfetch_reclaim_failures kstat.zfs.misc.zfetchstats.reclaim_failures # TYPE node_zfs_zfetch_reclaim_failures untyped node_zfs_zfetch_reclaim_failures 11 # HELP node_zfs_zfetch_reclaim_successes kstat.zfs.misc.zfetchstats.reclaim_successes # TYPE node_zfs_zfetch_reclaim_successes untyped node_zfs_zfetch_reclaim_successes 0 # HELP node_zfs_zfetch_streams_noresets kstat.zfs.misc.zfetchstats.streams_noresets # TYPE node_zfs_zfetch_streams_noresets untyped node_zfs_zfetch_streams_noresets 2 # HELP node_zfs_zfetch_streams_resets kstat.zfs.misc.zfetchstats.streams_resets # TYPE node_zfs_zfetch_streams_resets untyped node_zfs_zfetch_streams_resets 0 # HELP node_zfs_zfetch_stride_hits kstat.zfs.misc.zfetchstats.stride_hits # TYPE node_zfs_zfetch_stride_hits untyped node_zfs_zfetch_stride_hits 7.06799e+06 # HELP node_zfs_zfetch_stride_misses kstat.zfs.misc.zfetchstats.stride_misses # TYPE node_zfs_zfetch_stride_misses untyped node_zfs_zfetch_stride_misses 0 # HELP node_zfs_zil_zil_commit_count kstat.zfs.misc.zil.zil_commit_count # TYPE node_zfs_zil_zil_commit_count untyped node_zfs_zil_zil_commit_count 10 # HELP node_zfs_zil_zil_commit_writer_count kstat.zfs.misc.zil.zil_commit_writer_count # TYPE node_zfs_zil_zil_commit_writer_count untyped node_zfs_zil_zil_commit_writer_count 0 # HELP node_zfs_zil_zil_itx_copied_bytes kstat.zfs.misc.zil.zil_itx_copied_bytes # TYPE node_zfs_zil_zil_itx_copied_bytes untyped node_zfs_zil_zil_itx_copied_bytes 0 # HELP node_zfs_zil_zil_itx_copied_count kstat.zfs.misc.zil.zil_itx_copied_count # TYPE node_zfs_zil_zil_itx_copied_count untyped node_zfs_zil_zil_itx_copied_count 0 # HELP node_zfs_zil_zil_itx_count kstat.zfs.misc.zil.zil_itx_count # TYPE node_zfs_zil_zil_itx_count untyped node_zfs_zil_zil_itx_count 0 # HELP node_zfs_zil_zil_itx_indirect_bytes kstat.zfs.misc.zil.zil_itx_indirect_bytes # TYPE node_zfs_zil_zil_itx_indirect_bytes untyped node_zfs_zil_zil_itx_indirect_bytes 0 # HELP node_zfs_zil_zil_itx_indirect_count kstat.zfs.misc.zil.zil_itx_indirect_count # TYPE node_zfs_zil_zil_itx_indirect_count untyped node_zfs_zil_zil_itx_indirect_count 0 # HELP node_zfs_zil_zil_itx_metaslab_normal_bytes kstat.zfs.misc.zil.zil_itx_metaslab_normal_bytes # TYPE node_zfs_zil_zil_itx_metaslab_normal_bytes untyped node_zfs_zil_zil_itx_metaslab_normal_bytes 0 # HELP node_zfs_zil_zil_itx_metaslab_normal_count kstat.zfs.misc.zil.zil_itx_metaslab_normal_count # TYPE node_zfs_zil_zil_itx_metaslab_normal_count untyped node_zfs_zil_zil_itx_metaslab_normal_count 0 # HELP node_zfs_zil_zil_itx_metaslab_slog_bytes kstat.zfs.misc.zil.zil_itx_metaslab_slog_bytes # TYPE node_zfs_zil_zil_itx_metaslab_slog_bytes untyped node_zfs_zil_zil_itx_metaslab_slog_bytes 0 # HELP node_zfs_zil_zil_itx_metaslab_slog_count 
kstat.zfs.misc.zil.zil_itx_metaslab_slog_count # TYPE node_zfs_zil_zil_itx_metaslab_slog_count untyped node_zfs_zil_zil_itx_metaslab_slog_count 0 # HELP node_zfs_zil_zil_itx_needcopy_bytes kstat.zfs.misc.zil.zil_itx_needcopy_bytes # TYPE node_zfs_zil_zil_itx_needcopy_bytes untyped node_zfs_zil_zil_itx_needcopy_bytes 1.8446744073709537e+19 # HELP node_zfs_zil_zil_itx_needcopy_count kstat.zfs.misc.zil.zil_itx_needcopy_count # TYPE node_zfs_zil_zil_itx_needcopy_count untyped node_zfs_zil_zil_itx_needcopy_count 0 # HELP node_zfs_zpool_dataset_nread kstat.zfs.misc.objset.nread # TYPE node_zfs_zpool_dataset_nread untyped node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28 node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28 # HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked # TYPE node_zfs_zpool_dataset_nunlinked untyped node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3 node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks # TYPE node_zfs_zpool_dataset_nunlinks untyped node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3 node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten # TYPE node_zfs_zpool_dataset_nwritten untyped node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302 node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806 # HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads # TYPE node_zfs_zpool_dataset_reads untyped node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2 node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2 # HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes # TYPE node_zfs_zpool_dataset_writes untyped node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4 node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10 # HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread # TYPE node_zfs_zpool_nread untyped node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06 node_zfs_zpool_nread{zpool="poolz1"} 2.82624e+06 # HELP node_zfs_zpool_nwritten kstat.zfs.misc.io.nwritten # TYPE node_zfs_zpool_nwritten untyped node_zfs_zpool_nwritten{zpool="pool1"} 3.206144e+06 node_zfs_zpool_nwritten{zpool="poolz1"} 2.680501248e+09 # HELP node_zfs_zpool_rcnt kstat.zfs.misc.io.rcnt # TYPE node_zfs_zpool_rcnt untyped node_zfs_zpool_rcnt{zpool="pool1"} 0 node_zfs_zpool_rcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_reads kstat.zfs.misc.io.reads # TYPE node_zfs_zpool_reads untyped node_zfs_zpool_reads{zpool="pool1"} 22 
node_zfs_zpool_reads{zpool="poolz1"} 33 # HELP node_zfs_zpool_rlentime kstat.zfs.misc.io.rlentime # TYPE node_zfs_zpool_rlentime untyped node_zfs_zpool_rlentime{zpool="pool1"} 1.04112268e+08 node_zfs_zpool_rlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_rtime kstat.zfs.misc.io.rtime # TYPE node_zfs_zpool_rtime untyped node_zfs_zpool_rtime{zpool="pool1"} 2.4168078e+07 node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09 # HELP node_zfs_zpool_rupdate kstat.zfs.misc.io.rupdate # TYPE node_zfs_zpool_rupdate untyped node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13 node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14 # HELP node_zfs_zpool_state kstat.zfs.misc.state # TYPE node_zfs_zpool_state gauge node_zfs_zpool_state{state="degraded",zpool="pool1"} 0 node_zfs_zpool_state{state="degraded",zpool="pool2"} 0 node_zfs_zpool_state{state="degraded",zpool="poolz1"} 1 node_zfs_zpool_state{state="faulted",zpool="pool1"} 0 node_zfs_zpool_state{state="faulted",zpool="pool2"} 0 node_zfs_zpool_state{state="faulted",zpool="poolz1"} 0 node_zfs_zpool_state{state="offline",zpool="pool1"} 0 node_zfs_zpool_state{state="offline",zpool="pool2"} 0 node_zfs_zpool_state{state="offline",zpool="poolz1"} 0 node_zfs_zpool_state{state="online",zpool="pool1"} 1 node_zfs_zpool_state{state="online",zpool="pool2"} 0 node_zfs_zpool_state{state="online",zpool="poolz1"} 0 node_zfs_zpool_state{state="removed",zpool="pool1"} 0 node_zfs_zpool_state{state="removed",zpool="pool2"} 0 node_zfs_zpool_state{state="removed",zpool="poolz1"} 0 node_zfs_zpool_state{state="suspended",zpool="pool1"} 0 node_zfs_zpool_state{state="suspended",zpool="pool2"} 1 node_zfs_zpool_state{state="suspended",zpool="poolz1"} 0 node_zfs_zpool_state{state="unavail",zpool="pool1"} 0 node_zfs_zpool_state{state="unavail",zpool="pool2"} 0 node_zfs_zpool_state{state="unavail",zpool="poolz1"} 0 # HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt # TYPE node_zfs_zpool_wcnt untyped node_zfs_zpool_wcnt{zpool="pool1"} 0 node_zfs_zpool_wcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_wlentime kstat.zfs.misc.io.wlentime # TYPE node_zfs_zpool_wlentime untyped node_zfs_zpool_wlentime{zpool="pool1"} 1.04112268e+08 node_zfs_zpool_wlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_writes kstat.zfs.misc.io.writes # TYPE node_zfs_zpool_writes untyped node_zfs_zpool_writes{zpool="pool1"} 132 node_zfs_zpool_writes{zpool="poolz1"} 25294 # HELP node_zfs_zpool_wtime kstat.zfs.misc.io.wtime # TYPE node_zfs_zpool_wtime untyped node_zfs_zpool_wtime{zpool="pool1"} 7.155162e+06 node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09 # HELP node_zfs_zpool_wupdate kstat.zfs.misc.io.wupdate # TYPE node_zfs_zpool_wupdate untyped node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13 node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 # HELP node_zoneinfo_high_pages Zone watermark pages_high # TYPE node_zoneinfo_high_pages gauge node_zoneinfo_high_pages{node="0",zone="DMA"} 14 node_zoneinfo_high_pages{node="0",zone="DMA32"} 2122 node_zoneinfo_high_pages{node="0",zone="Device"} 0 node_zoneinfo_high_pages{node="0",zone="Movable"} 0 node_zoneinfo_high_pages{node="0",zone="Normal"} 31113 # HELP node_zoneinfo_low_pages Zone watermark pages_low # TYPE node_zoneinfo_low_pages gauge node_zoneinfo_low_pages{node="0",zone="DMA"} 11 node_zoneinfo_low_pages{node="0",zone="DMA32"} 1600 node_zoneinfo_low_pages{node="0",zone="Device"} 0 node_zoneinfo_low_pages{node="0",zone="Movable"} 0 node_zoneinfo_low_pages{node="0",zone="Normal"} 23461 # HELP 
node_zoneinfo_managed_pages Present pages managed by the buddy system # TYPE node_zoneinfo_managed_pages gauge node_zoneinfo_managed_pages{node="0",zone="DMA"} 3973 node_zoneinfo_managed_pages{node="0",zone="DMA32"} 530339 node_zoneinfo_managed_pages{node="0",zone="Device"} 0 node_zoneinfo_managed_pages{node="0",zone="Movable"} 0 node_zoneinfo_managed_pages{node="0",zone="Normal"} 7.654794e+06 # HELP node_zoneinfo_min_pages Zone watermark pages_min # TYPE node_zoneinfo_min_pages gauge node_zoneinfo_min_pages{node="0",zone="DMA"} 8 node_zoneinfo_min_pages{node="0",zone="DMA32"} 1078 node_zoneinfo_min_pages{node="0",zone="Device"} 0 node_zoneinfo_min_pages{node="0",zone="Movable"} 0 node_zoneinfo_min_pages{node="0",zone="Normal"} 15809 # HELP node_zoneinfo_nr_active_anon_pages Number of anonymous pages recently more used # TYPE node_zoneinfo_nr_active_anon_pages gauge node_zoneinfo_nr_active_anon_pages{node="0",zone="DMA"} 1.175853e+06 # HELP node_zoneinfo_nr_active_file_pages Number of active pages with file-backing # TYPE node_zoneinfo_nr_active_file_pages gauge node_zoneinfo_nr_active_file_pages{node="0",zone="DMA"} 688810 # HELP node_zoneinfo_nr_anon_pages Number of anonymous pages currently used by the system # TYPE node_zoneinfo_nr_anon_pages gauge node_zoneinfo_nr_anon_pages{node="0",zone="DMA"} 1.156608e+06 # HELP node_zoneinfo_nr_anon_transparent_hugepages Number of anonymous transparent huge pages currently used by the system # TYPE node_zoneinfo_nr_anon_transparent_hugepages gauge node_zoneinfo_nr_anon_transparent_hugepages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_dirtied_total Page dirtyings since bootup # TYPE node_zoneinfo_nr_dirtied_total counter node_zoneinfo_nr_dirtied_total{node="0",zone="DMA"} 1.189097e+06 # HELP node_zoneinfo_nr_dirty_pages Number of dirty pages # TYPE node_zoneinfo_nr_dirty_pages gauge node_zoneinfo_nr_dirty_pages{node="0",zone="DMA"} 103 # HELP node_zoneinfo_nr_file_pages Number of file pages # TYPE node_zoneinfo_nr_file_pages gauge node_zoneinfo_nr_file_pages{node="0",zone="DMA"} 1.740118e+06 # HELP node_zoneinfo_nr_free_pages Total number of free pages in the zone # TYPE node_zoneinfo_nr_free_pages gauge node_zoneinfo_nr_free_pages{node="0",zone="DMA"} 2949 node_zoneinfo_nr_free_pages{node="0",zone="DMA32"} 528427 node_zoneinfo_nr_free_pages{node="0",zone="Normal"} 4.539739e+06 # HELP node_zoneinfo_nr_inactive_anon_pages Number of anonymous pages recently less used # TYPE node_zoneinfo_nr_inactive_anon_pages gauge node_zoneinfo_nr_inactive_anon_pages{node="0",zone="DMA"} 95612 # HELP node_zoneinfo_nr_inactive_file_pages Number of inactive pages with file-backing # TYPE node_zoneinfo_nr_inactive_file_pages gauge node_zoneinfo_nr_inactive_file_pages{node="0",zone="DMA"} 723339 # HELP node_zoneinfo_nr_isolated_anon_pages Temporary isolated pages from anon lru # TYPE node_zoneinfo_nr_isolated_anon_pages gauge node_zoneinfo_nr_isolated_anon_pages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_isolated_file_pages Temporary isolated pages from file lru # TYPE node_zoneinfo_nr_isolated_file_pages gauge node_zoneinfo_nr_isolated_file_pages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_kernel_stacks Number of kernel stacks # TYPE node_zoneinfo_nr_kernel_stacks gauge node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA"} 0 node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA32"} 0 node_zoneinfo_nr_kernel_stacks{node="0",zone="Normal"} 18864 # HELP node_zoneinfo_nr_mapped_pages Number of mapped pages # TYPE node_zoneinfo_nr_mapped_pages gauge 
node_zoneinfo_nr_mapped_pages{node="0",zone="DMA"} 423143 # HELP node_zoneinfo_nr_shmem_pages Number of shmem pages (included tmpfs/GEM pages) # TYPE node_zoneinfo_nr_shmem_pages gauge node_zoneinfo_nr_shmem_pages{node="0",zone="DMA"} 330517 # HELP node_zoneinfo_nr_slab_reclaimable_pages Number of reclaimable slab pages # TYPE node_zoneinfo_nr_slab_reclaimable_pages gauge node_zoneinfo_nr_slab_reclaimable_pages{node="0",zone="DMA"} 121763 # HELP node_zoneinfo_nr_slab_unreclaimable_pages Number of unreclaimable slab pages # TYPE node_zoneinfo_nr_slab_unreclaimable_pages gauge node_zoneinfo_nr_slab_unreclaimable_pages{node="0",zone="DMA"} 56182 # HELP node_zoneinfo_nr_unevictable_pages Number of unevictable pages # TYPE node_zoneinfo_nr_unevictable_pages gauge node_zoneinfo_nr_unevictable_pages{node="0",zone="DMA"} 213111 # HELP node_zoneinfo_nr_writeback_pages Number of writeback pages # TYPE node_zoneinfo_nr_writeback_pages gauge node_zoneinfo_nr_writeback_pages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_written_total Page writings since bootup # TYPE node_zoneinfo_nr_written_total counter node_zoneinfo_nr_written_total{node="0",zone="DMA"} 1.181554e+06 # HELP node_zoneinfo_numa_foreign_total Was intended here, hit elsewhere # TYPE node_zoneinfo_numa_foreign_total counter node_zoneinfo_numa_foreign_total{node="0",zone="DMA"} 0 node_zoneinfo_numa_foreign_total{node="0",zone="DMA32"} 0 node_zoneinfo_numa_foreign_total{node="0",zone="Normal"} 0 # HELP node_zoneinfo_numa_hit_total Allocated in intended node # TYPE node_zoneinfo_numa_hit_total counter node_zoneinfo_numa_hit_total{node="0",zone="DMA"} 1 node_zoneinfo_numa_hit_total{node="0",zone="DMA32"} 13 node_zoneinfo_numa_hit_total{node="0",zone="Normal"} 6.2836441e+07 # HELP node_zoneinfo_numa_interleave_total Interleaver preferred this zone # TYPE node_zoneinfo_numa_interleave_total counter node_zoneinfo_numa_interleave_total{node="0",zone="DMA"} 1 node_zoneinfo_numa_interleave_total{node="0",zone="DMA32"} 1 node_zoneinfo_numa_interleave_total{node="0",zone="Normal"} 23174 # HELP node_zoneinfo_numa_local_total Allocation from local node # TYPE node_zoneinfo_numa_local_total counter node_zoneinfo_numa_local_total{node="0",zone="DMA"} 1 node_zoneinfo_numa_local_total{node="0",zone="DMA32"} 13 node_zoneinfo_numa_local_total{node="0",zone="Normal"} 6.2836441e+07 # HELP node_zoneinfo_numa_miss_total Allocated in non intended node # TYPE node_zoneinfo_numa_miss_total counter node_zoneinfo_numa_miss_total{node="0",zone="DMA"} 0 node_zoneinfo_numa_miss_total{node="0",zone="DMA32"} 0 node_zoneinfo_numa_miss_total{node="0",zone="Normal"} 0 # HELP node_zoneinfo_numa_other_total Allocation from other node # TYPE node_zoneinfo_numa_other_total counter node_zoneinfo_numa_other_total{node="0",zone="DMA"} 0 node_zoneinfo_numa_other_total{node="0",zone="DMA32"} 0 node_zoneinfo_numa_other_total{node="0",zone="Normal"} 0 # HELP node_zoneinfo_present_pages Physical pages existing within the zone # TYPE node_zoneinfo_present_pages gauge node_zoneinfo_present_pages{node="0",zone="DMA"} 3997 node_zoneinfo_present_pages{node="0",zone="DMA32"} 546847 node_zoneinfo_present_pages{node="0",zone="Device"} 0 node_zoneinfo_present_pages{node="0",zone="Movable"} 0 node_zoneinfo_present_pages{node="0",zone="Normal"} 7.806976e+06 # HELP node_zoneinfo_protection_0 Protection array 0. 
field # TYPE node_zoneinfo_protection_0 gauge node_zoneinfo_protection_0{node="0",zone="DMA"} 0 node_zoneinfo_protection_0{node="0",zone="DMA32"} 0 node_zoneinfo_protection_0{node="0",zone="Device"} 0 node_zoneinfo_protection_0{node="0",zone="Movable"} 0 node_zoneinfo_protection_0{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_1 Protection array 1. field # TYPE node_zoneinfo_protection_1 gauge node_zoneinfo_protection_1{node="0",zone="DMA"} 2039 node_zoneinfo_protection_1{node="0",zone="DMA32"} 0 node_zoneinfo_protection_1{node="0",zone="Device"} 0 node_zoneinfo_protection_1{node="0",zone="Movable"} 0 node_zoneinfo_protection_1{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_2 Protection array 2. field # TYPE node_zoneinfo_protection_2 gauge node_zoneinfo_protection_2{node="0",zone="DMA"} 31932 node_zoneinfo_protection_2{node="0",zone="DMA32"} 29893 node_zoneinfo_protection_2{node="0",zone="Device"} 0 node_zoneinfo_protection_2{node="0",zone="Movable"} 0 node_zoneinfo_protection_2{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_3 Protection array 3. field # TYPE node_zoneinfo_protection_3 gauge node_zoneinfo_protection_3{node="0",zone="DMA"} 31932 node_zoneinfo_protection_3{node="0",zone="DMA32"} 29893 node_zoneinfo_protection_3{node="0",zone="Device"} 0 node_zoneinfo_protection_3{node="0",zone="Movable"} 0 node_zoneinfo_protection_3{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_4 Protection array 4. field # TYPE node_zoneinfo_protection_4 gauge node_zoneinfo_protection_4{node="0",zone="DMA"} 31932 node_zoneinfo_protection_4{node="0",zone="DMA32"} 29893 node_zoneinfo_protection_4{node="0",zone="Device"} 0 node_zoneinfo_protection_4{node="0",zone="Movable"} 0 node_zoneinfo_protection_4{node="0",zone="Normal"} 0 # HELP node_zoneinfo_spanned_pages Total pages spanned by the zone, including holes # TYPE node_zoneinfo_spanned_pages gauge node_zoneinfo_spanned_pages{node="0",zone="DMA"} 4095 node_zoneinfo_spanned_pages{node="0",zone="DMA32"} 1.04448e+06 node_zoneinfo_spanned_pages{node="0",zone="Device"} 0 node_zoneinfo_spanned_pages{node="0",zone="Movable"} 0 node_zoneinfo_spanned_pages{node="0",zone="Normal"} 7.806976e+06 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. # TYPE process_virtual_memory_max_bytes gauge # HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. # TYPE promhttp_metric_handler_errors_total counter promhttp_metric_handler_errors_total{cause="encoding"} 0 promhttp_metric_handler_errors_total{cause="gathering"} 0 # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. 
# TYPE promhttp_metric_handler_requests_in_flight gauge promhttp_metric_handler_requests_in_flight 1 # HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. # TYPE promhttp_metric_handler_requests_total counter promhttp_metric_handler_requests_total{code="200"} 0 promhttp_metric_handler_requests_total{code="500"} 0 promhttp_metric_handler_requests_total{code="503"} 0 # HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom # TYPE testmetric1_1 untyped testmetric1_1{foo="bar"} 10 # HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom # TYPE testmetric1_2 untyped testmetric1_2{foo="baz"} 20 # HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_1 untyped testmetric2_1{foo="bar"} 30 # HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_2 untyped testmetric2_2{foo="baz"} 40 node_exporter-1.7.0/collector/fixtures/e2e-output.txt000066400000000000000000010567501452426057600230360ustar00rootroot00000000000000# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge # HELP go_info Information about the Go environment. # TYPE go_info gauge # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. # TYPE go_memstats_alloc_bytes_total counter # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. # TYPE go_memstats_gc_sys_bytes gauge # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. # TYPE go_memstats_heap_idle_bytes gauge # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge # HELP go_memstats_heap_objects Number of allocated objects. # TYPE go_memstats_heap_objects gauge # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. # TYPE go_memstats_heap_sys_bytes gauge # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. 
# TYPE go_memstats_mspan_inuse_bytes gauge # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. # TYPE go_memstats_mspan_sys_bytes gauge # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. # TYPE go_memstats_next_gc_bytes gauge # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. # TYPE go_memstats_other_sys_bytes gauge # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge # HELP go_threads Number of OS threads created. # TYPE go_threads gauge # HELP node_arp_entries ARP entries by device # TYPE node_arp_entries gauge node_arp_entries{device="eth0"} 3 node_arp_entries{device="eth1"} 3 # HELP node_bcache_active_journal_entries Number of journal entries that are newer than the index. # TYPE node_bcache_active_journal_entries gauge node_bcache_active_journal_entries{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1 # HELP node_bcache_average_key_size_sectors Average data per key in the btree (sectors). # TYPE node_bcache_average_key_size_sectors gauge node_bcache_average_key_size_sectors{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_btree_cache_size_bytes Amount of memory currently used by the btree cache. # TYPE node_bcache_btree_cache_size_bytes gauge node_bcache_btree_cache_size_bytes{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_btree_nodes Total nodes in the btree. # TYPE node_bcache_btree_nodes gauge node_bcache_btree_nodes{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_btree_read_average_duration_seconds Average btree read duration. # TYPE node_bcache_btree_read_average_duration_seconds gauge node_bcache_btree_read_average_duration_seconds{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.305e-06 # HELP node_bcache_bypassed_bytes_total Amount of IO (both reads and writes) that has bypassed the cache. # TYPE node_bcache_bypassed_bytes_total counter node_bcache_bypassed_bytes_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_available_percent Percentage of cache device without dirty data, usable for writeback (may contain clean cached data). # TYPE node_bcache_cache_available_percent gauge node_bcache_cache_available_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 100 # HELP node_bcache_cache_bypass_hits_total Hits for IO intended to skip the cache. # TYPE node_bcache_cache_bypass_hits_total counter node_bcache_cache_bypass_hits_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_bypass_misses_total Misses for IO intended to skip the cache. # TYPE node_bcache_cache_bypass_misses_total counter node_bcache_cache_bypass_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_hits_total Hits counted per individual IO as bcache sees them. # TYPE node_bcache_cache_hits_total counter node_bcache_cache_hits_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 546 # HELP node_bcache_cache_miss_collisions_total Instances where data insertion from cache miss raced with write (data already present). 
# TYPE node_bcache_cache_miss_collisions_total counter node_bcache_cache_miss_collisions_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_misses_total Misses counted per individual IO as bcache sees them. # TYPE node_bcache_cache_misses_total counter node_bcache_cache_misses_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_read_races_total Counts instances where while data was being read from the cache, the bucket was reused and invalidated - i.e. where the pointer was stale after the read completed. # TYPE node_bcache_cache_read_races_total counter node_bcache_cache_read_races_total{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_cache_readaheads_total Count of times readahead occurred. # TYPE node_bcache_cache_readaheads_total counter node_bcache_cache_readaheads_total{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 13 # HELP node_bcache_congested Congestion. # TYPE node_bcache_congested gauge node_bcache_congested{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_dirty_data_bytes Amount of dirty data for this backing device in the cache. # TYPE node_bcache_dirty_data_bytes gauge node_bcache_dirty_data_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_dirty_target_bytes Current dirty data target threshold for this backing device in bytes. # TYPE node_bcache_dirty_target_bytes gauge node_bcache_dirty_target_bytes{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 2.189426688e+10 # HELP node_bcache_io_errors Number of errors that have occurred, decayed by io_error_halflife. # TYPE node_bcache_io_errors gauge node_bcache_io_errors{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_metadata_written_bytes_total Sum of all non data writes (btree writes and all other metadata). # TYPE node_bcache_metadata_written_bytes_total counter node_bcache_metadata_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 512 # HELP node_bcache_priority_stats_metadata_percent Bcache's metadata overhead. # TYPE node_bcache_priority_stats_metadata_percent gauge node_bcache_priority_stats_metadata_percent{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_priority_stats_unused_percent The percentage of the cache that doesn't contain any data. # TYPE node_bcache_priority_stats_unused_percent gauge node_bcache_priority_stats_unused_percent{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 99 # HELP node_bcache_root_usage_percent Percentage of the root btree node in use (tree depth increases if too high). # TYPE node_bcache_root_usage_percent gauge node_bcache_root_usage_percent{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_tree_depth Depth of the btree. # TYPE node_bcache_tree_depth gauge node_bcache_tree_depth{uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bcache_writeback_change Last writeback rate change step for this backing device. # TYPE node_bcache_writeback_change gauge node_bcache_writeback_change{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 329204 # HELP node_bcache_writeback_rate Current writeback rate for this backing device in bytes. 
# TYPE node_bcache_writeback_rate gauge node_bcache_writeback_rate{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 1.150976e+06 # HELP node_bcache_writeback_rate_integral_term Current result of integral controller, part of writeback rate # TYPE node_bcache_writeback_rate_integral_term gauge node_bcache_writeback_rate_integral_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 808960 # HELP node_bcache_writeback_rate_proportional_term Current result of proportional controller, part of writeback rate # TYPE node_bcache_writeback_rate_proportional_term gauge node_bcache_writeback_rate_proportional_term{backing_device="bdev0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 437748 # HELP node_bcache_written_bytes_total Sum of all data that has been written to the cache. # TYPE node_bcache_written_bytes_total counter node_bcache_written_bytes_total{cache_device="cache0",uuid="deaddd54-c735-46d5-868e-f331c5fd7c74"} 0 # HELP node_bonding_active Number of active slaves per bonding interface. # TYPE node_bonding_active gauge node_bonding_active{master="bond0"} 0 node_bonding_active{master="dmz"} 2 node_bonding_active{master="int"} 1 # HELP node_bonding_slaves Number of configured slaves per bonding interface. # TYPE node_bonding_slaves gauge node_bonding_slaves{master="bond0"} 0 node_bonding_slaves{master="dmz"} 2 node_bonding_slaves{master="int"} 2 # HELP node_boot_time_seconds Node boot time, in unixtime. # TYPE node_boot_time_seconds gauge node_boot_time_seconds 1.418183276e+09 # HELP node_btrfs_allocation_ratio Data allocation ratio for a layout/data type # TYPE node_btrfs_allocation_ratio gauge node_btrfs_allocation_ratio{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 node_btrfs_allocation_ratio{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.3333333333333333 node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 node_btrfs_allocation_ratio{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 node_btrfs_allocation_ratio{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2 node_btrfs_allocation_ratio{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 2 # HELP node_btrfs_device_size_bytes Size of a device that is part of the filesystem. # TYPE node_btrfs_device_size_bytes gauge node_btrfs_device_size_bytes{device="loop22",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop23",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop24",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop25",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop25",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.073741824e+10 node_btrfs_device_size_bytes{device="loop26",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+10 # HELP node_btrfs_global_rsv_size_bytes Size of global reserve. 
# TYPE node_btrfs_global_rsv_size_bytes gauge node_btrfs_global_rsv_size_bytes{uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.6777216e+07 node_btrfs_global_rsv_size_bytes{uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 # HELP node_btrfs_info Filesystem information # TYPE node_btrfs_info gauge node_btrfs_info{label="",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1 node_btrfs_info{label="fixture",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1 # HELP node_btrfs_reserved_bytes Amount of space reserved for a data type # TYPE node_btrfs_reserved_bytes gauge node_btrfs_reserved_bytes{block_group_type="data",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 node_btrfs_reserved_bytes{block_group_type="data",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 node_btrfs_reserved_bytes{block_group_type="metadata",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 node_btrfs_reserved_bytes{block_group_type="metadata",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 node_btrfs_reserved_bytes{block_group_type="system",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 0 node_btrfs_reserved_bytes{block_group_type="system",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 # HELP node_btrfs_size_bytes Amount of space allocated for a layout/data type # TYPE node_btrfs_size_bytes gauge node_btrfs_size_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 2.147483648e+09 node_btrfs_size_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 6.44087808e+08 node_btrfs_size_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 1.073741824e+09 node_btrfs_size_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 4.29391872e+08 node_btrfs_size_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.388608e+06 node_btrfs_size_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 1.6777216e+07 # HELP node_btrfs_used_bytes Amount of used space by a layout/data type # TYPE node_btrfs_used_bytes gauge node_btrfs_used_bytes{block_group_type="data",mode="raid0",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 8.08189952e+08 node_btrfs_used_bytes{block_group_type="data",mode="raid5",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 0 node_btrfs_used_bytes{block_group_type="metadata",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 933888 node_btrfs_used_bytes{block_group_type="metadata",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 114688 node_btrfs_used_bytes{block_group_type="system",mode="raid1",uuid="0abb23a9-579b-43e6-ad30-227ef47fcb9d"} 16384 node_btrfs_used_bytes{block_group_type="system",mode="raid6",uuid="7f07c59f-6136-449c-ab87-e1cf2328731b"} 16384 # HELP node_buddyinfo_blocks Count of free blocks according to size. 
# TYPE node_buddyinfo_blocks gauge node_buddyinfo_blocks{node="0",size="0",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="0",zone="DMA32"} 759 node_buddyinfo_blocks{node="0",size="0",zone="Normal"} 4381 node_buddyinfo_blocks{node="0",size="1",zone="DMA"} 0 node_buddyinfo_blocks{node="0",size="1",zone="DMA32"} 572 node_buddyinfo_blocks{node="0",size="1",zone="Normal"} 1093 node_buddyinfo_blocks{node="0",size="10",zone="DMA"} 3 node_buddyinfo_blocks{node="0",size="10",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="10",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="2",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="2",zone="DMA32"} 791 node_buddyinfo_blocks{node="0",size="2",zone="Normal"} 185 node_buddyinfo_blocks{node="0",size="3",zone="DMA"} 0 node_buddyinfo_blocks{node="0",size="3",zone="DMA32"} 475 node_buddyinfo_blocks{node="0",size="3",zone="Normal"} 1530 node_buddyinfo_blocks{node="0",size="4",zone="DMA"} 2 node_buddyinfo_blocks{node="0",size="4",zone="DMA32"} 194 node_buddyinfo_blocks{node="0",size="4",zone="Normal"} 567 node_buddyinfo_blocks{node="0",size="5",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="5",zone="DMA32"} 45 node_buddyinfo_blocks{node="0",size="5",zone="Normal"} 102 node_buddyinfo_blocks{node="0",size="6",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="6",zone="DMA32"} 12 node_buddyinfo_blocks{node="0",size="6",zone="Normal"} 4 node_buddyinfo_blocks{node="0",size="7",zone="DMA"} 0 node_buddyinfo_blocks{node="0",size="7",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="7",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="8",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="8",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="8",zone="Normal"} 0 node_buddyinfo_blocks{node="0",size="9",zone="DMA"} 1 node_buddyinfo_blocks{node="0",size="9",zone="DMA32"} 0 node_buddyinfo_blocks{node="0",size="9",zone="Normal"} 0 # HELP node_cgroups_cgroups Current cgroup number of the subsystem. # TYPE node_cgroups_cgroups gauge node_cgroups_cgroups{subsys_name="blkio"} 170 node_cgroups_cgroups{subsys_name="cpu"} 172 node_cgroups_cgroups{subsys_name="cpuacct"} 172 node_cgroups_cgroups{subsys_name="cpuset"} 47 node_cgroups_cgroups{subsys_name="devices"} 170 node_cgroups_cgroups{subsys_name="freezer"} 47 node_cgroups_cgroups{subsys_name="hugetlb"} 47 node_cgroups_cgroups{subsys_name="memory"} 234 node_cgroups_cgroups{subsys_name="net_cls"} 47 node_cgroups_cgroups{subsys_name="perf_event"} 47 node_cgroups_cgroups{subsys_name="pids"} 170 node_cgroups_cgroups{subsys_name="rdma"} 1 # HELP node_cgroups_enabled Current cgroup number of the subsystem. # TYPE node_cgroups_enabled gauge node_cgroups_enabled{subsys_name="blkio"} 1 node_cgroups_enabled{subsys_name="cpu"} 1 node_cgroups_enabled{subsys_name="cpuacct"} 1 node_cgroups_enabled{subsys_name="cpuset"} 1 node_cgroups_enabled{subsys_name="devices"} 1 node_cgroups_enabled{subsys_name="freezer"} 1 node_cgroups_enabled{subsys_name="hugetlb"} 1 node_cgroups_enabled{subsys_name="memory"} 1 node_cgroups_enabled{subsys_name="net_cls"} 1 node_cgroups_enabled{subsys_name="perf_event"} 1 node_cgroups_enabled{subsys_name="pids"} 1 node_cgroups_enabled{subsys_name="rdma"} 1 # HELP node_context_switches_total Total number of context switches. 
# TYPE node_context_switches_total counter node_context_switches_total 3.8014093e+07 # HELP node_cooling_device_cur_state Current throttle state of the cooling device # TYPE node_cooling_device_cur_state gauge node_cooling_device_cur_state{name="0",type="Processor"} 0 # HELP node_cooling_device_max_state Maximum throttle state of the cooling device # TYPE node_cooling_device_max_state gauge node_cooling_device_max_state{name="0",type="Processor"} 3 # HELP node_cpu_bug_info The `bugs` field of CPU information from /proc/cpuinfo taken from the first core. # TYPE node_cpu_bug_info gauge node_cpu_bug_info{bug="cpu_meltdown"} 1 node_cpu_bug_info{bug="mds"} 1 node_cpu_bug_info{bug="spectre_v1"} 1 node_cpu_bug_info{bug="spectre_v2"} 1 # HELP node_cpu_core_throttles_total Number of times this CPU core has been throttled. # TYPE node_cpu_core_throttles_total counter node_cpu_core_throttles_total{core="0",package="0"} 5 node_cpu_core_throttles_total{core="0",package="1"} 0 node_cpu_core_throttles_total{core="1",package="0"} 0 node_cpu_core_throttles_total{core="1",package="1"} 9 # HELP node_cpu_flag_info The `flags` field of CPU information from /proc/cpuinfo taken from the first core. # TYPE node_cpu_flag_info gauge node_cpu_flag_info{flag="aes"} 1 node_cpu_flag_info{flag="avx"} 1 node_cpu_flag_info{flag="avx2"} 1 node_cpu_flag_info{flag="constant_tsc"} 1 # HELP node_cpu_guest_seconds_total Seconds the CPUs spent in guests (VMs) for each mode. # TYPE node_cpu_guest_seconds_total counter node_cpu_guest_seconds_total{cpu="0",mode="nice"} 0.01 node_cpu_guest_seconds_total{cpu="0",mode="user"} 0.02 node_cpu_guest_seconds_total{cpu="1",mode="nice"} 0.02 node_cpu_guest_seconds_total{cpu="1",mode="user"} 0.03 node_cpu_guest_seconds_total{cpu="2",mode="nice"} 0.03 node_cpu_guest_seconds_total{cpu="2",mode="user"} 0.04 node_cpu_guest_seconds_total{cpu="3",mode="nice"} 0.04 node_cpu_guest_seconds_total{cpu="3",mode="user"} 0.05 node_cpu_guest_seconds_total{cpu="4",mode="nice"} 0.05 node_cpu_guest_seconds_total{cpu="4",mode="user"} 0.06 node_cpu_guest_seconds_total{cpu="5",mode="nice"} 0.06 node_cpu_guest_seconds_total{cpu="5",mode="user"} 0.07 node_cpu_guest_seconds_total{cpu="6",mode="nice"} 0.07 node_cpu_guest_seconds_total{cpu="6",mode="user"} 0.08 node_cpu_guest_seconds_total{cpu="7",mode="nice"} 0.08 node_cpu_guest_seconds_total{cpu="7",mode="user"} 0.09 # HELP node_cpu_info CPU information from /proc/cpuinfo. 
# TYPE node_cpu_info gauge node_cpu_info{cachesize="8192 KB",core="0",cpu="0",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="0",cpu="4",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="1",cpu="1",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="1",cpu="5",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="2",cpu="2",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="2",cpu="6",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="3",cpu="3",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 node_cpu_info{cachesize="8192 KB",core="3",cpu="7",family="6",microcode="0xb4",model="142",model_name="Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz",package="0",stepping="10",vendor="GenuineIntel"} 1 # HELP node_cpu_isolated Whether each core is isolated, information from /sys/devices/system/cpu/isolated. # TYPE node_cpu_isolated gauge node_cpu_isolated{cpu="1"} 1 node_cpu_isolated{cpu="3"} 1 node_cpu_isolated{cpu="4"} 1 node_cpu_isolated{cpu="5"} 1 node_cpu_isolated{cpu="9"} 1 # HELP node_cpu_package_throttles_total Number of times this CPU package has been throttled. # TYPE node_cpu_package_throttles_total counter node_cpu_package_throttles_total{package="0"} 30 node_cpu_package_throttles_total{package="1"} 6 # HELP node_cpu_scaling_frequency_hertz Current scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_hertz gauge node_cpu_scaling_frequency_hertz{cpu="0"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="1"} 1.699981e+09 node_cpu_scaling_frequency_hertz{cpu="2"} 8e+06 node_cpu_scaling_frequency_hertz{cpu="3"} 8e+06 # HELP node_cpu_scaling_frequency_max_hertz Maximum scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_max_hertz gauge node_cpu_scaling_frequency_max_hertz{cpu="0"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="1"} 3.7e+09 node_cpu_scaling_frequency_max_hertz{cpu="2"} 4.2e+09 node_cpu_scaling_frequency_max_hertz{cpu="3"} 4.2e+09 # HELP node_cpu_scaling_frequency_min_hertz Minimum scaled CPU thread frequency in hertz. # TYPE node_cpu_scaling_frequency_min_hertz gauge node_cpu_scaling_frequency_min_hertz{cpu="0"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="1"} 8e+08 node_cpu_scaling_frequency_min_hertz{cpu="2"} 1e+06 node_cpu_scaling_frequency_min_hertz{cpu="3"} 1e+06 # HELP node_cpu_scaling_governor Current enabled CPU frequency governor. 
# TYPE node_cpu_scaling_governor gauge node_cpu_scaling_governor{cpu="0",governor="performance"} 0 node_cpu_scaling_governor{cpu="0",governor="powersave"} 1 node_cpu_scaling_governor{cpu="1",governor="performance"} 0 node_cpu_scaling_governor{cpu="1",governor="powersave"} 1 node_cpu_scaling_governor{cpu="2",governor="performance"} 0 node_cpu_scaling_governor{cpu="2",governor="powersave"} 1 node_cpu_scaling_governor{cpu="3",governor="performance"} 0 node_cpu_scaling_governor{cpu="3",governor="powersave"} 1 # HELP node_cpu_seconds_total Seconds the CPUs spent in each mode. # TYPE node_cpu_seconds_total counter node_cpu_seconds_total{cpu="0",mode="idle"} 10870.69 node_cpu_seconds_total{cpu="0",mode="iowait"} 2.2 node_cpu_seconds_total{cpu="0",mode="irq"} 0.01 node_cpu_seconds_total{cpu="0",mode="nice"} 0.19 node_cpu_seconds_total{cpu="0",mode="softirq"} 34.1 node_cpu_seconds_total{cpu="0",mode="steal"} 0 node_cpu_seconds_total{cpu="0",mode="system"} 210.45 node_cpu_seconds_total{cpu="0",mode="user"} 444.9 node_cpu_seconds_total{cpu="1",mode="idle"} 11107.87 node_cpu_seconds_total{cpu="1",mode="iowait"} 5.91 node_cpu_seconds_total{cpu="1",mode="irq"} 0 node_cpu_seconds_total{cpu="1",mode="nice"} 0.23 node_cpu_seconds_total{cpu="1",mode="softirq"} 0.46 node_cpu_seconds_total{cpu="1",mode="steal"} 0 node_cpu_seconds_total{cpu="1",mode="system"} 164.74 node_cpu_seconds_total{cpu="1",mode="user"} 478.69 node_cpu_seconds_total{cpu="2",mode="idle"} 11123.21 node_cpu_seconds_total{cpu="2",mode="iowait"} 4.41 node_cpu_seconds_total{cpu="2",mode="irq"} 0 node_cpu_seconds_total{cpu="2",mode="nice"} 0.36 node_cpu_seconds_total{cpu="2",mode="softirq"} 3.26 node_cpu_seconds_total{cpu="2",mode="steal"} 0 node_cpu_seconds_total{cpu="2",mode="system"} 159.16 node_cpu_seconds_total{cpu="2",mode="user"} 465.04 node_cpu_seconds_total{cpu="3",mode="idle"} 11132.3 node_cpu_seconds_total{cpu="3",mode="iowait"} 5.33 node_cpu_seconds_total{cpu="3",mode="irq"} 0 node_cpu_seconds_total{cpu="3",mode="nice"} 1.02 node_cpu_seconds_total{cpu="3",mode="softirq"} 0.6 node_cpu_seconds_total{cpu="3",mode="steal"} 0 node_cpu_seconds_total{cpu="3",mode="system"} 156.83 node_cpu_seconds_total{cpu="3",mode="user"} 470.54 node_cpu_seconds_total{cpu="4",mode="idle"} 11403.21 node_cpu_seconds_total{cpu="4",mode="iowait"} 2.17 node_cpu_seconds_total{cpu="4",mode="irq"} 0 node_cpu_seconds_total{cpu="4",mode="nice"} 0.25 node_cpu_seconds_total{cpu="4",mode="softirq"} 0.08 node_cpu_seconds_total{cpu="4",mode="steal"} 0 node_cpu_seconds_total{cpu="4",mode="system"} 107.76 node_cpu_seconds_total{cpu="4",mode="user"} 284.13 node_cpu_seconds_total{cpu="5",mode="idle"} 11362.7 node_cpu_seconds_total{cpu="5",mode="iowait"} 6.72 node_cpu_seconds_total{cpu="5",mode="irq"} 0 node_cpu_seconds_total{cpu="5",mode="nice"} 1.01 node_cpu_seconds_total{cpu="5",mode="softirq"} 0.3 node_cpu_seconds_total{cpu="5",mode="steal"} 0 node_cpu_seconds_total{cpu="5",mode="system"} 115.86 node_cpu_seconds_total{cpu="5",mode="user"} 292.71 node_cpu_seconds_total{cpu="6",mode="idle"} 11397.21 node_cpu_seconds_total{cpu="6",mode="iowait"} 3.19 node_cpu_seconds_total{cpu="6",mode="irq"} 0 node_cpu_seconds_total{cpu="6",mode="nice"} 0.36 node_cpu_seconds_total{cpu="6",mode="softirq"} 0.29 node_cpu_seconds_total{cpu="6",mode="steal"} 0 node_cpu_seconds_total{cpu="6",mode="system"} 102.76 node_cpu_seconds_total{cpu="6",mode="user"} 291.52 node_cpu_seconds_total{cpu="7",mode="idle"} 11392.82 node_cpu_seconds_total{cpu="7",mode="iowait"} 5.55 
node_cpu_seconds_total{cpu="7",mode="irq"} 0 node_cpu_seconds_total{cpu="7",mode="nice"} 2.68 node_cpu_seconds_total{cpu="7",mode="softirq"} 0.31 node_cpu_seconds_total{cpu="7",mode="steal"} 0 node_cpu_seconds_total{cpu="7",mode="system"} 101.64 node_cpu_seconds_total{cpu="7",mode="user"} 290.98 # HELP node_cpu_vulnerabilities_info Details of each CPU vulnerability reported by sysfs. The value of the series is an int encoded state of the vulnerability. The same state is stored as a string in the label # TYPE node_cpu_vulnerabilities_info gauge node_cpu_vulnerabilities_info{codename="itlb_multihit",state="not affected"} 1 node_cpu_vulnerabilities_info{codename="mds",state="vulnerable"} 1 node_cpu_vulnerabilities_info{codename="retbleed",state="mitigation"} 1 node_cpu_vulnerabilities_info{codename="spectre_v1",state="mitigation"} 1 node_cpu_vulnerabilities_info{codename="spectre_v2",state="mitigation"} 1 # HELP node_disk_ata_rotation_rate_rpm ATA disk rotation rate in RPMs (0 for SSDs). # TYPE node_disk_ata_rotation_rate_rpm gauge node_disk_ata_rotation_rate_rpm{device="sda"} 7200 node_disk_ata_rotation_rate_rpm{device="sdb"} 0 node_disk_ata_rotation_rate_rpm{device="sdc"} 0 # HELP node_disk_ata_write_cache ATA disk has a write cache. # TYPE node_disk_ata_write_cache gauge node_disk_ata_write_cache{device="sda"} 1 node_disk_ata_write_cache{device="sdb"} 1 node_disk_ata_write_cache{device="sdc"} 1 # HELP node_disk_ata_write_cache_enabled ATA disk has its write cache enabled. # TYPE node_disk_ata_write_cache_enabled gauge node_disk_ata_write_cache_enabled{device="sda"} 0 node_disk_ata_write_cache_enabled{device="sdb"} 1 node_disk_ata_write_cache_enabled{device="sdc"} 0 # HELP node_disk_device_mapper_info Info about disk device mapper. # TYPE node_disk_device_mapper_info gauge node_disk_device_mapper_info{device="dm-0",lv_layer="",lv_name="",name="nvme0n1_crypt",uuid="CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt",vg_name=""} 1 node_disk_device_mapper_info{device="dm-1",lv_layer="",lv_name="swap_1",name="system-swap_1",uuid="LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-2",lv_layer="",lv_name="root",name="system-root",uuid="LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-3",lv_layer="",lv_name="var",name="system-var",uuid="LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-4",lv_layer="",lv_name="tmp",name="system-tmp",uuid="LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H",vg_name="system"} 1 node_disk_device_mapper_info{device="dm-5",lv_layer="",lv_name="home",name="system-home",uuid="LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf",vg_name="system"} 1 # HELP node_disk_discard_time_seconds_total This is the total number of seconds spent by all discards. # TYPE node_disk_discard_time_seconds_total counter node_disk_discard_time_seconds_total{device="sdb"} 11.13 node_disk_discard_time_seconds_total{device="sdc"} 11.13 # HELP node_disk_discarded_sectors_total The total number of sectors discarded successfully. # TYPE node_disk_discarded_sectors_total counter node_disk_discarded_sectors_total{device="sdb"} 1.925173784e+09 node_disk_discarded_sectors_total{device="sdc"} 1.25173784e+08 # HELP node_disk_discards_completed_total The total number of discards completed successfully. 
# TYPE node_disk_discards_completed_total counter node_disk_discards_completed_total{device="sdb"} 68851 node_disk_discards_completed_total{device="sdc"} 18851 # HELP node_disk_discards_merged_total The total number of discards merged. # TYPE node_disk_discards_merged_total counter node_disk_discards_merged_total{device="sdb"} 0 node_disk_discards_merged_total{device="sdc"} 0 # HELP node_disk_filesystem_info Info about disk filesystem. # TYPE node_disk_filesystem_info gauge node_disk_filesystem_info{device="dm-0",type="LVM2_member",usage="raid",uuid="c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB",version="LVM2 001"} 1 node_disk_filesystem_info{device="dm-1",type="swap",usage="other",uuid="5272bb60-04b5-49cd-b730-be57c7604450",version="1"} 1 node_disk_filesystem_info{device="dm-2",type="ext4",usage="filesystem",uuid="3deafd0d-faff-4695-8d15-51061ae1f51b",version="1.0"} 1 node_disk_filesystem_info{device="dm-3",type="ext4",usage="filesystem",uuid="5c772222-f7d4-4c8e-87e8-e97df6b7a45e",version="1.0"} 1 node_disk_filesystem_info{device="dm-4",type="ext4",usage="filesystem",uuid="a9479d44-60e1-4015-a1e5-bb065e6dd11b",version="1.0"} 1 node_disk_filesystem_info{device="dm-5",type="ext4",usage="filesystem",uuid="b05b726a-c718-4c4d-8641-7c73a7696d83",version="1.0"} 1 node_disk_filesystem_info{device="mmcblk0p1",type="vfat",usage="filesystem",uuid="6284-658D",version="FAT32"} 1 node_disk_filesystem_info{device="mmcblk0p2",type="ext4",usage="filesystem",uuid="83324ce8-a6f3-4e35-ad64-dbb3d6b87a32",version="1.0"} 1 node_disk_filesystem_info{device="sda",type="LVM2_member",usage="raid",uuid="cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw",version="LVM2 001"} 1 node_disk_filesystem_info{device="sdc",type="LVM2_member",usage="raid",uuid="QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb",version="LVM2 001"} 1 # HELP node_disk_flush_requests_time_seconds_total This is the total number of seconds spent by all flush requests. # TYPE node_disk_flush_requests_time_seconds_total counter node_disk_flush_requests_time_seconds_total{device="sdc"} 1.944 # HELP node_disk_flush_requests_total The total number of flush requests completed successfully # TYPE node_disk_flush_requests_total counter node_disk_flush_requests_total{device="sdc"} 1555 # HELP node_disk_info Info of /sys/block/. 
# TYPE node_disk_info gauge node_disk_info{device="dm-0",major="252",minor="0",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-1",major="252",minor="1",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-2",major="252",minor="2",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-3",major="252",minor="3",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-4",major="252",minor="4",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="dm-5",major="252",minor="5",model="",path="",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0",major="179",minor="0",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p1",major="179",minor="1",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="mmcblk0p2",major="179",minor="2",model="",path="platform-df2969f3.mmc",revision="",serial="",wwn=""} 1 node_disk_info{device="nvme0n1",major="259",minor="0",model="SAMSUNG EHFTF55LURSY-000Y9",path="pci-0000:02:00.0-nvme-1",revision="4NBTUY95",serial="S252B6CU1HG3M1",wwn="eui.p3vbbiejx5aae2r3"} 1 node_disk_info{device="sda",major="8",minor="0",model="TOSHIBA_KSDB4U86",path="pci-0000:3b:00.0-sas-phy7-lun-0",revision="0102",serial="2160A0D5FVGG",wwn="0x7c72382b8de36a64"} 1 node_disk_info{device="sdb",major="8",minor="16",model="SuperMicro_SSD",path="pci-0000:00:1f.2-ata-1",revision="0R",serial="SMC0E1B87ABBB16BD84E",wwn="0xe1b87abbb16bd84e"} 1 node_disk_info{device="sdc",major="8",minor="32",model="INTEL_SSDS9X9SI0",path="pci-0000:00:1f.2-ata-4",revision="0100",serial="3EWB5Y25CWQWA7EH1U",wwn="0x58907ddc573a5de"} 1 node_disk_info{device="sr0",major="11",minor="0",model="Virtual_CDROM0",path="pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0",revision="1.00",serial="AAAABBBBCCCC1",wwn=""} 1 node_disk_info{device="vda",major="254",minor="0",model="",path="pci-0000:00:06.0",revision="",serial="",wwn=""} 1 # HELP node_disk_io_now The number of I/Os currently in progress. # TYPE node_disk_io_now gauge node_disk_io_now{device="dm-0"} 0 node_disk_io_now{device="dm-1"} 0 node_disk_io_now{device="dm-2"} 0 node_disk_io_now{device="dm-3"} 0 node_disk_io_now{device="dm-4"} 0 node_disk_io_now{device="dm-5"} 0 node_disk_io_now{device="mmcblk0"} 0 node_disk_io_now{device="mmcblk0p1"} 0 node_disk_io_now{device="mmcblk0p2"} 0 node_disk_io_now{device="nvme0n1"} 0 node_disk_io_now{device="sda"} 0 node_disk_io_now{device="sdb"} 0 node_disk_io_now{device="sdc"} 0 node_disk_io_now{device="sr0"} 0 node_disk_io_now{device="vda"} 0 # HELP node_disk_io_time_seconds_total Total seconds spent doing I/Os. 
# TYPE node_disk_io_time_seconds_total counter node_disk_io_time_seconds_total{device="dm-0"} 11325.968 node_disk_io_time_seconds_total{device="dm-1"} 0.076 node_disk_io_time_seconds_total{device="dm-2"} 65.4 node_disk_io_time_seconds_total{device="dm-3"} 0.016 node_disk_io_time_seconds_total{device="dm-4"} 0.024 node_disk_io_time_seconds_total{device="dm-5"} 58.848 node_disk_io_time_seconds_total{device="mmcblk0"} 0.136 node_disk_io_time_seconds_total{device="mmcblk0p1"} 0.024 node_disk_io_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_seconds_total{device="nvme0n1"} 222.766 node_disk_io_time_seconds_total{device="sda"} 9653.880000000001 node_disk_io_time_seconds_total{device="sdb"} 60.730000000000004 node_disk_io_time_seconds_total{device="sdc"} 10.73 node_disk_io_time_seconds_total{device="sr0"} 0 node_disk_io_time_seconds_total{device="vda"} 41614.592000000004 # HELP node_disk_io_time_weighted_seconds_total The weighted # of seconds spent doing I/Os. # TYPE node_disk_io_time_weighted_seconds_total counter node_disk_io_time_weighted_seconds_total{device="dm-0"} 1.206301256e+06 node_disk_io_time_weighted_seconds_total{device="dm-1"} 0.084 node_disk_io_time_weighted_seconds_total{device="dm-2"} 129.416 node_disk_io_time_weighted_seconds_total{device="dm-3"} 0.10400000000000001 node_disk_io_time_weighted_seconds_total{device="dm-4"} 0.044 node_disk_io_time_weighted_seconds_total{device="dm-5"} 105.632 node_disk_io_time_weighted_seconds_total{device="mmcblk0"} 0.156 node_disk_io_time_weighted_seconds_total{device="mmcblk0p1"} 0.024 node_disk_io_time_weighted_seconds_total{device="mmcblk0p2"} 0.068 node_disk_io_time_weighted_seconds_total{device="nvme0n1"} 1032.546 node_disk_io_time_weighted_seconds_total{device="sda"} 82621.804 node_disk_io_time_weighted_seconds_total{device="sdb"} 67.07000000000001 node_disk_io_time_weighted_seconds_total{device="sdc"} 17.07 node_disk_io_time_weighted_seconds_total{device="sr0"} 0 node_disk_io_time_weighted_seconds_total{device="vda"} 2.0778722280000001e+06 # HELP node_disk_read_bytes_total The total number of bytes read successfully. # TYPE node_disk_read_bytes_total counter node_disk_read_bytes_total{device="dm-0"} 5.13708655616e+11 node_disk_read_bytes_total{device="dm-1"} 1.589248e+06 node_disk_read_bytes_total{device="dm-2"} 1.578752e+08 node_disk_read_bytes_total{device="dm-3"} 1.98144e+06 node_disk_read_bytes_total{device="dm-4"} 529408 node_disk_read_bytes_total{device="dm-5"} 4.3150848e+07 node_disk_read_bytes_total{device="mmcblk0"} 798720 node_disk_read_bytes_total{device="mmcblk0p1"} 81920 node_disk_read_bytes_total{device="mmcblk0p2"} 389120 node_disk_read_bytes_total{device="nvme0n1"} 2.377714176e+09 node_disk_read_bytes_total{device="sda"} 5.13713216512e+11 node_disk_read_bytes_total{device="sdb"} 4.944782848e+09 node_disk_read_bytes_total{device="sdc"} 8.48782848e+08 node_disk_read_bytes_total{device="sr0"} 0 node_disk_read_bytes_total{device="vda"} 1.6727491584e+10 # HELP node_disk_read_time_seconds_total The total number of seconds spent by all reads. 
# TYPE node_disk_read_time_seconds_total counter node_disk_read_time_seconds_total{device="dm-0"} 46229.572 node_disk_read_time_seconds_total{device="dm-1"} 0.084 node_disk_read_time_seconds_total{device="dm-2"} 6.5360000000000005 node_disk_read_time_seconds_total{device="dm-3"} 0.10400000000000001 node_disk_read_time_seconds_total{device="dm-4"} 0.028 node_disk_read_time_seconds_total{device="dm-5"} 0.924 node_disk_read_time_seconds_total{device="mmcblk0"} 0.156 node_disk_read_time_seconds_total{device="mmcblk0p1"} 0.024 node_disk_read_time_seconds_total{device="mmcblk0p2"} 0.068 node_disk_read_time_seconds_total{device="nvme0n1"} 21.650000000000002 node_disk_read_time_seconds_total{device="sda"} 18492.372 node_disk_read_time_seconds_total{device="sdb"} 0.084 node_disk_read_time_seconds_total{device="sdc"} 0.014 node_disk_read_time_seconds_total{device="sr0"} 0 node_disk_read_time_seconds_total{device="vda"} 8655.768 # HELP node_disk_reads_completed_total The total number of reads completed successfully. # TYPE node_disk_reads_completed_total counter node_disk_reads_completed_total{device="dm-0"} 5.9910002e+07 node_disk_reads_completed_total{device="dm-1"} 388 node_disk_reads_completed_total{device="dm-2"} 11571 node_disk_reads_completed_total{device="dm-3"} 3870 node_disk_reads_completed_total{device="dm-4"} 392 node_disk_reads_completed_total{device="dm-5"} 3729 node_disk_reads_completed_total{device="mmcblk0"} 192 node_disk_reads_completed_total{device="mmcblk0p1"} 17 node_disk_reads_completed_total{device="mmcblk0p2"} 95 node_disk_reads_completed_total{device="nvme0n1"} 47114 node_disk_reads_completed_total{device="sda"} 2.5354637e+07 node_disk_reads_completed_total{device="sdb"} 326552 node_disk_reads_completed_total{device="sdc"} 126552 node_disk_reads_completed_total{device="sr0"} 0 node_disk_reads_completed_total{device="vda"} 1.775784e+06 # HELP node_disk_reads_merged_total The total number of reads merged. # TYPE node_disk_reads_merged_total counter node_disk_reads_merged_total{device="dm-0"} 0 node_disk_reads_merged_total{device="dm-1"} 0 node_disk_reads_merged_total{device="dm-2"} 0 node_disk_reads_merged_total{device="dm-3"} 0 node_disk_reads_merged_total{device="dm-4"} 0 node_disk_reads_merged_total{device="dm-5"} 0 node_disk_reads_merged_total{device="mmcblk0"} 3 node_disk_reads_merged_total{device="mmcblk0p1"} 3 node_disk_reads_merged_total{device="mmcblk0p2"} 0 node_disk_reads_merged_total{device="nvme0n1"} 4 node_disk_reads_merged_total{device="sda"} 3.4367663e+07 node_disk_reads_merged_total{device="sdb"} 841 node_disk_reads_merged_total{device="sdc"} 141 node_disk_reads_merged_total{device="sr0"} 0 node_disk_reads_merged_total{device="vda"} 15386 # HELP node_disk_write_time_seconds_total This is the total number of seconds spent by all writes. 
# TYPE node_disk_write_time_seconds_total counter node_disk_write_time_seconds_total{device="dm-0"} 1.1585578e+06 node_disk_write_time_seconds_total{device="dm-1"} 0 node_disk_write_time_seconds_total{device="dm-2"} 122.884 node_disk_write_time_seconds_total{device="dm-3"} 0 node_disk_write_time_seconds_total{device="dm-4"} 0.016 node_disk_write_time_seconds_total{device="dm-5"} 104.684 node_disk_write_time_seconds_total{device="mmcblk0"} 0 node_disk_write_time_seconds_total{device="mmcblk0p1"} 0 node_disk_write_time_seconds_total{device="mmcblk0p2"} 0 node_disk_write_time_seconds_total{device="nvme0n1"} 1011.053 node_disk_write_time_seconds_total{device="sda"} 63877.96 node_disk_write_time_seconds_total{device="sdb"} 5.007 node_disk_write_time_seconds_total{device="sdc"} 1.0070000000000001 node_disk_write_time_seconds_total{device="sr0"} 0 node_disk_write_time_seconds_total{device="vda"} 2.069221364e+06 # HELP node_disk_writes_completed_total The total number of writes completed successfully. # TYPE node_disk_writes_completed_total counter node_disk_writes_completed_total{device="dm-0"} 3.9231014e+07 node_disk_writes_completed_total{device="dm-1"} 74 node_disk_writes_completed_total{device="dm-2"} 153522 node_disk_writes_completed_total{device="dm-3"} 0 node_disk_writes_completed_total{device="dm-4"} 38 node_disk_writes_completed_total{device="dm-5"} 98918 node_disk_writes_completed_total{device="mmcblk0"} 0 node_disk_writes_completed_total{device="mmcblk0p1"} 0 node_disk_writes_completed_total{device="mmcblk0p2"} 0 node_disk_writes_completed_total{device="nvme0n1"} 1.07832e+06 node_disk_writes_completed_total{device="sda"} 2.8444756e+07 node_disk_writes_completed_total{device="sdb"} 41822 node_disk_writes_completed_total{device="sdc"} 11822 node_disk_writes_completed_total{device="sr0"} 0 node_disk_writes_completed_total{device="vda"} 6.038856e+06 # HELP node_disk_writes_merged_total The number of writes merged. # TYPE node_disk_writes_merged_total counter node_disk_writes_merged_total{device="dm-0"} 0 node_disk_writes_merged_total{device="dm-1"} 0 node_disk_writes_merged_total{device="dm-2"} 0 node_disk_writes_merged_total{device="dm-3"} 0 node_disk_writes_merged_total{device="dm-4"} 0 node_disk_writes_merged_total{device="dm-5"} 0 node_disk_writes_merged_total{device="mmcblk0"} 0 node_disk_writes_merged_total{device="mmcblk0p1"} 0 node_disk_writes_merged_total{device="mmcblk0p2"} 0 node_disk_writes_merged_total{device="nvme0n1"} 43950 node_disk_writes_merged_total{device="sda"} 1.1134226e+07 node_disk_writes_merged_total{device="sdb"} 2895 node_disk_writes_merged_total{device="sdc"} 1895 node_disk_writes_merged_total{device="sr0"} 0 node_disk_writes_merged_total{device="vda"} 2.0711856e+07 # HELP node_disk_written_bytes_total The total number of bytes written successfully. 
# TYPE node_disk_written_bytes_total counter node_disk_written_bytes_total{device="dm-0"} 2.5891680256e+11 node_disk_written_bytes_total{device="dm-1"} 303104 node_disk_written_bytes_total{device="dm-2"} 2.607828992e+09 node_disk_written_bytes_total{device="dm-3"} 0 node_disk_written_bytes_total{device="dm-4"} 70144 node_disk_written_bytes_total{device="dm-5"} 5.89664256e+08 node_disk_written_bytes_total{device="mmcblk0"} 0 node_disk_written_bytes_total{device="mmcblk0p1"} 0 node_disk_written_bytes_total{device="mmcblk0p2"} 0 node_disk_written_bytes_total{device="nvme0n1"} 2.0199236096e+10 node_disk_written_bytes_total{device="sda"} 2.58916880384e+11 node_disk_written_bytes_total{device="sdb"} 1.01012736e+09 node_disk_written_bytes_total{device="sdc"} 8.852736e+07 node_disk_written_bytes_total{device="sr0"} 0 node_disk_written_bytes_total{device="vda"} 1.0938236928e+11 # HELP node_dmi_info A metric with a constant '1' value labeled by bios_date, bios_release, bios_vendor, bios_version, board_asset_tag, board_name, board_serial, board_vendor, board_version, chassis_asset_tag, chassis_serial, chassis_vendor, chassis_version, product_family, product_name, product_serial, product_sku, product_uuid, product_version, system_vendor if provided by DMI. # TYPE node_dmi_info gauge node_dmi_info{bios_date="04/12/2021",bios_release="2.2",bios_vendor="Dell Inc.",bios_version="2.2.4",board_name="07PXPY",board_serial=".7N62AI2.GRTCL6944100GP.",board_vendor="Dell Inc.",board_version="A01",chassis_asset_tag="",chassis_serial="7N62AI2",chassis_vendor="Dell Inc.",chassis_version="",product_family="PowerEdge",product_name="PowerEdge R6515",product_serial="7N62AI2",product_sku="SKU=NotProvided;ModelName=PowerEdge R6515",product_uuid="83340ca8-cb49-4474-8c29-d2088ca84dd9",product_version="�[�",system_vendor="Dell Inc."} 1 # HELP node_drbd_activitylog_writes_total Number of updates of the activity log area of the meta data. # TYPE node_drbd_activitylog_writes_total counter node_drbd_activitylog_writes_total{device="drbd1"} 1100 # HELP node_drbd_application_pending Number of block I/O requests forwarded to DRBD, but not yet answered by DRBD. # TYPE node_drbd_application_pending gauge node_drbd_application_pending{device="drbd1"} 12348 # HELP node_drbd_bitmap_writes_total Number of updates of the bitmap area of the meta data. # TYPE node_drbd_bitmap_writes_total counter node_drbd_bitmap_writes_total{device="drbd1"} 221 # HELP node_drbd_connected Whether DRBD is connected to the peer. # TYPE node_drbd_connected gauge node_drbd_connected{device="drbd1"} 1 # HELP node_drbd_disk_read_bytes_total Net data read from local hard disk; in bytes. # TYPE node_drbd_disk_read_bytes_total counter node_drbd_disk_read_bytes_total{device="drbd1"} 1.2154539008e+11 # HELP node_drbd_disk_state_is_up_to_date Whether the disk of the node is up to date. # TYPE node_drbd_disk_state_is_up_to_date gauge node_drbd_disk_state_is_up_to_date{device="drbd1",node="local"} 1 node_drbd_disk_state_is_up_to_date{device="drbd1",node="remote"} 1 # HELP node_drbd_disk_written_bytes_total Net data written on local hard disk; in bytes. # TYPE node_drbd_disk_written_bytes_total counter node_drbd_disk_written_bytes_total{device="drbd1"} 2.8941845504e+10 # HELP node_drbd_epochs Number of Epochs currently on the fly. # TYPE node_drbd_epochs gauge node_drbd_epochs{device="drbd1"} 1 # HELP node_drbd_local_pending Number of open requests to the local I/O sub-system. 
# TYPE node_drbd_local_pending gauge node_drbd_local_pending{device="drbd1"} 12345 # HELP node_drbd_network_received_bytes_total Total number of bytes received via the network. # TYPE node_drbd_network_received_bytes_total counter node_drbd_network_received_bytes_total{device="drbd1"} 1.0961011e+07 # HELP node_drbd_network_sent_bytes_total Total number of bytes sent via the network. # TYPE node_drbd_network_sent_bytes_total counter node_drbd_network_sent_bytes_total{device="drbd1"} 1.7740228608e+10 # HELP node_drbd_node_role_is_primary Whether the role of the node is in the primary state. # TYPE node_drbd_node_role_is_primary gauge node_drbd_node_role_is_primary{device="drbd1",node="local"} 1 node_drbd_node_role_is_primary{device="drbd1",node="remote"} 1 # HELP node_drbd_out_of_sync_bytes Amount of data known to be out of sync; in bytes. # TYPE node_drbd_out_of_sync_bytes gauge node_drbd_out_of_sync_bytes{device="drbd1"} 1.2645376e+07 # HELP node_drbd_remote_pending Number of requests sent to the peer, but that have not yet been answered by the latter. # TYPE node_drbd_remote_pending gauge node_drbd_remote_pending{device="drbd1"} 12346 # HELP node_drbd_remote_unacknowledged Number of requests received by the peer via the network connection, but that have not yet been answered. # TYPE node_drbd_remote_unacknowledged gauge node_drbd_remote_unacknowledged{device="drbd1"} 12347 # HELP node_edac_correctable_errors_total Total correctable memory errors. # TYPE node_edac_correctable_errors_total counter node_edac_correctable_errors_total{controller="0"} 1 # HELP node_edac_csrow_correctable_errors_total Total correctable memory errors for this csrow. # TYPE node_edac_csrow_correctable_errors_total counter node_edac_csrow_correctable_errors_total{controller="0",csrow="0"} 3 node_edac_csrow_correctable_errors_total{controller="0",csrow="unknown"} 2 # HELP node_edac_csrow_uncorrectable_errors_total Total uncorrectable memory errors for this csrow. # TYPE node_edac_csrow_uncorrectable_errors_total counter node_edac_csrow_uncorrectable_errors_total{controller="0",csrow="0"} 4 node_edac_csrow_uncorrectable_errors_total{controller="0",csrow="unknown"} 6 # HELP node_edac_uncorrectable_errors_total Total uncorrectable memory errors. # TYPE node_edac_uncorrectable_errors_total counter node_edac_uncorrectable_errors_total{controller="0"} 5 # HELP node_entropy_available_bits Bits of available entropy. # TYPE node_entropy_available_bits gauge node_entropy_available_bits 1337 # HELP node_entropy_pool_size_bits Bits of entropy pool. # TYPE node_entropy_pool_size_bits gauge node_entropy_pool_size_bits 4096 # HELP node_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which node_exporter was built, and the goos and goarch for the build. # TYPE node_exporter_build_info gauge # HELP node_fibrechannel_error_frames_total Number of errors in frames # TYPE node_fibrechannel_error_frames_total counter node_fibrechannel_error_frames_total{fc_host="host0"} 0 # HELP node_fibrechannel_fcp_packet_aborts_total Number of aborted packets # TYPE node_fibrechannel_fcp_packet_aborts_total counter node_fibrechannel_fcp_packet_aborts_total{fc_host="host0"} 19 # HELP node_fibrechannel_info Non-numeric data from /sys/class/fc_host/, value is always 1. 
# TYPE node_fibrechannel_info gauge node_fibrechannel_info{dev_loss_tmo="30",fabric_name="0",fc_host="host0",port_id="000002",port_name="1000e0071bce95f2",port_state="Online",port_type="Point-To-Point (direct nport connection)",speed="16 Gbit",supported_classes="Class 3",supported_speeds="4 Gbit, 8 Gbit, 16 Gbit",symbolic_name="Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux"} 1 # HELP node_fibrechannel_invalid_crc_total Invalid Cyclic Redundancy Check count # TYPE node_fibrechannel_invalid_crc_total counter node_fibrechannel_invalid_crc_total{fc_host="host0"} 2 # HELP node_fibrechannel_invalid_tx_words_total Number of invalid words transmitted by host port # TYPE node_fibrechannel_invalid_tx_words_total counter node_fibrechannel_invalid_tx_words_total{fc_host="host0"} 8 # HELP node_fibrechannel_link_failure_total Number of times the host port link has failed # TYPE node_fibrechannel_link_failure_total counter node_fibrechannel_link_failure_total{fc_host="host0"} 9 # HELP node_fibrechannel_loss_of_signal_total Number of times signal has been lost # TYPE node_fibrechannel_loss_of_signal_total counter node_fibrechannel_loss_of_signal_total{fc_host="host0"} 17 # HELP node_fibrechannel_loss_of_sync_total Number of failures on either bit or transmission word boundaries # TYPE node_fibrechannel_loss_of_sync_total counter node_fibrechannel_loss_of_sync_total{fc_host="host0"} 16 # HELP node_fibrechannel_nos_total Number Not_Operational Primitive Sequence received by host port # TYPE node_fibrechannel_nos_total counter node_fibrechannel_nos_total{fc_host="host0"} 18 # HELP node_fibrechannel_rx_frames_total Number of frames received # TYPE node_fibrechannel_rx_frames_total counter node_fibrechannel_rx_frames_total{fc_host="host0"} 3 # HELP node_fibrechannel_rx_words_total Number of words received by host port # TYPE node_fibrechannel_rx_words_total counter node_fibrechannel_rx_words_total{fc_host="host0"} 4 # HELP node_fibrechannel_seconds_since_last_reset_total Number of seconds since last host port reset # TYPE node_fibrechannel_seconds_since_last_reset_total counter node_fibrechannel_seconds_since_last_reset_total{fc_host="host0"} 7 # HELP node_fibrechannel_tx_frames_total Number of frames transmitted by host port # TYPE node_fibrechannel_tx_frames_total counter node_fibrechannel_tx_frames_total{fc_host="host0"} 5 # HELP node_fibrechannel_tx_words_total Number of words transmitted by host port # TYPE node_fibrechannel_tx_words_total counter node_fibrechannel_tx_words_total{fc_host="host0"} 6 # HELP node_filefd_allocated File descriptor statistics: allocated. # TYPE node_filefd_allocated gauge node_filefd_allocated 1024 # HELP node_filefd_maximum File descriptor statistics: maximum. # TYPE node_filefd_maximum gauge node_filefd_maximum 1.631329e+06 # HELP node_forks_total Total number of forks. 
# TYPE node_forks_total counter node_forks_total 26442 # HELP node_hwmon_chip_names Annotation metric for human-readable chip names # TYPE node_hwmon_chip_names gauge node_hwmon_chip_names{chip="nct6779",chip_name="nct6779"} 1 node_hwmon_chip_names{chip="platform_coretemp_0",chip_name="coretemp"} 1 node_hwmon_chip_names{chip="platform_coretemp_1",chip_name="coretemp"} 1 # HELP node_hwmon_fan_alarm Hardware sensor alarm status (fan) # TYPE node_hwmon_fan_alarm gauge node_hwmon_fan_alarm{chip="nct6779",sensor="fan2"} 0 # HELP node_hwmon_fan_beep_enabled Hardware monitor sensor has beeping enabled # TYPE node_hwmon_fan_beep_enabled gauge node_hwmon_fan_beep_enabled{chip="nct6779",sensor="fan2"} 0 # HELP node_hwmon_fan_manual Hardware monitor fan element manual # TYPE node_hwmon_fan_manual gauge node_hwmon_fan_manual{chip="platform_applesmc_768",sensor="fan1"} 0 node_hwmon_fan_manual{chip="platform_applesmc_768",sensor="fan2"} 0 # HELP node_hwmon_fan_max_rpm Hardware monitor for fan revolutions per minute (max) # TYPE node_hwmon_fan_max_rpm gauge node_hwmon_fan_max_rpm{chip="platform_applesmc_768",sensor="fan1"} 6156 node_hwmon_fan_max_rpm{chip="platform_applesmc_768",sensor="fan2"} 5700 # HELP node_hwmon_fan_min_rpm Hardware monitor for fan revolutions per minute (min) # TYPE node_hwmon_fan_min_rpm gauge node_hwmon_fan_min_rpm{chip="nct6779",sensor="fan2"} 0 node_hwmon_fan_min_rpm{chip="platform_applesmc_768",sensor="fan1"} 2160 node_hwmon_fan_min_rpm{chip="platform_applesmc_768",sensor="fan2"} 2000 # HELP node_hwmon_fan_output Hardware monitor fan element output # TYPE node_hwmon_fan_output gauge node_hwmon_fan_output{chip="platform_applesmc_768",sensor="fan1"} 2160 node_hwmon_fan_output{chip="platform_applesmc_768",sensor="fan2"} 2000 # HELP node_hwmon_fan_pulses Hardware monitor fan element pulses # TYPE node_hwmon_fan_pulses gauge node_hwmon_fan_pulses{chip="nct6779",sensor="fan2"} 2 # HELP node_hwmon_fan_rpm Hardware monitor for fan revolutions per minute (input) # TYPE node_hwmon_fan_rpm gauge node_hwmon_fan_rpm{chip="nct6779",sensor="fan2"} 1098 node_hwmon_fan_rpm{chip="platform_applesmc_768",sensor="fan1"} 0 node_hwmon_fan_rpm{chip="platform_applesmc_768",sensor="fan2"} 1998 # HELP node_hwmon_fan_target_rpm Hardware monitor for fan revolutions per minute (target) # TYPE node_hwmon_fan_target_rpm gauge node_hwmon_fan_target_rpm{chip="nct6779",sensor="fan2"} 27000 # HELP node_hwmon_fan_tolerance Hardware monitor fan element tolerance # TYPE node_hwmon_fan_tolerance gauge node_hwmon_fan_tolerance{chip="nct6779",sensor="fan2"} 0 # HELP node_hwmon_in_alarm Hardware sensor alarm status (in) # TYPE node_hwmon_in_alarm gauge node_hwmon_in_alarm{chip="nct6779",sensor="in0"} 0 node_hwmon_in_alarm{chip="nct6779",sensor="in1"} 1 # HELP node_hwmon_in_beep_enabled Hardware monitor sensor has beeping enabled # TYPE node_hwmon_in_beep_enabled gauge node_hwmon_in_beep_enabled{chip="nct6779",sensor="in0"} 0 node_hwmon_in_beep_enabled{chip="nct6779",sensor="in1"} 0 # HELP node_hwmon_in_max_volts Hardware monitor for voltage (max) # TYPE node_hwmon_in_max_volts gauge node_hwmon_in_max_volts{chip="nct6779",sensor="in0"} 1.744 node_hwmon_in_max_volts{chip="nct6779",sensor="in1"} 0 # HELP node_hwmon_in_min_volts Hardware monitor for voltage (min) # TYPE node_hwmon_in_min_volts gauge node_hwmon_in_min_volts{chip="nct6779",sensor="in0"} 0 node_hwmon_in_min_volts{chip="nct6779",sensor="in1"} 0 # HELP node_hwmon_in_volts Hardware monitor for voltage (input) # TYPE node_hwmon_in_volts gauge 
node_hwmon_in_volts{chip="nct6779",sensor="in0"} 0.792 node_hwmon_in_volts{chip="nct6779",sensor="in1"} 1.024 # HELP node_hwmon_intrusion_alarm Hardware sensor alarm status (intrusion) # TYPE node_hwmon_intrusion_alarm gauge node_hwmon_intrusion_alarm{chip="nct6779",sensor="intrusion0"} 1 node_hwmon_intrusion_alarm{chip="nct6779",sensor="intrusion1"} 1 # HELP node_hwmon_intrusion_beep_enabled Hardware monitor sensor has beeping enabled # TYPE node_hwmon_intrusion_beep_enabled gauge node_hwmon_intrusion_beep_enabled{chip="nct6779",sensor="intrusion0"} 0 node_hwmon_intrusion_beep_enabled{chip="nct6779",sensor="intrusion1"} 0 # HELP node_hwmon_pwm_auto_point1_pwm Hardware monitor pwm element auto_point1_pwm # TYPE node_hwmon_pwm_auto_point1_pwm gauge node_hwmon_pwm_auto_point1_pwm{chip="nct6779",sensor="pwm1"} 153 # HELP node_hwmon_pwm_auto_point1_temp Hardware monitor pwm element auto_point1_temp # TYPE node_hwmon_pwm_auto_point1_temp gauge node_hwmon_pwm_auto_point1_temp{chip="nct6779",sensor="pwm1"} 30000 # HELP node_hwmon_pwm_auto_point2_pwm Hardware monitor pwm element auto_point2_pwm # TYPE node_hwmon_pwm_auto_point2_pwm gauge node_hwmon_pwm_auto_point2_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point2_temp Hardware monitor pwm element auto_point2_temp # TYPE node_hwmon_pwm_auto_point2_temp gauge node_hwmon_pwm_auto_point2_temp{chip="nct6779",sensor="pwm1"} 70000 # HELP node_hwmon_pwm_auto_point3_pwm Hardware monitor pwm element auto_point3_pwm # TYPE node_hwmon_pwm_auto_point3_pwm gauge node_hwmon_pwm_auto_point3_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point3_temp Hardware monitor pwm element auto_point3_temp # TYPE node_hwmon_pwm_auto_point3_temp gauge node_hwmon_pwm_auto_point3_temp{chip="nct6779",sensor="pwm1"} 70000 # HELP node_hwmon_pwm_auto_point4_pwm Hardware monitor pwm element auto_point4_pwm # TYPE node_hwmon_pwm_auto_point4_pwm gauge node_hwmon_pwm_auto_point4_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point4_temp Hardware monitor pwm element auto_point4_temp # TYPE node_hwmon_pwm_auto_point4_temp gauge node_hwmon_pwm_auto_point4_temp{chip="nct6779",sensor="pwm1"} 70000 # HELP node_hwmon_pwm_auto_point5_pwm Hardware monitor pwm element auto_point5_pwm # TYPE node_hwmon_pwm_auto_point5_pwm gauge node_hwmon_pwm_auto_point5_pwm{chip="nct6779",sensor="pwm1"} 255 # HELP node_hwmon_pwm_auto_point5_temp Hardware monitor pwm element auto_point5_temp # TYPE node_hwmon_pwm_auto_point5_temp gauge node_hwmon_pwm_auto_point5_temp{chip="nct6779",sensor="pwm1"} 75000 # HELP node_hwmon_pwm_crit_temp_tolerance Hardware monitor pwm element crit_temp_tolerance # TYPE node_hwmon_pwm_crit_temp_tolerance gauge node_hwmon_pwm_crit_temp_tolerance{chip="nct6779",sensor="pwm1"} 2000 # HELP node_hwmon_pwm_enable Hardware monitor pwm element enable # TYPE node_hwmon_pwm_enable gauge node_hwmon_pwm_enable{chip="nct6779",sensor="pwm1"} 5 # HELP node_hwmon_pwm_floor Hardware monitor pwm element floor # TYPE node_hwmon_pwm_floor gauge node_hwmon_pwm_floor{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_mode Hardware monitor pwm element mode # TYPE node_hwmon_pwm_mode gauge node_hwmon_pwm_mode{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_start Hardware monitor pwm element start # TYPE node_hwmon_pwm_start gauge node_hwmon_pwm_start{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_step_down_time Hardware monitor pwm element step_down_time # TYPE node_hwmon_pwm_step_down_time gauge 
node_hwmon_pwm_step_down_time{chip="nct6779",sensor="pwm1"} 100 # HELP node_hwmon_pwm_step_up_time Hardware monitor pwm element step_up_time # TYPE node_hwmon_pwm_step_up_time gauge node_hwmon_pwm_step_up_time{chip="nct6779",sensor="pwm1"} 100 # HELP node_hwmon_pwm_stop_time Hardware monitor pwm element stop_time # TYPE node_hwmon_pwm_stop_time gauge node_hwmon_pwm_stop_time{chip="nct6779",sensor="pwm1"} 6000 # HELP node_hwmon_pwm_target_temp Hardware monitor pwm element target_temp # TYPE node_hwmon_pwm_target_temp gauge node_hwmon_pwm_target_temp{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_temp_sel Hardware monitor pwm element temp_sel # TYPE node_hwmon_pwm_temp_sel gauge node_hwmon_pwm_temp_sel{chip="nct6779",sensor="pwm1"} 7 # HELP node_hwmon_pwm_temp_tolerance Hardware monitor pwm element temp_tolerance # TYPE node_hwmon_pwm_temp_tolerance gauge node_hwmon_pwm_temp_tolerance{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_duty_base Hardware monitor pwm element weight_duty_base # TYPE node_hwmon_pwm_weight_duty_base gauge node_hwmon_pwm_weight_duty_base{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_duty_step Hardware monitor pwm element weight_duty_step # TYPE node_hwmon_pwm_weight_duty_step gauge node_hwmon_pwm_weight_duty_step{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_temp_sel Hardware monitor pwm element weight_temp_sel # TYPE node_hwmon_pwm_weight_temp_sel gauge node_hwmon_pwm_weight_temp_sel{chip="nct6779",sensor="pwm1"} 1 # HELP node_hwmon_pwm_weight_temp_step Hardware monitor pwm element weight_temp_step # TYPE node_hwmon_pwm_weight_temp_step gauge node_hwmon_pwm_weight_temp_step{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_temp_step_base Hardware monitor pwm element weight_temp_step_base # TYPE node_hwmon_pwm_weight_temp_step_base gauge node_hwmon_pwm_weight_temp_step_base{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_pwm_weight_temp_step_tol Hardware monitor pwm element weight_temp_step_tol # TYPE node_hwmon_pwm_weight_temp_step_tol gauge node_hwmon_pwm_weight_temp_step_tol{chip="nct6779",sensor="pwm1"} 0 # HELP node_hwmon_sensor_label Label for given chip and sensor # TYPE node_hwmon_sensor_label gauge node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp1"} 1 node_hwmon_sensor_label{chip="hwmon4",label="foosensor",sensor="temp2"} 1 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Left side ",sensor="fan1"} 1 node_hwmon_sensor_label{chip="platform_applesmc_768",label="Right side ",sensor="fan2"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 0",sensor="temp2"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 1",sensor="temp3"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 2",sensor="temp4"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Core 3",sensor="temp5"} 1 node_hwmon_sensor_label{chip="platform_coretemp_0",label="Physical id 0",sensor="temp1"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 0",sensor="temp2"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 1",sensor="temp3"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 2",sensor="temp4"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Core 3",sensor="temp5"} 1 node_hwmon_sensor_label{chip="platform_coretemp_1",label="Physical id 0",sensor="temp1"} 1 # HELP node_hwmon_temp_celsius Hardware monitor for temperature (input) # TYPE node_hwmon_temp_celsius gauge 
node_hwmon_temp_celsius{chip="hwmon4",sensor="temp1"} 55 node_hwmon_temp_celsius{chip="hwmon4",sensor="temp2"} 54 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp1"} 55 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp2"} 54 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp3"} 52 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp4"} 53 node_hwmon_temp_celsius{chip="platform_coretemp_0",sensor="temp5"} 50 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp1"} 55 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp2"} 54 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp3"} 52 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp4"} 53 node_hwmon_temp_celsius{chip="platform_coretemp_1",sensor="temp5"} 50 # HELP node_hwmon_temp_crit_alarm_celsius Hardware monitor for temperature (crit_alarm) # TYPE node_hwmon_temp_crit_alarm_celsius gauge node_hwmon_temp_crit_alarm_celsius{chip="hwmon4",sensor="temp1"} 0 node_hwmon_temp_crit_alarm_celsius{chip="hwmon4",sensor="temp2"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp1"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp2"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp3"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp4"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_0",sensor="temp5"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp1"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp2"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp3"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp4"} 0 node_hwmon_temp_crit_alarm_celsius{chip="platform_coretemp_1",sensor="temp5"} 0 # HELP node_hwmon_temp_crit_celsius Hardware monitor for temperature (crit) # TYPE node_hwmon_temp_crit_celsius gauge node_hwmon_temp_crit_celsius{chip="hwmon4",sensor="temp1"} 100 node_hwmon_temp_crit_celsius{chip="hwmon4",sensor="temp2"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp1"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp2"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp3"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp4"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_0",sensor="temp5"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp1"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp2"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp3"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp4"} 100 node_hwmon_temp_crit_celsius{chip="platform_coretemp_1",sensor="temp5"} 100 # HELP node_hwmon_temp_max_celsius Hardware monitor for temperature (max) # TYPE node_hwmon_temp_max_celsius gauge node_hwmon_temp_max_celsius{chip="hwmon4",sensor="temp1"} 100 node_hwmon_temp_max_celsius{chip="hwmon4",sensor="temp2"} 100 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp1"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp2"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp3"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp4"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_0",sensor="temp5"} 84 
node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp1"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp2"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp3"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp4"} 84 node_hwmon_temp_max_celsius{chip="platform_coretemp_1",sensor="temp5"} 84 # HELP node_infiniband_info Non-numeric data from /sys/class/infiniband/, value is always 1. # TYPE node_infiniband_info gauge node_infiniband_info{board_id="I40IW Board ID",device="i40iw0",firmware_version="0.2",hca_type="I40IW"} 1 node_infiniband_info{board_id="SM_1141000001000",device="mlx4_0",firmware_version="2.31.5050",hca_type="MT4099"} 1 # HELP node_infiniband_legacy_data_received_bytes_total Number of data octets received on all links # TYPE node_infiniband_legacy_data_received_bytes_total counter node_infiniband_legacy_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 node_infiniband_legacy_data_received_bytes_total{device="mlx4_0",port="2"} 1.8527668e+07 # HELP node_infiniband_legacy_data_transmitted_bytes_total Number of data octets transmitted on all links # TYPE node_infiniband_legacy_data_transmitted_bytes_total counter node_infiniband_legacy_data_transmitted_bytes_total{device="mlx4_0",port="1"} 1.493376e+07 node_infiniband_legacy_data_transmitted_bytes_total{device="mlx4_0",port="2"} 1.493376e+07 # HELP node_infiniband_legacy_multicast_packets_received_total Number of multicast packets received # TYPE node_infiniband_legacy_multicast_packets_received_total counter node_infiniband_legacy_multicast_packets_received_total{device="mlx4_0",port="1"} 93 node_infiniband_legacy_multicast_packets_received_total{device="mlx4_0",port="2"} 93 # HELP node_infiniband_legacy_multicast_packets_transmitted_total Number of multicast packets transmitted # TYPE node_infiniband_legacy_multicast_packets_transmitted_total counter node_infiniband_legacy_multicast_packets_transmitted_total{device="mlx4_0",port="1"} 16 node_infiniband_legacy_multicast_packets_transmitted_total{device="mlx4_0",port="2"} 16 # HELP node_infiniband_legacy_packets_received_total Number of data packets received on all links # TYPE node_infiniband_legacy_packets_received_total counter node_infiniband_legacy_packets_received_total{device="mlx4_0",port="1"} 0 node_infiniband_legacy_packets_received_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_legacy_packets_transmitted_total Number of data packets received on all links # TYPE node_infiniband_legacy_packets_transmitted_total counter node_infiniband_legacy_packets_transmitted_total{device="mlx4_0",port="1"} 0 node_infiniband_legacy_packets_transmitted_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_legacy_unicast_packets_received_total Number of unicast packets received # TYPE node_infiniband_legacy_unicast_packets_received_total counter node_infiniband_legacy_unicast_packets_received_total{device="mlx4_0",port="1"} 61148 node_infiniband_legacy_unicast_packets_received_total{device="mlx4_0",port="2"} 61148 # HELP node_infiniband_legacy_unicast_packets_transmitted_total Number of unicast packets transmitted # TYPE node_infiniband_legacy_unicast_packets_transmitted_total counter node_infiniband_legacy_unicast_packets_transmitted_total{device="mlx4_0",port="1"} 61239 node_infiniband_legacy_unicast_packets_transmitted_total{device="mlx4_0",port="2"} 61239 # HELP node_infiniband_link_downed_total Number of times the link failed to recover from an error state and went down # TYPE 
node_infiniband_link_downed_total counter node_infiniband_link_downed_total{device="mlx4_0",port="1"} 0 node_infiniband_link_downed_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_link_error_recovery_total Number of times the link successfully recovered from an error state # TYPE node_infiniband_link_error_recovery_total counter node_infiniband_link_error_recovery_total{device="mlx4_0",port="1"} 0 node_infiniband_link_error_recovery_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_multicast_packets_received_total Number of multicast packets received (including errors) # TYPE node_infiniband_multicast_packets_received_total counter node_infiniband_multicast_packets_received_total{device="mlx4_0",port="1"} 93 node_infiniband_multicast_packets_received_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_multicast_packets_transmitted_total Number of multicast packets transmitted (including errors) # TYPE node_infiniband_multicast_packets_transmitted_total counter node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="1"} 16 node_infiniband_multicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_physical_state_id Physical state of the InfiniBand port (0: no change, 1: sleep, 2: polling, 3: disable, 4: shift, 5: link up, 6: link error recover, 7: phytest) # TYPE node_infiniband_physical_state_id gauge node_infiniband_physical_state_id{device="i40iw0",port="1"} 5 node_infiniband_physical_state_id{device="mlx4_0",port="1"} 5 node_infiniband_physical_state_id{device="mlx4_0",port="2"} 5 # HELP node_infiniband_port_constraint_errors_received_total Number of packets received on the switch physical port that are discarded # TYPE node_infiniband_port_constraint_errors_received_total counter node_infiniband_port_constraint_errors_received_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_constraint_errors_transmitted_total Number of packets not transmitted from the switch physical port # TYPE node_infiniband_port_constraint_errors_transmitted_total counter node_infiniband_port_constraint_errors_transmitted_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_data_received_bytes_total Number of data octets received on all links # TYPE node_infiniband_port_data_received_bytes_total counter node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="1"} 1.8527668e+07 node_infiniband_port_data_received_bytes_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_port_data_transmitted_bytes_total Number of data octets transmitted on all links # TYPE node_infiniband_port_data_transmitted_bytes_total counter node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="1"} 1.493376e+07 node_infiniband_port_data_transmitted_bytes_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_port_discards_received_total Number of inbound packets discarded by the port because the port is down or congested # TYPE node_infiniband_port_discards_received_total counter node_infiniband_port_discards_received_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_discards_transmitted_total Number of outbound packets discarded by the port because the port is down or congested # TYPE node_infiniband_port_discards_transmitted_total counter node_infiniband_port_discards_transmitted_total{device="mlx4_0",port="1"} 5 # HELP node_infiniband_port_errors_received_total Number of packets containing an error that were received on this port # TYPE node_infiniband_port_errors_received_total counter 
node_infiniband_port_errors_received_total{device="mlx4_0",port="1"} 0 # HELP node_infiniband_port_packets_received_total Number of packets received on all VLs by this port (including errors) # TYPE node_infiniband_port_packets_received_total counter node_infiniband_port_packets_received_total{device="mlx4_0",port="1"} 6.825908347e+09 # HELP node_infiniband_port_packets_transmitted_total Number of packets transmitted on all VLs from this port (including errors) # TYPE node_infiniband_port_packets_transmitted_total counter node_infiniband_port_packets_transmitted_total{device="mlx4_0",port="1"} 6.235865e+06 # HELP node_infiniband_port_transmit_wait_total Number of ticks during which the port had data to transmit but no data was sent during the entire tick # TYPE node_infiniband_port_transmit_wait_total counter node_infiniband_port_transmit_wait_total{device="mlx4_0",port="1"} 4.294967295e+09 # HELP node_infiniband_rate_bytes_per_second Maximum signal transfer rate # TYPE node_infiniband_rate_bytes_per_second gauge node_infiniband_rate_bytes_per_second{device="i40iw0",port="1"} 1.25e+09 node_infiniband_rate_bytes_per_second{device="mlx4_0",port="1"} 5e+09 node_infiniband_rate_bytes_per_second{device="mlx4_0",port="2"} 5e+09 # HELP node_infiniband_state_id State of the InfiniBand port (0: no change, 1: down, 2: init, 3: armed, 4: active, 5: act defer) # TYPE node_infiniband_state_id gauge node_infiniband_state_id{device="i40iw0",port="1"} 4 node_infiniband_state_id{device="mlx4_0",port="1"} 4 node_infiniband_state_id{device="mlx4_0",port="2"} 4 # HELP node_infiniband_unicast_packets_received_total Number of unicast packets received (including errors) # TYPE node_infiniband_unicast_packets_received_total counter node_infiniband_unicast_packets_received_total{device="mlx4_0",port="1"} 61148 node_infiniband_unicast_packets_received_total{device="mlx4_0",port="2"} 0 # HELP node_infiniband_unicast_packets_transmitted_total Number of unicast packets transmitted (including errors) # TYPE node_infiniband_unicast_packets_transmitted_total counter node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="1"} 61239 node_infiniband_unicast_packets_transmitted_total{device="mlx4_0",port="2"} 0 # HELP node_interrupts_total Interrupt details. 
# TYPE node_interrupts_total counter node_interrupts_total{cpu="0",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="0",devices="",info="Function call interrupts",type="CAL"} 148554 node_interrupts_total{cpu="0",devices="",info="IRQ work interrupts",type="IWI"} 1.509379e+06 node_interrupts_total{cpu="0",devices="",info="Local timer interrupts",type="LOC"} 1.74326351e+08 node_interrupts_total{cpu="0",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="0",devices="",info="Machine check polls",type="MCP"} 2406 node_interrupts_total{cpu="0",devices="",info="Non-maskable interrupts",type="NMI"} 47 node_interrupts_total{cpu="0",devices="",info="Performance monitoring interrupts",type="PMI"} 47 node_interrupts_total{cpu="0",devices="",info="Rescheduling interrupts",type="RES"} 1.0847134e+07 node_interrupts_total{cpu="0",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="0",devices="",info="TLB shootdowns",type="TLB"} 1.0460334e+07 node_interrupts_total{cpu="0",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="0",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="0",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 398553 node_interrupts_total{cpu="0",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.434032e+06 node_interrupts_total{cpu="0",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="0",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="0",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 328511 node_interrupts_total{cpu="0",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 1.451445e+06 node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="1"} 17960 node_interrupts_total{cpu="0",devices="i8042",info="IR-IO-APIC-edge",type="12"} 380847 node_interrupts_total{cpu="0",devices="i915",info="IR-PCI-MSI-edge",type="44"} 140636 node_interrupts_total{cpu="0",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 4.3078464e+07 node_interrupts_total{cpu="0",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 4 node_interrupts_total{cpu="0",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 1 node_interrupts_total{cpu="0",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 350 node_interrupts_total{cpu="0",devices="timer",info="IR-IO-APIC-edge",type="0"} 18 node_interrupts_total{cpu="0",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 378324 node_interrupts_total{cpu="1",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="1",devices="",info="Function call interrupts",type="CAL"} 157441 node_interrupts_total{cpu="1",devices="",info="IRQ work interrupts",type="IWI"} 2.411776e+06 node_interrupts_total{cpu="1",devices="",info="Local timer interrupts",type="LOC"} 1.35776678e+08 node_interrupts_total{cpu="1",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="1",devices="",info="Machine check polls",type="MCP"} 2399 node_interrupts_total{cpu="1",devices="",info="Non-maskable interrupts",type="NMI"} 5031 node_interrupts_total{cpu="1",devices="",info="Performance monitoring interrupts",type="PMI"} 5031 node_interrupts_total{cpu="1",devices="",info="Rescheduling interrupts",type="RES"} 9.111507e+06 node_interrupts_total{cpu="1",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="1",devices="",info="TLB shootdowns",type="TLB"} 9.918429e+06 
node_interrupts_total{cpu="1",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="1",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="1",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 2320 node_interrupts_total{cpu="1",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 8.092205e+06 node_interrupts_total{cpu="1",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="1",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="1",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 322879 node_interrupts_total{cpu="1",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 3.333499e+06 node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="1"} 105 node_interrupts_total{cpu="1",devices="i8042",info="IR-IO-APIC-edge",type="12"} 1021 node_interrupts_total{cpu="1",devices="i915",info="IR-PCI-MSI-edge",type="44"} 226313 node_interrupts_total{cpu="1",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 130 node_interrupts_total{cpu="1",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 22 node_interrupts_total{cpu="1",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 node_interrupts_total{cpu="1",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 224 node_interrupts_total{cpu="1",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 node_interrupts_total{cpu="1",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 1.734637e+06 node_interrupts_total{cpu="2",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="2",devices="",info="Function call interrupts",type="CAL"} 142912 node_interrupts_total{cpu="2",devices="",info="IRQ work interrupts",type="IWI"} 1.512975e+06 node_interrupts_total{cpu="2",devices="",info="Local timer interrupts",type="LOC"} 1.68393257e+08 node_interrupts_total{cpu="2",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="2",devices="",info="Machine check polls",type="MCP"} 2399 node_interrupts_total{cpu="2",devices="",info="Non-maskable interrupts",type="NMI"} 6211 node_interrupts_total{cpu="2",devices="",info="Performance monitoring interrupts",type="PMI"} 6211 node_interrupts_total{cpu="2",devices="",info="Rescheduling interrupts",type="RES"} 1.5999335e+07 node_interrupts_total{cpu="2",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="2",devices="",info="TLB shootdowns",type="TLB"} 1.0494258e+07 node_interrupts_total{cpu="2",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="2",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="2",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 824 node_interrupts_total{cpu="2",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 6.478877e+06 node_interrupts_total{cpu="2",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="2",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="2",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 293782 node_interrupts_total{cpu="2",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 1.092032e+06 node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 node_interrupts_total{cpu="2",devices="i8042",info="IR-IO-APIC-edge",type="12"} 240 node_interrupts_total{cpu="2",devices="i915",info="IR-PCI-MSI-edge",type="44"} 347 node_interrupts_total{cpu="2",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 460171 
node_interrupts_total{cpu="2",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 node_interrupts_total{cpu="2",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 node_interrupts_total{cpu="2",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 0 node_interrupts_total{cpu="2",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 node_interrupts_total{cpu="2",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 440240 node_interrupts_total{cpu="3",devices="",info="APIC ICR read retries",type="RTR"} 0 node_interrupts_total{cpu="3",devices="",info="Function call interrupts",type="CAL"} 155528 node_interrupts_total{cpu="3",devices="",info="IRQ work interrupts",type="IWI"} 2.428828e+06 node_interrupts_total{cpu="3",devices="",info="Local timer interrupts",type="LOC"} 1.30980079e+08 node_interrupts_total{cpu="3",devices="",info="Machine check exceptions",type="MCE"} 0 node_interrupts_total{cpu="3",devices="",info="Machine check polls",type="MCP"} 2399 node_interrupts_total{cpu="3",devices="",info="Non-maskable interrupts",type="NMI"} 4968 node_interrupts_total{cpu="3",devices="",info="Performance monitoring interrupts",type="PMI"} 4968 node_interrupts_total{cpu="3",devices="",info="Rescheduling interrupts",type="RES"} 7.45726e+06 node_interrupts_total{cpu="3",devices="",info="Spurious interrupts",type="SPU"} 0 node_interrupts_total{cpu="3",devices="",info="TLB shootdowns",type="TLB"} 1.0345022e+07 node_interrupts_total{cpu="3",devices="",info="Thermal event interrupts",type="TRM"} 0 node_interrupts_total{cpu="3",devices="",info="Threshold APIC interrupts",type="THR"} 0 node_interrupts_total{cpu="3",devices="acpi",info="IR-IO-APIC-fasteoi",type="9"} 863 node_interrupts_total{cpu="3",devices="ahci",info="IR-PCI-MSI-edge",type="43"} 7.492252e+06 node_interrupts_total{cpu="3",devices="dmar0",info="DMAR_MSI-edge",type="40"} 0 node_interrupts_total{cpu="3",devices="dmar1",info="DMAR_MSI-edge",type="41"} 0 node_interrupts_total{cpu="3",devices="ehci_hcd:usb1, mmc0",info="IR-IO-APIC-fasteoi",type="16"} 351412 node_interrupts_total{cpu="3",devices="ehci_hcd:usb2",info="IR-IO-APIC-fasteoi",type="23"} 2.644609e+06 node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="1"} 28 node_interrupts_total{cpu="3",devices="i8042",info="IR-IO-APIC-edge",type="12"} 198 node_interrupts_total{cpu="3",devices="i915",info="IR-PCI-MSI-edge",type="44"} 633 node_interrupts_total{cpu="3",devices="iwlwifi",info="IR-PCI-MSI-edge",type="46"} 290 node_interrupts_total{cpu="3",devices="mei_me",info="IR-PCI-MSI-edge",type="45"} 0 node_interrupts_total{cpu="3",devices="rtc0",info="IR-IO-APIC-edge",type="8"} 0 node_interrupts_total{cpu="3",devices="snd_hda_intel",info="IR-PCI-MSI-edge",type="47"} 0 node_interrupts_total{cpu="3",devices="timer",info="IR-IO-APIC-edge",type="0"} 0 node_interrupts_total{cpu="3",devices="xhci_hcd",info="IR-PCI-MSI-edge",type="42"} 2.434308e+06 # HELP node_intr_total Total number of interrupts serviced. # TYPE node_intr_total counter node_intr_total 8.885917e+06 # HELP node_ipvs_backend_connections_active The current active connections by local and remote address. 
# TYPE node_ipvs_backend_connections_active gauge node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 321 node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 64 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. 
# TYPE node_ipvs_backend_connections_inactive gauge node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 5 node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 1 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. # TYPE node_ipvs_backend_weight gauge node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 20 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_connections_total The total number of connections made. 
# TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 # HELP node_ipvs_incoming_bytes_total The total amount of incoming data. # TYPE node_ipvs_incoming_bytes_total counter node_ipvs_incoming_bytes_total 8.9991519156915e+13 # HELP node_ipvs_incoming_packets_total The total number of incoming packets. # TYPE node_ipvs_incoming_packets_total counter node_ipvs_incoming_packets_total 3.811989221e+09 # HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. # TYPE node_ipvs_outgoing_bytes_total counter node_ipvs_outgoing_bytes_total 0 # HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. # TYPE node_ipvs_outgoing_packets_total counter node_ipvs_outgoing_packets_total 0 # HELP node_ksmd_full_scans_total ksmd 'full_scans' file. # TYPE node_ksmd_full_scans_total counter node_ksmd_full_scans_total 323 # HELP node_ksmd_merge_across_nodes ksmd 'merge_across_nodes' file. # TYPE node_ksmd_merge_across_nodes gauge node_ksmd_merge_across_nodes 1 # HELP node_ksmd_pages_shared ksmd 'pages_shared' file. # TYPE node_ksmd_pages_shared gauge node_ksmd_pages_shared 1 # HELP node_ksmd_pages_sharing ksmd 'pages_sharing' file. # TYPE node_ksmd_pages_sharing gauge node_ksmd_pages_sharing 255 # HELP node_ksmd_pages_to_scan ksmd 'pages_to_scan' file. # TYPE node_ksmd_pages_to_scan gauge node_ksmd_pages_to_scan 100 # HELP node_ksmd_pages_unshared ksmd 'pages_unshared' file. # TYPE node_ksmd_pages_unshared gauge node_ksmd_pages_unshared 0 # HELP node_ksmd_pages_volatile ksmd 'pages_volatile' file. # TYPE node_ksmd_pages_volatile gauge node_ksmd_pages_volatile 0 # HELP node_ksmd_run ksmd 'run' file. # TYPE node_ksmd_run gauge node_ksmd_run 1 # HELP node_ksmd_sleep_seconds ksmd 'sleep_millisecs' file. # TYPE node_ksmd_sleep_seconds gauge node_ksmd_sleep_seconds 0.02 # HELP node_lnstat_allocs_total linux network cache stats # TYPE node_lnstat_allocs_total counter node_lnstat_allocs_total{cpu="0",subsystem="arp_cache"} 1 node_lnstat_allocs_total{cpu="0",subsystem="ndisc_cache"} 240 node_lnstat_allocs_total{cpu="1",subsystem="arp_cache"} 13 node_lnstat_allocs_total{cpu="1",subsystem="ndisc_cache"} 252 # HELP node_lnstat_delete_list_total linux network cache stats # TYPE node_lnstat_delete_list_total counter node_lnstat_delete_list_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_delete_list_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_delete_list_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_delete_list_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_delete_total linux network cache stats # TYPE node_lnstat_delete_total counter node_lnstat_delete_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_delete_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_delete_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_delete_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_destroys_total linux network cache stats # TYPE node_lnstat_destroys_total counter node_lnstat_destroys_total{cpu="0",subsystem="arp_cache"} 2 node_lnstat_destroys_total{cpu="0",subsystem="ndisc_cache"} 241 node_lnstat_destroys_total{cpu="1",subsystem="arp_cache"} 14 node_lnstat_destroys_total{cpu="1",subsystem="ndisc_cache"} 253 # HELP node_lnstat_drop_total linux network cache stats # TYPE node_lnstat_drop_total counter node_lnstat_drop_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_drop_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_drop_total{cpu="2",subsystem="nf_conntrack"} 0 
node_lnstat_drop_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_early_drop_total linux network cache stats # TYPE node_lnstat_early_drop_total counter node_lnstat_early_drop_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_early_drop_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_early_drop_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_early_drop_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_entries_total linux network cache stats # TYPE node_lnstat_entries_total counter node_lnstat_entries_total{cpu="0",subsystem="arp_cache"} 20 node_lnstat_entries_total{cpu="0",subsystem="ndisc_cache"} 36 node_lnstat_entries_total{cpu="0",subsystem="nf_conntrack"} 33 node_lnstat_entries_total{cpu="1",subsystem="arp_cache"} 20 node_lnstat_entries_total{cpu="1",subsystem="ndisc_cache"} 36 node_lnstat_entries_total{cpu="1",subsystem="nf_conntrack"} 33 node_lnstat_entries_total{cpu="2",subsystem="nf_conntrack"} 33 node_lnstat_entries_total{cpu="3",subsystem="nf_conntrack"} 33 # HELP node_lnstat_expect_create_total linux network cache stats # TYPE node_lnstat_expect_create_total counter node_lnstat_expect_create_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_expect_create_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_expect_create_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_expect_create_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_expect_delete_total linux network cache stats # TYPE node_lnstat_expect_delete_total counter node_lnstat_expect_delete_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_expect_delete_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_expect_delete_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_expect_delete_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_expect_new_total linux network cache stats # TYPE node_lnstat_expect_new_total counter node_lnstat_expect_new_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_expect_new_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_expect_new_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_expect_new_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_forced_gc_runs_total linux network cache stats # TYPE node_lnstat_forced_gc_runs_total counter node_lnstat_forced_gc_runs_total{cpu="0",subsystem="arp_cache"} 10 node_lnstat_forced_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 249 node_lnstat_forced_gc_runs_total{cpu="1",subsystem="arp_cache"} 22 node_lnstat_forced_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 261 # HELP node_lnstat_found_total linux network cache stats # TYPE node_lnstat_found_total counter node_lnstat_found_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_found_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_found_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_found_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_hash_grows_total linux network cache stats # TYPE node_lnstat_hash_grows_total counter node_lnstat_hash_grows_total{cpu="0",subsystem="arp_cache"} 3 node_lnstat_hash_grows_total{cpu="0",subsystem="ndisc_cache"} 242 node_lnstat_hash_grows_total{cpu="1",subsystem="arp_cache"} 15 node_lnstat_hash_grows_total{cpu="1",subsystem="ndisc_cache"} 254 # HELP node_lnstat_hits_total linux network cache stats # TYPE node_lnstat_hits_total counter node_lnstat_hits_total{cpu="0",subsystem="arp_cache"} 5 node_lnstat_hits_total{cpu="0",subsystem="ndisc_cache"} 244 node_lnstat_hits_total{cpu="1",subsystem="arp_cache"} 17 
node_lnstat_hits_total{cpu="1",subsystem="ndisc_cache"} 256 # HELP node_lnstat_icmp_error_total linux network cache stats # TYPE node_lnstat_icmp_error_total counter node_lnstat_icmp_error_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_icmp_error_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_icmp_error_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_icmp_error_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_ignore_total linux network cache stats # TYPE node_lnstat_ignore_total counter node_lnstat_ignore_total{cpu="0",subsystem="nf_conntrack"} 22666 node_lnstat_ignore_total{cpu="1",subsystem="nf_conntrack"} 22180 node_lnstat_ignore_total{cpu="2",subsystem="nf_conntrack"} 22740 node_lnstat_ignore_total{cpu="3",subsystem="nf_conntrack"} 22152 # HELP node_lnstat_insert_failed_total linux network cache stats # TYPE node_lnstat_insert_failed_total counter node_lnstat_insert_failed_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_insert_failed_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_insert_failed_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_insert_failed_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_insert_total linux network cache stats # TYPE node_lnstat_insert_total counter node_lnstat_insert_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_insert_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_insert_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_insert_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_invalid_total linux network cache stats # TYPE node_lnstat_invalid_total counter node_lnstat_invalid_total{cpu="0",subsystem="nf_conntrack"} 3 node_lnstat_invalid_total{cpu="1",subsystem="nf_conntrack"} 2 node_lnstat_invalid_total{cpu="2",subsystem="nf_conntrack"} 1 node_lnstat_invalid_total{cpu="3",subsystem="nf_conntrack"} 47 # HELP node_lnstat_lookups_total linux network cache stats # TYPE node_lnstat_lookups_total counter node_lnstat_lookups_total{cpu="0",subsystem="arp_cache"} 4 node_lnstat_lookups_total{cpu="0",subsystem="ndisc_cache"} 243 node_lnstat_lookups_total{cpu="1",subsystem="arp_cache"} 16 node_lnstat_lookups_total{cpu="1",subsystem="ndisc_cache"} 255 # HELP node_lnstat_new_total linux network cache stats # TYPE node_lnstat_new_total counter node_lnstat_new_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_new_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_new_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_new_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_periodic_gc_runs_total linux network cache stats # TYPE node_lnstat_periodic_gc_runs_total counter node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="arp_cache"} 9 node_lnstat_periodic_gc_runs_total{cpu="0",subsystem="ndisc_cache"} 248 node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="arp_cache"} 21 node_lnstat_periodic_gc_runs_total{cpu="1",subsystem="ndisc_cache"} 260 # HELP node_lnstat_rcv_probes_mcast_total linux network cache stats # TYPE node_lnstat_rcv_probes_mcast_total counter node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="arp_cache"} 7 node_lnstat_rcv_probes_mcast_total{cpu="0",subsystem="ndisc_cache"} 246 node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="arp_cache"} 19 node_lnstat_rcv_probes_mcast_total{cpu="1",subsystem="ndisc_cache"} 258 # HELP node_lnstat_rcv_probes_ucast_total linux network cache stats # TYPE node_lnstat_rcv_probes_ucast_total counter node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="arp_cache"} 8 
node_lnstat_rcv_probes_ucast_total{cpu="0",subsystem="ndisc_cache"} 247 node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="arp_cache"} 20 node_lnstat_rcv_probes_ucast_total{cpu="1",subsystem="ndisc_cache"} 259 # HELP node_lnstat_res_failed_total linux network cache stats # TYPE node_lnstat_res_failed_total counter node_lnstat_res_failed_total{cpu="0",subsystem="arp_cache"} 6 node_lnstat_res_failed_total{cpu="0",subsystem="ndisc_cache"} 245 node_lnstat_res_failed_total{cpu="1",subsystem="arp_cache"} 18 node_lnstat_res_failed_total{cpu="1",subsystem="ndisc_cache"} 257 # HELP node_lnstat_search_restart_total linux network cache stats # TYPE node_lnstat_search_restart_total counter node_lnstat_search_restart_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_search_restart_total{cpu="1",subsystem="nf_conntrack"} 2 node_lnstat_search_restart_total{cpu="2",subsystem="nf_conntrack"} 1 node_lnstat_search_restart_total{cpu="3",subsystem="nf_conntrack"} 4 # HELP node_lnstat_searched_total linux network cache stats # TYPE node_lnstat_searched_total counter node_lnstat_searched_total{cpu="0",subsystem="nf_conntrack"} 0 node_lnstat_searched_total{cpu="1",subsystem="nf_conntrack"} 0 node_lnstat_searched_total{cpu="2",subsystem="nf_conntrack"} 0 node_lnstat_searched_total{cpu="3",subsystem="nf_conntrack"} 0 # HELP node_lnstat_table_fulls_total linux network cache stats # TYPE node_lnstat_table_fulls_total counter node_lnstat_table_fulls_total{cpu="0",subsystem="arp_cache"} 12 node_lnstat_table_fulls_total{cpu="0",subsystem="ndisc_cache"} 251 node_lnstat_table_fulls_total{cpu="1",subsystem="arp_cache"} 24 node_lnstat_table_fulls_total{cpu="1",subsystem="ndisc_cache"} 263 # HELP node_lnstat_unresolved_discards_total linux network cache stats # TYPE node_lnstat_unresolved_discards_total counter node_lnstat_unresolved_discards_total{cpu="0",subsystem="arp_cache"} 11 node_lnstat_unresolved_discards_total{cpu="0",subsystem="ndisc_cache"} 250 node_lnstat_unresolved_discards_total{cpu="1",subsystem="arp_cache"} 23 node_lnstat_unresolved_discards_total{cpu="1",subsystem="ndisc_cache"} 262 # HELP node_load1 1m load average. # TYPE node_load1 gauge node_load1 0.21 # HELP node_load15 15m load average. # TYPE node_load15 gauge node_load15 0.39 # HELP node_load5 5m load average. # TYPE node_load5 gauge node_load5 0.37 # HELP node_md_blocks Total number of blocks on device. # TYPE node_md_blocks gauge node_md_blocks{device="md0"} 248896 node_md_blocks{device="md00"} 4.186624e+06 node_md_blocks{device="md10"} 3.14159265e+08 node_md_blocks{device="md101"} 322560 node_md_blocks{device="md11"} 4.190208e+06 node_md_blocks{device="md12"} 3.886394368e+09 node_md_blocks{device="md120"} 2.095104e+06 node_md_blocks{device="md126"} 1.855870976e+09 node_md_blocks{device="md127"} 3.12319552e+08 node_md_blocks{device="md201"} 1.993728e+06 node_md_blocks{device="md219"} 7932 node_md_blocks{device="md3"} 5.853468288e+09 node_md_blocks{device="md4"} 4.883648e+06 node_md_blocks{device="md6"} 1.95310144e+08 node_md_blocks{device="md7"} 7.813735424e+09 node_md_blocks{device="md8"} 1.95310144e+08 node_md_blocks{device="md9"} 523968 # HELP node_md_blocks_synced Number of blocks synced on device. 
# TYPE node_md_blocks_synced gauge node_md_blocks_synced{device="md0"} 248896 node_md_blocks_synced{device="md00"} 4.186624e+06 node_md_blocks_synced{device="md10"} 3.14159265e+08 node_md_blocks_synced{device="md101"} 322560 node_md_blocks_synced{device="md11"} 0 node_md_blocks_synced{device="md12"} 3.886394368e+09 node_md_blocks_synced{device="md120"} 2.095104e+06 node_md_blocks_synced{device="md126"} 1.855870976e+09 node_md_blocks_synced{device="md127"} 3.12319552e+08 node_md_blocks_synced{device="md201"} 114176 node_md_blocks_synced{device="md219"} 7932 node_md_blocks_synced{device="md3"} 5.853468288e+09 node_md_blocks_synced{device="md4"} 4.883648e+06 node_md_blocks_synced{device="md6"} 1.6775552e+07 node_md_blocks_synced{device="md7"} 7.813735424e+09 node_md_blocks_synced{device="md8"} 1.6775552e+07 node_md_blocks_synced{device="md9"} 0 # HELP node_md_disks Number of active/failed/spare disks of device. # TYPE node_md_disks gauge node_md_disks{device="md0",state="active"} 2 node_md_disks{device="md0",state="failed"} 0 node_md_disks{device="md0",state="spare"} 0 node_md_disks{device="md00",state="active"} 1 node_md_disks{device="md00",state="failed"} 0 node_md_disks{device="md00",state="spare"} 0 node_md_disks{device="md10",state="active"} 2 node_md_disks{device="md10",state="failed"} 0 node_md_disks{device="md10",state="spare"} 0 node_md_disks{device="md101",state="active"} 3 node_md_disks{device="md101",state="failed"} 0 node_md_disks{device="md101",state="spare"} 0 node_md_disks{device="md11",state="active"} 2 node_md_disks{device="md11",state="failed"} 1 node_md_disks{device="md11",state="spare"} 2 node_md_disks{device="md12",state="active"} 2 node_md_disks{device="md12",state="failed"} 0 node_md_disks{device="md12",state="spare"} 0 node_md_disks{device="md120",state="active"} 2 node_md_disks{device="md120",state="failed"} 0 node_md_disks{device="md120",state="spare"} 0 node_md_disks{device="md126",state="active"} 2 node_md_disks{device="md126",state="failed"} 0 node_md_disks{device="md126",state="spare"} 0 node_md_disks{device="md127",state="active"} 2 node_md_disks{device="md127",state="failed"} 0 node_md_disks{device="md127",state="spare"} 0 node_md_disks{device="md201",state="active"} 2 node_md_disks{device="md201",state="failed"} 0 node_md_disks{device="md201",state="spare"} 0 node_md_disks{device="md219",state="active"} 0 node_md_disks{device="md219",state="failed"} 0 node_md_disks{device="md219",state="spare"} 3 node_md_disks{device="md3",state="active"} 8 node_md_disks{device="md3",state="failed"} 0 node_md_disks{device="md3",state="spare"} 2 node_md_disks{device="md4",state="active"} 0 node_md_disks{device="md4",state="failed"} 1 node_md_disks{device="md4",state="spare"} 1 node_md_disks{device="md6",state="active"} 1 node_md_disks{device="md6",state="failed"} 1 node_md_disks{device="md6",state="spare"} 1 node_md_disks{device="md7",state="active"} 3 node_md_disks{device="md7",state="failed"} 1 node_md_disks{device="md7",state="spare"} 0 node_md_disks{device="md8",state="active"} 2 node_md_disks{device="md8",state="failed"} 0 node_md_disks{device="md8",state="spare"} 2 node_md_disks{device="md9",state="active"} 4 node_md_disks{device="md9",state="failed"} 2 node_md_disks{device="md9",state="spare"} 1 # HELP node_md_disks_required Total number of disks of device. 
# TYPE node_md_disks_required gauge node_md_disks_required{device="md0"} 2 node_md_disks_required{device="md00"} 1 node_md_disks_required{device="md10"} 2 node_md_disks_required{device="md101"} 3 node_md_disks_required{device="md11"} 2 node_md_disks_required{device="md12"} 2 node_md_disks_required{device="md120"} 2 node_md_disks_required{device="md126"} 2 node_md_disks_required{device="md127"} 2 node_md_disks_required{device="md201"} 2 node_md_disks_required{device="md219"} 0 node_md_disks_required{device="md3"} 8 node_md_disks_required{device="md4"} 0 node_md_disks_required{device="md6"} 2 node_md_disks_required{device="md7"} 4 node_md_disks_required{device="md8"} 2 node_md_disks_required{device="md9"} 4 # HELP node_md_state Indicates the state of md-device. # TYPE node_md_state gauge node_md_state{device="md0",state="active"} 1 node_md_state{device="md0",state="check"} 0 node_md_state{device="md0",state="inactive"} 0 node_md_state{device="md0",state="recovering"} 0 node_md_state{device="md0",state="resync"} 0 node_md_state{device="md00",state="active"} 1 node_md_state{device="md00",state="check"} 0 node_md_state{device="md00",state="inactive"} 0 node_md_state{device="md00",state="recovering"} 0 node_md_state{device="md00",state="resync"} 0 node_md_state{device="md10",state="active"} 1 node_md_state{device="md10",state="check"} 0 node_md_state{device="md10",state="inactive"} 0 node_md_state{device="md10",state="recovering"} 0 node_md_state{device="md10",state="resync"} 0 node_md_state{device="md101",state="active"} 1 node_md_state{device="md101",state="check"} 0 node_md_state{device="md101",state="inactive"} 0 node_md_state{device="md101",state="recovering"} 0 node_md_state{device="md101",state="resync"} 0 node_md_state{device="md11",state="active"} 0 node_md_state{device="md11",state="check"} 0 node_md_state{device="md11",state="inactive"} 0 node_md_state{device="md11",state="recovering"} 0 node_md_state{device="md11",state="resync"} 1 node_md_state{device="md12",state="active"} 1 node_md_state{device="md12",state="check"} 0 node_md_state{device="md12",state="inactive"} 0 node_md_state{device="md12",state="recovering"} 0 node_md_state{device="md12",state="resync"} 0 node_md_state{device="md120",state="active"} 1 node_md_state{device="md120",state="check"} 0 node_md_state{device="md120",state="inactive"} 0 node_md_state{device="md120",state="recovering"} 0 node_md_state{device="md120",state="resync"} 0 node_md_state{device="md126",state="active"} 1 node_md_state{device="md126",state="check"} 0 node_md_state{device="md126",state="inactive"} 0 node_md_state{device="md126",state="recovering"} 0 node_md_state{device="md126",state="resync"} 0 node_md_state{device="md127",state="active"} 1 node_md_state{device="md127",state="check"} 0 node_md_state{device="md127",state="inactive"} 0 node_md_state{device="md127",state="recovering"} 0 node_md_state{device="md127",state="resync"} 0 node_md_state{device="md201",state="active"} 0 node_md_state{device="md201",state="check"} 1 node_md_state{device="md201",state="inactive"} 0 node_md_state{device="md201",state="recovering"} 0 node_md_state{device="md201",state="resync"} 0 node_md_state{device="md219",state="active"} 0 node_md_state{device="md219",state="check"} 0 node_md_state{device="md219",state="inactive"} 1 node_md_state{device="md219",state="recovering"} 0 node_md_state{device="md219",state="resync"} 0 node_md_state{device="md3",state="active"} 1 node_md_state{device="md3",state="check"} 0 node_md_state{device="md3",state="inactive"} 0 
node_md_state{device="md3",state="recovering"} 0 node_md_state{device="md3",state="resync"} 0 node_md_state{device="md4",state="active"} 0 node_md_state{device="md4",state="check"} 0 node_md_state{device="md4",state="inactive"} 1 node_md_state{device="md4",state="recovering"} 0 node_md_state{device="md4",state="resync"} 0 node_md_state{device="md6",state="active"} 0 node_md_state{device="md6",state="check"} 0 node_md_state{device="md6",state="inactive"} 0 node_md_state{device="md6",state="recovering"} 1 node_md_state{device="md6",state="resync"} 0 node_md_state{device="md7",state="active"} 1 node_md_state{device="md7",state="check"} 0 node_md_state{device="md7",state="inactive"} 0 node_md_state{device="md7",state="recovering"} 0 node_md_state{device="md7",state="resync"} 0 node_md_state{device="md8",state="active"} 0 node_md_state{device="md8",state="check"} 0 node_md_state{device="md8",state="inactive"} 0 node_md_state{device="md8",state="recovering"} 0 node_md_state{device="md8",state="resync"} 1 node_md_state{device="md9",state="active"} 0 node_md_state{device="md9",state="check"} 0 node_md_state{device="md9",state="inactive"} 0 node_md_state{device="md9",state="recovering"} 0 node_md_state{device="md9",state="resync"} 1 # HELP node_memory_Active_anon_bytes Memory information field Active_anon_bytes. # TYPE node_memory_Active_anon_bytes gauge node_memory_Active_anon_bytes 2.068484096e+09 # HELP node_memory_Active_bytes Memory information field Active_bytes. # TYPE node_memory_Active_bytes gauge node_memory_Active_bytes 2.287017984e+09 # HELP node_memory_Active_file_bytes Memory information field Active_file_bytes. # TYPE node_memory_Active_file_bytes gauge node_memory_Active_file_bytes 2.18533888e+08 # HELP node_memory_AnonHugePages_bytes Memory information field AnonHugePages_bytes. # TYPE node_memory_AnonHugePages_bytes gauge node_memory_AnonHugePages_bytes 0 # HELP node_memory_AnonPages_bytes Memory information field AnonPages_bytes. # TYPE node_memory_AnonPages_bytes gauge node_memory_AnonPages_bytes 2.298032128e+09 # HELP node_memory_Bounce_bytes Memory information field Bounce_bytes. # TYPE node_memory_Bounce_bytes gauge node_memory_Bounce_bytes 0 # HELP node_memory_Buffers_bytes Memory information field Buffers_bytes. # TYPE node_memory_Buffers_bytes gauge node_memory_Buffers_bytes 2.256896e+07 # HELP node_memory_Cached_bytes Memory information field Cached_bytes. # TYPE node_memory_Cached_bytes gauge node_memory_Cached_bytes 9.53229312e+08 # HELP node_memory_CommitLimit_bytes Memory information field CommitLimit_bytes. # TYPE node_memory_CommitLimit_bytes gauge node_memory_CommitLimit_bytes 6.210940928e+09 # HELP node_memory_Committed_AS_bytes Memory information field Committed_AS_bytes. # TYPE node_memory_Committed_AS_bytes gauge node_memory_Committed_AS_bytes 8.023486464e+09 # HELP node_memory_DirectMap2M_bytes Memory information field DirectMap2M_bytes. # TYPE node_memory_DirectMap2M_bytes gauge node_memory_DirectMap2M_bytes 3.787456512e+09 # HELP node_memory_DirectMap4k_bytes Memory information field DirectMap4k_bytes. # TYPE node_memory_DirectMap4k_bytes gauge node_memory_DirectMap4k_bytes 1.9011584e+08 # HELP node_memory_Dirty_bytes Memory information field Dirty_bytes. # TYPE node_memory_Dirty_bytes gauge node_memory_Dirty_bytes 1.077248e+06 # HELP node_memory_HardwareCorrupted_bytes Memory information field HardwareCorrupted_bytes. 
# TYPE node_memory_HardwareCorrupted_bytes gauge node_memory_HardwareCorrupted_bytes 0 # HELP node_memory_HugePages_Free Memory information field HugePages_Free. # TYPE node_memory_HugePages_Free gauge node_memory_HugePages_Free 0 # HELP node_memory_HugePages_Rsvd Memory information field HugePages_Rsvd. # TYPE node_memory_HugePages_Rsvd gauge node_memory_HugePages_Rsvd 0 # HELP node_memory_HugePages_Surp Memory information field HugePages_Surp. # TYPE node_memory_HugePages_Surp gauge node_memory_HugePages_Surp 0 # HELP node_memory_HugePages_Total Memory information field HugePages_Total. # TYPE node_memory_HugePages_Total gauge node_memory_HugePages_Total 0 # HELP node_memory_Hugepagesize_bytes Memory information field Hugepagesize_bytes. # TYPE node_memory_Hugepagesize_bytes gauge node_memory_Hugepagesize_bytes 2.097152e+06 # HELP node_memory_Inactive_anon_bytes Memory information field Inactive_anon_bytes. # TYPE node_memory_Inactive_anon_bytes gauge node_memory_Inactive_anon_bytes 9.04245248e+08 # HELP node_memory_Inactive_bytes Memory information field Inactive_bytes. # TYPE node_memory_Inactive_bytes gauge node_memory_Inactive_bytes 1.053417472e+09 # HELP node_memory_Inactive_file_bytes Memory information field Inactive_file_bytes. # TYPE node_memory_Inactive_file_bytes gauge node_memory_Inactive_file_bytes 1.49172224e+08 # HELP node_memory_KernelStack_bytes Memory information field KernelStack_bytes. # TYPE node_memory_KernelStack_bytes gauge node_memory_KernelStack_bytes 5.9392e+06 # HELP node_memory_Mapped_bytes Memory information field Mapped_bytes. # TYPE node_memory_Mapped_bytes gauge node_memory_Mapped_bytes 2.4496128e+08 # HELP node_memory_MemFree_bytes Memory information field MemFree_bytes. # TYPE node_memory_MemFree_bytes gauge node_memory_MemFree_bytes 2.30883328e+08 # HELP node_memory_MemTotal_bytes Memory information field MemTotal_bytes. # TYPE node_memory_MemTotal_bytes gauge node_memory_MemTotal_bytes 3.831959552e+09 # HELP node_memory_Mlocked_bytes Memory information field Mlocked_bytes. # TYPE node_memory_Mlocked_bytes gauge node_memory_Mlocked_bytes 32768 # HELP node_memory_NFS_Unstable_bytes Memory information field NFS_Unstable_bytes. # TYPE node_memory_NFS_Unstable_bytes gauge node_memory_NFS_Unstable_bytes 0 # HELP node_memory_PageTables_bytes Memory information field PageTables_bytes. # TYPE node_memory_PageTables_bytes gauge node_memory_PageTables_bytes 7.7017088e+07 # HELP node_memory_SReclaimable_bytes Memory information field SReclaimable_bytes. # TYPE node_memory_SReclaimable_bytes gauge node_memory_SReclaimable_bytes 4.5846528e+07 # HELP node_memory_SUnreclaim_bytes Memory information field SUnreclaim_bytes. # TYPE node_memory_SUnreclaim_bytes gauge node_memory_SUnreclaim_bytes 5.545984e+07 # HELP node_memory_Shmem_bytes Memory information field Shmem_bytes. # TYPE node_memory_Shmem_bytes gauge node_memory_Shmem_bytes 6.0809216e+08 # HELP node_memory_Slab_bytes Memory information field Slab_bytes. # TYPE node_memory_Slab_bytes gauge node_memory_Slab_bytes 1.01306368e+08 # HELP node_memory_SwapCached_bytes Memory information field SwapCached_bytes. # TYPE node_memory_SwapCached_bytes gauge node_memory_SwapCached_bytes 1.97124096e+08 # HELP node_memory_SwapFree_bytes Memory information field SwapFree_bytes. # TYPE node_memory_SwapFree_bytes gauge node_memory_SwapFree_bytes 3.23108864e+09 # HELP node_memory_SwapTotal_bytes Memory information field SwapTotal_bytes. 
# TYPE node_memory_SwapTotal_bytes gauge node_memory_SwapTotal_bytes 4.2949632e+09 # HELP node_memory_Unevictable_bytes Memory information field Unevictable_bytes. # TYPE node_memory_Unevictable_bytes gauge node_memory_Unevictable_bytes 32768 # HELP node_memory_VmallocChunk_bytes Memory information field VmallocChunk_bytes. # TYPE node_memory_VmallocChunk_bytes gauge node_memory_VmallocChunk_bytes 3.5183963009024e+13 # HELP node_memory_VmallocTotal_bytes Memory information field VmallocTotal_bytes. # TYPE node_memory_VmallocTotal_bytes gauge node_memory_VmallocTotal_bytes 3.5184372087808e+13 # HELP node_memory_VmallocUsed_bytes Memory information field VmallocUsed_bytes. # TYPE node_memory_VmallocUsed_bytes gauge node_memory_VmallocUsed_bytes 3.6130816e+08 # HELP node_memory_WritebackTmp_bytes Memory information field WritebackTmp_bytes. # TYPE node_memory_WritebackTmp_bytes gauge node_memory_WritebackTmp_bytes 0 # HELP node_memory_Writeback_bytes Memory information field Writeback_bytes. # TYPE node_memory_Writeback_bytes gauge node_memory_Writeback_bytes 0 # HELP node_memory_numa_Active Memory information field Active. # TYPE node_memory_numa_Active gauge node_memory_numa_Active{node="0"} 5.58733312e+09 node_memory_numa_Active{node="1"} 5.739003904e+09 node_memory_numa_Active{node="2"} 5.739003904e+09 # HELP node_memory_numa_Active_anon Memory information field Active_anon. # TYPE node_memory_numa_Active_anon gauge node_memory_numa_Active_anon{node="0"} 7.07915776e+08 node_memory_numa_Active_anon{node="1"} 6.04635136e+08 node_memory_numa_Active_anon{node="2"} 6.04635136e+08 # HELP node_memory_numa_Active_file Memory information field Active_file. # TYPE node_memory_numa_Active_file gauge node_memory_numa_Active_file{node="0"} 4.879417344e+09 node_memory_numa_Active_file{node="1"} 5.134368768e+09 node_memory_numa_Active_file{node="2"} 5.134368768e+09 # HELP node_memory_numa_AnonHugePages Memory information field AnonHugePages. # TYPE node_memory_numa_AnonHugePages gauge node_memory_numa_AnonHugePages{node="0"} 1.50994944e+08 node_memory_numa_AnonHugePages{node="1"} 9.2274688e+07 node_memory_numa_AnonHugePages{node="2"} 9.2274688e+07 # HELP node_memory_numa_AnonPages Memory information field AnonPages. # TYPE node_memory_numa_AnonPages gauge node_memory_numa_AnonPages{node="0"} 8.07112704e+08 node_memory_numa_AnonPages{node="1"} 6.88058368e+08 node_memory_numa_AnonPages{node="2"} 6.88058368e+08 # HELP node_memory_numa_Bounce Memory information field Bounce. # TYPE node_memory_numa_Bounce gauge node_memory_numa_Bounce{node="0"} 0 node_memory_numa_Bounce{node="1"} 0 node_memory_numa_Bounce{node="2"} 0 # HELP node_memory_numa_Dirty Memory information field Dirty. # TYPE node_memory_numa_Dirty gauge node_memory_numa_Dirty{node="0"} 20480 node_memory_numa_Dirty{node="1"} 122880 node_memory_numa_Dirty{node="2"} 122880 # HELP node_memory_numa_FilePages Memory information field FilePages. # TYPE node_memory_numa_FilePages gauge node_memory_numa_FilePages{node="0"} 7.1855017984e+10 node_memory_numa_FilePages{node="1"} 8.5585088512e+10 node_memory_numa_FilePages{node="2"} 8.5585088512e+10 # HELP node_memory_numa_HugePages_Free Memory information field HugePages_Free. # TYPE node_memory_numa_HugePages_Free gauge node_memory_numa_HugePages_Free{node="0"} 0 node_memory_numa_HugePages_Free{node="1"} 0 node_memory_numa_HugePages_Free{node="2"} 0 # HELP node_memory_numa_HugePages_Surp Memory information field HugePages_Surp. 
# TYPE node_memory_numa_HugePages_Surp gauge node_memory_numa_HugePages_Surp{node="0"} 0 node_memory_numa_HugePages_Surp{node="1"} 0 node_memory_numa_HugePages_Surp{node="2"} 0 # HELP node_memory_numa_HugePages_Total Memory information field HugePages_Total. # TYPE node_memory_numa_HugePages_Total gauge node_memory_numa_HugePages_Total{node="0"} 0 node_memory_numa_HugePages_Total{node="1"} 0 node_memory_numa_HugePages_Total{node="2"} 0 # HELP node_memory_numa_Inactive Memory information field Inactive. # TYPE node_memory_numa_Inactive gauge node_memory_numa_Inactive{node="0"} 6.0569788416e+10 node_memory_numa_Inactive{node="1"} 7.3165406208e+10 node_memory_numa_Inactive{node="2"} 7.3165406208e+10 # HELP node_memory_numa_Inactive_anon Memory information field Inactive_anon. # TYPE node_memory_numa_Inactive_anon gauge node_memory_numa_Inactive_anon{node="0"} 3.48626944e+08 node_memory_numa_Inactive_anon{node="1"} 2.91930112e+08 node_memory_numa_Inactive_anon{node="2"} 2.91930112e+08 # HELP node_memory_numa_Inactive_file Memory information field Inactive_file. # TYPE node_memory_numa_Inactive_file gauge node_memory_numa_Inactive_file{node="0"} 6.0221161472e+10 node_memory_numa_Inactive_file{node="1"} 7.2873476096e+10 node_memory_numa_Inactive_file{node="2"} 7.2873476096e+10 # HELP node_memory_numa_KernelStack Memory information field KernelStack. # TYPE node_memory_numa_KernelStack gauge node_memory_numa_KernelStack{node="0"} 3.4832384e+07 node_memory_numa_KernelStack{node="1"} 3.1850496e+07 node_memory_numa_KernelStack{node="2"} 3.1850496e+07 # HELP node_memory_numa_Mapped Memory information field Mapped. # TYPE node_memory_numa_Mapped gauge node_memory_numa_Mapped{node="0"} 9.1570176e+08 node_memory_numa_Mapped{node="1"} 8.84850688e+08 node_memory_numa_Mapped{node="2"} 8.84850688e+08 # HELP node_memory_numa_MemFree Memory information field MemFree. # TYPE node_memory_numa_MemFree gauge node_memory_numa_MemFree{node="0"} 5.4303100928e+10 node_memory_numa_MemFree{node="1"} 4.0586022912e+10 node_memory_numa_MemFree{node="2"} 4.0586022912e+10 # HELP node_memory_numa_MemTotal Memory information field MemTotal. # TYPE node_memory_numa_MemTotal gauge node_memory_numa_MemTotal{node="0"} 1.3740271616e+11 node_memory_numa_MemTotal{node="1"} 1.37438953472e+11 node_memory_numa_MemTotal{node="2"} 1.37438953472e+11 # HELP node_memory_numa_MemUsed Memory information field MemUsed. # TYPE node_memory_numa_MemUsed gauge node_memory_numa_MemUsed{node="0"} 8.3099615232e+10 node_memory_numa_MemUsed{node="1"} 9.685293056e+10 node_memory_numa_MemUsed{node="2"} 9.685293056e+10 # HELP node_memory_numa_Mlocked Memory information field Mlocked. # TYPE node_memory_numa_Mlocked gauge node_memory_numa_Mlocked{node="0"} 0 node_memory_numa_Mlocked{node="1"} 0 node_memory_numa_Mlocked{node="2"} 0 # HELP node_memory_numa_NFS_Unstable Memory information field NFS_Unstable. # TYPE node_memory_numa_NFS_Unstable gauge node_memory_numa_NFS_Unstable{node="0"} 0 node_memory_numa_NFS_Unstable{node="1"} 0 node_memory_numa_NFS_Unstable{node="2"} 0 # HELP node_memory_numa_PageTables Memory information field PageTables. # TYPE node_memory_numa_PageTables gauge node_memory_numa_PageTables{node="0"} 1.46743296e+08 node_memory_numa_PageTables{node="1"} 1.27254528e+08 node_memory_numa_PageTables{node="2"} 1.27254528e+08 # HELP node_memory_numa_SReclaimable Memory information field SReclaimable. 
# TYPE node_memory_numa_SReclaimable gauge node_memory_numa_SReclaimable{node="0"} 4.580478976e+09 node_memory_numa_SReclaimable{node="1"} 4.724822016e+09 node_memory_numa_SReclaimable{node="2"} 4.724822016e+09 # HELP node_memory_numa_SUnreclaim Memory information field SUnreclaim. # TYPE node_memory_numa_SUnreclaim gauge node_memory_numa_SUnreclaim{node="0"} 2.23352832e+09 node_memory_numa_SUnreclaim{node="1"} 2.464391168e+09 node_memory_numa_SUnreclaim{node="2"} 2.464391168e+09 # HELP node_memory_numa_Shmem Memory information field Shmem. # TYPE node_memory_numa_Shmem gauge node_memory_numa_Shmem{node="0"} 4.900864e+07 node_memory_numa_Shmem{node="1"} 8.968192e+07 node_memory_numa_Shmem{node="2"} 8.968192e+07 # HELP node_memory_numa_Slab Memory information field Slab. # TYPE node_memory_numa_Slab gauge node_memory_numa_Slab{node="0"} 6.814007296e+09 node_memory_numa_Slab{node="1"} 7.189213184e+09 node_memory_numa_Slab{node="2"} 7.189213184e+09 # HELP node_memory_numa_Unevictable Memory information field Unevictable. # TYPE node_memory_numa_Unevictable gauge node_memory_numa_Unevictable{node="0"} 0 node_memory_numa_Unevictable{node="1"} 0 node_memory_numa_Unevictable{node="2"} 0 # HELP node_memory_numa_Writeback Memory information field Writeback. # TYPE node_memory_numa_Writeback gauge node_memory_numa_Writeback{node="0"} 0 node_memory_numa_Writeback{node="1"} 0 node_memory_numa_Writeback{node="2"} 0 # HELP node_memory_numa_WritebackTmp Memory information field WritebackTmp. # TYPE node_memory_numa_WritebackTmp gauge node_memory_numa_WritebackTmp{node="0"} 0 node_memory_numa_WritebackTmp{node="1"} 0 node_memory_numa_WritebackTmp{node="2"} 0 # HELP node_memory_numa_interleave_hit_total Memory information field interleave_hit_total. # TYPE node_memory_numa_interleave_hit_total counter node_memory_numa_interleave_hit_total{node="0"} 57146 node_memory_numa_interleave_hit_total{node="1"} 57286 node_memory_numa_interleave_hit_total{node="2"} 7286 # HELP node_memory_numa_local_node_total Memory information field local_node_total. # TYPE node_memory_numa_local_node_total counter node_memory_numa_local_node_total{node="0"} 1.93454780853e+11 node_memory_numa_local_node_total{node="1"} 3.2671904655e+11 node_memory_numa_local_node_total{node="2"} 2.671904655e+10 # HELP node_memory_numa_numa_foreign_total Memory information field numa_foreign_total. # TYPE node_memory_numa_numa_foreign_total counter node_memory_numa_numa_foreign_total{node="0"} 5.98586233e+10 node_memory_numa_numa_foreign_total{node="1"} 1.2624528e+07 node_memory_numa_numa_foreign_total{node="2"} 2.624528e+06 # HELP node_memory_numa_numa_hit_total Memory information field numa_hit_total. # TYPE node_memory_numa_numa_hit_total counter node_memory_numa_numa_hit_total{node="0"} 1.93460335812e+11 node_memory_numa_numa_hit_total{node="1"} 3.26720946761e+11 node_memory_numa_numa_hit_total{node="2"} 2.6720946761e+10 # HELP node_memory_numa_numa_miss_total Memory information field numa_miss_total. # TYPE node_memory_numa_numa_miss_total counter node_memory_numa_numa_miss_total{node="0"} 1.2624528e+07 node_memory_numa_numa_miss_total{node="1"} 5.9858626709e+10 node_memory_numa_numa_miss_total{node="2"} 9.858626709e+09 # HELP node_memory_numa_other_node_total Memory information field other_node_total. 
# TYPE node_memory_numa_other_node_total counter node_memory_numa_other_node_total{node="0"} 1.8179487e+07 node_memory_numa_other_node_total{node="1"} 5.986052692e+10 node_memory_numa_other_node_total{node="2"} 9.86052692e+09 # HELP node_mountstats_nfs_age_seconds_total The age of the NFS mount in seconds. # TYPE node_mountstats_nfs_age_seconds_total counter node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13968 node_mountstats_nfs_age_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13968 # HELP node_mountstats_nfs_direct_read_bytes_total Number of bytes read using the read() syscall in O_DIRECT mode. # TYPE node_mountstats_nfs_direct_read_bytes_total counter node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_direct_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_direct_write_bytes_total Number of bytes written using the write() syscall in O_DIRECT mode. # TYPE node_mountstats_nfs_direct_write_bytes_total counter node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_direct_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_attribute_invalidate_total Number of times cached inode attributes are invalidated. # TYPE node_mountstats_nfs_event_attribute_invalidate_total counter node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_attribute_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_data_invalidate_total Number of times an inode cache is cleared. # TYPE node_mountstats_nfs_event_data_invalidate_total counter node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_data_invalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_dnode_revalidate_total Number of times cached dentry nodes are re-validated from the server. # TYPE node_mountstats_nfs_event_dnode_revalidate_total counter node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 226 node_mountstats_nfs_event_dnode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 226 # HELP node_mountstats_nfs_event_inode_revalidate_total Number of times cached inode attributes are re-validated from the server. # TYPE node_mountstats_nfs_event_inode_revalidate_total counter node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 52 node_mountstats_nfs_event_inode_revalidate_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 52 # HELP node_mountstats_nfs_event_jukebox_delay_total Number of times the NFS server indicated EJUKEBOX; retrieving data from offline storage. 
# TYPE node_mountstats_nfs_event_jukebox_delay_total counter node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_jukebox_delay_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_pnfs_read_total Number of NFS v4.1+ pNFS reads. # TYPE node_mountstats_nfs_event_pnfs_read_total counter node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_pnfs_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_pnfs_write_total Number of NFS v4.1+ pNFS writes. # TYPE node_mountstats_nfs_event_pnfs_write_total counter node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_pnfs_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_short_read_total Number of times the NFS server gave less data than expected while reading. # TYPE node_mountstats_nfs_event_short_read_total counter node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_short_read_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_short_write_total Number of times the NFS server wrote less data than expected while writing. # TYPE node_mountstats_nfs_event_short_write_total counter node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_short_write_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_silly_rename_total Number of times a file was removed while still open by another process. # TYPE node_mountstats_nfs_event_silly_rename_total counter node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_silly_rename_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_truncation_total Number of times files have been truncated. # TYPE node_mountstats_nfs_event_truncation_total counter node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_event_truncation_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_event_vfs_access_total Number of times permissions have been checked. # TYPE node_mountstats_nfs_event_vfs_access_total counter node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 398 node_mountstats_nfs_event_vfs_access_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 398 # HELP node_mountstats_nfs_event_vfs_file_release_total Number of times files have been closed and released. 
# TYPE node_mountstats_nfs_event_vfs_file_release_total counter
node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77
node_mountstats_nfs_event_vfs_file_release_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77
# HELP node_mountstats_nfs_event_vfs_flush_total Number of pending writes that have been forcefully flushed to the server.
# TYPE node_mountstats_nfs_event_vfs_flush_total counter
node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 77
node_mountstats_nfs_event_vfs_flush_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 77
# HELP node_mountstats_nfs_event_vfs_fsync_total Number of times fsync() has been called on directories and files.
# TYPE node_mountstats_nfs_event_vfs_fsync_total counter
node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_vfs_fsync_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_event_vfs_getdents_total Number of times directory entries have been read with getdents().
# TYPE node_mountstats_nfs_event_vfs_getdents_total counter
node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_vfs_getdents_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_event_vfs_lock_total Number of times locking has been attempted on a file.
# TYPE node_mountstats_nfs_event_vfs_lock_total counter
node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_vfs_lock_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_event_vfs_lookup_total Number of times a directory lookup has occurred.
# TYPE node_mountstats_nfs_event_vfs_lookup_total counter
node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 13
node_mountstats_nfs_event_vfs_lookup_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 13
# HELP node_mountstats_nfs_event_vfs_open_total Number of times files or directories have been opened.
# TYPE node_mountstats_nfs_event_vfs_open_total counter
node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1
node_mountstats_nfs_event_vfs_open_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1
# HELP node_mountstats_nfs_event_vfs_read_page_total Number of pages read directly via mmap()'d files.
# TYPE node_mountstats_nfs_event_vfs_read_page_total counter
node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_vfs_read_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_event_vfs_read_pages_total Number of times a group of pages have been read.
# TYPE node_mountstats_nfs_event_vfs_read_pages_total counter
node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 331
node_mountstats_nfs_event_vfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 331
# HELP node_mountstats_nfs_event_vfs_setattr_total Number of times file attributes have been changed with setattr().
# TYPE node_mountstats_nfs_event_vfs_setattr_total counter
node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_vfs_setattr_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_event_vfs_update_page_total Number of updates (and potential writes) to pages.
# TYPE node_mountstats_nfs_event_vfs_update_page_total counter
node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_vfs_update_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_event_vfs_write_page_total Number of pages written directly via mmap()'d files.
# TYPE node_mountstats_nfs_event_vfs_write_page_total counter
node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_vfs_write_page_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_event_vfs_write_pages_total Number of times a group of pages have been written.
# TYPE node_mountstats_nfs_event_vfs_write_pages_total counter
node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 47
node_mountstats_nfs_event_vfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 47
# HELP node_mountstats_nfs_event_write_extension_total Number of times a file has been grown due to writes beyond its existing end.
# TYPE node_mountstats_nfs_event_write_extension_total counter
node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0
node_mountstats_nfs_event_write_extension_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0
# HELP node_mountstats_nfs_operations_major_timeouts_total Number of times a request has had a major timeout for a given operation.
# TYPE node_mountstats_nfs_operations_major_timeouts_total counter node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_major_timeouts_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_queue_time_seconds_total Duration all requests spent queued for transmission for a given operation before they were sent, in seconds. # TYPE node_mountstats_nfs_operations_queue_time_seconds_total counter node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 9.007044786793922e+12 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 0.006 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 0.006 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_queue_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_received_bytes_total Number of bytes received for a given operation, including RPC headers and payload. 
# TYPE node_mountstats_nfs_operations_received_bytes_total counter node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 3.62996810236e+11 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1.210292152e+09 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1.210292152e+09 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_received_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_request_time_seconds_total Duration all requests took from when a request was enqueued to when it was completely handled for a given operation, in seconds. # TYPE node_mountstats_nfs_operations_request_time_seconds_total counter node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.953587717e+06 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.407 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.407 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_request_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_requests_total Number of requests performed for a given operation. 
# TYPE node_mountstats_nfs_operations_requests_total counter node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927395007e+09 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_requests_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_response_time_seconds_total Duration all requests took to get a reply back after a request for a given operation was transmitted, in seconds. # TYPE node_mountstats_nfs_operations_response_time_seconds_total counter node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 1.667369447e+06 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 79.386 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 79.386 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_response_time_seconds_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_sent_bytes_total Number of bytes sent for a given operation, including RPC headers and payload. 
# TYPE node_mountstats_nfs_operations_sent_bytes_total counter node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 5.26931094212e+11 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 207680 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 207680 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_sent_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_operations_transmissions_total Number of times an actual RPC request has been transmitted for a given operation. # TYPE node_mountstats_nfs_operations_transmissions_total counter node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="ACCESS",protocol="udp"} 2.927394995e+09 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="tcp"} 0 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="NULL",protocol="udp"} 0 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="tcp"} 1298 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="READ",protocol="udp"} 1298 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="tcp"} 0 node_mountstats_nfs_operations_transmissions_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",operation="WRITE",protocol="udp"} 0 # HELP node_mountstats_nfs_read_bytes_total Number of bytes read using the read() syscall. # TYPE node_mountstats_nfs_read_bytes_total counter node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.20764023e+09 node_mountstats_nfs_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.20764023e+09 # HELP node_mountstats_nfs_read_pages_total Number of pages read directly via mmap()'d files. # TYPE node_mountstats_nfs_read_pages_total counter node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 295483 node_mountstats_nfs_read_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 295483 # HELP node_mountstats_nfs_total_read_bytes_total Number of bytes read from the NFS server, in total. 
# TYPE node_mountstats_nfs_total_read_bytes_total counter node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1.210214218e+09 node_mountstats_nfs_total_read_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 1.210214218e+09 # HELP node_mountstats_nfs_total_write_bytes_total Number of bytes written to the NFS server, in total. # TYPE node_mountstats_nfs_total_write_bytes_total counter node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_total_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_backlog_queue_total Total number of items added to the RPC backlog queue. # TYPE node_mountstats_nfs_transport_backlog_queue_total counter node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_transport_backlog_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_bad_transaction_ids_total Number of times the NFS server sent a response with a transaction ID unknown to this client. # TYPE node_mountstats_nfs_transport_bad_transaction_ids_total counter node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_transport_bad_transaction_ids_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_bind_total Number of times the client has had to establish a connection from scratch to the NFS server. # TYPE node_mountstats_nfs_transport_bind_total counter node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_transport_bind_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_connect_total Number of times the client has made a TCP connection to the NFS server. # TYPE node_mountstats_nfs_transport_connect_total counter node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 1 node_mountstats_nfs_transport_connect_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_idle_time_seconds Duration since the NFS mount last saw any RPC traffic, in seconds. # TYPE node_mountstats_nfs_transport_idle_time_seconds gauge node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 11 node_mountstats_nfs_transport_idle_time_seconds{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_transport_maximum_rpc_slots Maximum number of simultaneously active RPC requests ever used. # TYPE node_mountstats_nfs_transport_maximum_rpc_slots gauge node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 24 node_mountstats_nfs_transport_maximum_rpc_slots{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 24 # HELP node_mountstats_nfs_transport_pending_queue_total Total number of items added to the RPC transmission pending queue. 
# TYPE node_mountstats_nfs_transport_pending_queue_total counter node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 5726 node_mountstats_nfs_transport_pending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 5726 # HELP node_mountstats_nfs_transport_receives_total Number of RPC responses for this mount received from the NFS server. # TYPE node_mountstats_nfs_transport_receives_total counter node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 node_mountstats_nfs_transport_receives_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 # HELP node_mountstats_nfs_transport_sending_queue_total Total number of items added to the RPC transmission sending queue. # TYPE node_mountstats_nfs_transport_sending_queue_total counter node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 26 node_mountstats_nfs_transport_sending_queue_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 26 # HELP node_mountstats_nfs_transport_sends_total Number of RPC requests for this mount sent to the NFS server. # TYPE node_mountstats_nfs_transport_sends_total counter node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 6428 node_mountstats_nfs_transport_sends_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 6428 # HELP node_mountstats_nfs_write_bytes_total Number of bytes written using the write() syscall. # TYPE node_mountstats_nfs_write_bytes_total counter node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_write_bytes_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_mountstats_nfs_write_pages_total Number of pages written directly via mmap()'d files. # TYPE node_mountstats_nfs_write_pages_total counter node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="tcp"} 0 node_mountstats_nfs_write_pages_total{export="192.168.1.1:/srv/test",mountaddr="192.168.1.1",protocol="udp"} 0 # HELP node_netstat_Icmp6_InErrors Statistic Icmp6InErrors. # TYPE node_netstat_Icmp6_InErrors untyped node_netstat_Icmp6_InErrors 0 # HELP node_netstat_Icmp6_InMsgs Statistic Icmp6InMsgs. # TYPE node_netstat_Icmp6_InMsgs untyped node_netstat_Icmp6_InMsgs 0 # HELP node_netstat_Icmp6_OutMsgs Statistic Icmp6OutMsgs. # TYPE node_netstat_Icmp6_OutMsgs untyped node_netstat_Icmp6_OutMsgs 8 # HELP node_netstat_Icmp_InErrors Statistic IcmpInErrors. # TYPE node_netstat_Icmp_InErrors untyped node_netstat_Icmp_InErrors 0 # HELP node_netstat_Icmp_InMsgs Statistic IcmpInMsgs. # TYPE node_netstat_Icmp_InMsgs untyped node_netstat_Icmp_InMsgs 104 # HELP node_netstat_Icmp_OutMsgs Statistic IcmpOutMsgs. # TYPE node_netstat_Icmp_OutMsgs untyped node_netstat_Icmp_OutMsgs 120 # HELP node_netstat_Ip6_InOctets Statistic Ip6InOctets. # TYPE node_netstat_Ip6_InOctets untyped node_netstat_Ip6_InOctets 460 # HELP node_netstat_Ip6_OutOctets Statistic Ip6OutOctets. # TYPE node_netstat_Ip6_OutOctets untyped node_netstat_Ip6_OutOctets 536 # HELP node_netstat_IpExt_InOctets Statistic IpExtInOctets. 
# TYPE node_netstat_IpExt_InOctets untyped node_netstat_IpExt_InOctets 6.28639697e+09 # HELP node_netstat_IpExt_OutOctets Statistic IpExtOutOctets. # TYPE node_netstat_IpExt_OutOctets untyped node_netstat_IpExt_OutOctets 2.786264347e+09 # HELP node_netstat_Ip_Forwarding Statistic IpForwarding. # TYPE node_netstat_Ip_Forwarding untyped node_netstat_Ip_Forwarding 1 # HELP node_netstat_TcpExt_ListenDrops Statistic TcpExtListenDrops. # TYPE node_netstat_TcpExt_ListenDrops untyped node_netstat_TcpExt_ListenDrops 0 # HELP node_netstat_TcpExt_ListenOverflows Statistic TcpExtListenOverflows. # TYPE node_netstat_TcpExt_ListenOverflows untyped node_netstat_TcpExt_ListenOverflows 0 # HELP node_netstat_TcpExt_SyncookiesFailed Statistic TcpExtSyncookiesFailed. # TYPE node_netstat_TcpExt_SyncookiesFailed untyped node_netstat_TcpExt_SyncookiesFailed 2 # HELP node_netstat_TcpExt_SyncookiesRecv Statistic TcpExtSyncookiesRecv. # TYPE node_netstat_TcpExt_SyncookiesRecv untyped node_netstat_TcpExt_SyncookiesRecv 0 # HELP node_netstat_TcpExt_SyncookiesSent Statistic TcpExtSyncookiesSent. # TYPE node_netstat_TcpExt_SyncookiesSent untyped node_netstat_TcpExt_SyncookiesSent 0 # HELP node_netstat_TcpExt_TCPTimeouts Statistic TcpExtTCPTimeouts. # TYPE node_netstat_TcpExt_TCPTimeouts untyped node_netstat_TcpExt_TCPTimeouts 115 # HELP node_netstat_Tcp_ActiveOpens Statistic TcpActiveOpens. # TYPE node_netstat_Tcp_ActiveOpens untyped node_netstat_Tcp_ActiveOpens 3556 # HELP node_netstat_Tcp_CurrEstab Statistic TcpCurrEstab. # TYPE node_netstat_Tcp_CurrEstab untyped node_netstat_Tcp_CurrEstab 0 # HELP node_netstat_Tcp_InErrs Statistic TcpInErrs. # TYPE node_netstat_Tcp_InErrs untyped node_netstat_Tcp_InErrs 5 # HELP node_netstat_Tcp_InSegs Statistic TcpInSegs. # TYPE node_netstat_Tcp_InSegs untyped node_netstat_Tcp_InSegs 5.7252008e+07 # HELP node_netstat_Tcp_OutRsts Statistic TcpOutRsts. # TYPE node_netstat_Tcp_OutRsts untyped node_netstat_Tcp_OutRsts 1003 # HELP node_netstat_Tcp_OutSegs Statistic TcpOutSegs. # TYPE node_netstat_Tcp_OutSegs untyped node_netstat_Tcp_OutSegs 5.4915039e+07 # HELP node_netstat_Tcp_PassiveOpens Statistic TcpPassiveOpens. # TYPE node_netstat_Tcp_PassiveOpens untyped node_netstat_Tcp_PassiveOpens 230 # HELP node_netstat_Tcp_RetransSegs Statistic TcpRetransSegs. # TYPE node_netstat_Tcp_RetransSegs untyped node_netstat_Tcp_RetransSegs 227 # HELP node_netstat_Udp6_InDatagrams Statistic Udp6InDatagrams. # TYPE node_netstat_Udp6_InDatagrams untyped node_netstat_Udp6_InDatagrams 0 # HELP node_netstat_Udp6_InErrors Statistic Udp6InErrors. # TYPE node_netstat_Udp6_InErrors untyped node_netstat_Udp6_InErrors 0 # HELP node_netstat_Udp6_NoPorts Statistic Udp6NoPorts. # TYPE node_netstat_Udp6_NoPorts untyped node_netstat_Udp6_NoPorts 0 # HELP node_netstat_Udp6_OutDatagrams Statistic Udp6OutDatagrams. # TYPE node_netstat_Udp6_OutDatagrams untyped node_netstat_Udp6_OutDatagrams 0 # HELP node_netstat_Udp6_RcvbufErrors Statistic Udp6RcvbufErrors. # TYPE node_netstat_Udp6_RcvbufErrors untyped node_netstat_Udp6_RcvbufErrors 9 # HELP node_netstat_Udp6_SndbufErrors Statistic Udp6SndbufErrors. # TYPE node_netstat_Udp6_SndbufErrors untyped node_netstat_Udp6_SndbufErrors 8 # HELP node_netstat_UdpLite6_InErrors Statistic UdpLite6InErrors. # TYPE node_netstat_UdpLite6_InErrors untyped node_netstat_UdpLite6_InErrors 0 # HELP node_netstat_UdpLite_InErrors Statistic UdpLiteInErrors. # TYPE node_netstat_UdpLite_InErrors untyped node_netstat_UdpLite_InErrors 0 # HELP node_netstat_Udp_InDatagrams Statistic UdpInDatagrams. 
# TYPE node_netstat_Udp_InDatagrams untyped node_netstat_Udp_InDatagrams 88542 # HELP node_netstat_Udp_InErrors Statistic UdpInErrors. # TYPE node_netstat_Udp_InErrors untyped node_netstat_Udp_InErrors 0 # HELP node_netstat_Udp_NoPorts Statistic UdpNoPorts. # TYPE node_netstat_Udp_NoPorts untyped node_netstat_Udp_NoPorts 120 # HELP node_netstat_Udp_OutDatagrams Statistic UdpOutDatagrams. # TYPE node_netstat_Udp_OutDatagrams untyped node_netstat_Udp_OutDatagrams 53028 # HELP node_netstat_Udp_RcvbufErrors Statistic UdpRcvbufErrors. # TYPE node_netstat_Udp_RcvbufErrors untyped node_netstat_Udp_RcvbufErrors 9 # HELP node_netstat_Udp_SndbufErrors Statistic UdpSndbufErrors. # TYPE node_netstat_Udp_SndbufErrors untyped node_netstat_Udp_SndbufErrors 8 # HELP node_network_address_assign_type Network device property: address_assign_type # TYPE node_network_address_assign_type gauge node_network_address_assign_type{device="bond0"} 3 node_network_address_assign_type{device="eth0"} 3 # HELP node_network_carrier Network device property: carrier # TYPE node_network_carrier gauge node_network_carrier{device="bond0"} 1 node_network_carrier{device="eth0"} 1 # HELP node_network_carrier_changes_total Network device property: carrier_changes_total # TYPE node_network_carrier_changes_total counter node_network_carrier_changes_total{device="bond0"} 2 node_network_carrier_changes_total{device="eth0"} 2 # HELP node_network_carrier_down_changes_total Network device property: carrier_down_changes_total # TYPE node_network_carrier_down_changes_total counter node_network_carrier_down_changes_total{device="bond0"} 1 node_network_carrier_down_changes_total{device="eth0"} 1 # HELP node_network_carrier_up_changes_total Network device property: carrier_up_changes_total # TYPE node_network_carrier_up_changes_total counter node_network_carrier_up_changes_total{device="bond0"} 1 node_network_carrier_up_changes_total{device="eth0"} 1 # HELP node_network_device_id Network device property: device_id # TYPE node_network_device_id gauge node_network_device_id{device="bond0"} 32 node_network_device_id{device="eth0"} 32 # HELP node_network_dormant Network device property: dormant # TYPE node_network_dormant gauge node_network_dormant{device="bond0"} 1 node_network_dormant{device="eth0"} 1 # HELP node_network_flags Network device property: flags # TYPE node_network_flags gauge node_network_flags{device="bond0"} 4867 node_network_flags{device="eth0"} 4867 # HELP node_network_iface_id Network device property: iface_id # TYPE node_network_iface_id gauge node_network_iface_id{device="bond0"} 2 node_network_iface_id{device="eth0"} 2 # HELP node_network_iface_link Network device property: iface_link # TYPE node_network_iface_link gauge node_network_iface_link{device="bond0"} 2 node_network_iface_link{device="eth0"} 2 # HELP node_network_iface_link_mode Network device property: iface_link_mode # TYPE node_network_iface_link_mode gauge node_network_iface_link_mode{device="bond0"} 1 node_network_iface_link_mode{device="eth0"} 1 # HELP node_network_info Non-numeric data from /sys/class/net/, value is always 1. 
# TYPE node_network_info gauge node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="bond0",duplex="full",ifalias="",operstate="up"} 1 node_network_info{address="01:01:01:01:01:01",adminstate="up",broadcast="ff:ff:ff:ff:ff:ff",device="eth0",duplex="full",ifalias="",operstate="up"} 1 # HELP node_network_mtu_bytes Network device property: mtu_bytes # TYPE node_network_mtu_bytes gauge node_network_mtu_bytes{device="bond0"} 1500 node_network_mtu_bytes{device="eth0"} 1500 # HELP node_network_name_assign_type Network device property: name_assign_type # TYPE node_network_name_assign_type gauge node_network_name_assign_type{device="bond0"} 2 node_network_name_assign_type{device="eth0"} 2 # HELP node_network_net_dev_group Network device property: net_dev_group # TYPE node_network_net_dev_group gauge node_network_net_dev_group{device="bond0"} 0 node_network_net_dev_group{device="eth0"} 0 # HELP node_network_protocol_type Network device property: protocol_type # TYPE node_network_protocol_type gauge node_network_protocol_type{device="bond0"} 1 node_network_protocol_type{device="eth0"} 1 # HELP node_network_receive_bytes_total Network device statistic receive_bytes. # TYPE node_network_receive_bytes_total counter # HELP node_network_receive_compressed_total Network device statistic receive_compressed. # TYPE node_network_receive_compressed_total counter node_network_receive_compressed_total{device="lo"} 0 # HELP node_network_receive_drop_total Network device statistic receive_drop. # TYPE node_network_receive_drop_total counter node_network_receive_drop_total{device="lo"} 0 # HELP node_network_receive_errs_total Network device statistic receive_errs. # TYPE node_network_receive_errs_total counter node_network_receive_errs_total{device="lo"} 0 # HELP node_network_receive_fifo_total Network device statistic receive_fifo. # TYPE node_network_receive_fifo_total counter node_network_receive_fifo_total{device="lo"} 0 # HELP node_network_receive_frame_total Network device statistic receive_frame. # TYPE node_network_receive_frame_total counter node_network_receive_frame_total{device="lo"} 0 # HELP node_network_receive_multicast_total Network device statistic receive_multicast. # TYPE node_network_receive_multicast_total counter node_network_receive_multicast_total{device="lo"} 0 # HELP node_network_receive_nohandler_total Network device statistic receive_nohandler. # TYPE node_network_receive_nohandler_total counter node_network_receive_nohandler_total{device="lo"} 0 # HELP node_network_receive_packets_total Network device statistic receive_packets. # TYPE node_network_receive_packets_total counter # HELP node_network_speed_bytes Network device property: speed_bytes # TYPE node_network_speed_bytes gauge node_network_speed_bytes{device="eth0"} 1.25e+08 # HELP node_network_transmit_bytes_total Network device statistic transmit_bytes. # TYPE node_network_transmit_bytes_total counter # HELP node_network_transmit_carrier_total Network device statistic transmit_carrier. # TYPE node_network_transmit_carrier_total counter node_network_transmit_carrier_total{device="lo"} 0 # HELP node_network_transmit_colls_total Network device statistic transmit_colls. # TYPE node_network_transmit_colls_total counter node_network_transmit_colls_total{device="lo"} 0 # HELP node_network_transmit_compressed_total Network device statistic transmit_compressed. 
# TYPE node_network_transmit_compressed_total counter node_network_transmit_compressed_total{device="lo"} 0 # HELP node_network_transmit_drop_total Network device statistic transmit_drop. # TYPE node_network_transmit_drop_total counter node_network_transmit_drop_total{device="lo"} 0 # HELP node_network_transmit_errs_total Network device statistic transmit_errs. # TYPE node_network_transmit_errs_total counter node_network_transmit_errs_total{device="lo"} 0 # HELP node_network_transmit_fifo_total Network device statistic transmit_fifo. # TYPE node_network_transmit_fifo_total counter node_network_transmit_fifo_total{device="lo"} 0 # HELP node_network_transmit_packets_total Network device statistic transmit_packets. # TYPE node_network_transmit_packets_total counter # HELP node_network_transmit_queue_length Network device property: transmit_queue_length # TYPE node_network_transmit_queue_length gauge node_network_transmit_queue_length{device="bond0"} 1000 node_network_transmit_queue_length{device="eth0"} 1000 # HELP node_network_up Value is 1 if operstate is 'up', 0 otherwise. # TYPE node_network_up gauge node_network_up{device="bond0"} 1 node_network_up{device="eth0"} 1 # HELP node_nf_conntrack_entries Number of currently allocated flow entries for connection tracking. # TYPE node_nf_conntrack_entries gauge node_nf_conntrack_entries 123 # HELP node_nf_conntrack_entries_limit Maximum size of connection tracking table. # TYPE node_nf_conntrack_entries_limit gauge node_nf_conntrack_entries_limit 65536 # HELP node_nf_conntrack_stat_drop Number of packets dropped due to conntrack failure. # TYPE node_nf_conntrack_stat_drop gauge node_nf_conntrack_stat_drop 0 # HELP node_nf_conntrack_stat_early_drop Number of dropped conntrack entries to make room for new ones, if maximum table size was reached. # TYPE node_nf_conntrack_stat_early_drop gauge node_nf_conntrack_stat_early_drop 0 # HELP node_nf_conntrack_stat_found Number of searched entries which were successful. # TYPE node_nf_conntrack_stat_found gauge node_nf_conntrack_stat_found 0 # HELP node_nf_conntrack_stat_ignore Number of packets seen which are already connected to a conntrack entry. # TYPE node_nf_conntrack_stat_ignore gauge node_nf_conntrack_stat_ignore 89738 # HELP node_nf_conntrack_stat_insert Number of entries inserted into the list. # TYPE node_nf_conntrack_stat_insert gauge node_nf_conntrack_stat_insert 0 # HELP node_nf_conntrack_stat_insert_failed Number of entries for which list insertion was attempted but failed. # TYPE node_nf_conntrack_stat_insert_failed gauge node_nf_conntrack_stat_insert_failed 0 # HELP node_nf_conntrack_stat_invalid Number of packets seen which can not be tracked. # TYPE node_nf_conntrack_stat_invalid gauge node_nf_conntrack_stat_invalid 53 # HELP node_nf_conntrack_stat_search_restart Number of conntrack table lookups which had to be restarted due to hashtable resizes. # TYPE node_nf_conntrack_stat_search_restart gauge node_nf_conntrack_stat_search_restart 7 # HELP node_nfs_connections_total Total number of NFSd TCP connections. # TYPE node_nfs_connections_total counter node_nfs_connections_total 45 # HELP node_nfs_packets_total Total NFSd network packets (sent+received) by protocol type. # TYPE node_nfs_packets_total counter node_nfs_packets_total{protocol="tcp"} 69 node_nfs_packets_total{protocol="udp"} 70 # HELP node_nfs_requests_total Number of NFS procedures invoked. 
# TYPE node_nfs_requests_total counter node_nfs_requests_total{method="Access",proto="3"} 1.17661341e+08 node_nfs_requests_total{method="Access",proto="4"} 58 node_nfs_requests_total{method="Allocate",proto="4"} 0 node_nfs_requests_total{method="BindConnToSession",proto="4"} 0 node_nfs_requests_total{method="Clone",proto="4"} 0 node_nfs_requests_total{method="Close",proto="4"} 28 node_nfs_requests_total{method="Commit",proto="3"} 23729 node_nfs_requests_total{method="Commit",proto="4"} 83 node_nfs_requests_total{method="Create",proto="2"} 52 node_nfs_requests_total{method="Create",proto="3"} 2.993289e+06 node_nfs_requests_total{method="Create",proto="4"} 15 node_nfs_requests_total{method="CreateSession",proto="4"} 32 node_nfs_requests_total{method="DeAllocate",proto="4"} 0 node_nfs_requests_total{method="DelegReturn",proto="4"} 97 node_nfs_requests_total{method="DestroyClientID",proto="4"} 0 node_nfs_requests_total{method="DestroySession",proto="4"} 67 node_nfs_requests_total{method="ExchangeID",proto="4"} 58 node_nfs_requests_total{method="FreeStateID",proto="4"} 0 node_nfs_requests_total{method="FsInfo",proto="3"} 2 node_nfs_requests_total{method="FsInfo",proto="4"} 68 node_nfs_requests_total{method="FsLocations",proto="4"} 32 node_nfs_requests_total{method="FsStat",proto="2"} 82 node_nfs_requests_total{method="FsStat",proto="3"} 13332 node_nfs_requests_total{method="FsidPresent",proto="4"} 11 node_nfs_requests_total{method="GetACL",proto="4"} 36 node_nfs_requests_total{method="GetAttr",proto="2"} 57 node_nfs_requests_total{method="GetAttr",proto="3"} 1.061909262e+09 node_nfs_requests_total{method="GetDeviceInfo",proto="4"} 1 node_nfs_requests_total{method="GetDeviceList",proto="4"} 0 node_nfs_requests_total{method="GetLeaseTime",proto="4"} 28 node_nfs_requests_total{method="Getattr",proto="4"} 88 node_nfs_requests_total{method="LayoutCommit",proto="4"} 26 node_nfs_requests_total{method="LayoutGet",proto="4"} 90 node_nfs_requests_total{method="LayoutReturn",proto="4"} 0 node_nfs_requests_total{method="LayoutStats",proto="4"} 0 node_nfs_requests_total{method="Link",proto="2"} 17 node_nfs_requests_total{method="Link",proto="3"} 0 node_nfs_requests_total{method="Link",proto="4"} 21 node_nfs_requests_total{method="Lock",proto="4"} 39 node_nfs_requests_total{method="Lockt",proto="4"} 68 node_nfs_requests_total{method="Locku",proto="4"} 59 node_nfs_requests_total{method="Lookup",proto="2"} 71 node_nfs_requests_total{method="Lookup",proto="3"} 4.077635e+06 node_nfs_requests_total{method="Lookup",proto="4"} 29 node_nfs_requests_total{method="LookupRoot",proto="4"} 74 node_nfs_requests_total{method="MkDir",proto="2"} 50 node_nfs_requests_total{method="MkDir",proto="3"} 590 node_nfs_requests_total{method="MkNod",proto="3"} 0 node_nfs_requests_total{method="Null",proto="2"} 16 node_nfs_requests_total{method="Null",proto="3"} 0 node_nfs_requests_total{method="Null",proto="4"} 98 node_nfs_requests_total{method="Open",proto="4"} 85 node_nfs_requests_total{method="OpenConfirm",proto="4"} 23 node_nfs_requests_total{method="OpenDowngrade",proto="4"} 1 node_nfs_requests_total{method="OpenNoattr",proto="4"} 24 node_nfs_requests_total{method="PathConf",proto="3"} 1 node_nfs_requests_total{method="Pathconf",proto="4"} 53 node_nfs_requests_total{method="Read",proto="2"} 45 node_nfs_requests_total{method="Read",proto="3"} 2.9391916e+07 node_nfs_requests_total{method="Read",proto="4"} 51 node_nfs_requests_total{method="ReadDir",proto="2"} 70 node_nfs_requests_total{method="ReadDir",proto="3"} 3983 
node_nfs_requests_total{method="ReadDir",proto="4"} 66 node_nfs_requests_total{method="ReadDirPlus",proto="3"} 92385 node_nfs_requests_total{method="ReadLink",proto="2"} 73 node_nfs_requests_total{method="ReadLink",proto="3"} 5 node_nfs_requests_total{method="ReadLink",proto="4"} 54 node_nfs_requests_total{method="ReclaimComplete",proto="4"} 35 node_nfs_requests_total{method="ReleaseLockowner",proto="4"} 85 node_nfs_requests_total{method="Remove",proto="2"} 83 node_nfs_requests_total{method="Remove",proto="3"} 7815 node_nfs_requests_total{method="Remove",proto="4"} 69 node_nfs_requests_total{method="Rename",proto="2"} 61 node_nfs_requests_total{method="Rename",proto="3"} 1130 node_nfs_requests_total{method="Rename",proto="4"} 96 node_nfs_requests_total{method="Renew",proto="4"} 83 node_nfs_requests_total{method="RmDir",proto="2"} 23 node_nfs_requests_total{method="RmDir",proto="3"} 15 node_nfs_requests_total{method="Root",proto="2"} 52 node_nfs_requests_total{method="Secinfo",proto="4"} 81 node_nfs_requests_total{method="SecinfoNoName",proto="4"} 0 node_nfs_requests_total{method="Seek",proto="4"} 0 node_nfs_requests_total{method="Sequence",proto="4"} 13 node_nfs_requests_total{method="ServerCaps",proto="4"} 56 node_nfs_requests_total{method="SetACL",proto="4"} 49 node_nfs_requests_total{method="SetAttr",proto="2"} 74 node_nfs_requests_total{method="SetAttr",proto="3"} 48906 node_nfs_requests_total{method="SetClientID",proto="4"} 12 node_nfs_requests_total{method="SetClientIDConfirm",proto="4"} 84 node_nfs_requests_total{method="Setattr",proto="4"} 73 node_nfs_requests_total{method="StatFs",proto="4"} 86 node_nfs_requests_total{method="SymLink",proto="2"} 53 node_nfs_requests_total{method="SymLink",proto="3"} 0 node_nfs_requests_total{method="Symlink",proto="4"} 84 node_nfs_requests_total{method="TestStateID",proto="4"} 0 node_nfs_requests_total{method="WrCache",proto="2"} 86 node_nfs_requests_total{method="Write",proto="2"} 0 node_nfs_requests_total{method="Write",proto="3"} 2.570425e+06 node_nfs_requests_total{method="Write",proto="4"} 54 # HELP node_nfs_rpc_authentication_refreshes_total Number of RPC authentication refreshes performed. # TYPE node_nfs_rpc_authentication_refreshes_total counter node_nfs_rpc_authentication_refreshes_total 1.218815394e+09 # HELP node_nfs_rpc_retransmissions_total Number of RPC transmissions performed. # TYPE node_nfs_rpc_retransmissions_total counter node_nfs_rpc_retransmissions_total 374636 # HELP node_nfs_rpcs_total Total number of RPCs performed. # TYPE node_nfs_rpcs_total counter node_nfs_rpcs_total 1.218785755e+09 # HELP node_nfsd_connections_total Total number of NFSd TCP connections. # TYPE node_nfsd_connections_total counter node_nfsd_connections_total 1 # HELP node_nfsd_disk_bytes_read_total Total NFSd bytes read. # TYPE node_nfsd_disk_bytes_read_total counter node_nfsd_disk_bytes_read_total 1.572864e+08 # HELP node_nfsd_disk_bytes_written_total Total NFSd bytes written. # TYPE node_nfsd_disk_bytes_written_total counter node_nfsd_disk_bytes_written_total 72864 # HELP node_nfsd_file_handles_stale_total Total number of NFSd stale file handles # TYPE node_nfsd_file_handles_stale_total counter node_nfsd_file_handles_stale_total 0 # HELP node_nfsd_packets_total Total NFSd network packets (sent+received) by protocol type. # TYPE node_nfsd_packets_total counter node_nfsd_packets_total{proto="tcp"} 917 node_nfsd_packets_total{proto="udp"} 55 # HELP node_nfsd_read_ahead_cache_not_found_total Total number of NFSd read ahead cache not found. 
# TYPE node_nfsd_read_ahead_cache_not_found_total counter node_nfsd_read_ahead_cache_not_found_total 0 # HELP node_nfsd_read_ahead_cache_size_blocks How large the read ahead cache is in blocks. # TYPE node_nfsd_read_ahead_cache_size_blocks gauge node_nfsd_read_ahead_cache_size_blocks 32 # HELP node_nfsd_reply_cache_hits_total Total number of NFSd Reply Cache hits (client lost server response). # TYPE node_nfsd_reply_cache_hits_total counter node_nfsd_reply_cache_hits_total 0 # HELP node_nfsd_reply_cache_misses_total Total number of NFSd Reply Cache an operation that requires caching (idempotent). # TYPE node_nfsd_reply_cache_misses_total counter node_nfsd_reply_cache_misses_total 6 # HELP node_nfsd_reply_cache_nocache_total Total number of NFSd Reply Cache non-idempotent operations (rename/delete/…). # TYPE node_nfsd_reply_cache_nocache_total counter node_nfsd_reply_cache_nocache_total 18622 # HELP node_nfsd_requests_total Total number NFSd Requests by method and protocol. # TYPE node_nfsd_requests_total counter node_nfsd_requests_total{method="Access",proto="3"} 111 node_nfsd_requests_total{method="Access",proto="4"} 1098 node_nfsd_requests_total{method="Close",proto="4"} 2 node_nfsd_requests_total{method="Commit",proto="3"} 0 node_nfsd_requests_total{method="Commit",proto="4"} 0 node_nfsd_requests_total{method="Create",proto="2"} 0 node_nfsd_requests_total{method="Create",proto="3"} 0 node_nfsd_requests_total{method="Create",proto="4"} 0 node_nfsd_requests_total{method="DelegPurge",proto="4"} 0 node_nfsd_requests_total{method="DelegReturn",proto="4"} 0 node_nfsd_requests_total{method="FsInfo",proto="3"} 2 node_nfsd_requests_total{method="FsStat",proto="2"} 2 node_nfsd_requests_total{method="FsStat",proto="3"} 0 node_nfsd_requests_total{method="GetAttr",proto="2"} 69 node_nfsd_requests_total{method="GetAttr",proto="3"} 112 node_nfsd_requests_total{method="GetAttr",proto="4"} 8179 node_nfsd_requests_total{method="GetFH",proto="4"} 5896 node_nfsd_requests_total{method="Link",proto="2"} 0 node_nfsd_requests_total{method="Link",proto="3"} 0 node_nfsd_requests_total{method="Link",proto="4"} 0 node_nfsd_requests_total{method="Lock",proto="4"} 0 node_nfsd_requests_total{method="Lockt",proto="4"} 0 node_nfsd_requests_total{method="Locku",proto="4"} 0 node_nfsd_requests_total{method="Lookup",proto="2"} 4410 node_nfsd_requests_total{method="Lookup",proto="3"} 2719 node_nfsd_requests_total{method="Lookup",proto="4"} 5900 node_nfsd_requests_total{method="LookupRoot",proto="4"} 0 node_nfsd_requests_total{method="MkDir",proto="2"} 0 node_nfsd_requests_total{method="MkDir",proto="3"} 0 node_nfsd_requests_total{method="MkNod",proto="3"} 0 node_nfsd_requests_total{method="Nverify",proto="4"} 0 node_nfsd_requests_total{method="Open",proto="4"} 2 node_nfsd_requests_total{method="OpenAttr",proto="4"} 0 node_nfsd_requests_total{method="OpenConfirm",proto="4"} 2 node_nfsd_requests_total{method="OpenDgrd",proto="4"} 0 node_nfsd_requests_total{method="PathConf",proto="3"} 1 node_nfsd_requests_total{method="PutFH",proto="4"} 9609 node_nfsd_requests_total{method="Read",proto="2"} 0 node_nfsd_requests_total{method="Read",proto="3"} 0 node_nfsd_requests_total{method="Read",proto="4"} 150 node_nfsd_requests_total{method="ReadDir",proto="2"} 99 node_nfsd_requests_total{method="ReadDir",proto="3"} 27 node_nfsd_requests_total{method="ReadDir",proto="4"} 1272 node_nfsd_requests_total{method="ReadDirPlus",proto="3"} 216 node_nfsd_requests_total{method="ReadLink",proto="2"} 0 
node_nfsd_requests_total{method="ReadLink",proto="3"} 0 node_nfsd_requests_total{method="ReadLink",proto="4"} 0 node_nfsd_requests_total{method="RelLockOwner",proto="4"} 0 node_nfsd_requests_total{method="Remove",proto="2"} 0 node_nfsd_requests_total{method="Remove",proto="3"} 0 node_nfsd_requests_total{method="Remove",proto="4"} 0 node_nfsd_requests_total{method="Rename",proto="2"} 0 node_nfsd_requests_total{method="Rename",proto="3"} 0 node_nfsd_requests_total{method="Rename",proto="4"} 0 node_nfsd_requests_total{method="Renew",proto="4"} 1236 node_nfsd_requests_total{method="RestoreFH",proto="4"} 0 node_nfsd_requests_total{method="RmDir",proto="2"} 0 node_nfsd_requests_total{method="RmDir",proto="3"} 0 node_nfsd_requests_total{method="Root",proto="2"} 0 node_nfsd_requests_total{method="SaveFH",proto="4"} 0 node_nfsd_requests_total{method="SecInfo",proto="4"} 0 node_nfsd_requests_total{method="SetAttr",proto="2"} 0 node_nfsd_requests_total{method="SetAttr",proto="3"} 0 node_nfsd_requests_total{method="SetAttr",proto="4"} 0 node_nfsd_requests_total{method="SetClientID",proto="4"} 3 node_nfsd_requests_total{method="SetClientIDConfirm",proto="4"} 3 node_nfsd_requests_total{method="SymLink",proto="2"} 0 node_nfsd_requests_total{method="SymLink",proto="3"} 0 node_nfsd_requests_total{method="Verify",proto="4"} 0 node_nfsd_requests_total{method="WrCache",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="2"} 0 node_nfsd_requests_total{method="Write",proto="3"} 0 node_nfsd_requests_total{method="Write",proto="4"} 0 # HELP node_nfsd_rpc_errors_total Total number of NFSd RPC errors by error type. # TYPE node_nfsd_rpc_errors_total counter node_nfsd_rpc_errors_total{error="auth"} 2 node_nfsd_rpc_errors_total{error="cInt"} 0 node_nfsd_rpc_errors_total{error="fmt"} 1 # HELP node_nfsd_server_rpcs_total Total number of NFSd RPCs. # TYPE node_nfsd_server_rpcs_total counter node_nfsd_server_rpcs_total 18628 # HELP node_nfsd_server_threads Total number of NFSd kernel threads that are running. # TYPE node_nfsd_server_threads gauge node_nfsd_server_threads 8 # HELP node_nvme_info Non-numeric data from /sys/class/nvme/, value is always 1. # TYPE node_nvme_info gauge node_nvme_info{device="nvme0",firmware_revision="1B2QEXP7",model="Samsung SSD 970 PRO 512GB",serial="S680HF8N190894I",state="live"} 1 # HELP node_os_info A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, name, pretty_name, variant, variant_id, version, version_codename, version_id. # TYPE node_os_info gauge node_os_info{build_id="",id="ubuntu",id_like="debian",image_id="",image_version="",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",variant="",variant_id="",version="20.04.2 LTS (Focal Fossa)",version_codename="focal",version_id="20.04"} 1 # HELP node_os_version Metric containing the major.minor part of the OS version. # TYPE node_os_version gauge node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04 # HELP node_power_supply_capacity capacity value of /sys/class/power_supply/. # TYPE node_power_supply_capacity gauge node_power_supply_capacity{power_supply="BAT0"} 81 # HELP node_power_supply_cyclecount cyclecount value of /sys/class/power_supply/. # TYPE node_power_supply_cyclecount gauge node_power_supply_cyclecount{power_supply="BAT0"} 0 # HELP node_power_supply_energy_full energy_full value of /sys/class/power_supply/. 
# TYPE node_power_supply_energy_full gauge node_power_supply_energy_full{power_supply="BAT0"} 45.07 # HELP node_power_supply_energy_full_design energy_full_design value of /sys/class/power_supply/. # TYPE node_power_supply_energy_full_design gauge node_power_supply_energy_full_design{power_supply="BAT0"} 47.52 # HELP node_power_supply_energy_watthour energy_watthour value of /sys/class/power_supply/. # TYPE node_power_supply_energy_watthour gauge node_power_supply_energy_watthour{power_supply="BAT0"} 36.58 # HELP node_power_supply_info info of /sys/class/power_supply/. # TYPE node_power_supply_info gauge node_power_supply_info{power_supply="AC",type="Mains"} 1 node_power_supply_info{capacity_level="Normal",manufacturer="LGC",model_name="LNV-45N1��",power_supply="BAT0",serial_number="38109",status="Discharging",technology="Li-ion",type="Battery"} 1 # HELP node_power_supply_online online value of /sys/class/power_supply/. # TYPE node_power_supply_online gauge node_power_supply_online{power_supply="AC"} 0 # HELP node_power_supply_power_watt power_watt value of /sys/class/power_supply/. # TYPE node_power_supply_power_watt gauge node_power_supply_power_watt{power_supply="BAT0"} 5.002 # HELP node_power_supply_present present value of /sys/class/power_supply/. # TYPE node_power_supply_present gauge node_power_supply_present{power_supply="BAT0"} 1 # HELP node_power_supply_voltage_min_design voltage_min_design value of /sys/class/power_supply/. # TYPE node_power_supply_voltage_min_design gauge node_power_supply_voltage_min_design{power_supply="BAT0"} 10.8 # HELP node_power_supply_voltage_volt voltage_volt value of /sys/class/power_supply/. # TYPE node_power_supply_voltage_volt gauge node_power_supply_voltage_volt{power_supply="BAT0"} 11.66 # HELP node_pressure_cpu_waiting_seconds_total Total time in seconds that processes have waited for CPU time # TYPE node_pressure_cpu_waiting_seconds_total counter node_pressure_cpu_waiting_seconds_total 14.036781000000001 # HELP node_pressure_io_stalled_seconds_total Total time in seconds no process could make progress due to IO congestion # TYPE node_pressure_io_stalled_seconds_total counter node_pressure_io_stalled_seconds_total 159.229614 # HELP node_pressure_io_waiting_seconds_total Total time in seconds that processes have waited due to IO congestion # TYPE node_pressure_io_waiting_seconds_total counter node_pressure_io_waiting_seconds_total 159.886802 # HELP node_pressure_memory_stalled_seconds_total Total time in seconds no process could make progress due to memory congestion # TYPE node_pressure_memory_stalled_seconds_total counter node_pressure_memory_stalled_seconds_total 0 # HELP node_pressure_memory_waiting_seconds_total Total time in seconds that processes have waited for memory # TYPE node_pressure_memory_waiting_seconds_total counter node_pressure_memory_waiting_seconds_total 0 # HELP node_processes_max_processes Number of max PIDs limit # TYPE node_processes_max_processes gauge node_processes_max_processes 123 # HELP node_processes_max_threads Limit of threads in the system # TYPE node_processes_max_threads gauge node_processes_max_threads 7801 # HELP node_processes_pids Number of PIDs # TYPE node_processes_pids gauge node_processes_pids 3 # HELP node_processes_state Number of processes in each state. 
# TYPE node_processes_state gauge node_processes_state{state="I"} 1 node_processes_state{state="S"} 2 # HELP node_processes_threads Allocated threads in system # TYPE node_processes_threads gauge node_processes_threads 3 # HELP node_procs_blocked Number of processes blocked waiting for I/O to complete. # TYPE node_procs_blocked gauge node_procs_blocked 0 # HELP node_procs_running Number of processes in runnable state. # TYPE node_procs_running gauge node_procs_running 2 # HELP node_qdisc_backlog Number of bytes currently in queue to be sent. # TYPE node_qdisc_backlog gauge node_qdisc_backlog{device="eth0",kind="pfifo_fast"} 0 node_qdisc_backlog{device="wlan0",kind="fq"} 0 # HELP node_qdisc_bytes_total Number of bytes sent. # TYPE node_qdisc_bytes_total counter node_qdisc_bytes_total{device="eth0",kind="pfifo_fast"} 83 node_qdisc_bytes_total{device="wlan0",kind="fq"} 42 # HELP node_qdisc_current_queue_length Number of packets currently in queue to be sent. # TYPE node_qdisc_current_queue_length gauge node_qdisc_current_queue_length{device="eth0",kind="pfifo_fast"} 0 node_qdisc_current_queue_length{device="wlan0",kind="fq"} 0 # HELP node_qdisc_drops_total Number of packets dropped. # TYPE node_qdisc_drops_total counter node_qdisc_drops_total{device="eth0",kind="pfifo_fast"} 0 node_qdisc_drops_total{device="wlan0",kind="fq"} 1 # HELP node_qdisc_overlimits_total Number of overlimit packets. # TYPE node_qdisc_overlimits_total counter node_qdisc_overlimits_total{device="eth0",kind="pfifo_fast"} 0 node_qdisc_overlimits_total{device="wlan0",kind="fq"} 0 # HELP node_qdisc_packets_total Number of packets sent. # TYPE node_qdisc_packets_total counter node_qdisc_packets_total{device="eth0",kind="pfifo_fast"} 83 node_qdisc_packets_total{device="wlan0",kind="fq"} 42 # HELP node_qdisc_requeues_total Number of packets dequeued, not transmitted, and requeued. # TYPE node_qdisc_requeues_total counter node_qdisc_requeues_total{device="eth0",kind="pfifo_fast"} 2 node_qdisc_requeues_total{device="wlan0",kind="fq"} 1 # HELP node_rapl_core_joules_total Current RAPL core value in joules # TYPE node_rapl_core_joules_total counter node_rapl_core_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0:0"} 118821.284256 # HELP node_rapl_package_joules_total Current RAPL package value in joules # TYPE node_rapl_package_joules_total counter node_rapl_package_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0"} 240422.366267 # HELP node_schedstat_running_seconds_total Number of seconds CPU spent running a process. # TYPE node_schedstat_running_seconds_total counter node_schedstat_running_seconds_total{cpu="0"} 2.045936778163039e+06 node_schedstat_running_seconds_total{cpu="1"} 1.904686152592476e+06 # HELP node_schedstat_timeslices_total Number of timeslices executed by CPU. # TYPE node_schedstat_timeslices_total counter node_schedstat_timeslices_total{cpu="0"} 4.767485306e+09 node_schedstat_timeslices_total{cpu="1"} 5.145567945e+09 # HELP node_schedstat_waiting_seconds_total Number of seconds spent by processing waiting for this CPU. # TYPE node_schedstat_waiting_seconds_total counter node_schedstat_waiting_seconds_total{cpu="0"} 343796.328169361 node_schedstat_waiting_seconds_total{cpu="1"} 364107.263788241 # HELP node_scrape_collector_duration_seconds node_exporter: Duration of a collector scrape. # TYPE node_scrape_collector_duration_seconds gauge # HELP node_scrape_collector_success node_exporter: Whether a collector succeeded. 
# TYPE node_scrape_collector_success gauge node_scrape_collector_success{collector="arp"} 1 node_scrape_collector_success{collector="bcache"} 1 node_scrape_collector_success{collector="bonding"} 1 node_scrape_collector_success{collector="btrfs"} 1 node_scrape_collector_success{collector="buddyinfo"} 1 node_scrape_collector_success{collector="cgroups"} 1 node_scrape_collector_success{collector="conntrack"} 1 node_scrape_collector_success{collector="cpu"} 1 node_scrape_collector_success{collector="cpu_vulnerabilities"} 1 node_scrape_collector_success{collector="cpufreq"} 1 node_scrape_collector_success{collector="diskstats"} 1 node_scrape_collector_success{collector="dmi"} 1 node_scrape_collector_success{collector="drbd"} 1 node_scrape_collector_success{collector="edac"} 1 node_scrape_collector_success{collector="entropy"} 1 node_scrape_collector_success{collector="fibrechannel"} 1 node_scrape_collector_success{collector="filefd"} 1 node_scrape_collector_success{collector="hwmon"} 1 node_scrape_collector_success{collector="infiniband"} 1 node_scrape_collector_success{collector="interrupts"} 1 node_scrape_collector_success{collector="ipvs"} 1 node_scrape_collector_success{collector="ksmd"} 1 node_scrape_collector_success{collector="lnstat"} 1 node_scrape_collector_success{collector="loadavg"} 1 node_scrape_collector_success{collector="mdadm"} 1 node_scrape_collector_success{collector="meminfo"} 1 node_scrape_collector_success{collector="meminfo_numa"} 1 node_scrape_collector_success{collector="mountstats"} 1 node_scrape_collector_success{collector="netclass"} 1 node_scrape_collector_success{collector="netdev"} 1 node_scrape_collector_success{collector="netstat"} 1 node_scrape_collector_success{collector="nfs"} 1 node_scrape_collector_success{collector="nfsd"} 1 node_scrape_collector_success{collector="nvme"} 1 node_scrape_collector_success{collector="os"} 1 node_scrape_collector_success{collector="powersupplyclass"} 1 node_scrape_collector_success{collector="pressure"} 1 node_scrape_collector_success{collector="processes"} 1 node_scrape_collector_success{collector="qdisc"} 1 node_scrape_collector_success{collector="rapl"} 1 node_scrape_collector_success{collector="schedstat"} 1 node_scrape_collector_success{collector="selinux"} 1 node_scrape_collector_success{collector="slabinfo"} 1 node_scrape_collector_success{collector="sockstat"} 1 node_scrape_collector_success{collector="softirqs"} 1 node_scrape_collector_success{collector="softnet"} 1 node_scrape_collector_success{collector="stat"} 1 node_scrape_collector_success{collector="sysctl"} 1 node_scrape_collector_success{collector="tapestats"} 1 node_scrape_collector_success{collector="textfile"} 1 node_scrape_collector_success{collector="thermal_zone"} 1 node_scrape_collector_success{collector="time"} 1 node_scrape_collector_success{collector="udp_queues"} 1 node_scrape_collector_success{collector="vmstat"} 1 node_scrape_collector_success{collector="wifi"} 1 node_scrape_collector_success{collector="xfs"} 1 node_scrape_collector_success{collector="zfs"} 1 node_scrape_collector_success{collector="zoneinfo"} 1 # HELP node_selinux_enabled SELinux is enabled, 1 is true, 0 is false # TYPE node_selinux_enabled gauge node_selinux_enabled 0 # HELP node_slabinfo_active_objects The number of objects that are currently active (i.e., in use). 
# TYPE node_slabinfo_active_objects gauge node_slabinfo_active_objects{slab="dmaengine-unmap-128"} 1206 node_slabinfo_active_objects{slab="kmalloc-8192"} 132 node_slabinfo_active_objects{slab="kmem_cache"} 320 node_slabinfo_active_objects{slab="tw_sock_TCP"} 704 # HELP node_slabinfo_object_size_bytes The size of objects in this slab, in bytes. # TYPE node_slabinfo_object_size_bytes gauge node_slabinfo_object_size_bytes{slab="dmaengine-unmap-128"} 1088 node_slabinfo_object_size_bytes{slab="kmalloc-8192"} 8192 node_slabinfo_object_size_bytes{slab="kmem_cache"} 256 node_slabinfo_object_size_bytes{slab="tw_sock_TCP"} 256 # HELP node_slabinfo_objects The total number of allocated objects (i.e., objects that are both in use and not in use). # TYPE node_slabinfo_objects gauge node_slabinfo_objects{slab="dmaengine-unmap-128"} 1320 node_slabinfo_objects{slab="kmalloc-8192"} 148 node_slabinfo_objects{slab="kmem_cache"} 320 node_slabinfo_objects{slab="tw_sock_TCP"} 864 # HELP node_slabinfo_objects_per_slab The number of objects stored in each slab. # TYPE node_slabinfo_objects_per_slab gauge node_slabinfo_objects_per_slab{slab="dmaengine-unmap-128"} 30 node_slabinfo_objects_per_slab{slab="kmalloc-8192"} 4 node_slabinfo_objects_per_slab{slab="kmem_cache"} 32 node_slabinfo_objects_per_slab{slab="tw_sock_TCP"} 32 # HELP node_slabinfo_pages_per_slab The number of pages allocated for each slab. # TYPE node_slabinfo_pages_per_slab gauge node_slabinfo_pages_per_slab{slab="dmaengine-unmap-128"} 8 node_slabinfo_pages_per_slab{slab="kmalloc-8192"} 8 node_slabinfo_pages_per_slab{slab="kmem_cache"} 2 node_slabinfo_pages_per_slab{slab="tw_sock_TCP"} 2 # HELP node_sockstat_FRAG6_inuse Number of FRAG6 sockets in state inuse. # TYPE node_sockstat_FRAG6_inuse gauge node_sockstat_FRAG6_inuse 0 # HELP node_sockstat_FRAG6_memory Number of FRAG6 sockets in state memory. # TYPE node_sockstat_FRAG6_memory gauge node_sockstat_FRAG6_memory 0 # HELP node_sockstat_FRAG_inuse Number of FRAG sockets in state inuse. # TYPE node_sockstat_FRAG_inuse gauge node_sockstat_FRAG_inuse 0 # HELP node_sockstat_FRAG_memory Number of FRAG sockets in state memory. # TYPE node_sockstat_FRAG_memory gauge node_sockstat_FRAG_memory 0 # HELP node_sockstat_RAW6_inuse Number of RAW6 sockets in state inuse. # TYPE node_sockstat_RAW6_inuse gauge node_sockstat_RAW6_inuse 1 # HELP node_sockstat_RAW_inuse Number of RAW sockets in state inuse. # TYPE node_sockstat_RAW_inuse gauge node_sockstat_RAW_inuse 0 # HELP node_sockstat_TCP6_inuse Number of TCP6 sockets in state inuse. # TYPE node_sockstat_TCP6_inuse gauge node_sockstat_TCP6_inuse 17 # HELP node_sockstat_TCP_alloc Number of TCP sockets in state alloc. # TYPE node_sockstat_TCP_alloc gauge node_sockstat_TCP_alloc 17 # HELP node_sockstat_TCP_inuse Number of TCP sockets in state inuse. # TYPE node_sockstat_TCP_inuse gauge node_sockstat_TCP_inuse 4 # HELP node_sockstat_TCP_mem Number of TCP sockets in state mem. # TYPE node_sockstat_TCP_mem gauge node_sockstat_TCP_mem 1 # HELP node_sockstat_TCP_mem_bytes Number of TCP sockets in state mem_bytes. # TYPE node_sockstat_TCP_mem_bytes gauge node_sockstat_TCP_mem_bytes 4096 # HELP node_sockstat_TCP_orphan Number of TCP sockets in state orphan. # TYPE node_sockstat_TCP_orphan gauge node_sockstat_TCP_orphan 0 # HELP node_sockstat_TCP_tw Number of TCP sockets in state tw. # TYPE node_sockstat_TCP_tw gauge node_sockstat_TCP_tw 4 # HELP node_sockstat_UDP6_inuse Number of UDP6 sockets in state inuse. 
# TYPE node_sockstat_UDP6_inuse gauge node_sockstat_UDP6_inuse 9 # HELP node_sockstat_UDPLITE6_inuse Number of UDPLITE6 sockets in state inuse. # TYPE node_sockstat_UDPLITE6_inuse gauge node_sockstat_UDPLITE6_inuse 0 # HELP node_sockstat_UDPLITE_inuse Number of UDPLITE sockets in state inuse. # TYPE node_sockstat_UDPLITE_inuse gauge node_sockstat_UDPLITE_inuse 0 # HELP node_sockstat_UDP_inuse Number of UDP sockets in state inuse. # TYPE node_sockstat_UDP_inuse gauge node_sockstat_UDP_inuse 0 # HELP node_sockstat_UDP_mem Number of UDP sockets in state mem. # TYPE node_sockstat_UDP_mem gauge node_sockstat_UDP_mem 0 # HELP node_sockstat_UDP_mem_bytes Number of UDP sockets in state mem_bytes. # TYPE node_sockstat_UDP_mem_bytes gauge node_sockstat_UDP_mem_bytes 0 # HELP node_sockstat_sockets_used Number of IPv4 sockets in use. # TYPE node_sockstat_sockets_used gauge node_sockstat_sockets_used 229 # HELP node_softirqs_functions_total Softirq counts per CPU. # TYPE node_softirqs_functions_total counter node_softirqs_functions_total{cpu="0",type="BLOCK"} 23776 node_softirqs_functions_total{cpu="0",type="HI"} 7 node_softirqs_functions_total{cpu="0",type="HRTIMER"} 40 node_softirqs_functions_total{cpu="0",type="IRQ_POLL"} 0 node_softirqs_functions_total{cpu="0",type="NET_RX"} 43066 node_softirqs_functions_total{cpu="0",type="NET_TX"} 2301 node_softirqs_functions_total{cpu="0",type="RCU"} 155929 node_softirqs_functions_total{cpu="0",type="SCHED"} 378895 node_softirqs_functions_total{cpu="0",type="TASKLET"} 372 node_softirqs_functions_total{cpu="0",type="TIMER"} 424191 node_softirqs_functions_total{cpu="1",type="BLOCK"} 24115 node_softirqs_functions_total{cpu="1",type="HI"} 1 node_softirqs_functions_total{cpu="1",type="HRTIMER"} 346 node_softirqs_functions_total{cpu="1",type="IRQ_POLL"} 0 node_softirqs_functions_total{cpu="1",type="NET_RX"} 104508 node_softirqs_functions_total{cpu="1",type="NET_TX"} 2430 node_softirqs_functions_total{cpu="1",type="RCU"} 146631 node_softirqs_functions_total{cpu="1",type="SCHED"} 152852 node_softirqs_functions_total{cpu="1",type="TASKLET"} 1899 node_softirqs_functions_total{cpu="1",type="TIMER"} 108342 # HELP node_softirqs_total Number of softirq calls. 
# TYPE node_softirqs_total counter node_softirqs_total{vector="block"} 186066 node_softirqs_total{vector="block_iopoll"} 0 node_softirqs_total{vector="hi"} 250191 node_softirqs_total{vector="hrtimer"} 12499 node_softirqs_total{vector="net_rx"} 211099 node_softirqs_total{vector="net_tx"} 1647 node_softirqs_total{vector="rcu"} 508444 node_softirqs_total{vector="sched"} 622196 node_softirqs_total{vector="tasklet"} 1.783454e+06 node_softirqs_total{vector="timer"} 1.481983e+06 # HELP node_softnet_backlog_len Softnet backlog status # TYPE node_softnet_backlog_len gauge node_softnet_backlog_len{cpu="0"} 0 node_softnet_backlog_len{cpu="1"} 0 node_softnet_backlog_len{cpu="2"} 0 node_softnet_backlog_len{cpu="3"} 0 # HELP node_softnet_cpu_collision_total Number of collision occur while obtaining device lock while transmitting # TYPE node_softnet_cpu_collision_total counter node_softnet_cpu_collision_total{cpu="0"} 0 node_softnet_cpu_collision_total{cpu="1"} 0 node_softnet_cpu_collision_total{cpu="2"} 0 node_softnet_cpu_collision_total{cpu="3"} 0 # HELP node_softnet_dropped_total Number of dropped packets # TYPE node_softnet_dropped_total counter node_softnet_dropped_total{cpu="0"} 0 node_softnet_dropped_total{cpu="1"} 41 node_softnet_dropped_total{cpu="2"} 0 node_softnet_dropped_total{cpu="3"} 0 # HELP node_softnet_flow_limit_count_total Number of times flow limit has been reached # TYPE node_softnet_flow_limit_count_total counter node_softnet_flow_limit_count_total{cpu="0"} 0 node_softnet_flow_limit_count_total{cpu="1"} 0 node_softnet_flow_limit_count_total{cpu="2"} 0 node_softnet_flow_limit_count_total{cpu="3"} 0 # HELP node_softnet_processed_total Number of processed packets # TYPE node_softnet_processed_total counter node_softnet_processed_total{cpu="0"} 299641 node_softnet_processed_total{cpu="1"} 916354 node_softnet_processed_total{cpu="2"} 5.577791e+06 node_softnet_processed_total{cpu="3"} 3.113785e+06 # HELP node_softnet_received_rps_total Number of times cpu woken up received_rps # TYPE node_softnet_received_rps_total counter node_softnet_received_rps_total{cpu="0"} 0 node_softnet_received_rps_total{cpu="1"} 0 node_softnet_received_rps_total{cpu="2"} 0 node_softnet_received_rps_total{cpu="3"} 0 # HELP node_softnet_times_squeezed_total Number of times processing packets ran out of quota # TYPE node_softnet_times_squeezed_total counter node_softnet_times_squeezed_total{cpu="0"} 1 node_softnet_times_squeezed_total{cpu="1"} 10 node_softnet_times_squeezed_total{cpu="2"} 85 node_softnet_times_squeezed_total{cpu="3"} 50 # HELP node_sysctl_fs_file_nr sysctl fs.file-nr # TYPE node_sysctl_fs_file_nr untyped node_sysctl_fs_file_nr{index="0"} 1024 node_sysctl_fs_file_nr{index="1"} 0 node_sysctl_fs_file_nr{index="2"} 1.631329e+06 # HELP node_sysctl_fs_file_nr_current sysctl fs.file-nr, field 1 # TYPE node_sysctl_fs_file_nr_current untyped node_sysctl_fs_file_nr_current 0 # HELP node_sysctl_fs_file_nr_max sysctl fs.file-nr, field 2 # TYPE node_sysctl_fs_file_nr_max untyped node_sysctl_fs_file_nr_max 1.631329e+06 # HELP node_sysctl_fs_file_nr_total sysctl fs.file-nr, field 0 # TYPE node_sysctl_fs_file_nr_total untyped node_sysctl_fs_file_nr_total 1024 # HELP node_sysctl_info sysctl info # TYPE node_sysctl_info gauge node_sysctl_info{index="0",name="kernel.seccomp.actions_avail",value="kill_process"} 1 node_sysctl_info{index="1",name="kernel.seccomp.actions_avail",value="kill_thread"} 1 node_sysctl_info{index="2",name="kernel.seccomp.actions_avail",value="trap"} 1 
node_sysctl_info{index="3",name="kernel.seccomp.actions_avail",value="errno"} 1 node_sysctl_info{index="4",name="kernel.seccomp.actions_avail",value="user_notif"} 1 node_sysctl_info{index="5",name="kernel.seccomp.actions_avail",value="trace"} 1 node_sysctl_info{index="6",name="kernel.seccomp.actions_avail",value="log"} 1 node_sysctl_info{index="7",name="kernel.seccomp.actions_avail",value="allow"} 1 # HELP node_sysctl_kernel_threads_max sysctl kernel.threads-max # TYPE node_sysctl_kernel_threads_max untyped node_sysctl_kernel_threads_max 7801 # HELP node_tape_io_now The number of I/Os currently outstanding to this device. # TYPE node_tape_io_now gauge node_tape_io_now{device="st0"} 1 # HELP node_tape_io_others_total The number of I/Os issued to the tape drive other than read or write commands. The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total # TYPE node_tape_io_others_total counter node_tape_io_others_total{device="st0"} 1409 # HELP node_tape_io_time_seconds_total The amount of time spent waiting for all I/O to complete (including read and write). This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used. # TYPE node_tape_io_time_seconds_total counter node_tape_io_time_seconds_total{device="st0"} 9247.01108772 # HELP node_tape_read_bytes_total The number of bytes read from the tape drive. # TYPE node_tape_read_bytes_total counter node_tape_read_bytes_total{device="st0"} 9.79383912e+08 # HELP node_tape_read_time_seconds_total The amount of time spent waiting for read requests to complete. # TYPE node_tape_read_time_seconds_total counter node_tape_read_time_seconds_total{device="st0"} 33.788355744 # HELP node_tape_reads_completed_total The number of read requests issued to the tape drive. # TYPE node_tape_reads_completed_total counter node_tape_reads_completed_total{device="st0"} 3741 # HELP node_tape_residual_total The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger thean the block size on tape. For write not all data made it to tape. # TYPE node_tape_residual_total counter node_tape_residual_total{device="st0"} 19 # HELP node_tape_write_time_seconds_total The amount of time spent waiting for write requests to complete. # TYPE node_tape_write_time_seconds_total counter node_tape_write_time_seconds_total{device="st0"} 5233.597394395 # HELP node_tape_writes_completed_total The number of write requests issued to the tape drive. # TYPE node_tape_writes_completed_total counter node_tape_writes_completed_total{device="st0"} 5.3772916e+07 # HELP node_tape_written_bytes_total The number of bytes written to the tape drive. # TYPE node_tape_written_bytes_total counter node_tape_written_bytes_total{device="st0"} 1.496246784e+12 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 # HELP node_thermal_zone_temp Zone temperature in Celsius # TYPE node_thermal_zone_temp gauge node_thermal_zone_temp{type="cpu-thermal",zone="0"} 12.376 # HELP node_time_clocksource_available_info Available clocksources read from '/sys/devices/system/clocksource'. 
# TYPE node_time_clocksource_available_info gauge node_time_clocksource_available_info{clocksource="acpi_pm",device="0"} 1 node_time_clocksource_available_info{clocksource="hpet",device="0"} 1 node_time_clocksource_available_info{clocksource="tsc",device="0"} 1 # HELP node_time_clocksource_current_info Current clocksource read from '/sys/devices/system/clocksource'. # TYPE node_time_clocksource_current_info gauge node_time_clocksource_current_info{clocksource="tsc",device="0"} 1 # HELP node_time_seconds System time in seconds since epoch (1970). # TYPE node_time_seconds gauge # HELP node_time_zone_offset_seconds System time zone offset in seconds. # TYPE node_time_zone_offset_seconds gauge # HELP node_udp_queues Number of allocated memory in the kernel for UDP datagrams in bytes. # TYPE node_udp_queues gauge node_udp_queues{ip="v4",queue="rx"} 0 node_udp_queues{ip="v4",queue="tx"} 21 # HELP node_vmstat_oom_kill /proc/vmstat information field oom_kill. # TYPE node_vmstat_oom_kill untyped node_vmstat_oom_kill 0 # HELP node_vmstat_pgfault /proc/vmstat information field pgfault. # TYPE node_vmstat_pgfault untyped node_vmstat_pgfault 2.320168809e+09 # HELP node_vmstat_pgmajfault /proc/vmstat information field pgmajfault. # TYPE node_vmstat_pgmajfault untyped node_vmstat_pgmajfault 507162 # HELP node_vmstat_pgpgin /proc/vmstat information field pgpgin. # TYPE node_vmstat_pgpgin untyped node_vmstat_pgpgin 7.344136e+06 # HELP node_vmstat_pgpgout /proc/vmstat information field pgpgout. # TYPE node_vmstat_pgpgout untyped node_vmstat_pgpgout 1.541180581e+09 # HELP node_vmstat_pswpin /proc/vmstat information field pswpin. # TYPE node_vmstat_pswpin untyped node_vmstat_pswpin 1476 # HELP node_vmstat_pswpout /proc/vmstat information field pswpout. # TYPE node_vmstat_pswpout untyped node_vmstat_pswpout 35045 # HELP node_wifi_interface_frequency_hertz The current frequency a WiFi interface is operating at, in hertz. # TYPE node_wifi_interface_frequency_hertz gauge node_wifi_interface_frequency_hertz{device="wlan0"} 2.412e+09 node_wifi_interface_frequency_hertz{device="wlan1"} 2.412e+09 # HELP node_wifi_station_beacon_loss_total The total number of times a station has detected a beacon loss. # TYPE node_wifi_station_beacon_loss_total counter node_wifi_station_beacon_loss_total{device="wlan0",mac_address="01:02:03:04:05:06"} 2 node_wifi_station_beacon_loss_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1 # HELP node_wifi_station_connected_seconds_total The total number of seconds a station has been connected to an access point. # TYPE node_wifi_station_connected_seconds_total counter node_wifi_station_connected_seconds_total{device="wlan0",mac_address="01:02:03:04:05:06"} 60 node_wifi_station_connected_seconds_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 30 # HELP node_wifi_station_inactive_seconds The number of seconds since any wireless activity has occurred on a station. # TYPE node_wifi_station_inactive_seconds gauge node_wifi_station_inactive_seconds{device="wlan0",mac_address="01:02:03:04:05:06"} 0.8 node_wifi_station_inactive_seconds{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0.4 # HELP node_wifi_station_info Labeled WiFi interface station information as provided by the operating system. # TYPE node_wifi_station_info gauge node_wifi_station_info{bssid="00:11:22:33:44:55",device="wlan0",mode="client",ssid="Example"} 1 # HELP node_wifi_station_receive_bits_per_second The current WiFi receive bitrate of a station, in bits per second. 
# TYPE node_wifi_station_receive_bits_per_second gauge node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 2.56e+08 node_wifi_station_receive_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.28e+08 # HELP node_wifi_station_receive_bytes_total The total number of bytes received by a WiFi station. # TYPE node_wifi_station_receive_bytes_total counter node_wifi_station_receive_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 node_wifi_station_receive_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_signal_dbm The current WiFi signal strength, in decibel-milliwatts (dBm). # TYPE node_wifi_station_signal_dbm gauge node_wifi_station_signal_dbm{device="wlan0",mac_address="01:02:03:04:05:06"} -26 node_wifi_station_signal_dbm{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} -52 # HELP node_wifi_station_transmit_bits_per_second The current WiFi transmit bitrate of a station, in bits per second. # TYPE node_wifi_station_transmit_bits_per_second gauge node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="01:02:03:04:05:06"} 3.28e+08 node_wifi_station_transmit_bits_per_second{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 1.64e+08 # HELP node_wifi_station_transmit_bytes_total The total number of bytes transmitted by a WiFi station. # TYPE node_wifi_station_transmit_bytes_total counter node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="01:02:03:04:05:06"} 0 node_wifi_station_transmit_bytes_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 0 # HELP node_wifi_station_transmit_failed_total The total number of times a station has failed to send a packet. # TYPE node_wifi_station_transmit_failed_total counter node_wifi_station_transmit_failed_total{device="wlan0",mac_address="01:02:03:04:05:06"} 4 node_wifi_station_transmit_failed_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 2 # HELP node_wifi_station_transmit_retries_total The total number of times a station has had to retry while sending a packet. # TYPE node_wifi_station_transmit_retries_total counter node_wifi_station_transmit_retries_total{device="wlan0",mac_address="01:02:03:04:05:06"} 20 node_wifi_station_transmit_retries_total{device="wlan0",mac_address="aa:bb:cc:dd:ee:ff"} 10 # HELP node_xfs_allocation_btree_compares_total Number of allocation B-tree compares for a filesystem. # TYPE node_xfs_allocation_btree_compares_total counter node_xfs_allocation_btree_compares_total{device="sda1"} 0 # HELP node_xfs_allocation_btree_lookups_total Number of allocation B-tree lookups for a filesystem. # TYPE node_xfs_allocation_btree_lookups_total counter node_xfs_allocation_btree_lookups_total{device="sda1"} 0 # HELP node_xfs_allocation_btree_records_deleted_total Number of allocation B-tree records deleted for a filesystem. # TYPE node_xfs_allocation_btree_records_deleted_total counter node_xfs_allocation_btree_records_deleted_total{device="sda1"} 0 # HELP node_xfs_allocation_btree_records_inserted_total Number of allocation B-tree records inserted for a filesystem. # TYPE node_xfs_allocation_btree_records_inserted_total counter node_xfs_allocation_btree_records_inserted_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_compares_total Number of block map B-tree compares for a filesystem. # TYPE node_xfs_block_map_btree_compares_total counter node_xfs_block_map_btree_compares_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_lookups_total Number of block map B-tree lookups for a filesystem. 
# TYPE node_xfs_block_map_btree_lookups_total counter node_xfs_block_map_btree_lookups_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_records_deleted_total Number of block map B-tree records deleted for a filesystem. # TYPE node_xfs_block_map_btree_records_deleted_total counter node_xfs_block_map_btree_records_deleted_total{device="sda1"} 0 # HELP node_xfs_block_map_btree_records_inserted_total Number of block map B-tree records inserted for a filesystem. # TYPE node_xfs_block_map_btree_records_inserted_total counter node_xfs_block_map_btree_records_inserted_total{device="sda1"} 0 # HELP node_xfs_block_mapping_extent_list_compares_total Number of extent list compares for a filesystem. # TYPE node_xfs_block_mapping_extent_list_compares_total counter node_xfs_block_mapping_extent_list_compares_total{device="sda1"} 0 # HELP node_xfs_block_mapping_extent_list_deletions_total Number of extent list deletions for a filesystem. # TYPE node_xfs_block_mapping_extent_list_deletions_total counter node_xfs_block_mapping_extent_list_deletions_total{device="sda1"} 1 # HELP node_xfs_block_mapping_extent_list_insertions_total Number of extent list insertions for a filesystem. # TYPE node_xfs_block_mapping_extent_list_insertions_total counter node_xfs_block_mapping_extent_list_insertions_total{device="sda1"} 1 # HELP node_xfs_block_mapping_extent_list_lookups_total Number of extent list lookups for a filesystem. # TYPE node_xfs_block_mapping_extent_list_lookups_total counter node_xfs_block_mapping_extent_list_lookups_total{device="sda1"} 91 # HELP node_xfs_block_mapping_reads_total Number of block map for read operations for a filesystem. # TYPE node_xfs_block_mapping_reads_total counter node_xfs_block_mapping_reads_total{device="sda1"} 61 # HELP node_xfs_block_mapping_unmaps_total Number of block unmaps (deletes) for a filesystem. # TYPE node_xfs_block_mapping_unmaps_total counter node_xfs_block_mapping_unmaps_total{device="sda1"} 1 # HELP node_xfs_block_mapping_writes_total Number of block map for write operations for a filesystem. # TYPE node_xfs_block_mapping_writes_total counter node_xfs_block_mapping_writes_total{device="sda1"} 29 # HELP node_xfs_directory_operation_create_total Number of times a new directory entry was created for a filesystem. # TYPE node_xfs_directory_operation_create_total counter node_xfs_directory_operation_create_total{device="sda1"} 2 # HELP node_xfs_directory_operation_getdents_total Number of times the directory getdents operation was performed for a filesystem. # TYPE node_xfs_directory_operation_getdents_total counter node_xfs_directory_operation_getdents_total{device="sda1"} 52 # HELP node_xfs_directory_operation_lookup_total Number of file name directory lookups which miss the operating systems directory name lookup cache. # TYPE node_xfs_directory_operation_lookup_total counter node_xfs_directory_operation_lookup_total{device="sda1"} 3 # HELP node_xfs_directory_operation_remove_total Number of times an existing directory entry was removed for a filesystem. # TYPE node_xfs_directory_operation_remove_total counter node_xfs_directory_operation_remove_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_blocks_allocated_total Number of blocks allocated for a filesystem. # TYPE node_xfs_extent_allocation_blocks_allocated_total counter node_xfs_extent_allocation_blocks_allocated_total{device="sda1"} 872 # HELP node_xfs_extent_allocation_blocks_freed_total Number of blocks freed for a filesystem. 
# TYPE node_xfs_extent_allocation_blocks_freed_total counter node_xfs_extent_allocation_blocks_freed_total{device="sda1"} 0 # HELP node_xfs_extent_allocation_extents_allocated_total Number of extents allocated for a filesystem. # TYPE node_xfs_extent_allocation_extents_allocated_total counter node_xfs_extent_allocation_extents_allocated_total{device="sda1"} 1 # HELP node_xfs_extent_allocation_extents_freed_total Number of extents freed for a filesystem. # TYPE node_xfs_extent_allocation_extents_freed_total counter node_xfs_extent_allocation_extents_freed_total{device="sda1"} 0 # HELP node_xfs_inode_operation_attempts_total Number of times the OS looked for an XFS inode in the inode cache. # TYPE node_xfs_inode_operation_attempts_total counter node_xfs_inode_operation_attempts_total{device="sda1"} 5 # HELP node_xfs_inode_operation_attribute_changes_total Number of times the OS explicitly changed the attributes of an XFS inode. # TYPE node_xfs_inode_operation_attribute_changes_total counter node_xfs_inode_operation_attribute_changes_total{device="sda1"} 1 # HELP node_xfs_inode_operation_duplicates_total Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process. # TYPE node_xfs_inode_operation_duplicates_total counter node_xfs_inode_operation_duplicates_total{device="sda1"} 0 # HELP node_xfs_inode_operation_found_total Number of times the OS looked for and found an XFS inode in the inode cache. # TYPE node_xfs_inode_operation_found_total counter node_xfs_inode_operation_found_total{device="sda1"} 1 # HELP node_xfs_inode_operation_missed_total Number of times the OS looked for an XFS inode in the cache, but did not find it. # TYPE node_xfs_inode_operation_missed_total counter node_xfs_inode_operation_missed_total{device="sda1"} 4 # HELP node_xfs_inode_operation_reclaims_total Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose. # TYPE node_xfs_inode_operation_reclaims_total counter node_xfs_inode_operation_reclaims_total{device="sda1"} 0 # HELP node_xfs_inode_operation_recycled_total Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled. # TYPE node_xfs_inode_operation_recycled_total counter node_xfs_inode_operation_recycled_total{device="sda1"} 0 # HELP node_xfs_read_calls_total Number of read(2) system calls made to files in a filesystem. # TYPE node_xfs_read_calls_total counter node_xfs_read_calls_total{device="sda1"} 0 # HELP node_xfs_vnode_active_total Number of vnodes not on free lists for a filesystem. # TYPE node_xfs_vnode_active_total counter node_xfs_vnode_active_total{device="sda1"} 4 # HELP node_xfs_vnode_allocate_total Number of times vn_alloc called for a filesystem. # TYPE node_xfs_vnode_allocate_total counter node_xfs_vnode_allocate_total{device="sda1"} 0 # HELP node_xfs_vnode_get_total Number of times vn_get called for a filesystem. # TYPE node_xfs_vnode_get_total counter node_xfs_vnode_get_total{device="sda1"} 0 # HELP node_xfs_vnode_hold_total Number of times vn_hold called for a filesystem. # TYPE node_xfs_vnode_hold_total counter node_xfs_vnode_hold_total{device="sda1"} 0 # HELP node_xfs_vnode_reclaim_total Number of times vn_reclaim called for a filesystem. # TYPE node_xfs_vnode_reclaim_total counter node_xfs_vnode_reclaim_total{device="sda1"} 1 # HELP node_xfs_vnode_release_total Number of times vn_rele called for a filesystem. 
# TYPE node_xfs_vnode_release_total counter node_xfs_vnode_release_total{device="sda1"} 1 # HELP node_xfs_vnode_remove_total Number of times vn_remove called for a filesystem. # TYPE node_xfs_vnode_remove_total counter node_xfs_vnode_remove_total{device="sda1"} 1 # HELP node_xfs_write_calls_total Number of write(2) system calls made to files in a filesystem. # TYPE node_xfs_write_calls_total counter node_xfs_write_calls_total{device="sda1"} 28 # HELP node_zfs_abd_linear_cnt kstat.zfs.misc.abdstats.linear_cnt # TYPE node_zfs_abd_linear_cnt untyped node_zfs_abd_linear_cnt 62 # HELP node_zfs_abd_linear_data_size kstat.zfs.misc.abdstats.linear_data_size # TYPE node_zfs_abd_linear_data_size untyped node_zfs_abd_linear_data_size 223232 # HELP node_zfs_abd_scatter_chunk_waste kstat.zfs.misc.abdstats.scatter_chunk_waste # TYPE node_zfs_abd_scatter_chunk_waste untyped node_zfs_abd_scatter_chunk_waste 0 # HELP node_zfs_abd_scatter_cnt kstat.zfs.misc.abdstats.scatter_cnt # TYPE node_zfs_abd_scatter_cnt untyped node_zfs_abd_scatter_cnt 1 # HELP node_zfs_abd_scatter_data_size kstat.zfs.misc.abdstats.scatter_data_size # TYPE node_zfs_abd_scatter_data_size untyped node_zfs_abd_scatter_data_size 16384 # HELP node_zfs_abd_scatter_order_0 kstat.zfs.misc.abdstats.scatter_order_0 # TYPE node_zfs_abd_scatter_order_0 untyped node_zfs_abd_scatter_order_0 0 # HELP node_zfs_abd_scatter_order_1 kstat.zfs.misc.abdstats.scatter_order_1 # TYPE node_zfs_abd_scatter_order_1 untyped node_zfs_abd_scatter_order_1 0 # HELP node_zfs_abd_scatter_order_10 kstat.zfs.misc.abdstats.scatter_order_10 # TYPE node_zfs_abd_scatter_order_10 untyped node_zfs_abd_scatter_order_10 0 # HELP node_zfs_abd_scatter_order_2 kstat.zfs.misc.abdstats.scatter_order_2 # TYPE node_zfs_abd_scatter_order_2 untyped node_zfs_abd_scatter_order_2 1 # HELP node_zfs_abd_scatter_order_3 kstat.zfs.misc.abdstats.scatter_order_3 # TYPE node_zfs_abd_scatter_order_3 untyped node_zfs_abd_scatter_order_3 0 # HELP node_zfs_abd_scatter_order_4 kstat.zfs.misc.abdstats.scatter_order_4 # TYPE node_zfs_abd_scatter_order_4 untyped node_zfs_abd_scatter_order_4 0 # HELP node_zfs_abd_scatter_order_5 kstat.zfs.misc.abdstats.scatter_order_5 # TYPE node_zfs_abd_scatter_order_5 untyped node_zfs_abd_scatter_order_5 0 # HELP node_zfs_abd_scatter_order_6 kstat.zfs.misc.abdstats.scatter_order_6 # TYPE node_zfs_abd_scatter_order_6 untyped node_zfs_abd_scatter_order_6 0 # HELP node_zfs_abd_scatter_order_7 kstat.zfs.misc.abdstats.scatter_order_7 # TYPE node_zfs_abd_scatter_order_7 untyped node_zfs_abd_scatter_order_7 0 # HELP node_zfs_abd_scatter_order_8 kstat.zfs.misc.abdstats.scatter_order_8 # TYPE node_zfs_abd_scatter_order_8 untyped node_zfs_abd_scatter_order_8 0 # HELP node_zfs_abd_scatter_order_9 kstat.zfs.misc.abdstats.scatter_order_9 # TYPE node_zfs_abd_scatter_order_9 untyped node_zfs_abd_scatter_order_9 0 # HELP node_zfs_abd_scatter_page_alloc_retry kstat.zfs.misc.abdstats.scatter_page_alloc_retry # TYPE node_zfs_abd_scatter_page_alloc_retry untyped node_zfs_abd_scatter_page_alloc_retry 0 # HELP node_zfs_abd_scatter_page_multi_chunk kstat.zfs.misc.abdstats.scatter_page_multi_chunk # TYPE node_zfs_abd_scatter_page_multi_chunk untyped node_zfs_abd_scatter_page_multi_chunk 0 # HELP node_zfs_abd_scatter_page_multi_zone kstat.zfs.misc.abdstats.scatter_page_multi_zone # TYPE node_zfs_abd_scatter_page_multi_zone untyped node_zfs_abd_scatter_page_multi_zone 0 # HELP node_zfs_abd_scatter_sg_table_retry kstat.zfs.misc.abdstats.scatter_sg_table_retry # TYPE 
node_zfs_abd_scatter_sg_table_retry untyped node_zfs_abd_scatter_sg_table_retry 0 # HELP node_zfs_abd_struct_size kstat.zfs.misc.abdstats.struct_size # TYPE node_zfs_abd_struct_size untyped node_zfs_abd_struct_size 2520 # HELP node_zfs_arc_anon_evictable_data kstat.zfs.misc.arcstats.anon_evictable_data # TYPE node_zfs_arc_anon_evictable_data untyped node_zfs_arc_anon_evictable_data 0 # HELP node_zfs_arc_anon_evictable_metadata kstat.zfs.misc.arcstats.anon_evictable_metadata # TYPE node_zfs_arc_anon_evictable_metadata untyped node_zfs_arc_anon_evictable_metadata 0 # HELP node_zfs_arc_anon_size kstat.zfs.misc.arcstats.anon_size # TYPE node_zfs_arc_anon_size untyped node_zfs_arc_anon_size 1.91744e+06 # HELP node_zfs_arc_arc_loaned_bytes kstat.zfs.misc.arcstats.arc_loaned_bytes # TYPE node_zfs_arc_arc_loaned_bytes untyped node_zfs_arc_arc_loaned_bytes 0 # HELP node_zfs_arc_arc_meta_limit kstat.zfs.misc.arcstats.arc_meta_limit # TYPE node_zfs_arc_arc_meta_limit untyped node_zfs_arc_arc_meta_limit 6.275982336e+09 # HELP node_zfs_arc_arc_meta_max kstat.zfs.misc.arcstats.arc_meta_max # TYPE node_zfs_arc_arc_meta_max untyped node_zfs_arc_arc_meta_max 4.49286096e+08 # HELP node_zfs_arc_arc_meta_min kstat.zfs.misc.arcstats.arc_meta_min # TYPE node_zfs_arc_arc_meta_min untyped node_zfs_arc_arc_meta_min 1.6777216e+07 # HELP node_zfs_arc_arc_meta_used kstat.zfs.misc.arcstats.arc_meta_used # TYPE node_zfs_arc_arc_meta_used untyped node_zfs_arc_arc_meta_used 3.08103632e+08 # HELP node_zfs_arc_arc_need_free kstat.zfs.misc.arcstats.arc_need_free # TYPE node_zfs_arc_arc_need_free untyped node_zfs_arc_arc_need_free 0 # HELP node_zfs_arc_arc_no_grow kstat.zfs.misc.arcstats.arc_no_grow # TYPE node_zfs_arc_arc_no_grow untyped node_zfs_arc_arc_no_grow 0 # HELP node_zfs_arc_arc_prune kstat.zfs.misc.arcstats.arc_prune # TYPE node_zfs_arc_arc_prune untyped node_zfs_arc_arc_prune 0 # HELP node_zfs_arc_arc_sys_free kstat.zfs.misc.arcstats.arc_sys_free # TYPE node_zfs_arc_arc_sys_free untyped node_zfs_arc_arc_sys_free 2.61496832e+08 # HELP node_zfs_arc_arc_tempreserve kstat.zfs.misc.arcstats.arc_tempreserve # TYPE node_zfs_arc_arc_tempreserve untyped node_zfs_arc_arc_tempreserve 0 # HELP node_zfs_arc_c kstat.zfs.misc.arcstats.c # TYPE node_zfs_arc_c untyped node_zfs_arc_c 1.643208777e+09 # HELP node_zfs_arc_c_max kstat.zfs.misc.arcstats.c_max # TYPE node_zfs_arc_c_max untyped node_zfs_arc_c_max 8.367976448e+09 # HELP node_zfs_arc_c_min kstat.zfs.misc.arcstats.c_min # TYPE node_zfs_arc_c_min untyped node_zfs_arc_c_min 3.3554432e+07 # HELP node_zfs_arc_data_size kstat.zfs.misc.arcstats.data_size # TYPE node_zfs_arc_data_size untyped node_zfs_arc_data_size 1.29583616e+09 # HELP node_zfs_arc_deleted kstat.zfs.misc.arcstats.deleted # TYPE node_zfs_arc_deleted untyped node_zfs_arc_deleted 60403 # HELP node_zfs_arc_demand_data_hits kstat.zfs.misc.arcstats.demand_data_hits # TYPE node_zfs_arc_demand_data_hits untyped node_zfs_arc_demand_data_hits 7.221032e+06 # HELP node_zfs_arc_demand_data_misses kstat.zfs.misc.arcstats.demand_data_misses # TYPE node_zfs_arc_demand_data_misses untyped node_zfs_arc_demand_data_misses 73300 # HELP node_zfs_arc_demand_metadata_hits kstat.zfs.misc.arcstats.demand_metadata_hits # TYPE node_zfs_arc_demand_metadata_hits untyped node_zfs_arc_demand_metadata_hits 1.464353e+06 # HELP node_zfs_arc_demand_metadata_misses kstat.zfs.misc.arcstats.demand_metadata_misses # TYPE node_zfs_arc_demand_metadata_misses untyped node_zfs_arc_demand_metadata_misses 498170 # HELP node_zfs_arc_duplicate_buffers 
kstat.zfs.misc.arcstats.duplicate_buffers # TYPE node_zfs_arc_duplicate_buffers untyped node_zfs_arc_duplicate_buffers 0 # HELP node_zfs_arc_duplicate_buffers_size kstat.zfs.misc.arcstats.duplicate_buffers_size # TYPE node_zfs_arc_duplicate_buffers_size untyped node_zfs_arc_duplicate_buffers_size 0 # HELP node_zfs_arc_duplicate_reads kstat.zfs.misc.arcstats.duplicate_reads # TYPE node_zfs_arc_duplicate_reads untyped node_zfs_arc_duplicate_reads 0 # HELP node_zfs_arc_evict_l2_cached kstat.zfs.misc.arcstats.evict_l2_cached # TYPE node_zfs_arc_evict_l2_cached untyped node_zfs_arc_evict_l2_cached 0 # HELP node_zfs_arc_evict_l2_eligible kstat.zfs.misc.arcstats.evict_l2_eligible # TYPE node_zfs_arc_evict_l2_eligible untyped node_zfs_arc_evict_l2_eligible 8.99251456e+09 # HELP node_zfs_arc_evict_l2_ineligible kstat.zfs.misc.arcstats.evict_l2_ineligible # TYPE node_zfs_arc_evict_l2_ineligible untyped node_zfs_arc_evict_l2_ineligible 9.92552448e+08 # HELP node_zfs_arc_evict_l2_skip kstat.zfs.misc.arcstats.evict_l2_skip # TYPE node_zfs_arc_evict_l2_skip untyped node_zfs_arc_evict_l2_skip 0 # HELP node_zfs_arc_evict_not_enough kstat.zfs.misc.arcstats.evict_not_enough # TYPE node_zfs_arc_evict_not_enough untyped node_zfs_arc_evict_not_enough 680 # HELP node_zfs_arc_evict_skip kstat.zfs.misc.arcstats.evict_skip # TYPE node_zfs_arc_evict_skip untyped node_zfs_arc_evict_skip 2.265729e+06 # HELP node_zfs_arc_hash_chain_max kstat.zfs.misc.arcstats.hash_chain_max # TYPE node_zfs_arc_hash_chain_max untyped node_zfs_arc_hash_chain_max 3 # HELP node_zfs_arc_hash_chains kstat.zfs.misc.arcstats.hash_chains # TYPE node_zfs_arc_hash_chains untyped node_zfs_arc_hash_chains 412 # HELP node_zfs_arc_hash_collisions kstat.zfs.misc.arcstats.hash_collisions # TYPE node_zfs_arc_hash_collisions untyped node_zfs_arc_hash_collisions 50564 # HELP node_zfs_arc_hash_elements kstat.zfs.misc.arcstats.hash_elements # TYPE node_zfs_arc_hash_elements untyped node_zfs_arc_hash_elements 42359 # HELP node_zfs_arc_hash_elements_max kstat.zfs.misc.arcstats.hash_elements_max # TYPE node_zfs_arc_hash_elements_max untyped node_zfs_arc_hash_elements_max 88245 # HELP node_zfs_arc_hdr_size kstat.zfs.misc.arcstats.hdr_size # TYPE node_zfs_arc_hdr_size untyped node_zfs_arc_hdr_size 1.636108e+07 # HELP node_zfs_arc_hits kstat.zfs.misc.arcstats.hits # TYPE node_zfs_arc_hits untyped node_zfs_arc_hits 8.772612e+06 # HELP node_zfs_arc_l2_abort_lowmem kstat.zfs.misc.arcstats.l2_abort_lowmem # TYPE node_zfs_arc_l2_abort_lowmem untyped node_zfs_arc_l2_abort_lowmem 0 # HELP node_zfs_arc_l2_asize kstat.zfs.misc.arcstats.l2_asize # TYPE node_zfs_arc_l2_asize untyped node_zfs_arc_l2_asize 0 # HELP node_zfs_arc_l2_cdata_free_on_write kstat.zfs.misc.arcstats.l2_cdata_free_on_write # TYPE node_zfs_arc_l2_cdata_free_on_write untyped node_zfs_arc_l2_cdata_free_on_write 0 # HELP node_zfs_arc_l2_cksum_bad kstat.zfs.misc.arcstats.l2_cksum_bad # TYPE node_zfs_arc_l2_cksum_bad untyped node_zfs_arc_l2_cksum_bad 0 # HELP node_zfs_arc_l2_compress_failures kstat.zfs.misc.arcstats.l2_compress_failures # TYPE node_zfs_arc_l2_compress_failures untyped node_zfs_arc_l2_compress_failures 0 # HELP node_zfs_arc_l2_compress_successes kstat.zfs.misc.arcstats.l2_compress_successes # TYPE node_zfs_arc_l2_compress_successes untyped node_zfs_arc_l2_compress_successes 0 # HELP node_zfs_arc_l2_compress_zeros kstat.zfs.misc.arcstats.l2_compress_zeros # TYPE node_zfs_arc_l2_compress_zeros untyped node_zfs_arc_l2_compress_zeros 0 # HELP node_zfs_arc_l2_evict_l1cached 
kstat.zfs.misc.arcstats.l2_evict_l1cached # TYPE node_zfs_arc_l2_evict_l1cached untyped node_zfs_arc_l2_evict_l1cached 0 # HELP node_zfs_arc_l2_evict_lock_retry kstat.zfs.misc.arcstats.l2_evict_lock_retry # TYPE node_zfs_arc_l2_evict_lock_retry untyped node_zfs_arc_l2_evict_lock_retry 0 # HELP node_zfs_arc_l2_evict_reading kstat.zfs.misc.arcstats.l2_evict_reading # TYPE node_zfs_arc_l2_evict_reading untyped node_zfs_arc_l2_evict_reading 0 # HELP node_zfs_arc_l2_feeds kstat.zfs.misc.arcstats.l2_feeds # TYPE node_zfs_arc_l2_feeds untyped node_zfs_arc_l2_feeds 0 # HELP node_zfs_arc_l2_free_on_write kstat.zfs.misc.arcstats.l2_free_on_write # TYPE node_zfs_arc_l2_free_on_write untyped node_zfs_arc_l2_free_on_write 0 # HELP node_zfs_arc_l2_hdr_size kstat.zfs.misc.arcstats.l2_hdr_size # TYPE node_zfs_arc_l2_hdr_size untyped node_zfs_arc_l2_hdr_size 0 # HELP node_zfs_arc_l2_hits kstat.zfs.misc.arcstats.l2_hits # TYPE node_zfs_arc_l2_hits untyped node_zfs_arc_l2_hits 0 # HELP node_zfs_arc_l2_io_error kstat.zfs.misc.arcstats.l2_io_error # TYPE node_zfs_arc_l2_io_error untyped node_zfs_arc_l2_io_error 0 # HELP node_zfs_arc_l2_misses kstat.zfs.misc.arcstats.l2_misses # TYPE node_zfs_arc_l2_misses untyped node_zfs_arc_l2_misses 0 # HELP node_zfs_arc_l2_read_bytes kstat.zfs.misc.arcstats.l2_read_bytes # TYPE node_zfs_arc_l2_read_bytes untyped node_zfs_arc_l2_read_bytes 0 # HELP node_zfs_arc_l2_rw_clash kstat.zfs.misc.arcstats.l2_rw_clash # TYPE node_zfs_arc_l2_rw_clash untyped node_zfs_arc_l2_rw_clash 0 # HELP node_zfs_arc_l2_size kstat.zfs.misc.arcstats.l2_size # TYPE node_zfs_arc_l2_size untyped node_zfs_arc_l2_size 0 # HELP node_zfs_arc_l2_write_bytes kstat.zfs.misc.arcstats.l2_write_bytes # TYPE node_zfs_arc_l2_write_bytes untyped node_zfs_arc_l2_write_bytes 0 # HELP node_zfs_arc_l2_writes_done kstat.zfs.misc.arcstats.l2_writes_done # TYPE node_zfs_arc_l2_writes_done untyped node_zfs_arc_l2_writes_done 0 # HELP node_zfs_arc_l2_writes_error kstat.zfs.misc.arcstats.l2_writes_error # TYPE node_zfs_arc_l2_writes_error untyped node_zfs_arc_l2_writes_error 0 # HELP node_zfs_arc_l2_writes_lock_retry kstat.zfs.misc.arcstats.l2_writes_lock_retry # TYPE node_zfs_arc_l2_writes_lock_retry untyped node_zfs_arc_l2_writes_lock_retry 0 # HELP node_zfs_arc_l2_writes_sent kstat.zfs.misc.arcstats.l2_writes_sent # TYPE node_zfs_arc_l2_writes_sent untyped node_zfs_arc_l2_writes_sent 0 # HELP node_zfs_arc_memory_direct_count kstat.zfs.misc.arcstats.memory_direct_count # TYPE node_zfs_arc_memory_direct_count untyped node_zfs_arc_memory_direct_count 542 # HELP node_zfs_arc_memory_indirect_count kstat.zfs.misc.arcstats.memory_indirect_count # TYPE node_zfs_arc_memory_indirect_count untyped node_zfs_arc_memory_indirect_count 3006 # HELP node_zfs_arc_memory_throttle_count kstat.zfs.misc.arcstats.memory_throttle_count # TYPE node_zfs_arc_memory_throttle_count untyped node_zfs_arc_memory_throttle_count 0 # HELP node_zfs_arc_metadata_size kstat.zfs.misc.arcstats.metadata_size # TYPE node_zfs_arc_metadata_size untyped node_zfs_arc_metadata_size 1.7529856e+08 # HELP node_zfs_arc_mfu_evictable_data kstat.zfs.misc.arcstats.mfu_evictable_data # TYPE node_zfs_arc_mfu_evictable_data untyped node_zfs_arc_mfu_evictable_data 1.017613824e+09 # HELP node_zfs_arc_mfu_evictable_metadata kstat.zfs.misc.arcstats.mfu_evictable_metadata # TYPE node_zfs_arc_mfu_evictable_metadata untyped node_zfs_arc_mfu_evictable_metadata 9.163776e+06 # HELP node_zfs_arc_mfu_ghost_evictable_data kstat.zfs.misc.arcstats.mfu_ghost_evictable_data # TYPE 
node_zfs_arc_mfu_ghost_evictable_data untyped node_zfs_arc_mfu_ghost_evictable_data 9.6731136e+07 # HELP node_zfs_arc_mfu_ghost_evictable_metadata kstat.zfs.misc.arcstats.mfu_ghost_evictable_metadata # TYPE node_zfs_arc_mfu_ghost_evictable_metadata untyped node_zfs_arc_mfu_ghost_evictable_metadata 8.205312e+06 # HELP node_zfs_arc_mfu_ghost_hits kstat.zfs.misc.arcstats.mfu_ghost_hits # TYPE node_zfs_arc_mfu_ghost_hits untyped node_zfs_arc_mfu_ghost_hits 821 # HELP node_zfs_arc_mfu_ghost_size kstat.zfs.misc.arcstats.mfu_ghost_size # TYPE node_zfs_arc_mfu_ghost_size untyped node_zfs_arc_mfu_ghost_size 1.04936448e+08 # HELP node_zfs_arc_mfu_hits kstat.zfs.misc.arcstats.mfu_hits # TYPE node_zfs_arc_mfu_hits untyped node_zfs_arc_mfu_hits 7.829854e+06 # HELP node_zfs_arc_mfu_size kstat.zfs.misc.arcstats.mfu_size # TYPE node_zfs_arc_mfu_size untyped node_zfs_arc_mfu_size 1.066623488e+09 # HELP node_zfs_arc_misses kstat.zfs.misc.arcstats.misses # TYPE node_zfs_arc_misses untyped node_zfs_arc_misses 604635 # HELP node_zfs_arc_mru_evictable_data kstat.zfs.misc.arcstats.mru_evictable_data # TYPE node_zfs_arc_mru_evictable_data untyped node_zfs_arc_mru_evictable_data 2.78091264e+08 # HELP node_zfs_arc_mru_evictable_metadata kstat.zfs.misc.arcstats.mru_evictable_metadata # TYPE node_zfs_arc_mru_evictable_metadata untyped node_zfs_arc_mru_evictable_metadata 1.8606592e+07 # HELP node_zfs_arc_mru_ghost_evictable_data kstat.zfs.misc.arcstats.mru_ghost_evictable_data # TYPE node_zfs_arc_mru_ghost_evictable_data untyped node_zfs_arc_mru_ghost_evictable_data 8.83765248e+08 # HELP node_zfs_arc_mru_ghost_evictable_metadata kstat.zfs.misc.arcstats.mru_ghost_evictable_metadata # TYPE node_zfs_arc_mru_ghost_evictable_metadata untyped node_zfs_arc_mru_ghost_evictable_metadata 1.1596288e+08 # HELP node_zfs_arc_mru_ghost_hits kstat.zfs.misc.arcstats.mru_ghost_hits # TYPE node_zfs_arc_mru_ghost_hits untyped node_zfs_arc_mru_ghost_hits 21100 # HELP node_zfs_arc_mru_ghost_size kstat.zfs.misc.arcstats.mru_ghost_size # TYPE node_zfs_arc_mru_ghost_size untyped node_zfs_arc_mru_ghost_size 9.99728128e+08 # HELP node_zfs_arc_mru_hits kstat.zfs.misc.arcstats.mru_hits # TYPE node_zfs_arc_mru_hits untyped node_zfs_arc_mru_hits 855535 # HELP node_zfs_arc_mru_size kstat.zfs.misc.arcstats.mru_size # TYPE node_zfs_arc_mru_size untyped node_zfs_arc_mru_size 4.02593792e+08 # HELP node_zfs_arc_mutex_miss kstat.zfs.misc.arcstats.mutex_miss # TYPE node_zfs_arc_mutex_miss untyped node_zfs_arc_mutex_miss 2 # HELP node_zfs_arc_other_size kstat.zfs.misc.arcstats.other_size # TYPE node_zfs_arc_other_size untyped node_zfs_arc_other_size 1.16443992e+08 # HELP node_zfs_arc_p kstat.zfs.misc.arcstats.p # TYPE node_zfs_arc_p untyped node_zfs_arc_p 5.16395305e+08 # HELP node_zfs_arc_prefetch_data_hits kstat.zfs.misc.arcstats.prefetch_data_hits # TYPE node_zfs_arc_prefetch_data_hits untyped node_zfs_arc_prefetch_data_hits 3615 # HELP node_zfs_arc_prefetch_data_misses kstat.zfs.misc.arcstats.prefetch_data_misses # TYPE node_zfs_arc_prefetch_data_misses untyped node_zfs_arc_prefetch_data_misses 17094 # HELP node_zfs_arc_prefetch_metadata_hits kstat.zfs.misc.arcstats.prefetch_metadata_hits # TYPE node_zfs_arc_prefetch_metadata_hits untyped node_zfs_arc_prefetch_metadata_hits 83612 # HELP node_zfs_arc_prefetch_metadata_misses kstat.zfs.misc.arcstats.prefetch_metadata_misses # TYPE node_zfs_arc_prefetch_metadata_misses untyped node_zfs_arc_prefetch_metadata_misses 16071 # HELP node_zfs_arc_size kstat.zfs.misc.arcstats.size # TYPE node_zfs_arc_size untyped 
node_zfs_arc_size 1.603939792e+09 # HELP node_zfs_dbuf_dbuf_cache_count kstat.zfs.misc.dbufstats.dbuf_cache_count # TYPE node_zfs_dbuf_dbuf_cache_count untyped node_zfs_dbuf_dbuf_cache_count 27 # HELP node_zfs_dbuf_dbuf_cache_hiwater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_hiwater_bytes # TYPE node_zfs_dbuf_dbuf_cache_hiwater_bytes untyped node_zfs_dbuf_dbuf_cache_hiwater_bytes 6.9117804e+07 # HELP node_zfs_dbuf_dbuf_cache_level_0 kstat.zfs.misc.dbufstats.dbuf_cache_level_0 # TYPE node_zfs_dbuf_dbuf_cache_level_0 untyped node_zfs_dbuf_dbuf_cache_level_0 27 # HELP node_zfs_dbuf_dbuf_cache_level_0_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_0_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_0_bytes untyped node_zfs_dbuf_dbuf_cache_level_0_bytes 302080 # HELP node_zfs_dbuf_dbuf_cache_level_1 kstat.zfs.misc.dbufstats.dbuf_cache_level_1 # TYPE node_zfs_dbuf_dbuf_cache_level_1 untyped node_zfs_dbuf_dbuf_cache_level_1 0 # HELP node_zfs_dbuf_dbuf_cache_level_10 kstat.zfs.misc.dbufstats.dbuf_cache_level_10 # TYPE node_zfs_dbuf_dbuf_cache_level_10 untyped node_zfs_dbuf_dbuf_cache_level_10 0 # HELP node_zfs_dbuf_dbuf_cache_level_10_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_10_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_10_bytes untyped node_zfs_dbuf_dbuf_cache_level_10_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_11 kstat.zfs.misc.dbufstats.dbuf_cache_level_11 # TYPE node_zfs_dbuf_dbuf_cache_level_11 untyped node_zfs_dbuf_dbuf_cache_level_11 0 # HELP node_zfs_dbuf_dbuf_cache_level_11_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_11_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_11_bytes untyped node_zfs_dbuf_dbuf_cache_level_11_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_1_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_1_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_1_bytes untyped node_zfs_dbuf_dbuf_cache_level_1_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_2 kstat.zfs.misc.dbufstats.dbuf_cache_level_2 # TYPE node_zfs_dbuf_dbuf_cache_level_2 untyped node_zfs_dbuf_dbuf_cache_level_2 0 # HELP node_zfs_dbuf_dbuf_cache_level_2_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_2_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_2_bytes untyped node_zfs_dbuf_dbuf_cache_level_2_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_3 kstat.zfs.misc.dbufstats.dbuf_cache_level_3 # TYPE node_zfs_dbuf_dbuf_cache_level_3 untyped node_zfs_dbuf_dbuf_cache_level_3 0 # HELP node_zfs_dbuf_dbuf_cache_level_3_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_3_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_3_bytes untyped node_zfs_dbuf_dbuf_cache_level_3_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_4 kstat.zfs.misc.dbufstats.dbuf_cache_level_4 # TYPE node_zfs_dbuf_dbuf_cache_level_4 untyped node_zfs_dbuf_dbuf_cache_level_4 0 # HELP node_zfs_dbuf_dbuf_cache_level_4_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_4_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_4_bytes untyped node_zfs_dbuf_dbuf_cache_level_4_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_5 kstat.zfs.misc.dbufstats.dbuf_cache_level_5 # TYPE node_zfs_dbuf_dbuf_cache_level_5 untyped node_zfs_dbuf_dbuf_cache_level_5 0 # HELP node_zfs_dbuf_dbuf_cache_level_5_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_5_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_5_bytes untyped node_zfs_dbuf_dbuf_cache_level_5_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_6 kstat.zfs.misc.dbufstats.dbuf_cache_level_6 # TYPE node_zfs_dbuf_dbuf_cache_level_6 untyped node_zfs_dbuf_dbuf_cache_level_6 0 # HELP node_zfs_dbuf_dbuf_cache_level_6_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_6_bytes # TYPE 
node_zfs_dbuf_dbuf_cache_level_6_bytes untyped node_zfs_dbuf_dbuf_cache_level_6_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_7 kstat.zfs.misc.dbufstats.dbuf_cache_level_7 # TYPE node_zfs_dbuf_dbuf_cache_level_7 untyped node_zfs_dbuf_dbuf_cache_level_7 0 # HELP node_zfs_dbuf_dbuf_cache_level_7_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_7_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_7_bytes untyped node_zfs_dbuf_dbuf_cache_level_7_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_8 kstat.zfs.misc.dbufstats.dbuf_cache_level_8 # TYPE node_zfs_dbuf_dbuf_cache_level_8 untyped node_zfs_dbuf_dbuf_cache_level_8 0 # HELP node_zfs_dbuf_dbuf_cache_level_8_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_8_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_8_bytes untyped node_zfs_dbuf_dbuf_cache_level_8_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_level_9 kstat.zfs.misc.dbufstats.dbuf_cache_level_9 # TYPE node_zfs_dbuf_dbuf_cache_level_9 untyped node_zfs_dbuf_dbuf_cache_level_9 0 # HELP node_zfs_dbuf_dbuf_cache_level_9_bytes kstat.zfs.misc.dbufstats.dbuf_cache_level_9_bytes # TYPE node_zfs_dbuf_dbuf_cache_level_9_bytes untyped node_zfs_dbuf_dbuf_cache_level_9_bytes 0 # HELP node_zfs_dbuf_dbuf_cache_lowater_bytes kstat.zfs.misc.dbufstats.dbuf_cache_lowater_bytes # TYPE node_zfs_dbuf_dbuf_cache_lowater_bytes untyped node_zfs_dbuf_dbuf_cache_lowater_bytes 5.6550932e+07 # HELP node_zfs_dbuf_dbuf_cache_max_bytes kstat.zfs.misc.dbufstats.dbuf_cache_max_bytes # TYPE node_zfs_dbuf_dbuf_cache_max_bytes untyped node_zfs_dbuf_dbuf_cache_max_bytes 6.2834368e+07 # HELP node_zfs_dbuf_dbuf_cache_size kstat.zfs.misc.dbufstats.dbuf_cache_size # TYPE node_zfs_dbuf_dbuf_cache_size untyped node_zfs_dbuf_dbuf_cache_size 302080 # HELP node_zfs_dbuf_dbuf_cache_size_max kstat.zfs.misc.dbufstats.dbuf_cache_size_max # TYPE node_zfs_dbuf_dbuf_cache_size_max untyped node_zfs_dbuf_dbuf_cache_size_max 394240 # HELP node_zfs_dbuf_dbuf_cache_total_evicts kstat.zfs.misc.dbufstats.dbuf_cache_total_evicts # TYPE node_zfs_dbuf_dbuf_cache_total_evicts untyped node_zfs_dbuf_dbuf_cache_total_evicts 0 # HELP node_zfs_dbuf_hash_chain_max kstat.zfs.misc.dbufstats.hash_chain_max # TYPE node_zfs_dbuf_hash_chain_max untyped node_zfs_dbuf_hash_chain_max 0 # HELP node_zfs_dbuf_hash_chains kstat.zfs.misc.dbufstats.hash_chains # TYPE node_zfs_dbuf_hash_chains untyped node_zfs_dbuf_hash_chains 0 # HELP node_zfs_dbuf_hash_collisions kstat.zfs.misc.dbufstats.hash_collisions # TYPE node_zfs_dbuf_hash_collisions untyped node_zfs_dbuf_hash_collisions 0 # HELP node_zfs_dbuf_hash_dbuf_level_0 kstat.zfs.misc.dbufstats.hash_dbuf_level_0 # TYPE node_zfs_dbuf_hash_dbuf_level_0 untyped node_zfs_dbuf_hash_dbuf_level_0 37 # HELP node_zfs_dbuf_hash_dbuf_level_0_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_0_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_0_bytes untyped node_zfs_dbuf_hash_dbuf_level_0_bytes 465920 # HELP node_zfs_dbuf_hash_dbuf_level_1 kstat.zfs.misc.dbufstats.hash_dbuf_level_1 # TYPE node_zfs_dbuf_hash_dbuf_level_1 untyped node_zfs_dbuf_hash_dbuf_level_1 10 # HELP node_zfs_dbuf_hash_dbuf_level_10 kstat.zfs.misc.dbufstats.hash_dbuf_level_10 # TYPE node_zfs_dbuf_hash_dbuf_level_10 untyped node_zfs_dbuf_hash_dbuf_level_10 0 # HELP node_zfs_dbuf_hash_dbuf_level_10_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_10_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_10_bytes untyped node_zfs_dbuf_hash_dbuf_level_10_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_11 kstat.zfs.misc.dbufstats.hash_dbuf_level_11 # TYPE node_zfs_dbuf_hash_dbuf_level_11 untyped 
node_zfs_dbuf_hash_dbuf_level_11 0 # HELP node_zfs_dbuf_hash_dbuf_level_11_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_11_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_11_bytes untyped node_zfs_dbuf_hash_dbuf_level_11_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_1_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_1_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_1_bytes untyped node_zfs_dbuf_hash_dbuf_level_1_bytes 1.31072e+06 # HELP node_zfs_dbuf_hash_dbuf_level_2 kstat.zfs.misc.dbufstats.hash_dbuf_level_2 # TYPE node_zfs_dbuf_hash_dbuf_level_2 untyped node_zfs_dbuf_hash_dbuf_level_2 2 # HELP node_zfs_dbuf_hash_dbuf_level_2_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_2_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_2_bytes untyped node_zfs_dbuf_hash_dbuf_level_2_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_3 kstat.zfs.misc.dbufstats.hash_dbuf_level_3 # TYPE node_zfs_dbuf_hash_dbuf_level_3 untyped node_zfs_dbuf_hash_dbuf_level_3 2 # HELP node_zfs_dbuf_hash_dbuf_level_3_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_3_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_3_bytes untyped node_zfs_dbuf_hash_dbuf_level_3_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_4 kstat.zfs.misc.dbufstats.hash_dbuf_level_4 # TYPE node_zfs_dbuf_hash_dbuf_level_4 untyped node_zfs_dbuf_hash_dbuf_level_4 2 # HELP node_zfs_dbuf_hash_dbuf_level_4_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_4_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_4_bytes untyped node_zfs_dbuf_hash_dbuf_level_4_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_5 kstat.zfs.misc.dbufstats.hash_dbuf_level_5 # TYPE node_zfs_dbuf_hash_dbuf_level_5 untyped node_zfs_dbuf_hash_dbuf_level_5 2 # HELP node_zfs_dbuf_hash_dbuf_level_5_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_5_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_5_bytes untyped node_zfs_dbuf_hash_dbuf_level_5_bytes 262144 # HELP node_zfs_dbuf_hash_dbuf_level_6 kstat.zfs.misc.dbufstats.hash_dbuf_level_6 # TYPE node_zfs_dbuf_hash_dbuf_level_6 untyped node_zfs_dbuf_hash_dbuf_level_6 0 # HELP node_zfs_dbuf_hash_dbuf_level_6_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_6_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_6_bytes untyped node_zfs_dbuf_hash_dbuf_level_6_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_7 kstat.zfs.misc.dbufstats.hash_dbuf_level_7 # TYPE node_zfs_dbuf_hash_dbuf_level_7 untyped node_zfs_dbuf_hash_dbuf_level_7 0 # HELP node_zfs_dbuf_hash_dbuf_level_7_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_7_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_7_bytes untyped node_zfs_dbuf_hash_dbuf_level_7_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_8 kstat.zfs.misc.dbufstats.hash_dbuf_level_8 # TYPE node_zfs_dbuf_hash_dbuf_level_8 untyped node_zfs_dbuf_hash_dbuf_level_8 0 # HELP node_zfs_dbuf_hash_dbuf_level_8_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_8_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_8_bytes untyped node_zfs_dbuf_hash_dbuf_level_8_bytes 0 # HELP node_zfs_dbuf_hash_dbuf_level_9 kstat.zfs.misc.dbufstats.hash_dbuf_level_9 # TYPE node_zfs_dbuf_hash_dbuf_level_9 untyped node_zfs_dbuf_hash_dbuf_level_9 0 # HELP node_zfs_dbuf_hash_dbuf_level_9_bytes kstat.zfs.misc.dbufstats.hash_dbuf_level_9_bytes # TYPE node_zfs_dbuf_hash_dbuf_level_9_bytes untyped node_zfs_dbuf_hash_dbuf_level_9_bytes 0 # HELP node_zfs_dbuf_hash_elements kstat.zfs.misc.dbufstats.hash_elements # TYPE node_zfs_dbuf_hash_elements untyped node_zfs_dbuf_hash_elements 55 # HELP node_zfs_dbuf_hash_elements_max kstat.zfs.misc.dbufstats.hash_elements_max # TYPE node_zfs_dbuf_hash_elements_max untyped node_zfs_dbuf_hash_elements_max 55 
# HELP node_zfs_dbuf_hash_hits kstat.zfs.misc.dbufstats.hash_hits # TYPE node_zfs_dbuf_hash_hits untyped node_zfs_dbuf_hash_hits 108807 # HELP node_zfs_dbuf_hash_insert_race kstat.zfs.misc.dbufstats.hash_insert_race # TYPE node_zfs_dbuf_hash_insert_race untyped node_zfs_dbuf_hash_insert_race 0 # HELP node_zfs_dbuf_hash_misses kstat.zfs.misc.dbufstats.hash_misses # TYPE node_zfs_dbuf_hash_misses untyped node_zfs_dbuf_hash_misses 1851 # HELP node_zfs_dmu_tx_dmu_tx_assigned kstat.zfs.misc.dmu_tx.dmu_tx_assigned # TYPE node_zfs_dmu_tx_dmu_tx_assigned untyped node_zfs_dmu_tx_dmu_tx_assigned 3.532844e+06 # HELP node_zfs_dmu_tx_dmu_tx_delay kstat.zfs.misc.dmu_tx.dmu_tx_delay # TYPE node_zfs_dmu_tx_dmu_tx_delay untyped node_zfs_dmu_tx_dmu_tx_delay 0 # HELP node_zfs_dmu_tx_dmu_tx_dirty_delay kstat.zfs.misc.dmu_tx.dmu_tx_dirty_delay # TYPE node_zfs_dmu_tx_dmu_tx_dirty_delay untyped node_zfs_dmu_tx_dmu_tx_dirty_delay 0 # HELP node_zfs_dmu_tx_dmu_tx_dirty_over_max kstat.zfs.misc.dmu_tx.dmu_tx_dirty_over_max # TYPE node_zfs_dmu_tx_dmu_tx_dirty_over_max untyped node_zfs_dmu_tx_dmu_tx_dirty_over_max 0 # HELP node_zfs_dmu_tx_dmu_tx_dirty_throttle kstat.zfs.misc.dmu_tx.dmu_tx_dirty_throttle # TYPE node_zfs_dmu_tx_dmu_tx_dirty_throttle untyped node_zfs_dmu_tx_dmu_tx_dirty_throttle 0 # HELP node_zfs_dmu_tx_dmu_tx_error kstat.zfs.misc.dmu_tx.dmu_tx_error # TYPE node_zfs_dmu_tx_dmu_tx_error untyped node_zfs_dmu_tx_dmu_tx_error 0 # HELP node_zfs_dmu_tx_dmu_tx_group kstat.zfs.misc.dmu_tx.dmu_tx_group # TYPE node_zfs_dmu_tx_dmu_tx_group untyped node_zfs_dmu_tx_dmu_tx_group 0 # HELP node_zfs_dmu_tx_dmu_tx_memory_reclaim kstat.zfs.misc.dmu_tx.dmu_tx_memory_reclaim # TYPE node_zfs_dmu_tx_dmu_tx_memory_reclaim untyped node_zfs_dmu_tx_dmu_tx_memory_reclaim 0 # HELP node_zfs_dmu_tx_dmu_tx_memory_reserve kstat.zfs.misc.dmu_tx.dmu_tx_memory_reserve # TYPE node_zfs_dmu_tx_dmu_tx_memory_reserve untyped node_zfs_dmu_tx_dmu_tx_memory_reserve 0 # HELP node_zfs_dmu_tx_dmu_tx_quota kstat.zfs.misc.dmu_tx.dmu_tx_quota # TYPE node_zfs_dmu_tx_dmu_tx_quota untyped node_zfs_dmu_tx_dmu_tx_quota 0 # HELP node_zfs_dmu_tx_dmu_tx_suspended kstat.zfs.misc.dmu_tx.dmu_tx_suspended # TYPE node_zfs_dmu_tx_dmu_tx_suspended untyped node_zfs_dmu_tx_dmu_tx_suspended 0 # HELP node_zfs_dnode_dnode_alloc_next_block kstat.zfs.misc.dnodestats.dnode_alloc_next_block # TYPE node_zfs_dnode_dnode_alloc_next_block untyped node_zfs_dnode_dnode_alloc_next_block 0 # HELP node_zfs_dnode_dnode_alloc_next_chunk kstat.zfs.misc.dnodestats.dnode_alloc_next_chunk # TYPE node_zfs_dnode_dnode_alloc_next_chunk untyped node_zfs_dnode_dnode_alloc_next_chunk 0 # HELP node_zfs_dnode_dnode_alloc_race kstat.zfs.misc.dnodestats.dnode_alloc_race # TYPE node_zfs_dnode_dnode_alloc_race untyped node_zfs_dnode_dnode_alloc_race 0 # HELP node_zfs_dnode_dnode_allocate kstat.zfs.misc.dnodestats.dnode_allocate # TYPE node_zfs_dnode_dnode_allocate untyped node_zfs_dnode_dnode_allocate 0 # HELP node_zfs_dnode_dnode_buf_evict kstat.zfs.misc.dnodestats.dnode_buf_evict # TYPE node_zfs_dnode_dnode_buf_evict untyped node_zfs_dnode_dnode_buf_evict 17 # HELP node_zfs_dnode_dnode_hold_alloc_hits kstat.zfs.misc.dnodestats.dnode_hold_alloc_hits # TYPE node_zfs_dnode_dnode_hold_alloc_hits untyped node_zfs_dnode_dnode_hold_alloc_hits 37617 # HELP node_zfs_dnode_dnode_hold_alloc_interior kstat.zfs.misc.dnodestats.dnode_hold_alloc_interior # TYPE node_zfs_dnode_dnode_hold_alloc_interior untyped node_zfs_dnode_dnode_hold_alloc_interior 0 # HELP node_zfs_dnode_dnode_hold_alloc_lock_misses 
kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_misses # TYPE node_zfs_dnode_dnode_hold_alloc_lock_misses untyped node_zfs_dnode_dnode_hold_alloc_lock_misses 0 # HELP node_zfs_dnode_dnode_hold_alloc_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_alloc_lock_retry # TYPE node_zfs_dnode_dnode_hold_alloc_lock_retry untyped node_zfs_dnode_dnode_hold_alloc_lock_retry 0 # HELP node_zfs_dnode_dnode_hold_alloc_misses kstat.zfs.misc.dnodestats.dnode_hold_alloc_misses # TYPE node_zfs_dnode_dnode_hold_alloc_misses untyped node_zfs_dnode_dnode_hold_alloc_misses 0 # HELP node_zfs_dnode_dnode_hold_alloc_type_none kstat.zfs.misc.dnodestats.dnode_hold_alloc_type_none # TYPE node_zfs_dnode_dnode_hold_alloc_type_none untyped node_zfs_dnode_dnode_hold_alloc_type_none 0 # HELP node_zfs_dnode_dnode_hold_dbuf_hold kstat.zfs.misc.dnodestats.dnode_hold_dbuf_hold # TYPE node_zfs_dnode_dnode_hold_dbuf_hold untyped node_zfs_dnode_dnode_hold_dbuf_hold 0 # HELP node_zfs_dnode_dnode_hold_dbuf_read kstat.zfs.misc.dnodestats.dnode_hold_dbuf_read # TYPE node_zfs_dnode_dnode_hold_dbuf_read untyped node_zfs_dnode_dnode_hold_dbuf_read 0 # HELP node_zfs_dnode_dnode_hold_free_hits kstat.zfs.misc.dnodestats.dnode_hold_free_hits # TYPE node_zfs_dnode_dnode_hold_free_hits untyped node_zfs_dnode_dnode_hold_free_hits 0 # HELP node_zfs_dnode_dnode_hold_free_lock_misses kstat.zfs.misc.dnodestats.dnode_hold_free_lock_misses # TYPE node_zfs_dnode_dnode_hold_free_lock_misses untyped node_zfs_dnode_dnode_hold_free_lock_misses 0 # HELP node_zfs_dnode_dnode_hold_free_lock_retry kstat.zfs.misc.dnodestats.dnode_hold_free_lock_retry # TYPE node_zfs_dnode_dnode_hold_free_lock_retry untyped node_zfs_dnode_dnode_hold_free_lock_retry 0 # HELP node_zfs_dnode_dnode_hold_free_misses kstat.zfs.misc.dnodestats.dnode_hold_free_misses # TYPE node_zfs_dnode_dnode_hold_free_misses untyped node_zfs_dnode_dnode_hold_free_misses 0 # HELP node_zfs_dnode_dnode_hold_free_overflow kstat.zfs.misc.dnodestats.dnode_hold_free_overflow # TYPE node_zfs_dnode_dnode_hold_free_overflow untyped node_zfs_dnode_dnode_hold_free_overflow 0 # HELP node_zfs_dnode_dnode_hold_free_refcount kstat.zfs.misc.dnodestats.dnode_hold_free_refcount # TYPE node_zfs_dnode_dnode_hold_free_refcount untyped node_zfs_dnode_dnode_hold_free_refcount 0 # HELP node_zfs_dnode_dnode_hold_free_txg kstat.zfs.misc.dnodestats.dnode_hold_free_txg # TYPE node_zfs_dnode_dnode_hold_free_txg untyped node_zfs_dnode_dnode_hold_free_txg 0 # HELP node_zfs_dnode_dnode_move_active kstat.zfs.misc.dnodestats.dnode_move_active # TYPE node_zfs_dnode_dnode_move_active untyped node_zfs_dnode_dnode_move_active 0 # HELP node_zfs_dnode_dnode_move_handle kstat.zfs.misc.dnodestats.dnode_move_handle # TYPE node_zfs_dnode_dnode_move_handle untyped node_zfs_dnode_dnode_move_handle 0 # HELP node_zfs_dnode_dnode_move_invalid kstat.zfs.misc.dnodestats.dnode_move_invalid # TYPE node_zfs_dnode_dnode_move_invalid untyped node_zfs_dnode_dnode_move_invalid 0 # HELP node_zfs_dnode_dnode_move_recheck1 kstat.zfs.misc.dnodestats.dnode_move_recheck1 # TYPE node_zfs_dnode_dnode_move_recheck1 untyped node_zfs_dnode_dnode_move_recheck1 0 # HELP node_zfs_dnode_dnode_move_recheck2 kstat.zfs.misc.dnodestats.dnode_move_recheck2 # TYPE node_zfs_dnode_dnode_move_recheck2 untyped node_zfs_dnode_dnode_move_recheck2 0 # HELP node_zfs_dnode_dnode_move_rwlock kstat.zfs.misc.dnodestats.dnode_move_rwlock # TYPE node_zfs_dnode_dnode_move_rwlock untyped node_zfs_dnode_dnode_move_rwlock 0 # HELP node_zfs_dnode_dnode_move_special 
kstat.zfs.misc.dnodestats.dnode_move_special # TYPE node_zfs_dnode_dnode_move_special untyped node_zfs_dnode_dnode_move_special 0 # HELP node_zfs_dnode_dnode_reallocate kstat.zfs.misc.dnodestats.dnode_reallocate # TYPE node_zfs_dnode_dnode_reallocate untyped node_zfs_dnode_dnode_reallocate 0 # HELP node_zfs_fm_erpt_dropped kstat.zfs.misc.fm.erpt-dropped # TYPE node_zfs_fm_erpt_dropped untyped node_zfs_fm_erpt_dropped 18 # HELP node_zfs_fm_erpt_set_failed kstat.zfs.misc.fm.erpt-set-failed # TYPE node_zfs_fm_erpt_set_failed untyped node_zfs_fm_erpt_set_failed 0 # HELP node_zfs_fm_fmri_set_failed kstat.zfs.misc.fm.fmri-set-failed # TYPE node_zfs_fm_fmri_set_failed untyped node_zfs_fm_fmri_set_failed 0 # HELP node_zfs_fm_payload_set_failed kstat.zfs.misc.fm.payload-set-failed # TYPE node_zfs_fm_payload_set_failed untyped node_zfs_fm_payload_set_failed 0 # HELP node_zfs_vdev_cache_delegations kstat.zfs.misc.vdev_cache_stats.delegations # TYPE node_zfs_vdev_cache_delegations untyped node_zfs_vdev_cache_delegations 40 # HELP node_zfs_vdev_cache_hits kstat.zfs.misc.vdev_cache_stats.hits # TYPE node_zfs_vdev_cache_hits untyped node_zfs_vdev_cache_hits 0 # HELP node_zfs_vdev_cache_misses kstat.zfs.misc.vdev_cache_stats.misses # TYPE node_zfs_vdev_cache_misses untyped node_zfs_vdev_cache_misses 0 # HELP node_zfs_vdev_mirror_non_rotating_linear kstat.zfs.misc.vdev_mirror_stats.non_rotating_linear # TYPE node_zfs_vdev_mirror_non_rotating_linear untyped node_zfs_vdev_mirror_non_rotating_linear 0 # HELP node_zfs_vdev_mirror_non_rotating_seek kstat.zfs.misc.vdev_mirror_stats.non_rotating_seek # TYPE node_zfs_vdev_mirror_non_rotating_seek untyped node_zfs_vdev_mirror_non_rotating_seek 0 # HELP node_zfs_vdev_mirror_preferred_found kstat.zfs.misc.vdev_mirror_stats.preferred_found # TYPE node_zfs_vdev_mirror_preferred_found untyped node_zfs_vdev_mirror_preferred_found 0 # HELP node_zfs_vdev_mirror_preferred_not_found kstat.zfs.misc.vdev_mirror_stats.preferred_not_found # TYPE node_zfs_vdev_mirror_preferred_not_found untyped node_zfs_vdev_mirror_preferred_not_found 94 # HELP node_zfs_vdev_mirror_rotating_linear kstat.zfs.misc.vdev_mirror_stats.rotating_linear # TYPE node_zfs_vdev_mirror_rotating_linear untyped node_zfs_vdev_mirror_rotating_linear 0 # HELP node_zfs_vdev_mirror_rotating_offset kstat.zfs.misc.vdev_mirror_stats.rotating_offset # TYPE node_zfs_vdev_mirror_rotating_offset untyped node_zfs_vdev_mirror_rotating_offset 0 # HELP node_zfs_vdev_mirror_rotating_seek kstat.zfs.misc.vdev_mirror_stats.rotating_seek # TYPE node_zfs_vdev_mirror_rotating_seek untyped node_zfs_vdev_mirror_rotating_seek 0 # HELP node_zfs_xuio_onloan_read_buf kstat.zfs.misc.xuio_stats.onloan_read_buf # TYPE node_zfs_xuio_onloan_read_buf untyped node_zfs_xuio_onloan_read_buf 32 # HELP node_zfs_xuio_onloan_write_buf kstat.zfs.misc.xuio_stats.onloan_write_buf # TYPE node_zfs_xuio_onloan_write_buf untyped node_zfs_xuio_onloan_write_buf 0 # HELP node_zfs_xuio_read_buf_copied kstat.zfs.misc.xuio_stats.read_buf_copied # TYPE node_zfs_xuio_read_buf_copied untyped node_zfs_xuio_read_buf_copied 0 # HELP node_zfs_xuio_read_buf_nocopy kstat.zfs.misc.xuio_stats.read_buf_nocopy # TYPE node_zfs_xuio_read_buf_nocopy untyped node_zfs_xuio_read_buf_nocopy 0 # HELP node_zfs_xuio_write_buf_copied kstat.zfs.misc.xuio_stats.write_buf_copied # TYPE node_zfs_xuio_write_buf_copied untyped node_zfs_xuio_write_buf_copied 0 # HELP node_zfs_xuio_write_buf_nocopy kstat.zfs.misc.xuio_stats.write_buf_nocopy # TYPE node_zfs_xuio_write_buf_nocopy untyped 
node_zfs_xuio_write_buf_nocopy 0 # HELP node_zfs_zfetch_bogus_streams kstat.zfs.misc.zfetchstats.bogus_streams # TYPE node_zfs_zfetch_bogus_streams untyped node_zfs_zfetch_bogus_streams 0 # HELP node_zfs_zfetch_colinear_hits kstat.zfs.misc.zfetchstats.colinear_hits # TYPE node_zfs_zfetch_colinear_hits untyped node_zfs_zfetch_colinear_hits 0 # HELP node_zfs_zfetch_colinear_misses kstat.zfs.misc.zfetchstats.colinear_misses # TYPE node_zfs_zfetch_colinear_misses untyped node_zfs_zfetch_colinear_misses 11 # HELP node_zfs_zfetch_hits kstat.zfs.misc.zfetchstats.hits # TYPE node_zfs_zfetch_hits untyped node_zfs_zfetch_hits 7.067992e+06 # HELP node_zfs_zfetch_misses kstat.zfs.misc.zfetchstats.misses # TYPE node_zfs_zfetch_misses untyped node_zfs_zfetch_misses 11 # HELP node_zfs_zfetch_reclaim_failures kstat.zfs.misc.zfetchstats.reclaim_failures # TYPE node_zfs_zfetch_reclaim_failures untyped node_zfs_zfetch_reclaim_failures 11 # HELP node_zfs_zfetch_reclaim_successes kstat.zfs.misc.zfetchstats.reclaim_successes # TYPE node_zfs_zfetch_reclaim_successes untyped node_zfs_zfetch_reclaim_successes 0 # HELP node_zfs_zfetch_streams_noresets kstat.zfs.misc.zfetchstats.streams_noresets # TYPE node_zfs_zfetch_streams_noresets untyped node_zfs_zfetch_streams_noresets 2 # HELP node_zfs_zfetch_streams_resets kstat.zfs.misc.zfetchstats.streams_resets # TYPE node_zfs_zfetch_streams_resets untyped node_zfs_zfetch_streams_resets 0 # HELP node_zfs_zfetch_stride_hits kstat.zfs.misc.zfetchstats.stride_hits # TYPE node_zfs_zfetch_stride_hits untyped node_zfs_zfetch_stride_hits 7.06799e+06 # HELP node_zfs_zfetch_stride_misses kstat.zfs.misc.zfetchstats.stride_misses # TYPE node_zfs_zfetch_stride_misses untyped node_zfs_zfetch_stride_misses 0 # HELP node_zfs_zil_zil_commit_count kstat.zfs.misc.zil.zil_commit_count # TYPE node_zfs_zil_zil_commit_count untyped node_zfs_zil_zil_commit_count 10 # HELP node_zfs_zil_zil_commit_writer_count kstat.zfs.misc.zil.zil_commit_writer_count # TYPE node_zfs_zil_zil_commit_writer_count untyped node_zfs_zil_zil_commit_writer_count 0 # HELP node_zfs_zil_zil_itx_copied_bytes kstat.zfs.misc.zil.zil_itx_copied_bytes # TYPE node_zfs_zil_zil_itx_copied_bytes untyped node_zfs_zil_zil_itx_copied_bytes 0 # HELP node_zfs_zil_zil_itx_copied_count kstat.zfs.misc.zil.zil_itx_copied_count # TYPE node_zfs_zil_zil_itx_copied_count untyped node_zfs_zil_zil_itx_copied_count 0 # HELP node_zfs_zil_zil_itx_count kstat.zfs.misc.zil.zil_itx_count # TYPE node_zfs_zil_zil_itx_count untyped node_zfs_zil_zil_itx_count 0 # HELP node_zfs_zil_zil_itx_indirect_bytes kstat.zfs.misc.zil.zil_itx_indirect_bytes # TYPE node_zfs_zil_zil_itx_indirect_bytes untyped node_zfs_zil_zil_itx_indirect_bytes 0 # HELP node_zfs_zil_zil_itx_indirect_count kstat.zfs.misc.zil.zil_itx_indirect_count # TYPE node_zfs_zil_zil_itx_indirect_count untyped node_zfs_zil_zil_itx_indirect_count 0 # HELP node_zfs_zil_zil_itx_metaslab_normal_bytes kstat.zfs.misc.zil.zil_itx_metaslab_normal_bytes # TYPE node_zfs_zil_zil_itx_metaslab_normal_bytes untyped node_zfs_zil_zil_itx_metaslab_normal_bytes 0 # HELP node_zfs_zil_zil_itx_metaslab_normal_count kstat.zfs.misc.zil.zil_itx_metaslab_normal_count # TYPE node_zfs_zil_zil_itx_metaslab_normal_count untyped node_zfs_zil_zil_itx_metaslab_normal_count 0 # HELP node_zfs_zil_zil_itx_metaslab_slog_bytes kstat.zfs.misc.zil.zil_itx_metaslab_slog_bytes # TYPE node_zfs_zil_zil_itx_metaslab_slog_bytes untyped node_zfs_zil_zil_itx_metaslab_slog_bytes 0 # HELP node_zfs_zil_zil_itx_metaslab_slog_count 
kstat.zfs.misc.zil.zil_itx_metaslab_slog_count # TYPE node_zfs_zil_zil_itx_metaslab_slog_count untyped node_zfs_zil_zil_itx_metaslab_slog_count 0 # HELP node_zfs_zil_zil_itx_needcopy_bytes kstat.zfs.misc.zil.zil_itx_needcopy_bytes # TYPE node_zfs_zil_zil_itx_needcopy_bytes untyped node_zfs_zil_zil_itx_needcopy_bytes 1.8446744073709537e+19 # HELP node_zfs_zil_zil_itx_needcopy_count kstat.zfs.misc.zil.zil_itx_needcopy_count # TYPE node_zfs_zil_zil_itx_needcopy_count untyped node_zfs_zil_zil_itx_needcopy_count 0 # HELP node_zfs_zpool_dataset_nread kstat.zfs.misc.objset.nread # TYPE node_zfs_zpool_dataset_nread untyped node_zfs_zpool_dataset_nread{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nread{dataset="pool1/dataset1",zpool="pool1"} 28 node_zfs_zpool_dataset_nread{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nread{dataset="poolz1/dataset1",zpool="poolz1"} 28 # HELP node_zfs_zpool_dataset_nunlinked kstat.zfs.misc.objset.nunlinked # TYPE node_zfs_zpool_dataset_nunlinked untyped node_zfs_zpool_dataset_nunlinked{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="pool1/dataset1",zpool="pool1"} 3 node_zfs_zpool_dataset_nunlinked{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinked{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP node_zfs_zpool_dataset_nunlinks kstat.zfs.misc.objset.nunlinks # TYPE node_zfs_zpool_dataset_nunlinks untyped node_zfs_zpool_dataset_nunlinks{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="pool1/dataset1",zpool="pool1"} 3 node_zfs_zpool_dataset_nunlinks{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nunlinks{dataset="poolz1/dataset1",zpool="poolz1"} 14 # HELP node_zfs_zpool_dataset_nwritten kstat.zfs.misc.objset.nwritten # TYPE node_zfs_zpool_dataset_nwritten untyped node_zfs_zpool_dataset_nwritten{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_nwritten{dataset="pool1/dataset1",zpool="pool1"} 12302 node_zfs_zpool_dataset_nwritten{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_nwritten{dataset="poolz1/dataset1",zpool="poolz1"} 32806 # HELP node_zfs_zpool_dataset_reads kstat.zfs.misc.objset.reads # TYPE node_zfs_zpool_dataset_reads untyped node_zfs_zpool_dataset_reads{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_reads{dataset="pool1/dataset1",zpool="pool1"} 2 node_zfs_zpool_dataset_reads{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_reads{dataset="poolz1/dataset1",zpool="poolz1"} 2 # HELP node_zfs_zpool_dataset_writes kstat.zfs.misc.objset.writes # TYPE node_zfs_zpool_dataset_writes untyped node_zfs_zpool_dataset_writes{dataset="pool1",zpool="pool1"} 0 node_zfs_zpool_dataset_writes{dataset="pool1/dataset1",zpool="pool1"} 4 node_zfs_zpool_dataset_writes{dataset="poolz1",zpool="poolz1"} 0 node_zfs_zpool_dataset_writes{dataset="poolz1/dataset1",zpool="poolz1"} 10 # HELP node_zfs_zpool_nread kstat.zfs.misc.io.nread # TYPE node_zfs_zpool_nread untyped node_zfs_zpool_nread{zpool="pool1"} 1.88416e+06 node_zfs_zpool_nread{zpool="poolz1"} 2.82624e+06 # HELP node_zfs_zpool_nwritten kstat.zfs.misc.io.nwritten # TYPE node_zfs_zpool_nwritten untyped node_zfs_zpool_nwritten{zpool="pool1"} 3.206144e+06 node_zfs_zpool_nwritten{zpool="poolz1"} 2.680501248e+09 # HELP node_zfs_zpool_rcnt kstat.zfs.misc.io.rcnt # TYPE node_zfs_zpool_rcnt untyped node_zfs_zpool_rcnt{zpool="pool1"} 0 node_zfs_zpool_rcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_reads kstat.zfs.misc.io.reads # TYPE node_zfs_zpool_reads untyped node_zfs_zpool_reads{zpool="pool1"} 22 
node_zfs_zpool_reads{zpool="poolz1"} 33 # HELP node_zfs_zpool_rlentime kstat.zfs.misc.io.rlentime # TYPE node_zfs_zpool_rlentime untyped node_zfs_zpool_rlentime{zpool="pool1"} 1.04112268e+08 node_zfs_zpool_rlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_rtime kstat.zfs.misc.io.rtime # TYPE node_zfs_zpool_rtime untyped node_zfs_zpool_rtime{zpool="pool1"} 2.4168078e+07 node_zfs_zpool_rtime{zpool="poolz1"} 9.82909164e+09 # HELP node_zfs_zpool_rupdate kstat.zfs.misc.io.rupdate # TYPE node_zfs_zpool_rupdate untyped node_zfs_zpool_rupdate{zpool="pool1"} 7.921048984922e+13 node_zfs_zpool_rupdate{zpool="poolz1"} 1.10734831944501e+14 # HELP node_zfs_zpool_state kstat.zfs.misc.state # TYPE node_zfs_zpool_state gauge node_zfs_zpool_state{state="degraded",zpool="pool1"} 0 node_zfs_zpool_state{state="degraded",zpool="pool2"} 0 node_zfs_zpool_state{state="degraded",zpool="poolz1"} 1 node_zfs_zpool_state{state="faulted",zpool="pool1"} 0 node_zfs_zpool_state{state="faulted",zpool="pool2"} 0 node_zfs_zpool_state{state="faulted",zpool="poolz1"} 0 node_zfs_zpool_state{state="offline",zpool="pool1"} 0 node_zfs_zpool_state{state="offline",zpool="pool2"} 0 node_zfs_zpool_state{state="offline",zpool="poolz1"} 0 node_zfs_zpool_state{state="online",zpool="pool1"} 1 node_zfs_zpool_state{state="online",zpool="pool2"} 0 node_zfs_zpool_state{state="online",zpool="poolz1"} 0 node_zfs_zpool_state{state="removed",zpool="pool1"} 0 node_zfs_zpool_state{state="removed",zpool="pool2"} 0 node_zfs_zpool_state{state="removed",zpool="poolz1"} 0 node_zfs_zpool_state{state="suspended",zpool="pool1"} 0 node_zfs_zpool_state{state="suspended",zpool="pool2"} 1 node_zfs_zpool_state{state="suspended",zpool="poolz1"} 0 node_zfs_zpool_state{state="unavail",zpool="pool1"} 0 node_zfs_zpool_state{state="unavail",zpool="pool2"} 0 node_zfs_zpool_state{state="unavail",zpool="poolz1"} 0 # HELP node_zfs_zpool_wcnt kstat.zfs.misc.io.wcnt # TYPE node_zfs_zpool_wcnt untyped node_zfs_zpool_wcnt{zpool="pool1"} 0 node_zfs_zpool_wcnt{zpool="poolz1"} 0 # HELP node_zfs_zpool_wlentime kstat.zfs.misc.io.wlentime # TYPE node_zfs_zpool_wlentime untyped node_zfs_zpool_wlentime{zpool="pool1"} 1.04112268e+08 node_zfs_zpool_wlentime{zpool="poolz1"} 6.472105124093e+12 # HELP node_zfs_zpool_writes kstat.zfs.misc.io.writes # TYPE node_zfs_zpool_writes untyped node_zfs_zpool_writes{zpool="pool1"} 132 node_zfs_zpool_writes{zpool="poolz1"} 25294 # HELP node_zfs_zpool_wtime kstat.zfs.misc.io.wtime # TYPE node_zfs_zpool_wtime untyped node_zfs_zpool_wtime{zpool="pool1"} 7.155162e+06 node_zfs_zpool_wtime{zpool="poolz1"} 9.673715628e+09 # HELP node_zfs_zpool_wupdate kstat.zfs.misc.io.wupdate # TYPE node_zfs_zpool_wupdate untyped node_zfs_zpool_wupdate{zpool="pool1"} 7.9210489694949e+13 node_zfs_zpool_wupdate{zpool="poolz1"} 1.10734831833266e+14 # HELP node_zoneinfo_high_pages Zone watermark pages_high # TYPE node_zoneinfo_high_pages gauge node_zoneinfo_high_pages{node="0",zone="DMA"} 14 node_zoneinfo_high_pages{node="0",zone="DMA32"} 2122 node_zoneinfo_high_pages{node="0",zone="Device"} 0 node_zoneinfo_high_pages{node="0",zone="Movable"} 0 node_zoneinfo_high_pages{node="0",zone="Normal"} 31113 # HELP node_zoneinfo_low_pages Zone watermark pages_low # TYPE node_zoneinfo_low_pages gauge node_zoneinfo_low_pages{node="0",zone="DMA"} 11 node_zoneinfo_low_pages{node="0",zone="DMA32"} 1600 node_zoneinfo_low_pages{node="0",zone="Device"} 0 node_zoneinfo_low_pages{node="0",zone="Movable"} 0 node_zoneinfo_low_pages{node="0",zone="Normal"} 23461 # HELP 
node_zoneinfo_managed_pages Present pages managed by the buddy system # TYPE node_zoneinfo_managed_pages gauge node_zoneinfo_managed_pages{node="0",zone="DMA"} 3973 node_zoneinfo_managed_pages{node="0",zone="DMA32"} 530339 node_zoneinfo_managed_pages{node="0",zone="Device"} 0 node_zoneinfo_managed_pages{node="0",zone="Movable"} 0 node_zoneinfo_managed_pages{node="0",zone="Normal"} 7.654794e+06 # HELP node_zoneinfo_min_pages Zone watermark pages_min # TYPE node_zoneinfo_min_pages gauge node_zoneinfo_min_pages{node="0",zone="DMA"} 8 node_zoneinfo_min_pages{node="0",zone="DMA32"} 1078 node_zoneinfo_min_pages{node="0",zone="Device"} 0 node_zoneinfo_min_pages{node="0",zone="Movable"} 0 node_zoneinfo_min_pages{node="0",zone="Normal"} 15809 # HELP node_zoneinfo_nr_active_anon_pages Number of anonymous pages recently more used # TYPE node_zoneinfo_nr_active_anon_pages gauge node_zoneinfo_nr_active_anon_pages{node="0",zone="DMA"} 1.175853e+06 # HELP node_zoneinfo_nr_active_file_pages Number of active pages with file-backing # TYPE node_zoneinfo_nr_active_file_pages gauge node_zoneinfo_nr_active_file_pages{node="0",zone="DMA"} 688810 # HELP node_zoneinfo_nr_anon_pages Number of anonymous pages currently used by the system # TYPE node_zoneinfo_nr_anon_pages gauge node_zoneinfo_nr_anon_pages{node="0",zone="DMA"} 1.156608e+06 # HELP node_zoneinfo_nr_anon_transparent_hugepages Number of anonymous transparent huge pages currently used by the system # TYPE node_zoneinfo_nr_anon_transparent_hugepages gauge node_zoneinfo_nr_anon_transparent_hugepages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_dirtied_total Page dirtyings since bootup # TYPE node_zoneinfo_nr_dirtied_total counter node_zoneinfo_nr_dirtied_total{node="0",zone="DMA"} 1.189097e+06 # HELP node_zoneinfo_nr_dirty_pages Number of dirty pages # TYPE node_zoneinfo_nr_dirty_pages gauge node_zoneinfo_nr_dirty_pages{node="0",zone="DMA"} 103 # HELP node_zoneinfo_nr_file_pages Number of file pages # TYPE node_zoneinfo_nr_file_pages gauge node_zoneinfo_nr_file_pages{node="0",zone="DMA"} 1.740118e+06 # HELP node_zoneinfo_nr_free_pages Total number of free pages in the zone # TYPE node_zoneinfo_nr_free_pages gauge node_zoneinfo_nr_free_pages{node="0",zone="DMA"} 2949 node_zoneinfo_nr_free_pages{node="0",zone="DMA32"} 528427 node_zoneinfo_nr_free_pages{node="0",zone="Normal"} 4.539739e+06 # HELP node_zoneinfo_nr_inactive_anon_pages Number of anonymous pages recently less used # TYPE node_zoneinfo_nr_inactive_anon_pages gauge node_zoneinfo_nr_inactive_anon_pages{node="0",zone="DMA"} 95612 # HELP node_zoneinfo_nr_inactive_file_pages Number of inactive pages with file-backing # TYPE node_zoneinfo_nr_inactive_file_pages gauge node_zoneinfo_nr_inactive_file_pages{node="0",zone="DMA"} 723339 # HELP node_zoneinfo_nr_isolated_anon_pages Temporary isolated pages from anon lru # TYPE node_zoneinfo_nr_isolated_anon_pages gauge node_zoneinfo_nr_isolated_anon_pages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_isolated_file_pages Temporary isolated pages from file lru # TYPE node_zoneinfo_nr_isolated_file_pages gauge node_zoneinfo_nr_isolated_file_pages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_kernel_stacks Number of kernel stacks # TYPE node_zoneinfo_nr_kernel_stacks gauge node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA"} 0 node_zoneinfo_nr_kernel_stacks{node="0",zone="DMA32"} 0 node_zoneinfo_nr_kernel_stacks{node="0",zone="Normal"} 18864 # HELP node_zoneinfo_nr_mapped_pages Number of mapped pages # TYPE node_zoneinfo_nr_mapped_pages gauge 
node_zoneinfo_nr_mapped_pages{node="0",zone="DMA"} 423143 # HELP node_zoneinfo_nr_shmem_pages Number of shmem pages (included tmpfs/GEM pages) # TYPE node_zoneinfo_nr_shmem_pages gauge node_zoneinfo_nr_shmem_pages{node="0",zone="DMA"} 330517 # HELP node_zoneinfo_nr_slab_reclaimable_pages Number of reclaimable slab pages # TYPE node_zoneinfo_nr_slab_reclaimable_pages gauge node_zoneinfo_nr_slab_reclaimable_pages{node="0",zone="DMA"} 121763 # HELP node_zoneinfo_nr_slab_unreclaimable_pages Number of unreclaimable slab pages # TYPE node_zoneinfo_nr_slab_unreclaimable_pages gauge node_zoneinfo_nr_slab_unreclaimable_pages{node="0",zone="DMA"} 56182 # HELP node_zoneinfo_nr_unevictable_pages Number of unevictable pages # TYPE node_zoneinfo_nr_unevictable_pages gauge node_zoneinfo_nr_unevictable_pages{node="0",zone="DMA"} 213111 # HELP node_zoneinfo_nr_writeback_pages Number of writeback pages # TYPE node_zoneinfo_nr_writeback_pages gauge node_zoneinfo_nr_writeback_pages{node="0",zone="DMA"} 0 # HELP node_zoneinfo_nr_written_total Page writings since bootup # TYPE node_zoneinfo_nr_written_total counter node_zoneinfo_nr_written_total{node="0",zone="DMA"} 1.181554e+06 # HELP node_zoneinfo_numa_foreign_total Was intended here, hit elsewhere # TYPE node_zoneinfo_numa_foreign_total counter node_zoneinfo_numa_foreign_total{node="0",zone="DMA"} 0 node_zoneinfo_numa_foreign_total{node="0",zone="DMA32"} 0 node_zoneinfo_numa_foreign_total{node="0",zone="Normal"} 0 # HELP node_zoneinfo_numa_hit_total Allocated in intended node # TYPE node_zoneinfo_numa_hit_total counter node_zoneinfo_numa_hit_total{node="0",zone="DMA"} 1 node_zoneinfo_numa_hit_total{node="0",zone="DMA32"} 13 node_zoneinfo_numa_hit_total{node="0",zone="Normal"} 6.2836441e+07 # HELP node_zoneinfo_numa_interleave_total Interleaver preferred this zone # TYPE node_zoneinfo_numa_interleave_total counter node_zoneinfo_numa_interleave_total{node="0",zone="DMA"} 1 node_zoneinfo_numa_interleave_total{node="0",zone="DMA32"} 1 node_zoneinfo_numa_interleave_total{node="0",zone="Normal"} 23174 # HELP node_zoneinfo_numa_local_total Allocation from local node # TYPE node_zoneinfo_numa_local_total counter node_zoneinfo_numa_local_total{node="0",zone="DMA"} 1 node_zoneinfo_numa_local_total{node="0",zone="DMA32"} 13 node_zoneinfo_numa_local_total{node="0",zone="Normal"} 6.2836441e+07 # HELP node_zoneinfo_numa_miss_total Allocated in non intended node # TYPE node_zoneinfo_numa_miss_total counter node_zoneinfo_numa_miss_total{node="0",zone="DMA"} 0 node_zoneinfo_numa_miss_total{node="0",zone="DMA32"} 0 node_zoneinfo_numa_miss_total{node="0",zone="Normal"} 0 # HELP node_zoneinfo_numa_other_total Allocation from other node # TYPE node_zoneinfo_numa_other_total counter node_zoneinfo_numa_other_total{node="0",zone="DMA"} 0 node_zoneinfo_numa_other_total{node="0",zone="DMA32"} 0 node_zoneinfo_numa_other_total{node="0",zone="Normal"} 0 # HELP node_zoneinfo_present_pages Physical pages existing within the zone # TYPE node_zoneinfo_present_pages gauge node_zoneinfo_present_pages{node="0",zone="DMA"} 3997 node_zoneinfo_present_pages{node="0",zone="DMA32"} 546847 node_zoneinfo_present_pages{node="0",zone="Device"} 0 node_zoneinfo_present_pages{node="0",zone="Movable"} 0 node_zoneinfo_present_pages{node="0",zone="Normal"} 7.806976e+06 # HELP node_zoneinfo_protection_0 Protection array 0. 
field # TYPE node_zoneinfo_protection_0 gauge node_zoneinfo_protection_0{node="0",zone="DMA"} 0 node_zoneinfo_protection_0{node="0",zone="DMA32"} 0 node_zoneinfo_protection_0{node="0",zone="Device"} 0 node_zoneinfo_protection_0{node="0",zone="Movable"} 0 node_zoneinfo_protection_0{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_1 Protection array 1. field # TYPE node_zoneinfo_protection_1 gauge node_zoneinfo_protection_1{node="0",zone="DMA"} 2039 node_zoneinfo_protection_1{node="0",zone="DMA32"} 0 node_zoneinfo_protection_1{node="0",zone="Device"} 0 node_zoneinfo_protection_1{node="0",zone="Movable"} 0 node_zoneinfo_protection_1{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_2 Protection array 2. field # TYPE node_zoneinfo_protection_2 gauge node_zoneinfo_protection_2{node="0",zone="DMA"} 31932 node_zoneinfo_protection_2{node="0",zone="DMA32"} 29893 node_zoneinfo_protection_2{node="0",zone="Device"} 0 node_zoneinfo_protection_2{node="0",zone="Movable"} 0 node_zoneinfo_protection_2{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_3 Protection array 3. field # TYPE node_zoneinfo_protection_3 gauge node_zoneinfo_protection_3{node="0",zone="DMA"} 31932 node_zoneinfo_protection_3{node="0",zone="DMA32"} 29893 node_zoneinfo_protection_3{node="0",zone="Device"} 0 node_zoneinfo_protection_3{node="0",zone="Movable"} 0 node_zoneinfo_protection_3{node="0",zone="Normal"} 0 # HELP node_zoneinfo_protection_4 Protection array 4. field # TYPE node_zoneinfo_protection_4 gauge node_zoneinfo_protection_4{node="0",zone="DMA"} 31932 node_zoneinfo_protection_4{node="0",zone="DMA32"} 29893 node_zoneinfo_protection_4{node="0",zone="Device"} 0 node_zoneinfo_protection_4{node="0",zone="Movable"} 0 node_zoneinfo_protection_4{node="0",zone="Normal"} 0 # HELP node_zoneinfo_spanned_pages Total pages spanned by the zone, including holes # TYPE node_zoneinfo_spanned_pages gauge node_zoneinfo_spanned_pages{node="0",zone="DMA"} 4095 node_zoneinfo_spanned_pages{node="0",zone="DMA32"} 1.04448e+06 node_zoneinfo_spanned_pages{node="0",zone="Device"} 0 node_zoneinfo_spanned_pages{node="0",zone="Movable"} 0 node_zoneinfo_spanned_pages{node="0",zone="Normal"} 7.806976e+06 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. # TYPE process_virtual_memory_max_bytes gauge # HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. # TYPE promhttp_metric_handler_errors_total counter promhttp_metric_handler_errors_total{cause="encoding"} 0 promhttp_metric_handler_errors_total{cause="gathering"} 0 # HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. 
# TYPE promhttp_metric_handler_requests_in_flight gauge promhttp_metric_handler_requests_in_flight 1 # HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. # TYPE promhttp_metric_handler_requests_total counter promhttp_metric_handler_requests_total{code="200"} 0 promhttp_metric_handler_requests_total{code="500"} 0 promhttp_metric_handler_requests_total{code="503"} 0 # HELP testmetric1_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom # TYPE testmetric1_1 untyped testmetric1_1{foo="bar"} 10 # HELP testmetric1_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics1.prom # TYPE testmetric1_2 untyped testmetric1_2{foo="baz"} 20 # HELP testmetric2_1 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_1 untyped testmetric2_1{foo="bar"} 30 # HELP testmetric2_2 Metric read from collector/fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_2 untyped testmetric2_2{foo="baz"} 40 node_exporter-1.7.0/collector/fixtures/ethtool/000077500000000000000000000000001452426057600217245ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/ethtool/bond0/000077500000000000000000000000001452426057600227265ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/ethtool/bond0/statistics000066400000000000000000000000101452426057600250320ustar00rootroot00000000000000ERROR: 1node_exporter-1.7.0/collector/fixtures/ethtool/eth0/000077500000000000000000000000001452426057600225645ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/ethtool/eth0/driver000066400000000000000000000004001452426057600237740ustar00rootroot00000000000000# ethtool -i eth0 driver: e1000e version: 5.11.0-22-generic firmware-version: 0.5-4 expansion-rom-version: bus-info: 0000:00:1f.6 supports-statistics: yes supports-test: yes supports-eeprom-access: yes supports-register-dump: yes supports-priv-flags: yes node_exporter-1.7.0/collector/fixtures/ethtool/eth0/settings000066400000000000000000000015331452426057600243510ustar00rootroot00000000000000# ethtool eth0 Settings for eth0: Supported ports: [ TP MII ] Supported link modes: 10baseT/Half 10baseT/Full 100baseT/Half 100baseT/Full 1000baseT/Full 10000baseT/Full Supported pause frame use: Symmetric Supports auto-negotiation: Yes Supported FEC modes: Not reported Advertised link modes: 10baseT/Half 10baseT/Full 100baseT/Half 100baseT/Full 1000baseT/Full Advertised pause frame use: Symmetric Advertised auto-negotiation: Yes Advertised FEC modes: Not reported Speed: 1000Mb/s Duplex: Full Auto-negotiation: on Port: Twisted Pair PHYAD: 1 Transceiver: internal MDI-X: off (auto) netlink error: Operation not permitted Current message level: 0x00000007 (7) drv probe link Link detected: yes node_exporter-1.7.0/collector/fixtures/ethtool/eth0/statistics000066400000000000000000000005741452426057600247070ustar00rootroot00000000000000# ethtool -S eth0 NIC statistics: tx_packets: 961500 rx_packets: 1260062 tx_errors: 0 rx_errors: 0 rx_missed: 401 align_errors: 0 tx_single_collisions: 0 tx_multi_collisions: 0 rx_unicast: 1230297 rx_broadcast: 5792 rx_multicast: 23973 tx_aborted: 0 tx_underrun: 0 duplicate metric: 1 duplicate_metric: 2 node_exporter-1.7.0/collector/fixtures/ip_vs_result.txt000066400000000000000000000136221452426057600235310ustar00rootroot00000000000000# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. 
# TYPE node_ipvs_backend_connections_active gauge node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 321 node_ipvs_backend_connections_active{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 64 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 248 node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 node_ipvs_backend_connections_active{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 1498 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 1499 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. 
# TYPE node_ipvs_backend_connections_inactive gauge node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 5 node_ipvs_backend_connections_inactive{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 1 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 2 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 1 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 2 node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. # TYPE node_ipvs_backend_weight gauge node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.49.32",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="",local_mark="10001000",local_port="0",proto="FWM",remote_address="192.168.50.26",remote_port="3306"} 20 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.22",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.22",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.83.24",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.49.32",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.55",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.26",remote_port="3306"} 0 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.50.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.82.21",remote_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.57",local_mark="",local_port="3306",proto="TCP",remote_address="192.168.84.22",remote_port="3306"} 0 # HELP node_ipvs_connections_total The total number of connections made. 
# TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 # HELP node_ipvs_incoming_bytes_total The total amount of incoming data. # TYPE node_ipvs_incoming_bytes_total counter node_ipvs_incoming_bytes_total 8.9991519156915e+13 # HELP node_ipvs_incoming_packets_total The total number of incoming packets. # TYPE node_ipvs_incoming_packets_total counter node_ipvs_incoming_packets_total 3.811989221e+09 # HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. # TYPE node_ipvs_outgoing_bytes_total counter node_ipvs_outgoing_bytes_total 0 # HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. # TYPE node_ipvs_outgoing_packets_total counter node_ipvs_outgoing_packets_total 0 node_exporter-1.7.0/collector/fixtures/ip_vs_result_lbs_local_address_local_port.txt000066400000000000000000000042661452426057600314720ustar00rootroot00000000000000# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. # TYPE node_ipvs_backend_connections_active gauge node_ipvs_backend_connections_active{local_address="",local_port="0"} 385 node_ipvs_backend_connections_active{local_address="192.168.0.22",local_port="3306"} 744 node_ipvs_backend_connections_active{local_address="192.168.0.55",local_port="3306"} 0 node_ipvs_backend_connections_active{local_address="192.168.0.57",local_port="3306"} 2997 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. # TYPE node_ipvs_backend_connections_inactive gauge node_ipvs_backend_connections_inactive{local_address="",local_port="0"} 6 node_ipvs_backend_connections_inactive{local_address="192.168.0.22",local_port="3306"} 5 node_ipvs_backend_connections_inactive{local_address="192.168.0.55",local_port="3306"} 0 node_ipvs_backend_connections_inactive{local_address="192.168.0.57",local_port="3306"} 0 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. # TYPE node_ipvs_backend_weight gauge node_ipvs_backend_weight{local_address="",local_port="0"} 120 node_ipvs_backend_weight{local_address="192.168.0.22",local_port="3306"} 300 node_ipvs_backend_weight{local_address="192.168.0.55",local_port="3306"} 100 node_ipvs_backend_weight{local_address="192.168.0.57",local_port="3306"} 200 # HELP node_ipvs_connections_total The total number of connections made. # TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 # HELP node_ipvs_incoming_bytes_total The total amount of incoming data. # TYPE node_ipvs_incoming_bytes_total counter node_ipvs_incoming_bytes_total 8.9991519156915e+13 # HELP node_ipvs_incoming_packets_total The total number of incoming packets. # TYPE node_ipvs_incoming_packets_total counter node_ipvs_incoming_packets_total 3.811989221e+09 # HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. # TYPE node_ipvs_outgoing_bytes_total counter node_ipvs_outgoing_bytes_total 0 # HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. # TYPE node_ipvs_outgoing_packets_total counter node_ipvs_outgoing_packets_total 0 node_exporter-1.7.0/collector/fixtures/ip_vs_result_lbs_local_port.txt000066400000000000000000000030601452426057600266020ustar00rootroot00000000000000# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. 
# TYPE node_ipvs_backend_connections_active gauge node_ipvs_backend_connections_active{local_port="0"} 385 node_ipvs_backend_connections_active{local_port="3306"} 3741 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. # TYPE node_ipvs_backend_connections_inactive gauge node_ipvs_backend_connections_inactive{local_port="0"} 6 node_ipvs_backend_connections_inactive{local_port="3306"} 5 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. # TYPE node_ipvs_backend_weight gauge node_ipvs_backend_weight{local_port="0"} 120 node_ipvs_backend_weight{local_port="3306"} 600 # HELP node_ipvs_connections_total The total number of connections made. # TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 # HELP node_ipvs_incoming_bytes_total The total amount of incoming data. # TYPE node_ipvs_incoming_bytes_total counter node_ipvs_incoming_bytes_total 8.9991519156915e+13 # HELP node_ipvs_incoming_packets_total The total number of incoming packets. # TYPE node_ipvs_incoming_packets_total counter node_ipvs_incoming_packets_total 3.811989221e+09 # HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. # TYPE node_ipvs_outgoing_bytes_total counter node_ipvs_outgoing_bytes_total 0 # HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. # TYPE node_ipvs_outgoing_packets_total counter node_ipvs_outgoing_packets_total 0 node_exporter-1.7.0/collector/fixtures/ip_vs_result_lbs_none.txt000066400000000000000000000025311452426057600254050ustar00rootroot00000000000000# HELP node_ipvs_backend_connections_active The current active connections by local and remote address. # TYPE node_ipvs_backend_connections_active gauge node_ipvs_backend_connections_active 4126 # HELP node_ipvs_backend_connections_inactive The current inactive connections by local and remote address. # TYPE node_ipvs_backend_connections_inactive gauge node_ipvs_backend_connections_inactive 11 # HELP node_ipvs_backend_weight The current backend weight by local and remote address. # TYPE node_ipvs_backend_weight gauge node_ipvs_backend_weight 720 # HELP node_ipvs_connections_total The total number of connections made. # TYPE node_ipvs_connections_total counter node_ipvs_connections_total 2.3765872e+07 # HELP node_ipvs_incoming_bytes_total The total amount of incoming data. # TYPE node_ipvs_incoming_bytes_total counter node_ipvs_incoming_bytes_total 8.9991519156915e+13 # HELP node_ipvs_incoming_packets_total The total number of incoming packets. # TYPE node_ipvs_incoming_packets_total counter node_ipvs_incoming_packets_total 3.811989221e+09 # HELP node_ipvs_outgoing_bytes_total The total amount of outgoing data. # TYPE node_ipvs_outgoing_bytes_total counter node_ipvs_outgoing_bytes_total 0 # HELP node_ipvs_outgoing_packets_total The total number of outgoing packets. 
# TYPE node_ipvs_outgoing_packets_total counter node_ipvs_outgoing_packets_total 0 node_exporter-1.7.0/collector/fixtures/proc/000077500000000000000000000000001452426057600212115ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/1/000077500000000000000000000000001452426057600213515ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/1/mounts000066400000000000000000000050221452426057600226200ustar00rootroot00000000000000rootfs / rootfs rw 0 0 sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 udev /dev devtmpfs rw,relatime,size=10240k,nr_inodes=1008585,mode=755 0 0 devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 tmpfs /run tmpfs rw,nosuid,relatime,size=1617716k,mode=755 0 0 /dev/dm-2 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0 tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0 cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0 cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0 cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0 cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=22,pgrp=1,timeout=300,minproto=5,maxproto=5,direct 0 0 mqueue /dev/mqueue mqueue rw,relatime 0 0 debugfs /sys/kernel/debug debugfs rw,relatime 0 0 hugetlbfs /dev/hugepages hugetlbfs rw,relatime 0 0 fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0 /dev/sda3 /boot ext2 rw,relatime 0 0 rpc_pipefs /run/rpc_pipefs rpc_pipefs rw,relatime 0 0 binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0 tmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=808860k,mode=700,uid=1000,gid=1000 0 0 gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 /dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\040bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0 /dev/sda /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[vsanDatastore]\011bafb9e5a-8856-7e6c-699c-801844e77a4a/kubernetes-dynamic-pvc-3eba5bba-48a3-11e8-89ab-005056b92113.vmdk ext4 rw,relatime,data=ordered 0 0 node_exporter-1.7.0/collector/fixtures/proc/1/stat000066400000000000000000000002761452426057600222540ustar00rootroot000000000000001 (systemd) S 0 1 1 0 -1 4194560 9061 9416027 94 2620 36 98 54406 13885 20 0 1 0 29 109604864 2507 18446744073709551615 1 1 0 0 0 0 671173123 4096 1260 0 0 0 17 0 0 0 19 0 0 0 0 0 0 0 0 0 0 
node_exporter-1.7.0/collector/fixtures/proc/10/000077500000000000000000000000001452426057600214315ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/10/mountinfo000066400000000000000000000021241452426057600233710ustar00rootroot000000000000001 1 0:5 / /root rw,nosuid shared:8 - rootfs rootfs rw 16 21 0:16 / /sys rw,nosuid,nodev,noexec,relatime shared:7 - sysfs sysfs rw 17 21 0:4 / /proc rw,nosuid,nodev,noexec,relatime shared:12 - proc proc rw 21 0 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,errors=remount-ro,data=ordered 194 21 0:42 / /mnt/nfs/test rw shared:144 - nfs4 192.168.1.1:/srv/test rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,addr=192.168.1.1,local_lock=none 177 21 0:42 / /mnt/nfs/test rw shared:130 - nfs4 192.168.1.1:/srv/test rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,addr=192.168.1.1,local_lock=none 1398 798 0:44 / /mnt/nfs/test rw,relatime shared:1154 - nfs 192.168.1.1:/srv/test rw,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=udp,timeo=11,retrans=3,sec=sys,mountaddr=192.168.1.1,mountvers=3,mountport=49602,mountproto=udp,local_lock=none,addr=192.168.1.1 node_exporter-1.7.0/collector/fixtures/proc/10/mountstats000066400000000000000000000055341452426057600236040ustar00rootroot00000000000000device rootfs mounted on / with fstype rootfs device sysfs mounted on /sys with fstype sysfs device proc mounted on /proc with fstype proc device /dev/sda1 mounted on / with fstype ext4 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured sec: flavor=1,pseudoflavor=1 events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 bytes: 1207640230 0 0 0 1210214218 0 295483 0 RPC iostats version: 1.0 p/v: 100003/4 (nfs) xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 per-op statistics NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test-dupe with fstype nfs4 statvers=1.1 opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured sec: flavor=1,pseudoflavor=1 events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 bytes: 1207640230 0 0 0 1210214218 0 295483 0 RPC iostats version: 1.0 p/v: 100003/4 (nfs) xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 per-op statistics NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 device 192.168.1.1:/srv/test mounted on /mnt/nfs/test-dupe with fstype nfs statvers=1.1 opts: 
rw,vers=3,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=udp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none,mountaddr=192.168.1.1,mountproto=udp,mountport=47853 age: 13968 caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured sec: flavor=1,pseudoflavor=1 events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 bytes: 1207640230 0 0 0 1210214218 0 295483 0 RPC iostats version: 1.0 p/v: 100003/4 (nfs) xprt: udp 832 0 6428 6428 0 12154 0 24 26 5726 per-op statistics NULL: 0 0 0 0 0 0 0 0 READ: 1298 1298 0 207680 1210292152 6 79386 79407 WRITE: 0 0 0 0 0 0 0 0 ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 node_exporter-1.7.0/collector/fixtures/proc/10/stat000066400000000000000000000002321452426057600223240ustar00rootroot0000000000000017 (khungtaskd) S 2 0 0 0 -1 2129984 0 0 0 0 14 0 0 0 20 0 1 0 24 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0node_exporter-1.7.0/collector/fixtures/proc/11/000077500000000000000000000000001452426057600214325ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/11/.missing_stat000066400000000000000000000000001452426057600241250ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/11/stat000066400000000000000000000002351452426057600223300ustar00rootroot0000000000000011 (rcu_preempt) I 2 0 0 0 -1 2129984 0 0 0 0 0 346 0 0 -2 0 1 0 32 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 0 0 0 17 2 1 1 0 0 0 0 0 0 0 0 0 0 0 node_exporter-1.7.0/collector/fixtures/proc/buddyinfo000066400000000000000000000004541452426057600231220ustar00rootroot00000000000000Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 node_exporter-1.7.0/collector/fixtures/proc/cgroups000066400000000000000000000003331452426057600226150ustar00rootroot00000000000000#subsys_name hierarchy num_cgroups enabled cpuset 5 47 1 cpu 3 172 1 cpuacct 3 172 1 blkio 6 170 1 memory 7 234 1 devices 11 170 1 freezer 9 47 1 net_cls 2 47 1 perf_event 8 47 1 hugetlb 12 47 1 pids 10 170 1 rdma 4 1 1node_exporter-1.7.0/collector/fixtures/proc/cpuinfo000066400000000000000000000244001452426057600225770ustar00rootroot00000000000000processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 799.998 cache size : 8192 KB physical id : 0 siblings : 8 core id : 0 cpu cores : 4 apicid : 0 initial apicid : 0 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : 
cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 1 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 800.037 cache size : 8192 KB physical id : 0 siblings : 8 core id : 1 cpu cores : 4 apicid : 2 initial apicid : 2 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 2 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 800.010 cache size : 8192 KB physical id : 0 siblings : 8 core id : 2 cpu cores : 4 apicid : 4 initial apicid : 4 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 3 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 800.028 cache size : 8192 KB physical id : 0 siblings : 8 core id : 3 cpu cores : 4 apicid : 6 initial apicid : 6 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c 
rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 4 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 799.989 cache size : 8192 KB physical id : 0 siblings : 8 core id : 0 cpu cores : 4 apicid : 1 initial apicid : 1 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 5 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 800.083 cache size : 8192 KB physical id : 0 siblings : 8 core id : 1 cpu cores : 4 apicid : 3 initial apicid : 3 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 6 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 800.017 cache size : 8192 KB physical id : 0 siblings : 8 core id : 2 cpu cores : 4 apicid : 5 initial apicid : 5 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca 
cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 7 vendor_id : GenuineIntel cpu family : 6 model : 142 model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz stepping : 10 microcode : 0xb4 cpu MHz : 800.030 cache size : 8192 KB physical id : 0 siblings : 8 core id : 3 cpu cores : 4 apicid : 7 initial apicid : 7 fpu : yes fpu_exception : yes cpuid level : 22 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs bogomips : 4224.00 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: node_exporter-1.7.0/collector/fixtures/proc/diskstats000066400000000000000000000056061452426057600231540ustar00rootroot00000000000000 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 252 1 dm-1 388 0 3104 84 
74 0 592 0 0 76 84 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 8 16 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 8 17 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 8 18 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 8 32 sdc 126552 141 1657779 14 11822 1895 172905 1007 0 10730 17070 18851 0 125173784 11130 1555 1944 8 33 sdc1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 0 0 node_exporter-1.7.0/collector/fixtures/proc/drbd000066400000000000000000000004101452426057600220420ustar00rootroot00000000000000version: 8.4.3 (api:1/proto:86-101) srcversion: 1A9F77B1CA5FF92235C2213 1: cs:Connected ro:Primary/Primary ds:UpToDate/UpToDate C r----- ns:17324442 nr:10961011 dw:28263521 dr:118696670 al:1100 bm:221 lo:12345 pe:12346 ua:12347 ap:12348 ep:1 wo:d oos:12349 node_exporter-1.7.0/collector/fixtures/proc/interrupts000066400000000000000000000042751452426057600233630ustar00rootroot00000000000000 CPU0 CPU1 CPU2 CPU3 0: 18 0 0 0 IR-IO-APIC-edge timer 1: 17960 105 28 28 IR-IO-APIC-edge i8042 8: 1 0 0 0 IR-IO-APIC-edge rtc0 9: 398553 2320 824 863 IR-IO-APIC-fasteoi acpi 12: 380847 1021 240 198 IR-IO-APIC-edge i8042 16: 328511 322879 293782 351412 IR-IO-APIC-fasteoi ehci_hcd:usb1, mmc0 23: 1451445 3333499 1092032 2644609 IR-IO-APIC-fasteoi ehci_hcd:usb2 40: 0 0 0 0 DMAR_MSI-edge dmar0 41: 0 0 0 0 DMAR_MSI-edge dmar1 42: 378324 1734637 440240 2434308 IR-PCI-MSI-edge xhci_hcd 43: 7434032 8092205 6478877 7492252 IR-PCI-MSI-edge ahci 44: 140636 226313 347 633 IR-PCI-MSI-edge i915 45: 4 22 0 0 IR-PCI-MSI-edge mei_me 46: 43078464 130 460171 290 IR-PCI-MSI-edge iwlwifi 47: 350 224 0 0 IR-PCI-MSI-edge snd_hda_intel NMI: 47 5031 6211 4968 Non-maskable interrupts LOC: 174326351 135776678 168393257 130980079 Local timer interrupts SPU: 0 0 0 0 Spurious interrupts PMI: 47 5031 6211 4968 Performance monitoring interrupts IWI: 1509379 2411776 1512975 2428828 IRQ work interrupts RTR: 0 0 0 0 APIC ICR read retries RES: 10847134 9111507 15999335 7457260 Rescheduling interrupts CAL: 148554 157441 142912 155528 Function call interrupts TLB: 10460334 9918429 10494258 10345022 TLB shootdowns TRM: 0 0 0 0 Thermal event interrupts THR: 0 0 0 0 Threshold APIC interrupts MCE: 0 0 0 0 Machine check exceptions MCP: 2406 2399 2399 2399 Machine check polls ERR: 0 MIS: 0 node_exporter-1.7.0/collector/fixtures/proc/interrupts_aarch64000066400000000000000000000166121452426057600246710ustar00rootroot00000000000000 CPU0 CPU1 CPU2 CPU3 CPU4 CPU5 CPU6 CPU7 10: 3287008667 3310445093 3301386305 3273132897 3368262064 3641875466 3360412019 3225020442 GICv3 27 Level arch_timer 14: 7815 0 0 4 0 0 0 0 GICv3 37 Level ttyS0 
17: 0 0 0 0 0 0 0 0 GICv3 48 Edge ACPI:Ged 18: 0 0 0 0 0 0 0 0 GICv3 49 Edge ACPI:Ged 19: 0 0 0 0 0 0 0 0 GICv3 50 Edge ACPI:Ged 20: 0 0 0 0 0 0 0 0 GICv3 51 Edge ACPI:Ged 21: 0 0 0 0 0 0 0 0 GICv3 52 Edge ACPI:Ged 22: 0 0 0 0 0 0 0 0 GICv3 53 Edge ACPI:Ged 23: 0 0 0 0 0 0 0 0 GICv3 54 Edge ACPI:Ged 24: 0 0 0 0 0 0 0 0 GICv3 55 Edge ACPI:Ged 25: 0 0 0 0 0 0 0 0 GICv3 56 Edge ACPI:Ged 26: 0 0 0 0 0 0 0 0 GICv3 57 Edge ACPI:Ged 27: 0 0 0 0 0 0 0 0 GICv3 58 Edge ACPI:Ged 28: 0 0 0 0 0 0 0 0 GICv3 59 Edge ACPI:Ged 29: 0 0 0 0 0 0 0 0 GICv3 60 Edge ACPI:Ged 30: 0 0 0 0 0 0 0 0 GICv3 61 Edge ACPI:Ged 31: 0 0 0 0 0 0 0 0 GICv3 62 Edge ACPI:Ged 32: 0 0 0 0 0 0 0 0 GICv3 63 Edge ACPI:Ged 33: 0 0 0 0 0 0 0 0 GICv3 64 Edge ACPI:Ged 34: 0 0 0 0 0 0 0 0 GICv3 65 Edge ACPI:Ged 35: 0 0 0 0 0 0 0 0 GICv3 66 Edge ACPI:Ged 36: 0 0 0 0 0 0 0 0 GICv3 67 Edge ACPI:Ged 37: 0 0 0 0 0 0 0 0 GICv3 68 Edge ACPI:Ged 38: 0 0 0 0 0 0 0 0 GICv3 69 Edge ACPI:Ged 39: 0 0 0 0 0 0 0 0 GICv3 70 Edge ACPI:Ged 40: 0 0 0 0 0 0 0 0 GICv3 71 Edge ACPI:Ged 41: 0 0 0 0 0 0 0 0 GICv3 72 Edge ACPI:Ged 42: 0 0 0 0 0 0 0 0 GICv3 73 Edge ACPI:Ged 43: 0 0 0 0 0 0 0 0 GICv3 74 Edge ACPI:Ged 44: 0 0 0 0 0 0 0 0 GICv3 75 Edge ACPI:Ged 45: 0 0 0 0 0 0 0 0 GICv3 76 Edge ACPI:Ged 46: 0 0 0 0 0 0 0 0 GICv3 77 Edge ACPI:Ged 47: 0 0 0 0 0 0 0 0 GICv3 78 Edge ACPI:Ged 48: 0 0 0 0 0 0 0 0 GICv3 79 Edge ACPI:Ged 49: 0 0 0 0 0 0 0 0 GICv3 23 Level arm-pmu 50: 0 0 0 0 0 0 0 0 ARMH0061:00 3 Edge ACPI:Event 51: 13 0 0 20 4 0 0 0 ITS-MSI 65536 Edge nvme0q0 52: 0 9 0 0 0 5 20 0 ITS-MSI 507904 Edge nvme1q0 53: 129969327 0 0 0 0 0 0 0 ITS-MSI 65537 Edge nvme0q1 54: 0 0 0 0 126913956 0 0 0 ITS-MSI 65538 Edge nvme0q2 55: 0 199619844 0 0 0 0 0 0 ITS-MSI 507905 Edge nvme1q1 56: 0 0 0 0 0 198494086 0 0 ITS-MSI 507906 Edge nvme1q2 57: 0 0 51 0 0 32479308 0 0 ITS-MSI 81920 Edge ena-mgmnt@pci:0000:00:05.0 58: 0 0 1195697946 437 0 0 0 0 ITS-MSI 81921 Edge eth0-Tx-Rx-0 59: 0 0 0 2709937608 1619 0 0 0 ITS-MSI 81922 Edge eth0-Tx-Rx-1 60: 0 1457922109 0 0 0 71 0 0 ITS-MSI 81923 Edge eth0-Tx-Rx-2 61: 2052879736 0 0 0 0 0 124 0 ITS-MSI 81924 Edge eth0-Tx-Rx-3 62: 0 0 0 0 0 0 2268695629 1530 ITS-MSI 81925 Edge eth0-Tx-Rx-4 63: 50 0 0 0 0 0 0 1997799253 ITS-MSI 81926 Edge eth0-Tx-Rx-5 64: 0 48 0 0 1238622585 0 0 0 ITS-MSI 81927 Edge eth0-Tx-Rx-6 65: 0 0 47 0 0 0 0 1574978449 ITS-MSI 81928 Edge eth0-Tx-Rx-7 IPI0:2768808080 2844211768 2878602432 2730576120 2723524623 3349096412 2717389879 2154252810 Rescheduling interrupts IPI1: 357815098 213258177 153713187 132890624 124746406 123498004 122386326 120728639 Function call interrupts IPI2: 0 0 0 0 0 0 0 0 CPU stop interrupts IPI3: 0 0 0 0 0 0 0 0 CPU stop (for crash dump) interrupts IPI4: 0 0 0 0 0 0 0 0 Timer broadcast interrupts IPI5: 0 0 0 0 0 0 0 0 IRQ work interrupts IPI6: 0 0 0 0 0 0 0 0 CPU wake-up interrupts Err: 0 node_exporter-1.7.0/collector/fixtures/proc/loadavg000066400000000000000000000000331452426057600225450ustar00rootroot000000000000000.21 0.37 0.39 1/719 19737 node_exporter-1.7.0/collector/fixtures/proc/mdstat000066400000000000000000000041301452426057600224260ustar00rootroot00000000000000Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] md127 : active raid1 sdi2[0] sdj2[1] 312319552 blocks [2/2] [UU] md0 : active raid1 sdi1[0] sdj1[1] 248896 blocks [2/2] [UU] md4 : inactive raid1 sda3[0](F) 
sdb3[1](S) 4883648 blocks [2/2] [UU] md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] 195310144 blocks [2/1] [U_] [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) 195310144 blocks [2/2] [UU] [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec md201 : active raid1 sda3[0] sdb3[1] 1993728 blocks super 1.2 [2/2] [UU] [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] bitmap: 0/30 pages [0KB], 65536KB chunk md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) 523968 blocks super 1.2 [4/4] [UUUU] resync=DELAYED md10 : active raid0 sda1[0] sdb1[1] 314159265 blocks 64k chunks md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) 4190208 blocks super 1.2 [2/2] [UU] resync=PENDING md12 : active raid0 sdc2[0] sdd2[1] 3886394368 blocks super 1.2 512k chunks md126 : active raid0 sdb[1] sdc[0] 1855870976 blocks super external:/md127/0 128k chunks md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) 7932 blocks super external:imsm md00 : active raid0 xvdb[0] 4186624 blocks super 1.2 256k chunks md120 : active linear sda1[1] sdb1[0] 2095104 blocks super 1.2 0k rounding md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] 322560 blocks super 1.2 512k chunks unused devices: node_exporter-1.7.0/collector/fixtures/proc/meminfo000066400000000000000000000022221452426057600225640ustar00rootroot00000000000000MemTotal: 3742148 kB MemFree: 225472 kB Buffers: 22040 kB Cached: 930888 kB SwapCached: 192504 kB Active: 2233416 kB Inactive: 1028728 kB Active(anon): 2020004 kB Inactive(anon): 883052 kB Active(file): 213412 kB Inactive(file): 145676 kB Unevictable: 32 kB Mlocked: 32 kB SwapTotal: 4194300 kB SwapFree: 3155360 kB Dirty: 1052 kB Writeback: 0 kB AnonPages: 2244172 kB Mapped: 239220 kB Shmem: 593840 kB Slab: 98932 kB SReclaimable: 44772 kB SUnreclaim: 54160 kB KernelStack: 5800 kB PageTables: 75212 kB NFS_Unstable: 0 kB Bounce: 0 kB WritebackTmp: 0 kB CommitLimit: 6065372 kB Committed_AS: 7835436 kB VmallocTotal: 34359738367 kB VmallocUsed: 352840 kB VmallocChunk: 34359338876 kB HardwareCorrupted: 0 kB AnonHugePages: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 2048 kB DirectMap4k: 185660 kB DirectMap2M: 3698688 kB node_exporter-1.7.0/collector/fixtures/proc/net/000077500000000000000000000000001452426057600217775ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/net/arp000066400000000000000000000011341452426057600225030ustar00rootroot00000000000000IP address HW type Flags HW address Mask Device 192.168.1.1 0x1 0x2 cc:aa:dd:ee:aa:bb * eth0 192.168.1.2 0x1 0x2 bb:cc:dd:ee:ff:aa * eth0 192.168.1.3 0x1 0x2 aa:bb:cc:dd:ee:ff * eth0 192.168.1.4 0x1 0x2 dd:ee:ff:aa:bb:cc * eth1 192.168.1.5 0x1 0x2 ee:ff:aa:bb:cc:dd * eth1 192.168.1.6 0x1 0x2 ff:aa:bb:cc:dd:ee * eth1 10.0.0.1 0x1 0x2 de:ad:be:ef:00:00 * nope node_exporter-1.7.0/collector/fixtures/proc/net/ip_vs000066400000000000000000000015211452426057600230410ustar00rootroot00000000000000IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP C0A80016:0CEA wlc -> C0A85216:0CEA Tunnel 100 248 2 -> C0A85318:0CEA Tunnel 100 248 2 -> C0A85315:0CEA Tunnel 100 248 1 
TCP C0A80039:0CEA wlc -> C0A85416:0CEA Tunnel 0 0 0 -> C0A85215:0CEA Tunnel 100 1499 0 -> C0A83215:0CEA Tunnel 100 1498 0 TCP C0A80037:0CEA wlc -> C0A8321A:0CEA Tunnel 0 0 0 -> C0A83120:0CEA Tunnel 100 0 0 FWM 10001000 wlc -> C0A8321A:0CEA Tunnel 20 64 1 -> C0A83120:0CEA Tunnel 100 321 5 node_exporter-1.7.0/collector/fixtures/proc/net/ip_vs_stats000066400000000000000000000004621452426057600242620ustar00rootroot00000000000000 Total Incoming Outgoing Incoming Outgoing Conns Packets Packets Bytes Bytes 16AA370 E33656E5 0 51D8C8883AB3 0 Conns/s Pkts/s Pkts/s Bytes/s Bytes/s 4 1FB3C 0 1282A8F 0 node_exporter-1.7.0/collector/fixtures/proc/net/netstat000066400000000000000000000032321452426057600234040ustar00rootroot00000000000000TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLoss TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPChallengeACK TCPSYNChallenge TcpExt: 0 0 2 0 0 0 0 0 0 0 388812 0 0 0 0 6 102471 17 9 0 0 80568 0 168808 0 4471289 26 1433940 3744565 0 1 0 0 0 0 0 0 0 0 48 0 0 0 1 0 1 0 1 115 0 0 0 0 9 0 5 0 41 4 0 0 0 0 0 0 0 1 0 0 0 0 2 5 0 0 0 0 0 0 0 2 2 IpExt: InNoRoutes InTruncatedPkts InMcastPkts OutMcastPkts InBcastPkts OutBcastPkts InOctets OutOctets InMcastOctets OutMcastOctets InBcastOctets OutBcastOctets IpExt: 0 0 0 0 0 0 6286396970 2786264347 0 0 0 0 node_exporter-1.7.0/collector/fixtures/proc/net/rpc/000077500000000000000000000000001452426057600225635ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/net/rpc/nfs000066400000000000000000000006001452426057600232700ustar00rootroot00000000000000net 70 70 69 45 rpc 1218785755 374636 1218815394 proc2 18 16 57 74 52 71 73 45 86 0 52 83 61 17 53 50 23 70 82 proc3 22 0 1061909262 48906 4077635 117661341 5 29391916 2570425 2993289 590 0 0 7815 15 1130 0 3983 92385 13332 2 1 23729 proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 11 58 32 67 13 28 35 90 1 26 0 node_exporter-1.7.0/collector/fixtures/proc/net/rpc/nfsd000066400000000000000000000007431452426057600234440ustar00rootroot00000000000000rc 0 6 18622 fh 0 0 0 0 0 io 157286400 72864 th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 ra 32 0 0 0 0 0 0 0 0 0 0 0 net 972 55 917 1 rpc 18628 3 1 2 0 proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 proc4 2 2 10853 proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 
5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 node_exporter-1.7.0/collector/fixtures/proc/net/snmp000066400000000000000000000022621452426057600227010ustar00rootroot00000000000000Ip: Forwarding DefaultTTL InReceives InHdrErrors InAddrErrors ForwDatagrams InUnknownProtos InDiscards InDelivers OutRequests OutDiscards OutNoRoutes ReasmTimeout ReasmReqds ReasmOKs ReasmFails FragOKs FragFails FragCreates Ip: 1 64 57740232 0 25 397750 0 0 57340175 55365537 0 54 0 0 0 0 0 0 0 Icmp: InMsgs InErrors InCsumErrors InDestUnreachs InTimeExcds InParmProbs InSrcQuenchs InRedirects InEchos InEchoReps InTimestamps InTimestampReps InAddrMasks InAddrMaskReps OutMsgs OutErrors OutDestUnreachs OutTimeExcds OutParmProbs OutSrcQuenchs OutRedirects OutEchos OutEchoReps OutTimestamps OutTimestampReps OutAddrMasks OutAddrMaskReps Icmp: 104 0 0 104 0 0 0 0 0 0 0 0 0 0 120 0 120 0 0 0 0 0 0 0 0 0 0 IcmpMsg: InType3 OutType3 IcmpMsg: 104 120 Tcp: RtoAlgorithm RtoMin RtoMax MaxConn ActiveOpens PassiveOpens AttemptFails EstabResets CurrEstab InSegs OutSegs RetransSegs InErrs OutRsts InCsumErrors Tcp: 1 200 120000 -1 3556 230 341 161 0 57252008 54915039 227 5 1003 0 Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors Udp: 88542 120 0 53028 9 8 0 UdpLite: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors InCsumErrors UdpLite: 0 0 0 0 0 0 0 node_exporter-1.7.0/collector/fixtures/proc/net/snmp6000066400000000000000000000056531452426057600227760ustar00rootroot00000000000000Ip6InReceives 7 Ip6InHdrErrors 0 Ip6InTooBigErrors 0 Ip6InNoRoutes 5 Ip6InAddrErrors 0 Ip6InUnknownProtos 0 Ip6InTruncatedPkts 0 Ip6InDiscards 0 Ip6InDelivers 0 Ip6OutForwDatagrams 0 Ip6OutRequests 8 Ip6OutDiscards 0 Ip6OutNoRoutes 3003 Ip6ReasmTimeout 0 Ip6ReasmReqds 0 Ip6ReasmOKs 0 Ip6ReasmFails 0 Ip6FragOKs 0 Ip6FragFails 0 Ip6FragCreates 0 Ip6InMcastPkts 2 Ip6OutMcastPkts 12 Ip6InOctets 460 Ip6OutOctets 536 Ip6InMcastOctets 112 Ip6OutMcastOctets 840 Ip6InBcastOctets 0 Ip6OutBcastOctets 0 Ip6InNoECTPkts 7 Ip6InECT1Pkts 0 Ip6InECT0Pkts 0 Ip6InCEPkts 0 Icmp6InMsgs 0 Icmp6InErrors 0 Icmp6OutMsgs 8 Icmp6OutErrors 0 Icmp6InCsumErrors 0 Icmp6InDestUnreachs 0 Icmp6InPktTooBigs 0 Icmp6InTimeExcds 0 Icmp6InParmProblems 0 Icmp6InEchos 0 Icmp6InEchoReplies 0 Icmp6InGroupMembQueries 0 Icmp6InGroupMembResponses 0 Icmp6InGroupMembReductions 0 Icmp6InRouterSolicits 0 Icmp6InRouterAdvertisements 0 Icmp6InNeighborSolicits 0 Icmp6InNeighborAdvertisements 0 Icmp6InRedirects 0 Icmp6InMLDv2Reports 0 Icmp6OutDestUnreachs 0 Icmp6OutPktTooBigs 0 Icmp6OutTimeExcds 0 Icmp6OutParmProblems 0 Icmp6OutEchos 0 Icmp6OutEchoReplies 0 Icmp6OutGroupMembQueries 0 Icmp6OutGroupMembResponses 0 Icmp6OutGroupMembReductions 0 Icmp6OutRouterSolicits 3 Icmp6OutRouterAdvertisements 0 Icmp6OutNeighborSolicits 1 Icmp6OutNeighborAdvertisements 0 Icmp6OutRedirects 0 Icmp6OutMLDv2Reports 4 Icmp6OutType133 3 Icmp6OutType135 1 Icmp6OutType143 4 Udp6InDatagrams 0 Udp6NoPorts 0 Udp6InErrors 0 Udp6OutDatagrams 0 Udp6RcvbufErrors 9 Udp6SndbufErrors 8 Udp6InCsumErrors 0 Udp6IgnoredMulti 0 UdpLite6InDatagrams 0 UdpLite6NoPorts 0 UdpLite6InErrors 0 UdpLite6OutDatagrams 0 UdpLite6RcvbufErrors 0 UdpLite6SndbufErrors 0 UdpLite6InCsumErrors 0 node_exporter-1.7.0/collector/fixtures/proc/net/sockstat000066400000000000000000000002041452426057600235510ustar00rootroot00000000000000sockets: used 229 TCP: inuse 4 orphan 0 tw 4 alloc 17 mem 1 UDP: inuse 0 mem 0 UDPLITE: inuse 0 
RAW: inuse 0 FRAG: inuse 0 memory 0 node_exporter-1.7.0/collector/fixtures/proc/net/sockstat6000066400000000000000000000001251452426057600236410ustar00rootroot00000000000000TCP6: inuse 17 UDP6: inuse 9 UDPLITE6: inuse 0 RAW6: inuse 1 FRAG6: inuse 0 memory 0 node_exporter-1.7.0/collector/fixtures/proc/net/softnet_stat000066400000000000000000000006131452426057600244370ustar00rootroot0000000000000000049279 00000000 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 000dfb82 00000029 0000000a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00551c3f 00000000 00000055 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 002f8339 00000000 00000032 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000node_exporter-1.7.0/collector/fixtures/proc/net/stat/000077500000000000000000000000001452426057600227525ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/net/stat/arp_cache000066400000000000000000000006261452426057600246060ustar00rootroot00000000000000entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls 00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c 00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 node_exporter-1.7.0/collector/fixtures/proc/net/stat/ndisc_cache000066400000000000000000000006261452426057600251240ustar00rootroot00000000000000entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls 00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb 00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 node_exporter-1.7.0/collector/fixtures/proc/net/stat/nf_conntrack000066400000000000000000000014211452426057600253400ustar00rootroot00000000000000entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart 00000021 00000000 00000000 00000000 00000003 0000588a 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000021 00000000 00000000 00000000 00000002 000056a4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000002 00000021 00000000 00000000 00000000 00000001 000058d4 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000001 00000021 00000000 00000000 00000000 0000002f 00005688 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000004 node_exporter-1.7.0/collector/fixtures/proc/net/udp000066400000000000000000000004541452426057600225150ustar00rootroot00000000000000 sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 0: 00000000:0016 00000000:0000 0A 00000015:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 node_exporter-1.7.0/collector/fixtures/proc/pressure/000077500000000000000000000000001452426057600230615ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/pressure/cpu000066400000000000000000000000661452426057600235750ustar00rootroot00000000000000some avg10=0.00 avg60=0.00 
avg300=0.00 total=14036781 node_exporter-1.7.0/collector/fixtures/proc/pressure/io000066400000000000000000000001561452426057600234150ustar00rootroot00000000000000some avg10=0.18 avg60=0.34 avg300=0.10 total=159886802 full avg10=0.18 avg60=0.34 avg300=0.10 total=159229614 node_exporter-1.7.0/collector/fixtures/proc/pressure/memory000066400000000000000000000001361452426057600243140ustar00rootroot00000000000000some avg10=0.00 avg60=0.00 avg300=0.00 total=0 full avg10=0.00 avg60=0.00 avg300=0.00 total=0 node_exporter-1.7.0/collector/fixtures/proc/schedstat000066400000000000000000000014431452426057600231200ustar00rootroot00000000000000version 15 timestamp 15819019232 cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 node_exporter-1.7.0/collector/fixtures/proc/self00007770000000000000000000000000145242605760022223210ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/slabinfo000066400000000000000000000011721452426057600227320ustar00rootroot00000000000000slabinfo - version: 2.1 # name : tunables : slabdata tw_sock_TCP 704 864 256 32 2 : tunables 0 0 0 : slabdata 27 27 0 dmaengine-unmap-128 1206 1320 1088 30 8 : tunables 0 0 0 : slabdata 44 44 0 kmalloc-8192 132 148 8192 4 8 : tunables 0 0 0 : slabdata 37 37 0 kmem_cache 320 320 256 32 2 : tunables 0 0 0 : slabdata 10 10 0 node_exporter-1.7.0/collector/fixtures/proc/softirqs000066400000000000000000000006231452426057600230070ustar00rootroot00000000000000 CPU0 CPU1 HI: 7 1 TIMER: 424191 108342 NET_TX: 2301 2430 NET_RX: 43066 104508 BLOCK: 23776 24115 IRQ_POLL: 0 0 TASKLET: 372 1899 SCHED: 378895 152852 HRTIMER: 40 346 RCU: 155929 146631 node_exporter-1.7.0/collector/fixtures/proc/spl/000077500000000000000000000000001452426057600220075ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/000077500000000000000000000000001452426057600231355ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/000077500000000000000000000000001452426057600237375ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/abdstats000066400000000000000000000016321452426057600254710ustar00rootroot000000000000007 1 0x01 21 5712 73163810083184 309946154984654 name type data struct_size 4 2520 linear_cnt 4 62 linear_data_size 4 223232 scatter_cnt 4 1 scatter_data_size 4 16384 scatter_chunk_waste 4 0 scatter_order_0 4 0 scatter_order_1 4 0 scatter_order_2 4 1 scatter_order_3 4 0 scatter_order_4 4 0 scatter_order_5 4 0 scatter_order_6 4 0 scatter_order_7 4 0 scatter_order_8 4 0 scatter_order_9 4 0 scatter_order_10 4 0 scatter_page_multi_chunk 4 0 scatter_page_multi_zone 4 0 scatter_page_alloc_retry 4 0 scatter_sg_table_retry 4 0 
node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/arcstats000066400000000000000000000075661452426057600255240ustar00rootroot000000000000006 1 0x01 91 4368 5266997922 97951858082072 name type data hits 4 8772612 misses 4 604635 demand_data_hits 4 7221032 demand_data_misses 4 73300 demand_metadata_hits 4 1464353 demand_metadata_misses 4 498170 prefetch_data_hits 4 3615 prefetch_data_misses 4 17094 prefetch_metadata_hits 4 83612 prefetch_metadata_misses 4 16071 mru_hits 4 855535 mru_ghost_hits 4 21100 mfu_hits 4 7829854 mfu_ghost_hits 4 821 deleted 4 60403 mutex_miss 4 2 evict_skip 4 2265729 evict_not_enough 4 680 evict_l2_cached 4 0 evict_l2_eligible 4 8992514560 evict_l2_ineligible 4 992552448 evict_l2_skip 4 0 hash_elements 4 42359 hash_elements_max 4 88245 hash_collisions 4 50564 hash_chains 4 412 hash_chain_max 4 3 p 4 516395305 c 4 1643208777 c_min 4 33554432 c_max 4 8367976448 size 4 1603939792 hdr_size 4 16361080 data_size 4 1295836160 metadata_size 4 175298560 other_size 4 116443992 anon_size 4 1917440 anon_evictable_data 4 0 anon_evictable_metadata 4 0 mru_size 4 402593792 mru_evictable_data 4 278091264 mru_evictable_metadata 4 18606592 mru_ghost_size 4 999728128 mru_ghost_evictable_data 4 883765248 mru_ghost_evictable_metadata 4 115962880 mfu_size 4 1066623488 mfu_evictable_data 4 1017613824 mfu_evictable_metadata 4 9163776 mfu_ghost_size 4 104936448 mfu_ghost_evictable_data 4 96731136 mfu_ghost_evictable_metadata 4 8205312 l2_hits 4 0 l2_misses 4 0 l2_feeds 4 0 l2_rw_clash 4 0 l2_read_bytes 4 0 l2_write_bytes 4 0 l2_writes_sent 4 0 l2_writes_done 4 0 l2_writes_error 4 0 l2_writes_lock_retry 4 0 l2_evict_lock_retry 4 0 l2_evict_reading 4 0 l2_evict_l1cached 4 0 l2_free_on_write 4 0 l2_cdata_free_on_write 4 0 l2_abort_lowmem 4 0 l2_cksum_bad 4 0 l2_io_error 4 0 l2_size 4 0 l2_asize 4 0 l2_hdr_size 4 0 l2_compress_successes 4 0 l2_compress_zeros 4 0 l2_compress_failures 4 0 memory_throttle_count 4 0 duplicate_buffers 4 0 duplicate_buffers_size 4 0 duplicate_reads 4 0 memory_direct_count 4 542 memory_indirect_count 4 3006 arc_no_grow 4 0 arc_tempreserve 4 0 arc_loaned_bytes 4 0 arc_prune 4 0 arc_meta_used 4 308103632 arc_meta_limit 4 6275982336 arc_meta_max 4 449286096 arc_meta_min 4 16777216 arc_need_free 4 0 arc_sys_free 4 261496832 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/dbufstats000066400000000000000000000051061452426057600256630ustar00rootroot0000000000000015 1 0x01 63 17136 73163812943503 309964267073187 name type data dbuf_cache_count 4 27 dbuf_cache_size 4 302080 dbuf_cache_size_max 4 394240 dbuf_cache_max_bytes 4 62834368 dbuf_cache_lowater_bytes 4 56550932 dbuf_cache_hiwater_bytes 4 69117804 dbuf_cache_total_evicts 4 0 dbuf_cache_level_0 4 27 dbuf_cache_level_1 4 0 dbuf_cache_level_2 4 0 dbuf_cache_level_3 4 0 dbuf_cache_level_4 4 0 dbuf_cache_level_5 4 0 dbuf_cache_level_6 4 0 dbuf_cache_level_7 4 0 dbuf_cache_level_8 4 0 dbuf_cache_level_9 4 0 dbuf_cache_level_10 4 0 dbuf_cache_level_11 4 0 dbuf_cache_level_0_bytes 4 302080 dbuf_cache_level_1_bytes 4 0 dbuf_cache_level_2_bytes 4 0 dbuf_cache_level_3_bytes 4 0 dbuf_cache_level_4_bytes 4 0 dbuf_cache_level_5_bytes 4 0 dbuf_cache_level_6_bytes 4 0 dbuf_cache_level_7_bytes 4 0 dbuf_cache_level_8_bytes 4 0 dbuf_cache_level_9_bytes 4 0 dbuf_cache_level_10_bytes 4 0 dbuf_cache_level_11_bytes 4 0 hash_hits 4 108807 hash_misses 4 1851 hash_collisions 4 0 hash_elements 4 55 hash_elements_max 4 55 hash_chains 4 0 hash_chain_max 4 0 hash_insert_race 4 0 hash_dbuf_level_0 4 37 hash_dbuf_level_1 4 10 
hash_dbuf_level_2 4 2 hash_dbuf_level_3 4 2 hash_dbuf_level_4 4 2 hash_dbuf_level_5 4 2 hash_dbuf_level_6 4 0 hash_dbuf_level_7 4 0 hash_dbuf_level_8 4 0 hash_dbuf_level_9 4 0 hash_dbuf_level_10 4 0 hash_dbuf_level_11 4 0 hash_dbuf_level_0_bytes 4 465920 hash_dbuf_level_1_bytes 4 1310720 hash_dbuf_level_2_bytes 4 262144 hash_dbuf_level_3_bytes 4 262144 hash_dbuf_level_4_bytes 4 262144 hash_dbuf_level_5_bytes 4 262144 hash_dbuf_level_6_bytes 4 0 hash_dbuf_level_7_bytes 4 0 hash_dbuf_level_8_bytes 4 0 hash_dbuf_level_9_bytes 4 0 hash_dbuf_level_10_bytes 4 0 hash_dbuf_level_11_bytes 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/dmu_tx000066400000000000000000000010101452426057600251520ustar00rootroot000000000000005 1 0x01 11 528 8010436841 354962070418194 name type data dmu_tx_assigned 4 3532844 dmu_tx_delay 4 0 dmu_tx_error 4 0 dmu_tx_suspended 4 0 dmu_tx_group 4 0 dmu_tx_memory_reserve 4 0 dmu_tx_memory_reclaim 4 0 dmu_tx_dirty_throttle 4 0 dmu_tx_dirty_delay 4 0 dmu_tx_dirty_over_max 4 0 dmu_tx_quota 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/dnodestats000066400000000000000000000022441452426057600260340ustar00rootroot0000000000000010 1 0x01 28 7616 73163810135894 309969103316276 name type data dnode_hold_dbuf_hold 4 0 dnode_hold_dbuf_read 4 0 dnode_hold_alloc_hits 4 37617 dnode_hold_alloc_misses 4 0 dnode_hold_alloc_interior 4 0 dnode_hold_alloc_lock_retry 4 0 dnode_hold_alloc_lock_misses 4 0 dnode_hold_alloc_type_none 4 0 dnode_hold_free_hits 4 0 dnode_hold_free_misses 4 0 dnode_hold_free_lock_misses 4 0 dnode_hold_free_lock_retry 4 0 dnode_hold_free_overflow 4 0 dnode_hold_free_refcount 4 0 dnode_hold_free_txg 4 0 dnode_allocate 4 0 dnode_reallocate 4 0 dnode_buf_evict 4 17 dnode_alloc_next_chunk 4 0 dnode_alloc_race 4 0 dnode_alloc_next_block 4 0 dnode_move_invalid 4 0 dnode_move_recheck1 4 0 dnode_move_recheck2 4 0 dnode_move_special 4 0 dnode_move_handle 4 0 dnode_move_rwlock 4 0 dnode_move_active 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/fm000066400000000000000000000003611452426057600242640ustar00rootroot000000000000000 1 0x01 4 192 8007255140 354329591145385 name type data erpt-dropped 4 18 erpt-set-failed 4 0 fmri-set-failed 4 0 payload-set-failed 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/pool1/000077500000000000000000000000001452426057600247715ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/pool1/io000066400000000000000000000004241452426057600253230ustar00rootroot0000000000000012 3 0x00 1 80 79205351707403 395818011156865 nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt 1884160 3206144 22 132 7155162 104112268 79210489694949 24168078 104112268 79210489849220 0 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-1000066400000000000000000000005531452426057600263430ustar00rootroot0000000000000023 1 0x01 7 2160 221578688875 6665999035587 name type data dataset_name 7 pool1 writes 4 0 nwritten 4 0 reads 4 0 nread 4 0 nunlinks 4 0 nunlinked 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/pool1/objset-2000066400000000000000000000005711452426057600263440ustar00rootroot0000000000000024 1 0x01 7 2160 221611904716 7145015038451 name type data dataset_name 7 pool1/dataset1 writes 4 4 nwritten 4 12302 reads 4 2 nread 4 28 nunlinks 4 3 nunlinked 4 3 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/pool1/state000066400000000000000000000000071452426057600260310ustar00rootroot00000000000000ONLINE 
node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/pool2/000077500000000000000000000000001452426057600247725ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/pool2/state000066400000000000000000000000121452426057600260260ustar00rootroot00000000000000SUSPENDED node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/poolz1/000077500000000000000000000000001452426057600251635ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/poolz1/io000066400000000000000000000004441452426057600255170ustar00rootroot0000000000000016 3 0x00 1 80 79568650431241 395832279341621 nread nwritten reads writes wtime wlentime wupdate rtime rlentime rupdate wcnt rcnt 2826240 2680501248 33 25294 9673715628 6472105124093 110734831833266 9829091640 6472105124093 110734831944501 0 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-1000066400000000000000000000005531452426057600265350ustar00rootroot0000000000000030 1 0x01 7 2160 217993779684 2621674546179 name type data dataset_name 7 poolz1 writes 4 0 nwritten 4 0 reads 4 0 nread 4 0 nunlinks 4 0 nunlinked 4 0node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/poolz1/objset-2000066400000000000000000000005741452426057600265410ustar00rootroot0000000000000031 1 0x01 7 2160 218133979890 3024169078920 name type data dataset_name 7 poolz1/dataset1 writes 4 10 nwritten 4 32806 reads 4 2 nread 4 28 nunlinks 4 14 nunlinked 4 14node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/poolz1/state000066400000000000000000000000111452426057600262160ustar00rootroot00000000000000DEGRADED node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/vdev_cache_stats000066400000000000000000000003121452426057600271630ustar00rootroot000000000000008 1 0x01 3 144 8012540758 352116106118781 name type data delegations 4 40 hits 4 0 misses 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/vdev_mirror_stats000066400000000000000000000005541452426057600274420ustar00rootroot0000000000000018 1 0x01 7 1904 73163813004224 309980651991187 name type data rotating_linear 4 0 rotating_offset 4 0 rotating_seek 4 0 non_rotating_linear 4 0 non_rotating_seek 4 0 preferred_found 4 0 preferred_not_found 4 94 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/xuio_stats000066400000000000000000000004771452426057600260740ustar00rootroot000000000000002 1 0x01 6 288 8009100742 353415816865654 name type data onloan_read_buf 4 32 onloan_write_buf 4 0 read_buf_copied 4 0 read_buf_nocopy 4 0 write_buf_copied 4 0 write_buf_nocopy 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/zfetchstats000066400000000000000000000010211452426057600262160ustar00rootroot000000000000004 1 0x01 11 528 8010434610 345692669858836 name type data hits 4 7067992 misses 4 11 colinear_hits 4 0 colinear_misses 4 11 stride_hits 4 7067990 stride_misses 4 0 reclaim_successes 4 0 reclaim_failures 4 11 streams_resets 4 0 streams_noresets 4 2 bogus_streams 4 0 node_exporter-1.7.0/collector/fixtures/proc/spl/kstat/zfs/zil000066400000000000000000000011441452426057600244600ustar00rootroot000000000000007 1 0x01 13 624 8012538347 351689526932992 name type data zil_commit_count 4 10 zil_commit_writer_count 4 0 zil_itx_count 4 0 zil_itx_indirect_count 4 0 zil_itx_indirect_bytes 4 0 zil_itx_copied_count 4 0 zil_itx_copied_bytes 4 0 zil_itx_needcopy_count 4 0 zil_itx_needcopy_bytes 4 18446744073709537686 zil_itx_metaslab_normal_count 4 0 zil_itx_metaslab_normal_bytes 4 0 zil_itx_metaslab_slog_count 4 0 zil_itx_metaslab_slog_bytes 
4 0 node_exporter-1.7.0/collector/fixtures/proc/stat000066400000000000000000000040601452426057600221070ustar00rootroot00000000000000cpu 301854 612 111922 8979004 3552 2 3944 0 44 36 cpu0 44490 19 21045 1087069 220 1 3410 0 2 1 cpu1 47869 23 16474 1110787 591 0 46 0 3 2 cpu2 46504 36 15916 1112321 441 0 326 0 4 3 cpu3 47054 102 15683 1113230 533 0 60 0 5 4 cpu4 28413 25 10776 1140321 217 0 8 0 6 5 cpu5 29271 101 11586 1136270 672 0 30 0 7 6 cpu6 29152 36 10276 1139721 319 0 29 0 8 7 cpu7 29098 268 10164 1139282 555 0 31 0 9 8 intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 38014093 btime 1418183276 processes 26442 procs_running 2 procs_blocked 0 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 node_exporter-1.7.0/collector/fixtures/proc/sys/000077500000000000000000000000001452426057600220275ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/sys/fs/000077500000000000000000000000001452426057600224375ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/sys/fs/file-nr000066400000000000000000000000171452426057600237140ustar00rootroot000000000000001024 0 1631329 node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/000077500000000000000000000000001452426057600233075ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/pid_max000066400000000000000000000000041452426057600246450ustar00rootroot00000000000000123 node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/random/000077500000000000000000000000001452426057600245675ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/random/entropy_avail000066400000000000000000000000051452426057600273610ustar00rootroot000000000000001337 node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/random/poolsize000066400000000000000000000000051452426057600263510ustar00rootroot000000000000004096 
node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/seccomp/000077500000000000000000000000001452426057600247405ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/seccomp/actions_avail000066400000000000000000000000771452426057600275030ustar00rootroot00000000000000kill_process kill_thread trap errno user_notif trace log allow node_exporter-1.7.0/collector/fixtures/proc/sys/kernel/threads-max000066400000000000000000000000041452426057600254410ustar00rootroot000000000000007801node_exporter-1.7.0/collector/fixtures/proc/sys/net/000077500000000000000000000000001452426057600226155ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/sys/net/netfilter/000077500000000000000000000000001452426057600246115ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/proc/sys/net/netfilter/nf_conntrack_count000066400000000000000000000000041452426057600304030ustar00rootroot00000000000000123 node_exporter-1.7.0/collector/fixtures/proc/sys/net/netfilter/nf_conntrack_max000066400000000000000000000000061452426057600300420ustar00rootroot0000000000000065536 node_exporter-1.7.0/collector/fixtures/proc/sys/pid_max000066400000000000000000000000041452426057600233650ustar00rootroot00000000000000123 node_exporter-1.7.0/collector/fixtures/proc/sys/threads-max000066400000000000000000000000041452426057600241610ustar00rootroot000000000000007801node_exporter-1.7.0/collector/fixtures/proc/vmstat000066400000000000000000000051161452426057600224550ustar00rootroot00000000000000nr_free_pages 977769 nr_alloc_batch 4158 nr_inactive_anon 125031 nr_active_anon 622512 nr_inactive_file 92317 nr_active_file 324014 nr_unevictable 12 nr_mlock 12 nr_anon_pages 713633 nr_mapped 118021 nr_file_pages 450840 nr_dirty 21 nr_writeback 0 nr_slab_reclaimable 85763 nr_slab_unreclaimable 431112 nr_page_table_pages 12504 nr_kernel_stack 1156 nr_overhead 4956 nr_unstable 0 nr_bounce 0 nr_vmscan_write 35050 nr_vmscan_immediate_reclaim 27 nr_writeback_temp 0 nr_isolated_anon 0 nr_isolated_file 0 nr_shmem 20623 nr_dirtied 11127183 nr_written 11122061 nr_pages_scanned 0 numa_hit 2601972389 numa_miss 0 numa_foreign 0 numa_interleave 32353 numa_local 2601972389 numa_other 0 workingset_refault 157066 workingset_activate 104270 workingset_nodereclaim 0 nr_anon_transparent_hugepages 556 nr_free_cma 0 nr_dirty_threshold 270390 nr_dirty_background_threshold 135030 pgpgin 7344136 pgpgout 1541180581 pswpin 1476 pswpout 35045 pgalloc_dma 12 pgalloc_dma32 611781566 pgalloc_normal 2287227526 pgalloc_movable 0 pgfree 2938719870 pgactivate 152952989 pgdeactivate 898450 pgfault 2320168809 pgmajfault 507162 pgrefill_dma 0 pgrefill_dma32 186367 pgrefill_normal 603970 pgrefill_movable 0 pgsteal_kswapd_dma 0 pgsteal_kswapd_dma32 78783 pgsteal_kswapd_normal 254128 pgsteal_kswapd_movable 0 pgsteal_direct_dma 0 pgsteal_direct_dma32 44 pgsteal_direct_normal 6484 pgsteal_direct_movable 0 pgscan_kswapd_dma 0 pgscan_kswapd_dma32 107656 pgscan_kswapd_normal 358784 pgscan_kswapd_movable 0 pgscan_direct_dma 0 pgscan_direct_dma32 67 pgscan_direct_normal 6796 pgscan_direct_movable 0 pgscan_direct_throttle 0 zone_reclaim_failed 0 pginodesteal 412258 slabs_scanned 14355346 kswapd_inodesteal 288891 kswapd_low_wmark_hit_quickly 109 kswapd_high_wmark_hit_quickly 45 pageoutrun 247 allocstall 83165 pgrotated 35014 drop_pagecache 0 drop_slab 0 numa_pte_updates 0 numa_huge_pte_updates 0 numa_hint_faults 0 numa_hint_faults_local 0 numa_pages_migrated 0 pgmigrate_success 37070309 pgmigrate_fail 36815 compact_migrate_scanned 830267783 
compact_free_scanned 12336622550 compact_isolated 82707414 compact_stall 210959 compact_fail 164840 compact_success 46119 htlb_buddy_alloc_success 0 htlb_buddy_alloc_fail 0 unevictable_pgs_culled 2188 unevictable_pgs_scanned 0 unevictable_pgs_rescued 3962 unevictable_pgs_mlocked 3994 unevictable_pgs_munlocked 3968 unevictable_pgs_cleared 14 unevictable_pgs_stranded 14 thp_fault_alloc 142261 thp_fault_fallback 98119 thp_collapse_alloc 88421 thp_collapse_alloc_failed 20954 thp_split 69984 thp_zero_page_alloc 9 thp_zero_page_alloc_failed 20 balloon_inflate 0 balloon_deflate 0 balloon_migrate 0 oom_kill 0 node_exporter-1.7.0/collector/fixtures/proc/zoneinfo000066400000000000000000000141321452426057600227640ustar00rootroot00000000000000Node 0, zone DMA per-node stats nr_inactive_anon 95612 nr_active_anon 1175853 nr_inactive_file 723339 nr_active_file 688810 nr_unevictable 213111 nr_slab_reclaimable 121763 nr_slab_unreclaimable 56182 nr_isolated_anon 0 nr_isolated_file 0 workingset_nodes 0 workingset_refault 0 workingset_activate 0 workingset_restore 0 workingset_nodereclaim 0 nr_anon_pages 1156608 nr_mapped 423143 nr_file_pages 1740118 nr_dirty 103 nr_writeback 0 nr_writeback_temp 0 nr_shmem 330517 nr_shmem_hugepages 0 nr_shmem_pmdmapped 0 nr_file_hugepages 0 nr_file_pmdmapped 0 nr_anon_transparent_hugepages 0 nr_vmscan_write 0 nr_vmscan_immediate_reclaim 0 nr_dirtied 1189097 nr_written 1181554 nr_kernel_misc_reclaimable 0 nr_foll_pin_acquired 3 nr_foll_pin_released 3 pages free 2949 min 8 low 11 high 14 spanned 4095 present 3997 managed 3973 protection: (0, 2039, 31932, 31932, 31932) nr_free_pages 2949 nr_zone_inactive_anon 0 nr_zone_active_anon 0 nr_zone_inactive_file 0 nr_zone_active_file 0 nr_zone_unevictable 0 nr_zone_write_pending 0 nr_mlock 0 nr_page_table_pages 0 nr_kernel_stack 0 nr_bounce 0 nr_zspages 0 nr_free_cma 0 numa_hit 1 numa_miss 0 numa_foreign 0 numa_interleave 1 numa_local 1 numa_other 0 pagesets cpu: 0 count: 0 high: 0 batch: 1 vm stats threshold: 8 cpu: 1 count: 0 high: 0 batch: 1 vm stats threshold: 8 cpu: 2 count: 0 high: 0 batch: 1 vm stats threshold: 8 cpu: 3 count: 0 high: 0 batch: 1 vm stats threshold: 8 cpu: 4 count: 0 high: 0 batch: 1 vm stats threshold: 8 cpu: 5 count: 0 high: 0 batch: 1 vm stats threshold: 8 cpu: 6 count: 0 high: 0 batch: 1 vm stats threshold: 8 cpu: 7 count: 0 high: 0 batch: 1 vm stats threshold: 8 node_unreclaimable: 0 start_pfn: 1 Node 0, zone DMA32 pages free 528427 min 1078 low 1600 high 2122 spanned 1044480 present 546847 managed 530339 protection: (0, 0, 29893, 29893, 29893) nr_free_pages 528427 nr_zone_inactive_anon 0 nr_zone_active_anon 0 nr_zone_inactive_file 0 nr_zone_active_file 0 nr_zone_unevictable 0 nr_zone_write_pending 0 nr_mlock 0 nr_page_table_pages 0 nr_kernel_stack 0 nr_bounce 0 nr_zspages 0 nr_free_cma 0 numa_hit 13 numa_miss 0 numa_foreign 0 numa_interleave 1 numa_local 13 numa_other 0 pagesets cpu: 0 count: 357 high: 378 batch: 63 vm stats threshold: 48 cpu: 1 count: 0 high: 378 batch: 63 vm stats threshold: 48 cpu: 2 count: 338 high: 378 batch: 63 vm stats threshold: 48 cpu: 3 count: 0 high: 378 batch: 63 vm stats threshold: 48 cpu: 4 count: 62 high: 378 batch: 63 vm stats threshold: 48 cpu: 5 count: 63 high: 378 batch: 63 vm stats threshold: 48 cpu: 6 count: 0 high: 378 batch: 63 vm stats threshold: 48 cpu: 7 count: 63 high: 378 batch: 63 vm stats threshold: 48 node_unreclaimable: 0 start_pfn: 4096 Node 0, zone Normal pages free 4539739 min 15809 low 23461 high 31113 spanned 7806976 present 7806976 managed 7654794 
protection: (0, 0, 0, 0, 0) nr_free_pages 4539739 nr_zone_inactive_anon 95612 nr_zone_active_anon 1175853 nr_zone_inactive_file 723339 nr_zone_active_file 688810 nr_zone_unevictable 213111 nr_zone_write_pending 103 nr_mlock 12 nr_page_table_pages 13921 nr_kernel_stack 18864 nr_bounce 0 nr_zspages 0 nr_free_cma 0 numa_hit 62836441 numa_miss 0 numa_foreign 0 numa_interleave 23174 numa_local 62836441 numa_other 0 pagesets cpu: 0 count: 351 high: 378 batch: 63 vm stats threshold: 72 cpu: 1 count: 112 high: 378 batch: 63 vm stats threshold: 72 cpu: 2 count: 368 high: 378 batch: 63 vm stats threshold: 72 cpu: 3 count: 358 high: 378 batch: 63 vm stats threshold: 72 cpu: 4 count: 304 high: 378 batch: 63 vm stats threshold: 72 cpu: 5 count: 112 high: 378 batch: 63 vm stats threshold: 72 cpu: 6 count: 488 high: 378 batch: 63 vm stats threshold: 72 cpu: 7 count: 342 high: 378 batch: 63 vm stats threshold: 72 node_unreclaimable: 0 start_pfn: 1048576 Node 0, zone Movable pages free 0 min 0 low 0 high 0 spanned 0 present 0 managed 0 protection: (0, 0, 0, 0, 0) Node 0, zone Device pages free 0 min 0 low 0 high 0 spanned 0 present 0 managed 0 protection: (0, 0, 0, 0, 0)node_exporter-1.7.0/collector/fixtures/qdisc/000077500000000000000000000000001452426057600213515ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/qdisc/results.json000066400000000000000000000004441452426057600237470ustar00rootroot00000000000000[ { "IfaceName": "wlan0", "Bytes": 42, "Packets": 42, "Requeues": 1, "Kind": "fq", "Drops": 1 }, { "IfaceName": "eth0", "Bytes": 83, "Packets": 83, "Requeues": 2, "Kind": "pfifo_fast" } ] node_exporter-1.7.0/collector/fixtures/sys.ttar000066400000000000000000005352651452426057600220000ustar00rootroot00000000000000# Archive created by ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys Directory: sys Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/bus Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/bus/cpu Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/bus/cpu/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/bus/cpu/devices/cpu0 SymlinkTo: ../../../devices/system/cpu/cpu0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/bus/cpu/devices/cpu1 SymlinkTo: ../../../devices/system/cpu/cpu1 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/bus/cpu/devices/cpu2 SymlinkTo: ../../../devices/system/cpu/cpu2 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/bus/cpu/devices/cpu3 SymlinkTo: ../../../devices/system/cpu/cpu3 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/bus/node Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/bus/node/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/bus/node/devices/node0 SymlinkTo: ../../../devices/system/node/node0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/bus/node/devices/node1 SymlinkTo: ../../../devices/system/node/node1 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
Directory: sys/class/dmi Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/dmi/id Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/bios_date Lines: 1 04/12/2021 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/bios_release Lines: 1 2.2 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/bios_vendor Lines: 1 Dell Inc. Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/bios_version Lines: 1 2.2.4 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/board_name Lines: 1 07PXPY Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/board_serial Lines: 1 .7N62AI2.GRTCL6944100GP. Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/board_vendor Lines: 1 Dell Inc. Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/board_version Lines: 1 A01 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/chassis_asset_tag Lines: 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/chassis_serial Lines: 1 7N62AI2 Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/chassis_type Lines: 1 23 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/chassis_vendor Lines: 1 Dell Inc. Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/chassis_version Lines: 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/modalias Lines: 1 dmi:bvnDellInc.:bvr2.2.4:bd04/12/2021:br2.2:svnDellInc.:pnPowerEdgeR6515:pvr:rvnDellInc.:rn07PXPY:rvrA01:cvnDellInc.:ct23:cvr: Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/product_family Lines: 1 PowerEdge Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/product_name Lines: 1 PowerEdge R6515 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/product_serial Lines: 1 7N62AI2 Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/product_sku Lines: 1 SKU=NotProvided;ModelName=PowerEdge R6515 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/product_uuid Lines: 1 83340ca8-cb49-4474-8c29-d2088ca84dd9 Mode: 400 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/product_version Lines: 1 �[� Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/sys_vendor Lines: 1 Dell Inc. 
Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/dmi/id/uevent Lines: 1 MODALIAS=dmi:bvnDellInc.:bvr2.2.4:bd04/12/2021:br2.2:svnDellInc.:pnPowerEdgeR6515:pvr:rvnDellInc.:rn07PXPY:rvrA01:cvnDellInc.:ct23:cvr: Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/fc_host Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/fc_host/host0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/dev_loss_tmo Lines: 1 30 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/fabric_name Lines: 1 0x0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/node_name Lines: 1 0x2000e0071bce95f2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/port_id Lines: 1 0x000002 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/port_name Lines: 1 0x1000e0071bce95f2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/port_state Lines: 1 Online Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/port_type Lines: 1 Point-To-Point (direct nport connection) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/speed Lines: 1 16 Gbit Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/fc_host/host0/statistics Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/dumped_frames Lines: 1 0xffffffffffffffff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/error_frames Lines: 1 0x0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/fcp_packet_aborts Lines: 1 0x13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/invalid_crc_count Lines: 1 0x2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/invalid_tx_word_count Lines: 1 0x8 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/link_failure_count Lines: 1 0x9 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/loss_of_signal_count Lines: 1 0x11 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/loss_of_sync_count Lines: 1 0x10 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/nos_count Lines: 1 0x12 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/rx_frames Lines: 1 0x3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- Path: sys/class/fc_host/host0/statistics/rx_words Lines: 1 0x4 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/seconds_since_last_reset Lines: 1 0x7 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/tx_frames Lines: 1 0x5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/statistics/tx_words Lines: 1 0x6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/supported_classes Lines: 1 Class 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/supported_speeds Lines: 1 4 Gbit, 8 Gbit, 16 Gbit Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/fc_host/host0/symbolic_name Lines: 1 Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/hwmon Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon0 SymlinkTo: ../../devices/platform/coretemp.0/hwmon/hwmon0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon1 SymlinkTo: ../../devices/platform/coretemp.1/hwmon/hwmon1 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon2 SymlinkTo: ../../devices/platform/applesmc.768/hwmon/hwmon2 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon3 SymlinkTo: ../../devices/platform/nct6775.656/hwmon/hwmon3 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/hwmon/hwmon4 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp1_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp1_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp1_input Lines: 1 55000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp1_label Lines: 1 foosensor Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp1_max Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp2_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp2_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp2_input Lines: 1 54000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp2_label Lines: 1 foosensor Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon4/temp2_max Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/hwmon/hwmon5 
SymlinkTo: ../../devices/platform/bogus.0/hwmon/hwmon5/ # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/i40iw0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/board_id Lines: 1 I40IW Board ID Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/fw_ver Lines: 1 0.2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/hca_type Lines: 1 I40IW Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/i40iw0/ports Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/i40iw0/ports/1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/i40iw0/ports/1/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/VL15_dropped Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/excessive_buffer_overrun_errors Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/link_downed Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/link_error_recovery Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/local_link_integrity_errors Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_constraint_errors Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_data Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_errors Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_packets Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_remote_physical_errors Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_rcv_switch_relay_errors Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_constraint_errors Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_data Lines: 1 N/A (no PMA) Mode: 644 
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_discards Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_packets Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/port_xmit_wait Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/counters/symbol_error Lines: 1 N/A (no PMA) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/phys_state Lines: 1 5: LinkUp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/rate Lines: 1 10 Gb/sec (4X) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/i40iw0/ports/1/state Lines: 1 4: ACTIVE Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/board_id Lines: 1 SM_1141000001000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/fw_ver Lines: 1 2.31.5050 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/hca_type Lines: 1 MT4099 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports/1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports/1/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/link_downed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/multicast_rcv_packets Lines: 1 93 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/multicast_xmit_packets Lines: 1 16 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data Lines: 1 4631917 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_discards Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors Lines: 1 0 Mode: 
644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets Lines: 1 6825908347 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data Lines: 1 3733440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards Lines: 1 5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets Lines: 1 6235865 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait Lines: 1 4294967295 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/unicast_rcv_packets Lines: 1 61148 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters/unicast_xmit_packets Lines: 1 61239 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports/1/counters_ext Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_multicast_rcv_packets Lines: 1 93 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_multicast_xmit_packets Lines: 1 16 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_rcv_data_64 Lines: 1 4631917 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_rcv_packets_64 Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_unicast_rcv_packets Lines: 1 61148 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_unicast_xmit_packets Lines: 1 61239 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_xmit_data_64 Lines: 1 3733440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/counters_ext/port_xmit_packets_64 Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/phys_state Lines: 1 5: LinkUp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/rate Lines: 1 40 Gb/sec (4X QDR) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/1/state Lines: 1 4: ACTIVE Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- Directory: sys/class/infiniband/mlx4_0/ports/2 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports/2/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/link_downed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/multicast_rcv_packets Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/multicast_xmit_packets Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/unicast_rcv_packets Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters/unicast_xmit_packets Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/infiniband/mlx4_0/ports/2/counters_ext Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_multicast_rcv_packets Lines: 1 93 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_multicast_xmit_packets Lines: 1 16 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_rcv_data_64 Lines: 1 4631917 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_rcv_packets_64 Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_unicast_rcv_packets Lines: 1 61148 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_unicast_xmit_packets Lines: 1 61239 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_xmit_data_64 Lines: 1 3733440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/counters_ext/port_xmit_packets_64 Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/phys_state Lines: 1 5: LinkUp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/infiniband/mlx4_0/ports/2/rate Lines: 1 40 Gb/sec (4X QDR) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- Path: sys/class/infiniband/mlx4_0/ports/2/state Lines: 1 4: ACTIVE Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/bond0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/addr_assign_type Lines: 1 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/addr_len Lines: 1 6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/address Lines: 1 01:01:01:01:01:01 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/bond0/bonding Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/bonding/slaves Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/broadcast Lines: 1 ff:ff:ff:ff:ff:ff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/carrier Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/carrier_changes Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/carrier_down_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/carrier_up_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/dev_id Lines: 1 0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/dormant Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/duplex Lines: 1 full Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/flags Lines: 1 0x1303 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/ifalias Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/ifindex Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/iflink Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/link_mode Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/mtu Lines: 1 1500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/name_assign_type Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/netdev_group Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/operstate Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/phys_port_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - Path: sys/class/net/bond0/phys_port_name Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/phys_switch_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/speed Lines: 1 -1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/tx_queue_len Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bond0/type Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/bonding_masters Lines: 1 bond0 dmz int Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/dmz Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/addr_assign_type Lines: 1 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/addr_len Lines: 1 6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/address Lines: 1 01:01:01:01:01:01 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/dmz/bonding Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/bonding/slaves Lines: 1 eth0 eth4 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/broadcast Lines: 1 ff:ff:ff:ff:ff:ff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/carrier Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/carrier_changes Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/carrier_down_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/carrier_up_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/dev_id Lines: 1 0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/dormant Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/duplex Lines: 1 full Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/flags Lines: 1 0x1303 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/ifalias Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/ifindex Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/iflink Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/link_mode Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/mtu Lines: 1 1500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - Path: sys/class/net/dmz/name_assign_type Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/netdev_group Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/operstate Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/phys_port_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/phys_port_name Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/phys_switch_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/dmz/slave_eth0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/dmz/slave_eth0/bonding_slave Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/slave_eth0/bonding_slave/mii_status Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/slave_eth0/operstate Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/dmz/slave_eth4 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/dmz/slave_eth4/bonding_slave Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/slave_eth4/bonding_slave/mii_status Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/slave_eth4/operstate Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/speed Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/tx_queue_len Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/dmz/type Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/eth0 SymlinkTo: ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/int Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/addr_assign_type Lines: 1 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/addr_len Lines: 1 6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/address Lines: 1 01:01:01:01:01:01 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/int/bonding Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/bonding/slaves Lines: 1 eth5 eth1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/broadcast Lines: 1 ff:ff:ff:ff:ff:ff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/class/net/int/carrier Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/carrier_changes Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/carrier_down_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/carrier_up_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/dev_id Lines: 1 0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/dormant Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/duplex Lines: 1 full Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/flags Lines: 1 0x1303 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/ifalias Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/ifindex Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/iflink Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/link_mode Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/mtu Lines: 1 1500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/name_assign_type Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/netdev_group Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/operstate Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/phys_port_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/phys_port_name Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/phys_switch_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/int/slave_eth1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/int/slave_eth1/bonding_slave Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/slave_eth1/bonding_slave/mii_status Lines: 1 down Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/slave_eth1/operstate Lines: 1 down Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/int/slave_eth5 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/net/int/slave_eth5/bonding_slave Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/slave_eth5/bonding_slave/mii_status Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - Path: sys/class/net/int/slave_eth5/operstate Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/speed Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/tx_queue_len Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/net/int/type Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/nvme Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/nvme/nvme0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/nvme/nvme0/firmware_rev Lines: 1 1B2QEXP7 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/nvme/nvme0/model Lines: 1 Samsung SSD 970 PRO 512GB Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/nvme/nvme0/serial Lines: 1 S680HF8N190894I Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/nvme/nvme0/state Lines: 1 live Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/power_supply Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/power_supply/AC Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/online Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/power_supply/AC/power Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/async Lines: 1 disabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/autosuspend_delay_ms Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/control Lines: 1 auto Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/runtime_active_kids Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/runtime_active_time Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/runtime_enabled Lines: 1 disabled Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/runtime_status Lines: 1 unsupported Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/runtime_suspended_time Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/runtime_usage Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup Lines: 1 enabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_abort_count Lines: 1 0 Mode: 
444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_active Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_active_count Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_count Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_expire_count Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_last_time_ms Lines: 1 7888 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_max_time_ms Lines: 1 2 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_prevent_sleep_time_ms Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/power/wakeup_total_time_ms Lines: 1 2 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/type Lines: 1 Mains Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/AC/uevent Lines: 2 POWER_SUPPLY_NAME=AC POWER_SUPPLY_ONLINE=0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/power_supply/BAT0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/alarm Lines: 1 2253000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/capacity Lines: 1 81 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/capacity_level Lines: 1 Normal Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/charge_start_threshold Lines: 1 95 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/charge_stop_threshold Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/cycle_count Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/energy_full Lines: 1 45070000 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/energy_full_design Lines: 1 47520000 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/energy_now Lines: 1 36580000 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/manufacturer Lines: 1 LGC Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/model_name Lines: 1 LNV-45N1�� Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/power_supply/BAT0/power Mode: 755 # ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/async Lines: 1 disabled Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/autosuspend_delay_ms Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/control Lines: 1 auto Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/runtime_active_kids Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/runtime_active_time Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/runtime_enabled Lines: 1 disabled Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/runtime_status Lines: 1 unsupported Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/runtime_suspended_time Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power/runtime_usage Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/power_now Lines: 1 5002000 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/present Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/serial_number Lines: 1 38109 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/status Lines: 1 Discharging Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/technology Lines: 1 Li-ion Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/type Lines: 1 Battery Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/uevent Lines: 16 POWER_SUPPLY_NAME=BAT0 POWER_SUPPLY_STATUS=Discharging POWER_SUPPLY_PRESENT=1 POWER_SUPPLY_TECHNOLOGY=Li-ion POWER_SUPPLY_CYCLE_COUNT=0 POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 POWER_SUPPLY_VOLTAGE_NOW=11660000 POWER_SUPPLY_POWER_NOW=5002000 POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 POWER_SUPPLY_ENERGY_FULL=45070000 POWER_SUPPLY_ENERGY_NOW=36580000 POWER_SUPPLY_CAPACITY=81 POWER_SUPPLY_CAPACITY_LEVEL=Normal POWER_SUPPLY_MODEL_NAME=LNV-45N1 POWER_SUPPLY_MANUFACTURER=LGC POWER_SUPPLY_SERIAL_NUMBER=38109 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/voltage_min_design Lines: 1 10800000 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/power_supply/BAT0/voltage_now Lines: 1 11660000 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/powercap Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/powercap/intel-rapl Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - Path: sys/class/powercap/intel-rapl/enabled Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl/uevent Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/powercap/intel-rapl:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw Lines: 1 95000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_0_name Lines: 1 long_term Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw Lines: 1 4090000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_0_time_window_us Lines: 1 999424 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_1_name Lines: 1 short_term Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw Lines: 1 4090000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/constraint_1_time_window_us Lines: 1 2440 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/enabled Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/energy_uj Lines: 1 240422366267 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/max_energy_range_uj Lines: 1 262143328850 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/name Lines: 1 package-0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0/uevent Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/powercap/intel-rapl:0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/constraint_0_name Lines: 1 long_term Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us Lines: 1 976 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/enabled Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/class/powercap/intel-rapl:0:0/energy_uj Lines: 1 118821284256 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/max_energy_range_uj Lines: 1 262143328850 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/name Lines: 1 core Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/powercap/intel-rapl:0:0/uevent Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/scsi_tape Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/nst0 SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/nst0a SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/nst0l SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/nst0m SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/st0 SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/st0a SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/st0l SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/scsi_tape/st0m SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/class/thermal Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/thermal/cooling_device0 SymlinkTo: ../../devices/virtual/thermal/cooling_device0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/class/thermal/thermal_zone0 SymlinkTo: ../../devices/virtual/thermal/thermal_zone0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns 
Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats Mode: 755 # ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight Lines: 1 1EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns Lines: 1 9247011087720EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt Lines: 1 1409EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt Lines: 1 979383912EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt Lines: 1 3741EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns Lines: 1 33788355744EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt Lines: 1 19EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt Lines: 1 1496246784000EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt Lines: 1 53772916EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns Lines: 1 5233597394395EOF Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:03.0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_assign_type Lines: 1 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/addr_len Lines: 1 6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/address Lines: 1 01:01:01:01:01:01 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/broadcast Lines: 1 ff:ff:ff:ff:ff:ff Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_changes Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_down_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/carrier_up_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dev_id Lines: 1 0x20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/dormant Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/duplex Lines: 1 full Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/flags Lines: 1 0x1303 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifalias Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/ifindex Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/iflink Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/link_mode Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/mtu Lines: 1 1500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/name_assign_type Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/netdev_group Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/operstate Lines: 1 up Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_port_name Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/phys_switch_id Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/speed Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/tx_queue_len Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:03.0/0000:03:00.0/net/eth0/type Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits Lines: 1 289 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: 
sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits Lines: 1 546 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/writeback_rate_debug Lines: 7 rate: 1.1M/sec dirty: 20.4G target: 20.4G proportional: 427.5k integral: 790.0k change: 321.5k/sec next io: 17ms Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written Lines: 1 512 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats Lines: 5 Unused: 99% Metadata: 0% Average: 10473 Sectors per Q: 64 Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/applesmc.768 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_input Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_label Lines: 1 Left side Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_manual Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_max Lines: 1 6156 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_min Lines: 1 2160 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_output Lines: 1 2160 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan1_safe Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_input Lines: 1 1998 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_label Lines: 1 Right side Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_manual Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_max Lines: 1 5700 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_min Lines: 1 2000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_output Lines: 1 2000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/fan2_safe Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/applesmc.768/hwmon Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/applesmc.768/hwmon/hwmon2 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/devices/platform/applesmc.768/hwmon/hwmon2/device SymlinkTo: ../../../applesmc.768 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/applesmc.768/name Lines: 1 applesmc Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/bogus.0 Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/bogus.0/hwmon Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/bogus.0/hwmon/hwmon5 Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_input Lines: 1 55000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_label Lines: 1 Physical id 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/bogus.0/hwmon/hwmon5/bogus1_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/bogus.0/hwmon/hwmon5/name Lines: 1 bogus Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/coretemp.0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/coretemp.0/hwmon Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/coretemp.0/hwmon/hwmon0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/device SymlinkTo: ../../../coretemp.0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/name Lines: 1 coretemp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_input Lines: 1 55000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_label Lines: 1 Physical id 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp1_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_crit_alarm Lines: 1 0 Mode: 
644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_input Lines: 1 54000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_label Lines: 1 Core 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp2_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_input Lines: 1 52000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_label Lines: 1 Core 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp3_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_input Lines: 1 53000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_label Lines: 1 Core 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp4_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_input Lines: 1 50000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_label Lines: 1 Core 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.0/hwmon/hwmon0/temp5_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/coretemp.1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/coretemp.1/hwmon Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/coretemp.1/hwmon/hwmon1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/devices/platform/coretemp.1/hwmon/hwmon1/device SymlinkTo: ../../../coretemp.1 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/name Lines: 1 coretemp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_input Lines: 1 55000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_label Lines: 1 Physical id 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp1_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_input Lines: 1 54000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_label Lines: 1 Core 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp2_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_input Lines: 1 52000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_label Lines: 1 Core 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp3_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_input Lines: 1 53000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_label Lines: 1 Core 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/devices/platform/coretemp.1/hwmon/hwmon1/temp4_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_crit Lines: 1 100000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_crit_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_input Lines: 1 50000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_label Lines: 1 Core 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/coretemp.1/hwmon/hwmon1/temp5_max Lines: 1 84000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/nct6775.656 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/nct6775.656/hwmon Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/platform/nct6775.656/hwmon/hwmon3 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_beep Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_input Lines: 1 1098 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_min Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_pulses Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_target Lines: 1 27000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/fan2_tolerance Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_alarm Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_beep Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_input Lines: 1 792 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_max Lines: 1 1744 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in0_min Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_alarm Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_beep Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_input Lines: 1 1024 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_max Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/in1_min Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion0_alarm Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion0_beep Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion1_alarm Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/intrusion1_beep Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/name Lines: 1 nct6779 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point1_pwm Lines: 1 153 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point1_temp Lines: 1 30000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point2_pwm Lines: 1 255 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point2_temp Lines: 1 70000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point3_pwm Lines: 1 255 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point3_temp Lines: 1 70000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point4_pwm Lines: 1 255 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point4_temp Lines: 1 70000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point5_pwm Lines: 1 255 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_auto_point5_temp Lines: 1 75000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_crit_temp_tolerance Lines: 1 2000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_enable Lines: 1 5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_floor Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_mode Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_start Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_step_down_time Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_step_up_time Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_stop_time Lines: 1 6000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_target_temp Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_temp_sel Lines: 1 7 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_temp_tolerance Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_duty_base Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_duty_step Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_sel Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step_base Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/platform/nct6775.656/hwmon/hwmon3/pwm1_weight_temp_step_tol Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/clocksource Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/clocksource/clocksource0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/clocksource/clocksource0/available_clocksource Lines: 1 tsc hpet acpi_pm Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/clocksource/clocksource0/current_clocksource Lines: 1 tsc Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu0 Mode: 755 # ttar - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu0/cpufreq Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_transition_latency Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/related_cpus Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors Lines: 1 performance powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq Lines: 1 1699981 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_driver Lines: 1 intel_pstate Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_governor Lines: 1 powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq Lines: 1 3700000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq Lines: 1 800000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed Lines: 1 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu0/thermal_throttle Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count Lines: 1 5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count Lines: 1 30 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu0/topology Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/topology/core_id Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu0/topology/physical_package_id Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu1/cpufreq Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/related_cpus Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors Lines: 1 performance powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_cur_freq Lines: 1 1699981 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_driver Lines: 1 intel_pstate Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_governor Lines: 1 powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq Lines: 1 3700000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq Lines: 1 800000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed Lines: 1 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu1/thermal_throttle Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count Lines: 1 30 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu1/topology Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/topology/core_id Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu1/topology/physical_package_id Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu2 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu2/cpufreq Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/cpuinfo_transition_latency Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/related_cpus Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_available_governors Lines: 1 performance powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_cur_freq Lines: 1 8000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_driver Lines: 1 intel_pstate Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_governor Lines: 1 powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_max_freq Lines: 1 4200000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_min_freq Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/cpufreq/scaling_setspeed Lines: 1 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: 
sys/devices/system/cpu/cpu2/thermal_throttle Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/thermal_throttle/core_throttle_count Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/thermal_throttle/package_throttle_count Lines: 1 6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu2/topology Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/topology/core_id Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu2/topology/physical_package_id Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu3 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu3/cpufreq Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/cpuinfo_transition_latency Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/related_cpus Lines: 1 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_available_governors Lines: 1 performance powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_cur_freq Lines: 1 8000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_driver Lines: 1 intel_pstate Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_governor Lines: 1 powersave Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_max_freq Lines: 1 4200000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_min_freq Lines: 1 1000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/cpufreq/scaling_setspeed Lines: 1 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu3/thermal_throttle Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/thermal_throttle/core_throttle_count Lines: 1 9 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/thermal_throttle/package_throttle_count Lines: 1 6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/cpu3/topology Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/topology/core_id Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/cpu3/topology/physical_package_id Lines: 1 1 Mode: 644 # 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/isolated Lines: 1 1,3-5,9 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/offline Lines: 1 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/online Lines: 1 0-3 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/cpu/vulnerabilities Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/vulnerabilities/itlb_multihit Lines: 1 Not affected Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/vulnerabilities/mds Lines: 1 Vulnerable Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/vulnerabilities/retbleed Lines: 1 Mitigation: untrained return thunk; SMT enabled with STIBP protection Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/vulnerabilities/spectre_v1 Lines: 1 Mitigation: usercopy/swapgs barriers and __user pointer sanitization Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/cpu/vulnerabilities/spectre_v2 Lines: 1 Mitigation: Retpolines, IBPB: conditional, STIBP: always-on, RSB filling, PBRSB-eIBRS: Not affected Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/edac Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/edac/mc Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/edac/mc/mc0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/edac/mc/mc0/ce_count Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/edac/mc/mc0/ce_noinfo_count Lines: 1 2 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/edac/mc/mc0/csrow0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/edac/mc/mc0/csrow0/ce_count Lines: 1 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/edac/mc/mc0/csrow0/ue_count Lines: 1 4 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/edac/mc/mc0/ue_count Lines: 1 5 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/edac/mc/mc0/ue_noinfo_count Lines: 1 6 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/node Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/node/node0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node0/cpu0 SymlinkTo: ../../cpu/cpu0 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node0/cpu1 SymlinkTo: ../../cpu/cpu1 # 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node0/cpulist Lines: 1 0-1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node0/meminfo Lines: 29 Node 0 MemTotal: 134182340 kB Node 0 MemFree: 53030372 kB Node 0 MemUsed: 81151968 kB Node 0 Active: 5456380 kB Node 0 Inactive: 59150184 kB Node 0 Active(anon): 691324 kB Node 0 Inactive(anon): 340456 kB Node 0 Active(file): 4765056 kB Node 0 Inactive(file): 58809728 kB Node 0 Unevictable: 0 kB Node 0 Mlocked: 0 kB Node 0 Dirty: 20 kB Node 0 Writeback: 0 kB Node 0 FilePages: 70170916 kB Node 0 Mapped: 894240 kB Node 0 AnonPages: 788196 kB Node 0 Shmem: 47860 kB Node 0 KernelStack: 34016 kB Node 0 PageTables: 143304 kB Node 0 NFS_Unstable: 0 kB Node 0 Bounce: 0 kB Node 0 WritebackTmp: 0 kB Node 0 Slab: 6654304 kB Node 0 SReclaimable: 4473124 kB Node 0 SUnreclaim: 2181180 kB Node 0 AnonHugePages: 147456 kB Node 0 HugePages_Total: 0 Node 0 HugePages_Free: 0 Node 0 HugePages_Surp: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node0/numastat Lines: 6 numa_hit 193460335812 numa_miss 12624528 numa_foreign 59858623300 interleave_hit 57146 local_node 193454780853 other_node 18179487 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/node/node1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node1/cpu2 SymlinkTo: ../../cpu/cpu2 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node1/cpu3 SymlinkTo: ../../cpu/cpu3 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node1/cpulist Lines: 1 2-3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node1/meminfo Lines: 29 Node 1 MemTotal: 134217728 kB Node 1 MemFree: 39634788 kB Node 1 MemUsed: 94582940 kB Node 1 Active: 5604496 kB Node 1 Inactive: 71450592 kB Node 1 Active(anon): 590464 kB Node 1 Inactive(anon): 285088 kB Node 1 Active(file): 5014032 kB Node 1 Inactive(file): 71165504 kB Node 1 Unevictable: 0 kB Node 1 Mlocked: 0 kB Node 1 Dirty: 120 kB Node 1 Writeback: 0 kB Node 1 FilePages: 83579188 kB Node 1 Mapped: 864112 kB Node 1 AnonPages: 671932 kB Node 1 Shmem: 87580 kB Node 1 KernelStack: 31104 kB Node 1 PageTables: 124272 kB Node 1 NFS_Unstable: 0 kB Node 1 Bounce: 0 kB Node 1 WritebackTmp: 0 kB Node 1 Slab: 7020716 kB Node 1 SReclaimable: 4614084 kB Node 1 SUnreclaim: 2406632 kB Node 1 AnonHugePages: 90112 kB Node 1 HugePages_Total: 0 Node 1 HugePages_Free: 0 Node 1 HugePages_Surp: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node1/numastat Lines: 6 numa_hit 326720946761 numa_miss 59858626709 numa_foreign 12624528 interleave_hit 57286 local_node 326719046550 other_node 59860526920 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/system/node/node2 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node2/cpulist Lines: 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node2/meminfo Lines: 29 Node 2 MemTotal: 
134217728 kB Node 2 MemFree: 39634788 kB Node 2 MemUsed: 94582940 kB Node 2 Active: 5604496 kB Node 2 Inactive: 71450592 kB Node 2 Active(anon): 590464 kB Node 2 Inactive(anon): 285088 kB Node 2 Active(file): 5014032 kB Node 2 Inactive(file): 71165504 kB Node 2 Unevictable: 0 kB Node 2 Mlocked: 0 kB Node 2 Dirty: 120 kB Node 2 Writeback: 0 kB Node 2 FilePages: 83579188 kB Node 2 Mapped: 864112 kB Node 2 AnonPages: 671932 kB Node 2 Shmem: 87580 kB Node 2 KernelStack: 31104 kB Node 2 PageTables: 124272 kB Node 2 NFS_Unstable: 0 kB Node 2 Bounce: 0 kB Node 2 WritebackTmp: 0 kB Node 2 Slab: 7020716 kB Node 2 SReclaimable: 4614084 kB Node 2 SUnreclaim: 2406632 kB Node 2 AnonHugePages: 90112 kB Node 2 HugePages_Total: 0 Node 2 HugePages_Free: 0 Node 2 HugePages_Surp: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/system/node/node2/numastat Lines: 6 numa_hit 26720946761 numa_miss 9858626709 numa_foreign 2624528 interleave_hit 7286 local_node 26719046550 other_node 9860526920 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/virtual Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/virtual/thermal Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/virtual/thermal/cooling_device0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/virtual/thermal/cooling_device0/cur_state Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/virtual/thermal/cooling_device0/max_state Lines: 1 3 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/virtual/thermal/cooling_device0/type Lines: 1 Processor Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/devices/virtual/thermal/thermal_zone0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/virtual/thermal/thermal_zone0/policy Lines: 1 step_wise Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/virtual/thermal/thermal_zone0/temp Lines: 1 12376 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/devices/virtual/thermal/thermal_zone0/type Lines: 1 cpu-thermal Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 SymlinkTo: ../../../devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 SymlinkTo: ../../../devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us Lines: 1 1305 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits Lines: 1 289 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits Lines: 1 546 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads Lines: 1 13 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used Lines: 1 808189952 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total Lines: 1 2147483648 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used Lines: 1 808189952 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes Lines: 1 2147483648 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes Lines: 1 808189952 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes Lines: 1 2147483648 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly Lines: 1 131072 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used Lines: 1 933888 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total Lines: 1 2147483648 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used Lines: 1 1867776 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags Lines: 1 4 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes Lines: 1 1073741824 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes Lines: 1 933888 Mode: 444 # ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes Lines: 1 1073741824 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used Lines: 1 16384 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used Lines: 1 32768 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags Lines: 1 2 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes Lines: 1 8388608 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes Lines: 1 16384 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes Lines: 1 8388608 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment Lines: 1 4096 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size Lines: 1 20971520 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size Lines: 1 20971520 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label Lines: 1 fixture Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid Lines: 1 0abb23a9-579b-43e6-ad30-227ef47fcb9d Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize Lines: 1 16384 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize Lines: 1 4096 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total Lines: 1 644087808 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes Lines: 1 644087808 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes Lines: 1 644087808 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly Lines: 1 262144 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used Lines: 1 114688 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total Lines: 1 429391872 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used Lines: 1 114688 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags Lines: 1 4 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes Lines: 1 429391872 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes Lines: 1 114688 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes Lines: 1 429391872 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used Lines: 1 16384 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used Lines: 1 16384 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags Lines: 1 2 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes Lines: 1 16384 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes Lines: 1 16777216 Mode: 444 # ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned Lines: 1 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment Lines: 1 4096 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22/size Lines: 1 20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23/size Lines: 1 20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24/size Lines: 1 20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25/size Lines: 1 20971520 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata Lines: 1 1 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid Lines: 1 7f07c59f-6136-449c-ab87-e1cf2328731b Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize Lines: 1 16384 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize Lines: 1 4096 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/xfs/sda1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/fs/xfs/sda1/stats Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/fs/xfs/sda1/stats/stats Lines: 24 extent_alloc 1 872 0 0 abt 0 0 0 0 blk_map 61 29 1 1 1 91 0 bmbt 0 0 0 0 dir 3 2 1 52 trans 4 40 0 ig 5 1 0 4 0 0 1 log 8 21 0 5821 4 push_ail 44 0 1102 15 0 2 0 2 0 2 xstrat 1 0 rw 28 0 attr 0 0 0 0 icluster 2 2 2 vnodes 4 0 0 0 1 1 1 0 buf 22 25 14 0 0 8 0 8 8 abtb2 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 abtc2 2 1 1 1 0 0 0 0 0 0 0 0 0 0 0 bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ibt2 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 fibt2 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 rmapbt 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 qm 0 0 0 0 0 0 0 0 xpc 3571712 3568056 0 debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/kernel Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/kernel/mm Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: sys/kernel/mm/ksm Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/full_scans Lines: 1 323 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/merge_across_nodes Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/pages_shared Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/pages_sharing Lines: 1 255 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/pages_to_scan Lines: 1 100 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/pages_unshared Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/pages_volatile Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/run Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: sys/kernel/mm/ksm/sleep_millisecs Lines: 1 20 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - node_exporter-1.7.0/collector/fixtures/textfile/000077500000000000000000000000001452426057600220725ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/client_side_timestamp.out000066400000000000000000000002461452426057600271720ustar00rootroot00000000000000# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 1 
node_exporter-1.7.0/collector/fixtures/textfile/client_side_timestamp/000077500000000000000000000000001452426057600264375ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/client_side_timestamp/metrics.prom000066400000000000000000000000751452426057600310060ustar00rootroot00000000000000metric_with_custom_timestamp 1 1441205977284 normal_metric 2 node_exporter-1.7.0/collector/fixtures/textfile/different_metric_types.out000066400000000000000000000037771452426057600273760ustar00rootroot00000000000000# HELP event_duration_seconds_total Query timings # TYPE event_duration_seconds_total summary event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06 event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 # HELP events_total this is a test metric # TYPE events_total counter events_total{foo="bar"} 10 events_total{foo="baz"} 20 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
# TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/different_metric_types/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/different_metric_types/000077500000000000000000000000001452426057600266275ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/different_metric_types/metrics.prom000066400000000000000000000032041452426057600311730ustar00rootroot00000000000000# HELP events_total this is a test metric # TYPE events_total counter events_total{foo="bar"} 10 events_total{foo="baz"} 20 # HELP event_duration_seconds_total Query timings # TYPE event_duration_seconds_total summary event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06 event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 node_exporter-1.7.0/collector/fixtures/textfile/glob_extra_dimension.out000066400000000000000000000075231452426057600270250ustar00rootroot00000000000000# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/histogram_extra_dimension/metrics.prom"} 1 node_textfile_mtime_seconds{file="fixtures/textfile/summary_extra_dimension/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 # HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. 
# TYPE prometheus_rule_evaluation_duration_seconds summary prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.9"} 0.001765451 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.99"} 0.018672076 prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="alerting"} 214.85081044700146 prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="alerting"} 185209 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.5"} 4.3132e-05 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.9"} 8.9295e-05 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.99"} 0.000193657 prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="recording"} 185091.01317759082 prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="recording"} 1.0020195e+08 prometheus_rule_evaluation_duration_seconds{handler="foo",rule_type="alerting",quantile="0.5"} 0.000571464 prometheus_rule_evaluation_duration_seconds_sum{handler="foo",rule_type="alerting"} 0 prometheus_rule_evaluation_duration_seconds_count{handler="foo",rule_type="alerting"} 0 # HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction # TYPE prometheus_tsdb_compaction_chunk_range histogram prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06 node_exporter-1.7.0/collector/fixtures/textfile/histogram.out000066400000000000000000000025271452426057600246260ustar00rootroot00000000000000# HELP node_textfile_mtime_seconds Unixtime mtime of 
textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/histogram/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 # HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction # TYPE prometheus_tsdb_compaction_chunk_range histogram prometheus_tsdb_compaction_chunk_range_bucket{le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="6.5536e+06"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count 1.691853e+06 node_exporter-1.7.0/collector/fixtures/textfile/histogram/000077500000000000000000000000001452426057600240675ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/histogram/metrics.prom000066400000000000000000000017471452426057600264450ustar00rootroot00000000000000# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction # TYPE prometheus_tsdb_compaction_chunk_range histogram prometheus_tsdb_compaction_chunk_range_bucket{le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="6.5536e+06"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count 1.691853e+06 node_exporter-1.7.0/collector/fixtures/textfile/histogram_extra_dimension.out000066400000000000000000000046731452426057600301020ustar00rootroot00000000000000# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
# TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/histogram_extra_dimension/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 # HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction # TYPE prometheus_tsdb_compaction_chunk_range histogram prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06 node_exporter-1.7.0/collector/fixtures/textfile/histogram_extra_dimension/000077500000000000000000000000001452426057600273375ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/histogram_extra_dimension/metrics.prom000066400000000000000000000040731452426057600317100ustar00rootroot00000000000000# HELP prometheus_tsdb_compaction_chunk_range Final time range of chunks on their first compaction # TYPE prometheus_tsdb_compaction_chunk_range histogram prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="6.5536e+06"} 1.691853e+06 
prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="bar",le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum{foo="bar"} 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count{foo="bar"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="100"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1600"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6400"} 0 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="25600"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="102400"} 7 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="409600"} 1.412839e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="1.6384e+06"} 1.69185e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="6.5536e+06"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="2.62144e+07"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_bucket{foo="baz",le="+Inf"} 1.691853e+06 prometheus_tsdb_compaction_chunk_range_sum{foo="baz"} 6.71393432189e+11 prometheus_tsdb_compaction_chunk_range_count{foo="baz"} 1.691853e+06 node_exporter-1.7.0/collector/fixtures/textfile/inconsistent_metrics.out000066400000000000000000000036671452426057600271050ustar00rootroot00000000000000# HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines{foo=""} 20 go_goroutines{foo="bar"} 229 # HELP http_requests_total Total number of HTTP requests made. # TYPE http_requests_total counter http_requests_total{baz="",code="200",foo="",handler="",method="get"} 11 http_requests_total{baz="",code="200",foo="",handler="alerts",method="get"} 35 http_requests_total{baz="",code="200",foo="",handler="config",method="get"} 8 http_requests_total{baz="",code="200",foo="",handler="flags",method="get"} 18 http_requests_total{baz="",code="200",foo="",handler="graph",method="get"} 89 http_requests_total{baz="",code="200",foo="",handler="prometheus",method="get"} 17051 http_requests_total{baz="",code="200",foo="",handler="query",method="get"} 401 http_requests_total{baz="",code="200",foo="",handler="query_range",method="get"} 15663 http_requests_total{baz="",code="200",foo="",handler="rules",method="get"} 7 http_requests_total{baz="",code="200",foo="",handler="series",method="get"} 221 http_requests_total{baz="",code="200",foo="",handler="static",method="get"} 1647 http_requests_total{baz="",code="200",foo="",handler="status",method="get"} 12 http_requests_total{baz="",code="200",foo="bar",handler="",method="get"} 325 http_requests_total{baz="",code="206",foo="",handler="static",method="get"} 2 http_requests_total{baz="",code="400",foo="",handler="query_range",method="get"} 40 http_requests_total{baz="",code="503",foo="",handler="query_range",method="get"} 3 http_requests_total{baz="bar",code="200",foo="",handler="",method="get"} 93 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
# TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/inconsistent_metrics/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/inconsistent_metrics/000077500000000000000000000000001452426057600263405ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/inconsistent_metrics/metrics.prom000066400000000000000000000024661452426057600307150ustar00rootroot00000000000000# HELP http_requests_total Total number of HTTP requests made. # TYPE http_requests_total counter http_requests_total{code="200",handler="alerts",method="get"} 35 http_requests_total{code="200",handler="config",method="get"} 8 http_requests_total{code="200",method="get", foo="bar"} 325 http_requests_total{code="200",handler="flags",method="get"} 18 http_requests_total{code="200",handler="graph",method="get"} 89 http_requests_total{code="200",method="get", baz="bar"} 93 http_requests_total{code="200",handler="prometheus",method="get"} 17051 http_requests_total{code="200",handler="query",method="get"} 401 http_requests_total{code="200",handler="query_range",method="get"} 15663 http_requests_total{code="200",handler="rules",method="get"} 7 http_requests_total{code="200",handler="series",method="get"} 221 http_requests_total{code="200",handler="static",method="get"} 1647 http_requests_total{code="200",handler="status",method="get"} 12 http_requests_total{code="200",method="get"} 11 http_requests_total{code="206",handler="static",method="get"} 2 http_requests_total{code="400",handler="query_range",method="get"} 40 http_requests_total{code="503",handler="query_range",method="get"} 3 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines{foo="bar"} 229 go_goroutines 20 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_different_help.out000066400000000000000000000011461452426057600305100ustar00rootroot00000000000000# HELP events_total A nice help message. # TYPE events_total counter events_total{file="a",foo="bar"} 10 events_total{file="a",foo="baz"} 20 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/a.prom"} 1 node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_different_help/b.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_different_help/000077500000000000000000000000001452426057600277555ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_different_help/a.prom000066400000000000000000000002161452426057600310730ustar00rootroot00000000000000# HELP events_total A nice help message. # TYPE events_total counter events_total{foo="bar",file="a"} 10 events_total{foo="baz",file="a"} 20 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_different_help/b.prom000066400000000000000000000002231452426057600310720ustar00rootroot00000000000000# HELP events_total A different help message. 
# TYPE events_total counter events_total{foo="bar",file="b"} 30 events_total{foo="baz",file="b"} 40 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_empty_help.out000066400000000000000000000014071452426057600277000ustar00rootroot00000000000000# HELP events_total Metric read from fixtures/textfile/metrics_merge_empty_help/a.prom, fixtures/textfile/metrics_merge_empty_help/b.prom # TYPE events_total counter events_total{file="a",foo="bar"} 10 events_total{file="a",foo="baz"} 20 events_total{file="b",foo="bar"} 30 events_total{file="b",foo="baz"} 40 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_empty_help/a.prom"} 1 node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_empty_help/b.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_empty_help/000077500000000000000000000000001452426057600271455ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_empty_help/a.prom000066400000000000000000000001721452426057600302640ustar00rootroot00000000000000# HELP events_total # TYPE events_total counter events_total{foo="bar",file="a"} 10 events_total{foo="baz",file="a"} 20 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_empty_help/b.prom000066400000000000000000000001721452426057600302650ustar00rootroot00000000000000# HELP events_total # TYPE events_total counter events_total{foo="bar",file="b"} 30 events_total{foo="baz",file="b"} 40 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_no_help.out000066400000000000000000000013731452426057600271600ustar00rootroot00000000000000# HELP events_total Metric read from fixtures/textfile/metrics_merge_no_help/a.prom, fixtures/textfile/metrics_merge_no_help/b.prom # TYPE events_total counter events_total{file="a",foo="bar"} 10 events_total{file="a",foo="baz"} 20 events_total{file="b",foo="bar"} 30 events_total{file="b",foo="baz"} 40 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_no_help/a.prom"} 1 node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_no_help/b.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_no_help/000077500000000000000000000000001452426057600264235ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_no_help/a.prom000066400000000000000000000001451452426057600275420ustar00rootroot00000000000000# TYPE events_total counter events_total{foo="bar",file="a"} 10 events_total{foo="baz",file="a"} 20 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_no_help/b.prom000066400000000000000000000001451452426057600275430ustar00rootroot00000000000000# TYPE events_total counter events_total{foo="bar",file="b"} 30 events_total{foo="baz",file="b"} 40 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_same_help.out000066400000000000000000000012361452426057600274670ustar00rootroot00000000000000# HELP events_total The same help. 
# TYPE events_total counter events_total{file="a",foo="bar"} 10 events_total{file="a",foo="baz"} 20 events_total{file="b",foo="bar"} 30 events_total{file="b",foo="baz"} 40 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_same_help/a.prom"} 1 node_textfile_mtime_seconds{file="fixtures/textfile/metrics_merge_same_help/b.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_same_help/000077500000000000000000000000001452426057600267345ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_same_help/a.prom000066400000000000000000000002101452426057600300440ustar00rootroot00000000000000# HELP events_total The same help. # TYPE events_total counter events_total{foo="bar",file="a"} 10 events_total{foo="baz",file="a"} 20 node_exporter-1.7.0/collector/fixtures/textfile/metrics_merge_same_help/b.prom000066400000000000000000000002101452426057600300450ustar00rootroot00000000000000# HELP events_total The same help. # TYPE events_total counter events_total{foo="bar",file="b"} 30 events_total{foo="baz",file="b"} 40 node_exporter-1.7.0/collector/fixtures/textfile/no_metric_files.out000066400000000000000000000002461452426057600257660ustar00rootroot00000000000000# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/no_metric_files/000077500000000000000000000000001452426057600252335ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/no_metric_files/non_matching_file.txt000066400000000000000000000000351452426057600314350ustar00rootroot00000000000000This file should be ignored. 
node_exporter-1.7.0/collector/fixtures/textfile/nonexistent_path.out000066400000000000000000000002461452426057600262170ustar00rootroot00000000000000# HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 1 node_exporter-1.7.0/collector/fixtures/textfile/summary.out000066400000000000000000000035641452426057600243300ustar00rootroot00000000000000# HELP event_duration_seconds_total Query timings # TYPE event_duration_seconds_total summary event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06 event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 # HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
# TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/summary/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 node_exporter-1.7.0/collector/fixtures/textfile/summary/000077500000000000000000000000001452426057600235675ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/summary/metrics.prom000066400000000000000000000030061452426057600261330ustar00rootroot00000000000000# HELP event_duration_seconds_total Query timings # TYPE event_duration_seconds_total summary event_duration_seconds_total{baz="inner_eval",quantile="0.5"} 1.073e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.9"} 1.928e-06 event_duration_seconds_total{baz="inner_eval",quantile="0.99"} 4.35e-06 event_duration_seconds_total_sum{baz="inner_eval"} 1.8652166505091474e+06 event_duration_seconds_total_count{baz="inner_eval"} 1.492355615e+09 event_duration_seconds_total{baz="prepare_time",quantile="0.5"} 4.283e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.9"} 7.796e-06 event_duration_seconds_total{baz="prepare_time",quantile="0.99"} 2.2083e-05 event_duration_seconds_total_sum{baz="prepare_time"} 840923.7919437207 event_duration_seconds_total_count{baz="prepare_time"} 1.492355814e+09 event_duration_seconds_total{baz="result_append",quantile="0.5"} 1.566e-06 event_duration_seconds_total{baz="result_append",quantile="0.9"} 3.223e-06 event_duration_seconds_total{baz="result_append",quantile="0.99"} 6.53e-06 event_duration_seconds_total_sum{baz="result_append"} 4.404109951000078 event_duration_seconds_total_count{baz="result_append"} 1.427647e+06 event_duration_seconds_total{baz="result_sort",quantile="0.5"} 1.847e-06 event_duration_seconds_total{baz="result_sort",quantile="0.9"} 2.975e-06 event_duration_seconds_total{baz="result_sort",quantile="0.99"} 4.08e-06 event_duration_seconds_total_sum{baz="result_sort"} 3.4123187829998307 event_duration_seconds_total_count{baz="result_sort"} 1.427647e+06 node_exporter-1.7.0/collector/fixtures/textfile/summary_extra_dimension.out000066400000000000000000000032711452426057600275730ustar00rootroot00000000000000# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. # TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/summary_extra_dimension/metrics.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 # HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. 
# TYPE prometheus_rule_evaluation_duration_seconds summary prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.9"} 0.001765451 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="alerting",quantile="0.99"} 0.018672076 prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="alerting"} 214.85081044700146 prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="alerting"} 185209 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.5"} 4.3132e-05 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.9"} 8.9295e-05 prometheus_rule_evaluation_duration_seconds{handler="",rule_type="recording",quantile="0.99"} 0.000193657 prometheus_rule_evaluation_duration_seconds_sum{handler="",rule_type="recording"} 185091.01317759082 prometheus_rule_evaluation_duration_seconds_count{handler="",rule_type="recording"} 1.0020195e+08 prometheus_rule_evaluation_duration_seconds{handler="foo",rule_type="alerting",quantile="0.5"} 0.000571464 prometheus_rule_evaluation_duration_seconds_sum{handler="foo",rule_type="alerting"} 0 prometheus_rule_evaluation_duration_seconds_count{handler="foo",rule_type="alerting"} 0 node_exporter-1.7.0/collector/fixtures/textfile/summary_extra_dimension/000077500000000000000000000000001452426057600270375ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/summary_extra_dimension/metrics.prom000066400000000000000000000020531452426057600314040ustar00rootroot00000000000000# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute. # TYPE prometheus_rule_evaluation_duration_seconds summary prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.5", handler="foo"} 0.000571464 prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.9"} 0.001765451 prometheus_rule_evaluation_duration_seconds{rule_type="alerting",quantile="0.99"} 0.018672076 prometheus_rule_evaluation_duration_seconds_sum{rule_type="alerting"} 214.85081044700146 prometheus_rule_evaluation_duration_seconds_count{rule_type="alerting"} 185209 prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.5"} 4.3132e-05 prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.9"} 8.9295e-05 prometheus_rule_evaluation_duration_seconds{rule_type="recording",quantile="0.99"} 0.000193657 prometheus_rule_evaluation_duration_seconds_sum{rule_type="recording"} 185091.01317759082 prometheus_rule_evaluation_duration_seconds_count{rule_type="recording"} 1.0020195e+08 node_exporter-1.7.0/collector/fixtures/textfile/two_metric_files.out000066400000000000000000000020171452426057600261610ustar00rootroot00000000000000# HELP node_textfile_mtime_seconds Unixtime mtime of textfiles successfully read. 
# TYPE node_textfile_mtime_seconds gauge node_textfile_mtime_seconds{file="fixtures/textfile/two_metric_files/metrics1.prom"} 1 node_textfile_mtime_seconds{file="fixtures/textfile/two_metric_files/metrics2.prom"} 1 # HELP node_textfile_scrape_error 1 if there was an error opening or reading a file, 0 otherwise # TYPE node_textfile_scrape_error gauge node_textfile_scrape_error 0 # HELP testmetric1_1 Metric read from fixtures/textfile/two_metric_files/metrics1.prom # TYPE testmetric1_1 untyped testmetric1_1{foo="bar"} 10 # HELP testmetric1_2 Metric read from fixtures/textfile/two_metric_files/metrics1.prom # TYPE testmetric1_2 untyped testmetric1_2{foo="baz"} 20 # HELP testmetric2_1 Metric read from fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_1 untyped testmetric2_1{foo="bar"} 30 # HELP testmetric2_2 Metric read from fixtures/textfile/two_metric_files/metrics2.prom # TYPE testmetric2_2 untyped testmetric2_2{foo="baz"} 40 node_exporter-1.7.0/collector/fixtures/textfile/two_metric_files/000077500000000000000000000000001452426057600254305ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/textfile/two_metric_files/metrics1.prom000066400000000000000000000000701452426057600300530ustar00rootroot00000000000000testmetric1_1{foo="bar"} 10 testmetric1_2{foo="baz"} 20 node_exporter-1.7.0/collector/fixtures/textfile/two_metric_files/metrics2.prom000066400000000000000000000000701452426057600300540ustar00rootroot00000000000000testmetric2_1{foo="bar"} 30 testmetric2_2{foo="baz"} 40 node_exporter-1.7.0/collector/fixtures/textfile/two_metric_files/non_matching_file.txt000066400000000000000000000000351452426057600316320ustar00rootroot00000000000000This file should be ignored. node_exporter-1.7.0/collector/fixtures/udev.ttar000066400000000000000000000370101452426057600221060ustar00rootroot00000000000000# Archive created by ttar -C collector/fixtures -c -f collector/fixtures/udev.ttar udev Directory: udev Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: udev/data Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b11:0 Lines: 38 S:disk/by-id/usb-AMI_Virtual_CDROM0_AAAABBBBCCCC1-0:0 S:disk/by-path/pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0 S:cdrom L:-100 I:83543243 E:ID_CDROM=1 E:SYSTEMD_MOUNT_DEVICE_BOUND=1 E:ID_VENDOR=AMI E:ID_VENDOR_ENC=AMI\x20\x20\x20\x20\x20 E:ID_VENDOR_ID=c096 E:ID_MODEL=Virtual_CDROM0 E:ID_MODEL_ENC=Virtual\x20CDROM0\x20\x20 E:ID_MODEL_ID=ee31 E:ID_REVISION=1.00 E:ID_SERIAL=AMI_Virtual_CDROM0_AAAABBBBCCCC1-0:0 E:ID_SERIAL_SHORT=AAAABBBBCCCC1 E:ID_TYPE=cd/dvd E:ID_INSTANCE=0:0 E:ID_BUS=usb E:ID_USB_INTERFACES=:905639: E:ID_USB_INTERFACE_NUM=00 E:ID_USB_DRIVER=usb-storage E:ID_PATH=pci-0000:00:14.0-usb-0:1.1:1.0-scsi-0:0:0:0 E:ID_PATH_TAG=pci-0000_00_14_0-usb-0_1_1_1_0-scsi-0_0_0_0 E:SCSI_TPGS=0 E:SCSI_TYPE=cd/dvd E:SCSI_VENDOR=AMI E:SCSI_VENDOR_ENC=AMI\x20\x20\x20\x20\x20 E:SCSI_MODEL=Virtual_CDROM0 E:SCSI_MODEL_ENC=Virtual\x20CDROM0\x20\x20 E:SCSI_REVISION=1.00 E:ID_SCSI=1 E:ID_SCSI_INQUIRY=1 E:ID_FS_TYPE= E:ID_FOR_SEAT=block-pci-0000_00_14_0-usb-0_1_1_1_0-scsi-0_0_0_0 G:uaccess G:systemd G:seat Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b179:0 Lines: 13 S:disk/by-path/platform-df2969f3.mmc S:disk/by-id/mmc-SC64G_0x83e36d93 W:1 I:7679747 E:ID_NAME=SC64G E:ID_SERIAL=0x83e36d93 E:ID_PATH=platform-df2969f3.mmc E:ID_PATH_TAG=platform-df2969f3_mmc E:ID_PART_TABLE_UUID=1954c9df 
E:ID_PART_TABLE_TYPE=dos E:ID_DRIVE_FLASH_SD=1 E:ID_DRIVE_MEDIA_FLASH_SD=1 G:systemd Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b179:1 Lines: 30 S:disk/by-id/mmc-SC64G_0x83e36d93-part1 S:disk/by-path/platform-df2969f3.mmc-part1 S:disk/by-label/boot S:disk/by-uuid/6284-658D S:disk/by-partuuid/1954c9df-01 W:12 I:8463403 E:ID_NAME=SC64G E:ID_SERIAL=0x83e36d93 E:ID_PATH=platform-df2969f3.mmc E:ID_PATH_TAG=platform-df2969f3_mmc E:ID_PART_TABLE_UUID=1954c9df E:ID_PART_TABLE_TYPE=dos E:ID_DRIVE_FLASH_SD=1 E:ID_DRIVE_MEDIA_FLASH_SD=1 E:ID_FS_LABEL=boot E:ID_FS_LABEL_ENC=boot E:ID_FS_UUID=6284-658D E:ID_FS_UUID_ENC=6284-658D E:ID_FS_VERSION=FAT32 E:ID_FS_TYPE=vfat E:ID_FS_USAGE=filesystem E:ID_PART_ENTRY_SCHEME=dos E:ID_PART_ENTRY_UUID=1954c9df-01 E:ID_PART_ENTRY_TYPE=0xc E:ID_PART_ENTRY_NUMBER=1 E:ID_PART_ENTRY_OFFSET=8192 E:ID_PART_ENTRY_SIZE=524288 E:ID_PART_ENTRY_DISK=179:0 G:systemd Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b179:2 Lines: 30 S:disk/by-id/mmc-SC64G_0x83e36d93-part2 S:disk/by-path/platform-df2969f3.mmc-part2 S:disk/by-label/rootfs S:disk/by-uuid/83324ce8-a6f3-4e35-ad64-dbb3d6b87a32 S:disk/by-partuuid/1954c9df-02 W:2 I:7676649 E:ID_NAME=SC64G E:ID_SERIAL=0x83e36d93 E:ID_PATH=platform-df2969f3.mmc E:ID_PATH_TAG=platform-df2969f3_mmc E:ID_PART_TABLE_UUID=1954c9df E:ID_PART_TABLE_TYPE=dos E:ID_DRIVE_FLASH_SD=1 E:ID_DRIVE_MEDIA_FLASH_SD=1 E:ID_FS_LABEL=rootfs E:ID_FS_LABEL_ENC=rootfs E:ID_FS_UUID=83324ce8-a6f3-4e35-ad64-dbb3d6b87a32 E:ID_FS_UUID_ENC=83324ce8-a6f3-4e35-ad64-dbb3d6b87a32 E:ID_FS_VERSION=1.0 E:ID_FS_TYPE=ext4 E:ID_FS_USAGE=filesystem E:ID_PART_ENTRY_SCHEME=dos E:ID_PART_ENTRY_UUID=1954c9df-02 E:ID_PART_ENTRY_TYPE=0x83 E:ID_PART_ENTRY_NUMBER=2 E:ID_PART_ENTRY_OFFSET=532480 E:ID_PART_ENTRY_SIZE=124203008 E:ID_PART_ENTRY_DISK=179:0 G:systemd Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b252:0 Lines: 20 S:disk/by-id/dm-name-nvme0n1_crypt S:mapper/nvme0n1_crypt S:disk/by-id/lvm-pv-uuid-c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB S:disk/by-id/dm-uuid-CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt I:72859885 E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 E:DM_UDEV_RULES=1 E:DM_UDEV_RULES_VSN=2 E:DM_NAME=nvme0n1_crypt E:DM_UUID=CRYPT-LUKS2-jolaulot80fy9zsiobkxyxo7y2dqeho2-nvme0n1_crypt E:DM_SUSPENDED=0 E:ID_FS_UUID=c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB E:ID_FS_UUID_ENC=c3C3uW-gD96-Yw69-c1CJ-5MwT-6ysM-mST0vB E:ID_FS_VERSION=LVM2 001 E:ID_FS_TYPE=LVM2_member E:ID_FS_USAGE=raid G:systemd Q:systemd V:1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b252:1 Lines: 24 S:disk/by-id/dm-uuid-LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T S:mapper/system-swap_1 S:disk/by-id/dm-name-system-swap_1 S:disk/by-uuid/5272bb60-04b5-49cd-b730-be57c7604450 S:system/swap_1 I:78705530 E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 E:DM_UDEV_RULES=1 E:DM_UDEV_RULES_VSN=2 E:DM_NAME=system-swap_1 E:DM_UUID=LVM-wbGqQEBL9SxrW2DLntJwgg8fAv946hw3Tvjqh0v31fWgxEtD4BoHO0lROWFUY65T E:DM_SUSPENDED=0 E:DM_VG_NAME=system E:DM_LV_NAME=swap_1 E:DM_LV_LAYER= E:ID_FS_UUID=5272bb60-04b5-49cd-b730-be57c7604450 E:ID_FS_UUID_ENC=5272bb60-04b5-49cd-b730-be57c7604450 E:ID_FS_VERSION=1 E:ID_FS_TYPE=swap E:ID_FS_USAGE=other G:systemd Q:systemd V:1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - Path: udev/data/b252:2 Lines: 24 S:disk/by-id/dm-name-system-root S:disk/by-id/dm-uuid-LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7 S:mapper/system-root S:disk/by-uuid/3deafd0d-faff-4695-8d15-51061ae1f51b S:system/root I:77655410 E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 E:DM_UDEV_RULES=1 E:DM_UDEV_RULES_VSN=2 E:DM_NAME=system-root E:DM_UUID=LVM-NWEDo8q5ABDyJuC3F8veKNyWfYmeIBfFMS4MF3HakzUhkk7ekDm6fJTHkl2fYHe7 E:DM_SUSPENDED=0 E:DM_VG_NAME=system E:DM_LV_NAME=root E:DM_LV_LAYER= E:ID_FS_UUID=3deafd0d-faff-4695-8d15-51061ae1f51b E:ID_FS_UUID_ENC=3deafd0d-faff-4695-8d15-51061ae1f51b E:ID_FS_VERSION=1.0 E:ID_FS_TYPE=ext4 E:ID_FS_USAGE=filesystem G:systemd Q:systemd V:1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b252:3 Lines: 24 S:disk/by-id/dm-name-system-var S:disk/by-id/dm-uuid-LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP S:mapper/system-var S:disk/by-uuid/5c772222-f7d4-4c8e-87e8-e97df6b7a45e S:system/var I:79395348 E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 E:DM_UDEV_RULES=1 E:DM_UDEV_RULES_VSN=2 E:DM_NAME=system-var E:DM_UUID=LVM-hrxHo0rlZ6U95ku5841Lpd17bS1Z7V7lrtEE60DVgE6YEOCdS9gcDGyonWim4hGP E:DM_SUSPENDED=0 E:DM_VG_NAME=system E:DM_LV_NAME=var E:DM_LV_LAYER= E:ID_FS_UUID=5c772222-f7d4-4c8e-87e8-e97df6b7a45e E:ID_FS_UUID_ENC=5c772222-f7d4-4c8e-87e8-e97df6b7a45e E:ID_FS_VERSION=1.0 E:ID_FS_TYPE=ext4 E:ID_FS_USAGE=filesystem G:systemd Q:systemd V:1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b252:4 Lines: 24 S:system/tmp S:disk/by-uuid/a9479d44-60e1-4015-a1e5-bb065e6dd11b S:disk/by-id/dm-uuid-LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H S:mapper/system-tmp S:disk/by-id/dm-name-system-tmp I:75852450 E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 E:DM_UDEV_RULES=1 E:DM_UDEV_RULES_VSN=2 E:DM_NAME=system-tmp E:DM_UUID=LVM-XTNGOHjPWLHcxmJmVu5cWTXEtuzqDeBkdEHAZW5q9LxWQ2d4mb5CchUQzUPJpl8H E:DM_SUSPENDED=0 E:DM_VG_NAME=system E:DM_LV_NAME=tmp E:DM_LV_LAYER= E:ID_FS_UUID=a9479d44-60e1-4015-a1e5-bb065e6dd11b E:ID_FS_UUID_ENC=a9479d44-60e1-4015-a1e5-bb065e6dd11b E:ID_FS_VERSION=1.0 E:ID_FS_TYPE=ext4 E:ID_FS_USAGE=filesystem G:systemd Q:systemd V:1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b252:5 Lines: 24 S:disk/by-uuid/b05b726a-c718-4c4d-8641-7c73a7696d83 S:mapper/system-home S:system/home S:disk/by-id/dm-name-system-home S:disk/by-id/dm-uuid-LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf I:72604009 E:DM_UDEV_DISABLE_LIBRARY_FALLBACK_FLAG=1 E:DM_UDEV_PRIMARY_SOURCE_FLAG=1 E:DM_UDEV_RULES=1 E:DM_UDEV_RULES_VSN=2 E:DM_NAME=system-home E:DM_UUID=LVM-MtoJaWTpjWRXlUnNFlpxZauTEuYlMvGFutigEzCCrfj8CNh6jCRi5LQJXZCpLjPf E:DM_SUSPENDED=0 E:DM_VG_NAME=system E:DM_LV_NAME=home E:DM_LV_LAYER= E:ID_FS_UUID=b05b726a-c718-4c4d-8641-7c73a7696d83 E:ID_FS_UUID_ENC=b05b726a-c718-4c4d-8641-7c73a7696d83 E:ID_FS_VERSION=1.0 E:ID_FS_TYPE=ext4 E:ID_FS_USAGE=filesystem G:systemd Q:systemd V:1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b254:0 Lines: 10 S:disk/by-path/pci-0000:00:06.0 S:disk/by-path/virtio-pci-0000:00:06.0 W:1 I:8524171 E:ID_PATH=pci-0000:00:06.0 E:ID_PATH_TAG=pci-0000_00_06_0 E:ID_PART_TABLE_UUID=653b59fd E:ID_PART_TABLE_TYPE=dos E:ID_FS_TYPE= G:systemd 
Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b259:0 Lines: 17 S:disk/by-path/pci-0000:02:00.0-nvme-1 S:disk/by-id/nvme-eui.p3vbbiejx5aae2r3 S:disk/by-id/nvme-SAMSUNG_EHFTF55LURSY-000Y9_S252B6CU1HG3M1 I:79621327 E:ID_SERIAL_SHORT=S252B6CU1HG3M1 E:ID_WWN=eui.p3vbbiejx5aae2r3 E:ID_MODEL=SAMSUNG EHFTF55LURSY-000Y9 E:ID_REVISION=4NBTUY95 E:ID_SERIAL=SAMSUNG_EHFTF55LURSY-000Y9_S252B6CU1HG3M1 E:ID_PATH=pci-0000:02:00.0-nvme-1 E:ID_PATH_TAG=pci-0000_02_00_0-nvme-1 E:ID_PART_TABLE_UUID=f301fdbd-fd1f-46d4-9fb8-c9aeb757f050 E:ID_PART_TABLE_TYPE=gpt E:ID_FS_TYPE= G:systemd Q:systemd V:1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b8:0 Lines: 60 S:disk/by-id/lvm-pv-uuid-cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw S:disk/by-id/scsi-SATA_TOSHIBA_KSDB4U86_2160A0D5FVGG S:disk/by-id/ata-TOSHIBA_KSDB4U866TE_2160A0D5FVGG S:disk/by-path/pci-0000:3b:00.0-sas-phy7-lun-0 S:disk/by-id/scsi-37c72382b8de36a64 S:disk/by-id/wwn-0x7c72382b8de36a64 W:702 I:73815117 E:ID_ATA=1 E:ID_TYPE=disk E:ID_BUS=ata E:ID_MODEL=TOSHIBA_KSDB4U86 E:ID_MODEL_ENC=TOSHIBA\x20KSDB4U86 E:ID_REVISION=0102 E:ID_SERIAL=TOSHIBA_KSDB4U866TE_DTB0QRJR2EIG E:ID_SERIAL_SHORT=2160A0D5FVGG E:ID_ATA_WRITE_CACHE=1 E:ID_ATA_WRITE_CACHE_ENABLED=0 E:ID_ATA_FEATURE_SET_PM=1 E:ID_ATA_FEATURE_SET_PM_ENABLED=1 E:ID_ATA_FEATURE_SET_SECURITY=1 E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0 E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=66892 E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=66892 E:ID_ATA_FEATURE_SET_SMART=1 E:ID_ATA_FEATURE_SET_SMART_ENABLED=1 E:ID_ATA_FEATURE_SET_APM=1 E:ID_ATA_FEATURE_SET_APM_ENABLED=1 E:ID_ATA_FEATURE_SET_APM_CURRENT_VALUE=128 E:ID_ATA_DOWNLOAD_MICROCODE=1 E:ID_ATA_SATA=1 E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1 E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1 E:ID_ATA_ROTATION_RATE_RPM=7200 E:ID_WWN=0x7c72382b8de36a64 E:ID_WWN_WITH_EXTENSION=0x7c72382b8de36a64 E:ID_PATH=pci-0000:3b:00.0-sas-phy7-lun-0 E:ID_PATH_TAG=pci-0000_3b_00_0-sas-phy7-lun-0 E:ID_FS_UUID=cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw E:ID_FS_UUID_ENC=cVVv6j-HSA2-IY33-1Jmj-dO2H-YL7w-b4Oxqw E:ID_FS_VERSION=LVM2 001 E:ID_FS_TYPE=LVM2_member E:ID_FS_USAGE=raid E:SCSI_TPGS=0 E:SCSI_TYPE=disk E:SCSI_VENDOR=ATA E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 E:SCSI_MODEL=TOSHIBA_KSDB4U86 E:SCSI_MODEL_ENC=TOSHIBA\x20KSDB4U86 E:SCSI_REVISION=0102 E:ID_SCSI=1 E:ID_SCSI_INQUIRY=1 E:ID_VENDOR=ATA E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 E:SCSI_IDENT_SERIAL=2160A0D5FVGG E:SCSI_IDENT_LUN_NAA_REG=7c72382b8de36a64 E:SYSTEMD_READY=1 E:SYSTEMD_ALIAS=/dev/block/8:0 E:SYSTEMD_WANTS=lvm2-pvscan@8:0.service G:systemd Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b8:16 Lines: 62 S:disk/by-id/scsi-3e1b87abbb16bd84e S:disk/by-id/wwn-0xe1b87abbb16bd84e S:disk/by-path/pci-0000:00:1f.2-ata-1 S:disk/by-id/scsi-0ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E S:disk/by-id/scsi-SATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E S:disk/by-id/scsi-1ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E S:disk/by-id/ata-SuperMicro_SSD_SMC0E1B87ABBB16BD84E W:58 I:147686920 E:ID_ATA=1 E:ID_TYPE=disk E:ID_BUS=ata E:ID_MODEL=SuperMicro_SSD E:ID_MODEL_ENC=SuperMicro\x20SSD\x20\x20 E:ID_REVISION=0R E:ID_SERIAL=SuperMicro_SSD_SMC0E1B87ABBB16BD84E E:ID_SERIAL_SHORT=SMC0E1B87ABBB16BD84E E:ID_ATA_WRITE_CACHE=1 E:ID_ATA_WRITE_CACHE_ENABLED=1 E:ID_ATA_FEATURE_SET_HPA=1 E:ID_ATA_FEATURE_SET_HPA_ENABLED=1 E:ID_ATA_FEATURE_SET_PM=1 E:ID_ATA_FEATURE_SET_PM_ENABLED=1 E:ID_ATA_FEATURE_SET_SECURITY=1 
E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0 E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=4 E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=4 E:ID_ATA_FEATURE_SET_SMART=1 E:ID_ATA_FEATURE_SET_SMART_ENABLED=1 E:ID_ATA_FEATURE_SET_AAM=1 E:ID_ATA_FEATURE_SET_AAM_ENABLED=0 E:ID_ATA_FEATURE_SET_AAM_VENDOR_RECOMMENDED_VALUE=0 E:ID_ATA_FEATURE_SET_AAM_CURRENT_VALUE=0 E:ID_ATA_DOWNLOAD_MICROCODE=1 E:ID_ATA_SATA=1 E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1 E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1 E:ID_ATA_ROTATION_RATE_RPM=0 E:ID_WWN=0xe1b87abbb16bd84e E:ID_WWN_WITH_EXTENSION=0xe1b87abbb16bd84e E:ID_PATH=pci-0000:00:1f.2-ata-1 E:ID_PATH_TAG=pci-0000_00_1f_2-ata-1 E:ID_PART_TABLE_UUID=45980145-24e2-4302-a7f0-364c68cfaf59 E:ID_PART_TABLE_TYPE=gpt E:SCSI_TPGS=0 E:SCSI_TYPE=disk E:SCSI_VENDOR=ATA E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 E:SCSI_MODEL=SuperMicro_SSD E:SCSI_MODEL_ENC=SuperMicro\x20SSD\x20\x20 E:SCSI_REVISION=0R E:ID_SCSI=1 E:ID_SCSI_INQUIRY=1 E:ID_VENDOR=ATA E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 E:SCSI_IDENT_SERIAL=SMC0E1B87ABBB16BD84E E:SCSI_IDENT_LUN_VENDOR=SMC0E1B87ABBB16BD84E E:SCSI_IDENT_LUN_T10=ATA_SuperMicro_SSD_SMC0E1B87ABBB16BD84E E:SCSI_IDENT_LUN_ATA=SuperMicro_SSD_SMC0E1B87ABBB16BD84E E:SCSI_IDENT_LUN_NAA_REG=e1b87abbb16bd84e E:ID_FS_TYPE= G:systemd Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: udev/data/b8:32 Lines: 62 S:disk/by-path/pci-0000:00:1f.2-ata-4 S:disk/by-id/scsi-SATA_INTEL_SSDS9X9SI0_3EWB5Y25CWQWA7EH1U S:disk/by-id/scsi-0ATA_INTEL_SSDS9X9SI0_3EWB5Y25CWQWA7EH1U S:disk/by-id/scsi-1ATA_INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U S:disk/by-id/lvm-pv-uuid-QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb S:disk/by-id/ata-INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U S:disk/by-id/scsi-358907ddc573a5de S:disk/by-id/wwn-0x58907ddc573a5de W:10 I:145572852 E:ID_ATA=1 E:ID_TYPE=disk E:ID_BUS=ata E:ID_MODEL=INTEL_SSDS9X9SI0 E:ID_MODEL_ENC=INTEL\x20SSDS9X9SI0 E:ID_REVISION=0100 E:ID_SERIAL=INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U E:ID_SERIAL_SHORT=3EWB5Y25CWQWA7EH1U E:ID_ATA_WRITE_CACHE=1 E:ID_ATA_WRITE_CACHE_ENABLED=0 E:ID_ATA_FEATURE_SET_PM=1 E:ID_ATA_FEATURE_SET_PM_ENABLED=1 E:ID_ATA_FEATURE_SET_SECURITY=1 E:ID_ATA_FEATURE_SET_SECURITY_ENABLED=0 E:ID_ATA_FEATURE_SET_SECURITY_ERASE_UNIT_MIN=4 E:ID_ATA_FEATURE_SET_SECURITY_ENHANCED_ERASE_UNIT_MIN=4 E:ID_ATA_FEATURE_SET_SMART=1 E:ID_ATA_FEATURE_SET_SMART_ENABLED=1 E:ID_ATA_DOWNLOAD_MICROCODE=1 E:ID_ATA_SATA=1 E:ID_ATA_SATA_SIGNAL_RATE_GEN2=1 E:ID_ATA_SATA_SIGNAL_RATE_GEN1=1 E:ID_ATA_ROTATION_RATE_RPM=0 E:ID_WWN=0x58907ddc573a5de E:ID_WWN_WITH_EXTENSION=0x58907ddc573a5de E:ID_PATH=pci-0000:00:1f.2-ata-4 E:ID_PATH_TAG=pci-0000_00_1f_2-ata-4 E:ID_FS_UUID=QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb E:ID_FS_UUID_ENC=QFy9W7-Brj3-hQ6v-AF8i-3Zqg-n3Vs-kGY4vb E:ID_FS_VERSION=LVM2 001 E:ID_FS_TYPE=LVM2_member E:ID_FS_USAGE=raid E:SCSI_TPGS=0 E:SCSI_TYPE=disk E:SCSI_VENDOR=ATA E:SCSI_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 E:SCSI_MODEL=INTEL_SSDS9X9SI0 E:SCSI_MODEL_ENC=INTEL\x20SSDS9X9SI0 E:SCSI_REVISION=0100 E:ID_SCSI=1 E:ID_SCSI_INQUIRY=1 E:ID_VENDOR=ATA E:ID_VENDOR_ENC=ATA\x20\x20\x20\x20\x20 E:SCSI_IDENT_SERIAL=3EWB5Y25CWQWA7EH1U E:SCSI_IDENT_LUN_VENDOR=3EWB5Y25CWQWA7EH1U E:SCSI_IDENT_LUN_T10=ATA_INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U E:SCSI_IDENT_LUN_ATA=INTEL_SSDS9X9SI00G8_3EWB5Y25CWQWA7EH1U E:SCSI_IDENT_LUN_NAA_REG=58907ddc573a5de E:SYSTEMD_READY=1 E:SYSTEMD_ALIAS=/dev/block/8:32 E:SYSTEMD_WANTS=lvm2-pvscan@8:32.service G:systemd Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
node_exporter-1.7.0/collector/fixtures/usr/000077500000000000000000000000001452426057600210575ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/usr/lib/000077500000000000000000000000001452426057600216255ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/usr/lib/os-release000066400000000000000000000005761452426057600236170ustar00rootroot00000000000000NAME="Ubuntu" VERSION="20.04.2 LTS (Focal Fossa)" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 20.04.2 LTS" VERSION_ID="20.04" HOME_URL="https://www.ubuntu.com/" SUPPORT_URL="https://help.ubuntu.com/" BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" VERSION_CODENAME=focal UBUNTU_CODENAME=focal node_exporter-1.7.0/collector/fixtures/wifi/000077500000000000000000000000001452426057600212045ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/wifi/interfaces.json000066400000000000000000000002151452426057600242200ustar00rootroot00000000000000[ { "name": "wlan0", "type": 2, "frequency": 2412 }, { "name": "wlan1", "type": 3, "frequency": 2412 }, { "type": 10 } ] node_exporter-1.7.0/collector/fixtures/wifi/wlan0/000077500000000000000000000000001452426057600222255ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures/wifi/wlan0/bss.json000066400000000000000000000000731452426057600237070ustar00rootroot00000000000000{ "ssid": "Example", "bssid": "ABEiM0RV", "status": 1 } node_exporter-1.7.0/collector/fixtures/wifi/wlan0/stationinfo.json000066400000000000000000000007341452426057600254610ustar00rootroot00000000000000[ { "hardwareaddr": "qrvM3e7/", "connected": 30000000000, "inactive": 400000000, "receivebitrate": 128000000, "transmitbitrate": 164000000, "signal": -52, "transmitretries": 10, "transmitfailed": 2, "beaconloss": 1 }, { "hardwareaddr": "AQIDBAUG", "connected": 60000000000, "inactive": 800000000, "receivebitrate": 256000000, "transmitbitrate": 328000000, "signal": -26, "transmitretries": 20, "transmitfailed": 4, "beaconloss": 2 } ] node_exporter-1.7.0/collector/fixtures_bindmount/000077500000000000000000000000001452426057600223255ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures_bindmount/proc/000077500000000000000000000000001452426057600232705ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures_bindmount/proc/mounts000066400000000000000000000006031452426057600245370ustar00rootroot00000000000000/dev/nvme1n0 /host ext4 rw,seclabel,relatime,data=ordered 0 0 /dev/nvme1n1 /host/media/volume1 ext4 rw,seclabel,relatime,data=ordered 0 0 /dev/nvme1n2 /host/media/volume2 ext4 rw,seclabel,relatime,data=ordered 0 0 tmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0 tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0 tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0 node_exporter-1.7.0/collector/fixtures_hidepid/000077500000000000000000000000001452426057600217345ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures_hidepid/proc/000077500000000000000000000000001452426057600226775ustar00rootroot00000000000000node_exporter-1.7.0/collector/fixtures_hidepid/proc/mounts000066400000000000000000000000271452426057600241460ustar00rootroot00000000000000rootfs / rootfs rw 0 0 node_exporter-1.7.0/collector/helper.go000066400000000000000000000033351452426057600202070ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the 
License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "os" "regexp" "strconv" "strings" ) func readUintFromFile(path string) (uint64, error) { data, err := os.ReadFile(path) if err != nil { return 0, err } value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) if err != nil { return 0, err } return value, nil } var metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`) // SanitizeMetricName sanitize the given metric name by replacing invalid characters by underscores. // // OpenMetrics and the Prometheus exposition format require the metric name // to consist only of alphanumericals and "_", ":" and they must not start // with digits. Since colons in MetricFamily are reserved to signal that the // MetricFamily is the result of a calculation or aggregation of a general // purpose monitoring system, colons will be replaced as well. // // Note: If not subsequently prepending a namespace and/or subsystem (e.g., // with prometheus.BuildFQName), the caller must ensure that the supplied // metricName does not begin with a digit. func SanitizeMetricName(metricName string) string { return metricNameRegex.ReplaceAllString(metricName, "_") } node_exporter-1.7.0/collector/helper_test.go000066400000000000000000000023361452426057600212460ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "testing" ) func TestSanitizeMetricName(t *testing.T) { testcases := map[string]string{ "": "", "rx_errors": "rx_errors", "Queue[0] AllocFails": "Queue_0_AllocFails", "Tx LPI entry count": "Tx_LPI_entry_count", "port.VF_admin_queue_requests": "port_VF_admin_queue_requests", "[3]: tx_bytes": "_3_tx_bytes", " err": "_err", } for metricName, expected := range testcases { got := SanitizeMetricName(metricName) if expected != got { t.Errorf("Expected '%s' but got '%s'", expected, got) } } } node_exporter-1.7.0/collector/hwmon_linux.go000066400000000000000000000344441452426057600213040ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nohwmon // +build !nohwmon package collector import ( "errors" "os" "path/filepath" "regexp" "strconv" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) var ( collectorHWmonChipInclude = kingpin.Flag("collector.hwmon.chip-include", "Regexp of hwmon chip to include (mutually exclusive to device-exclude).").String() collectorHWmonChipExclude = kingpin.Flag("collector.hwmon.chip-exclude", "Regexp of hwmon chip to exclude (mutually exclusive to device-include).").String() hwmonInvalidMetricChars = regexp.MustCompile("[^a-z0-9:_]") hwmonFilenameFormat = regexp.MustCompile(`^(?P[^0-9]+)(?P[0-9]*)?(_(?P.+))?$`) hwmonLabelDesc = []string{"chip", "sensor"} hwmonChipNameLabelDesc = []string{"chip", "chip_name"} hwmonSensorTypes = []string{ "vrm", "beep_enable", "update_interval", "in", "cpu", "fan", "pwm", "temp", "curr", "power", "energy", "humidity", "intrusion", } ) func init() { registerCollector("hwmon", defaultEnabled, NewHwMonCollector) } type hwMonCollector struct { deviceFilter deviceFilter logger log.Logger } // NewHwMonCollector returns a new Collector exposing /sys/class/hwmon stats // (similar to lm-sensors). func NewHwMonCollector(logger log.Logger) (Collector, error) { return &hwMonCollector{ logger: logger, deviceFilter: newDeviceFilter(*collectorHWmonChipExclude, *collectorHWmonChipInclude), }, nil } func cleanMetricName(name string) string { lower := strings.ToLower(name) replaced := hwmonInvalidMetricChars.ReplaceAllLiteralString(lower, "_") cleaned := strings.Trim(replaced, "_") return cleaned } func addValueFile(data map[string]map[string]string, sensor string, prop string, file string) { raw, err := sysReadFile(file) if err != nil { return } value := strings.Trim(string(raw), "\n") if _, ok := data[sensor]; !ok { data[sensor] = make(map[string]string) } data[sensor][prop] = value } // sysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. func sysReadFile(file string) ([]byte, error) { f, err := os.Open(file) if err != nil { return nil, err } defer f.Close() // On some machines, hwmon drivers are broken and return EAGAIN. This causes // Go's os.ReadFile implementation to poll forever. // // Since we either want to read data or bail immediately, do the simplest // possible read using system call directly. b := make([]byte, 128) n, err := unix.Read(int(f.Fd()), b) if err != nil { return nil, err } return b[:n], nil } // explodeSensorFilename splits a sensor name into _. 
func explodeSensorFilename(filename string) (ok bool, sensorType string, sensorNum int, sensorProperty string) { matches := hwmonFilenameFormat.FindStringSubmatch(filename) if len(matches) == 0 { return false, sensorType, sensorNum, sensorProperty } for i, match := range hwmonFilenameFormat.SubexpNames() { if i >= len(matches) { return true, sensorType, sensorNum, sensorProperty } if match == "type" { sensorType = matches[i] } if match == "property" { sensorProperty = matches[i] } if match == "id" && len(matches[i]) > 0 { if num, err := strconv.Atoi(matches[i]); err == nil { sensorNum = num } else { return false, sensorType, sensorNum, sensorProperty } } } return true, sensorType, sensorNum, sensorProperty } func collectSensorData(dir string, data map[string]map[string]string) error { sensorFiles, dirError := os.ReadDir(dir) if dirError != nil { return dirError } for _, file := range sensorFiles { filename := file.Name() ok, sensorType, sensorNum, sensorProperty := explodeSensorFilename(filename) if !ok { continue } for _, t := range hwmonSensorTypes { if t == sensorType { addValueFile(data, sensorType+strconv.Itoa(sensorNum), sensorProperty, filepath.Join(dir, file.Name())) break } } } return nil } func (c *hwMonCollector) updateHwmon(ch chan<- prometheus.Metric, dir string) error { hwmonName, err := c.hwmonName(dir) if err != nil { return err } if c.deviceFilter.ignored(hwmonName) { level.Debug(c.logger).Log("msg", "ignoring hwmon chip", "chip", hwmonName) return nil } data := make(map[string]map[string]string) err = collectSensorData(dir, data) if err != nil { return err } if _, err := os.Stat(filepath.Join(dir, "device")); err == nil { err := collectSensorData(filepath.Join(dir, "device"), data) if err != nil { return err } } hwmonChipName, err := c.hwmonHumanReadableChipName(dir) if err == nil { // sensor chip metadata desc := prometheus.NewDesc( "node_hwmon_chip_names", "Annotation metric for human-readable chip names", hwmonChipNameLabelDesc, nil, ) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, 1.0, hwmonName, hwmonChipName, ) } // Format all sensors. for sensor, sensorData := range data { _, sensorType, _, _ := explodeSensorFilename(sensor) labels := []string{hwmonName, sensor} if labelText, ok := sensorData["label"]; ok { label := strings.ToValidUTF8(labelText, "�") desc := prometheus.NewDesc("node_hwmon_sensor_label", "Label for given chip and sensor", []string{"chip", "sensor", "label"}, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1.0, hwmonName, sensor, label) } if sensorType == "beep_enable" { value := 0.0 if sensorData[""] == "1" { value = 1.0 } metricName := "node_hwmon_beep_enabled" desc := prometheus.NewDesc(metricName, "Hardware beep enabled", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, value, labels...) continue } if sensorType == "vrm" { parsedValue, err := strconv.ParseFloat(sensorData[""], 64) if err != nil { continue } metricName := "node_hwmon_voltage_regulator_version" desc := prometheus.NewDesc(metricName, "Hardware voltage regulator", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue, labels...) 
continue } if sensorType == "update_interval" { parsedValue, err := strconv.ParseFloat(sensorData[""], 64) if err != nil { continue } metricName := "node_hwmon_update_interval_seconds" desc := prometheus.NewDesc(metricName, "Hardware monitor update interval", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue*0.001, labels...) continue } prefix := "node_hwmon_" + sensorType for element, value := range sensorData { if element == "label" { continue } name := prefix if element == "input" { // input is actually the value if _, ok := sensorData[""]; ok { name = name + "_input" } } else if element != "" { name = name + "_" + cleanMetricName(element) } parsedValue, err := strconv.ParseFloat(value, 64) if err != nil { continue } // special elements, fault, alarm & beep should be handed out without units if element == "fault" || element == "alarm" { desc := prometheus.NewDesc(name, "Hardware sensor "+element+" status ("+sensorType+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, parsedValue, labels...) continue } if element == "beep" { desc := prometheus.NewDesc(name+"_enabled", "Hardware monitor sensor has beeping enabled", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, parsedValue, labels...) continue } // everything else should get a unit if sensorType == "in" || sensorType == "cpu" { desc := prometheus.NewDesc(name+"_volts", "Hardware monitor for voltage ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue*0.001, labels...) continue } if sensorType == "temp" && element != "type" { if element == "" { element = "input" } desc := prometheus.NewDesc(name+"_celsius", "Hardware monitor for temperature ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue*0.001, labels...) continue } if sensorType == "curr" { desc := prometheus.NewDesc(name+"_amps", "Hardware monitor for current ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue*0.001, labels...) continue } if sensorType == "energy" { desc := prometheus.NewDesc(name+"_joule_total", "Hardware monitor for joules used so far ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.CounterValue, parsedValue/1000000.0, labels...) continue } if sensorType == "power" && element == "accuracy" { desc := prometheus.NewDesc(name, "Hardware monitor power meter accuracy, as a ratio", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue/1000000.0, labels...) continue } if sensorType == "power" && (element == "average_interval" || element == "average_interval_min" || element == "average_interval_max") { desc := prometheus.NewDesc(name+"_seconds", "Hardware monitor power usage update interval ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue*0.001, labels...) continue } if sensorType == "power" { desc := prometheus.NewDesc(name+"_watt", "Hardware monitor for power usage in watts ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue/1000000.0, labels...) 
continue } if sensorType == "humidity" { desc := prometheus.NewDesc(name, "Hardware monitor for humidity, as a ratio (multiply with 100.0 to get the humidity as a percentage) ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue/1000000.0, labels...) continue } if sensorType == "fan" && (element == "input" || element == "min" || element == "max" || element == "target") { desc := prometheus.NewDesc(name+"_rpm", "Hardware monitor for fan revolutions per minute ("+element+")", hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue, labels...) continue } // fallback, just dump the metric as is desc := prometheus.NewDesc(name, "Hardware monitor "+sensorType+" element "+element, hwmonLabelDesc, nil) ch <- prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, parsedValue, labels...) } } return nil } func (c *hwMonCollector) hwmonName(dir string) (string, error) { // generate a name for a sensor path // sensor numbering depends on the order of linux module loading and // is thus unstable. // However the path of the device has to be stable: // - /sys/devices// // Some hardware monitors have a "name" file that exports a human // readable name that can be used. // human readable names would be bat0 or coretemp, while a path string // could be platform_applesmc.768 // preference 1: construct a name based on device name, always unique devicePath, devErr := filepath.EvalSymlinks(filepath.Join(dir, "device")) if devErr == nil { devPathPrefix, devName := filepath.Split(devicePath) _, devType := filepath.Split(strings.TrimRight(devPathPrefix, "/")) cleanDevName := cleanMetricName(devName) cleanDevType := cleanMetricName(devType) if cleanDevType != "" && cleanDevName != "" { return cleanDevType + "_" + cleanDevName, nil } if cleanDevName != "" { return cleanDevName, nil } } // preference 2: is there a name file sysnameRaw, nameErr := os.ReadFile(filepath.Join(dir, "name")) if nameErr == nil && string(sysnameRaw) != "" { cleanName := cleanMetricName(string(sysnameRaw)) if cleanName != "" { return cleanName, nil } } // it looks bad, name and device don't provide enough information // return a hwmon[0-9]* name realDir, err := filepath.EvalSymlinks(dir) if err != nil { return "", err } // take the last path element, this will be hwmonX _, name := filepath.Split(realDir) cleanName := cleanMetricName(name) if cleanName != "" { return cleanName, nil } return "", errors.New("Could not derive a monitoring name for " + dir) } // hwmonHumanReadableChipName is similar to the methods in hwmonName, but with // different precedences -- we can allow duplicates here. 
func (c *hwMonCollector) hwmonHumanReadableChipName(dir string) (string, error) { sysnameRaw, nameErr := os.ReadFile(filepath.Join(dir, "name")) if nameErr != nil { return "", nameErr } if string(sysnameRaw) != "" { cleanName := cleanMetricName(string(sysnameRaw)) if cleanName != "" { return cleanName, nil } } return "", errors.New("Could not derive a human-readable chip type for " + dir) } func (c *hwMonCollector) Update(ch chan<- prometheus.Metric) error { // Step 1: scan /sys/class/hwmon, resolve all symlinks and call // updatesHwmon for each folder hwmonPathName := filepath.Join(sysFilePath("class"), "hwmon") hwmonFiles, err := os.ReadDir(hwmonPathName) if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "hwmon collector metrics are not available for this system") return ErrNoData } return err } for _, hwDir := range hwmonFiles { hwmonXPathName := filepath.Join(hwmonPathName, hwDir.Name()) fileInfo, _ := os.Lstat(hwmonXPathName) if fileInfo.Mode()&os.ModeSymlink > 0 { fileInfo, err = os.Stat(hwmonXPathName) if err != nil { continue } } if !fileInfo.IsDir() { continue } if lastErr := c.updateHwmon(ch, hwmonXPathName); lastErr != nil { err = lastErr } } return err } node_exporter-1.7.0/collector/infiniband_linux.go000066400000000000000000000250701452426057600222500ustar00rootroot00000000000000// Copyright 2017-2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noinfiniband // +build !noinfiniband package collector import ( "errors" "fmt" "os" "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) type infinibandCollector struct { fs sysfs.FS metricDescs map[string]*prometheus.Desc logger log.Logger subsystem string } func init() { registerCollector("infiniband", defaultEnabled, NewInfiniBandCollector) } // NewInfiniBandCollector returns a new Collector exposing InfiniBand stats. func NewInfiniBandCollector(logger log.Logger) (Collector, error) { var i infinibandCollector var err error i.fs, err = sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } i.logger = logger // Detailed description for all metrics. 
descriptions := map[string]string{ "legacy_multicast_packets_received_total": "Number of multicast packets received", "legacy_multicast_packets_transmitted_total": "Number of multicast packets transmitted", "legacy_data_received_bytes_total": "Number of data octets received on all links", "legacy_packets_received_total": "Number of data packets received on all links", "legacy_unicast_packets_received_total": "Number of unicast packets received", "legacy_unicast_packets_transmitted_total": "Number of unicast packets transmitted", "legacy_data_transmitted_bytes_total": "Number of data octets transmitted on all links", "legacy_packets_transmitted_total": "Number of data packets received on all links", "excessive_buffer_overrun_errors_total": "Number of times that OverrunErrors consecutive flow control update periods occurred, each having at least one overrun error.", "link_downed_total": "Number of times the link failed to recover from an error state and went down", "link_error_recovery_total": "Number of times the link successfully recovered from an error state", "local_link_integrity_errors_total": "Number of times that the count of local physical errors exceeded the threshold specified by LocalPhyErrors.", "multicast_packets_received_total": "Number of multicast packets received (including errors)", "multicast_packets_transmitted_total": "Number of multicast packets transmitted (including errors)", "physical_state_id": "Physical state of the InfiniBand port (0: no change, 1: sleep, 2: polling, 3: disable, 4: shift, 5: link up, 6: link error recover, 7: phytest)", "port_constraint_errors_received_total": "Number of packets received on the switch physical port that are discarded", "port_constraint_errors_transmitted_total": "Number of packets not transmitted from the switch physical port", "port_data_received_bytes_total": "Number of data octets received on all links", "port_data_transmitted_bytes_total": "Number of data octets transmitted on all links", "port_discards_received_total": "Number of inbound packets discarded by the port because the port is down or congested", "port_discards_transmitted_total": "Number of outbound packets discarded by the port because the port is down or congested", "port_errors_received_total": "Number of packets containing an error that were received on this port", "port_packets_received_total": "Number of packets received on all VLs by this port (including errors)", "port_packets_transmitted_total": "Number of packets transmitted on all VLs from this port (including errors)", "port_transmit_wait_total": "Number of ticks during which the port had data to transmit but no data was sent during the entire tick", "rate_bytes_per_second": "Maximum signal transfer rate", "state_id": "State of the InfiniBand port (0: no change, 1: down, 2: init, 3: armed, 4: active, 5: act defer)", "unicast_packets_received_total": "Number of unicast packets received (including errors)", "unicast_packets_transmitted_total": "Number of unicast packets transmitted (including errors)", "port_receive_remote_physical_errors_total": "Number of packets marked with the EBP (End of Bad Packet) delimiter received on the port.", "port_receive_switch_relay_errors_total": "Number of packets that could not be forwarded by the switch.", "symbol_error_total": "Number of minor link errors detected on one or more physical lanes.", "vl15_dropped_total": "Number of incoming VL15 packets dropped due to resource limitations.", } i.metricDescs = make(map[string]*prometheus.Desc) i.subsystem = "infiniband" 
for metricName, description := range descriptions { i.metricDescs[metricName] = prometheus.NewDesc( prometheus.BuildFQName(namespace, i.subsystem, metricName), description, []string{"device", "port"}, nil, ) } return &i, nil } func (c *infinibandCollector) pushMetric(ch chan<- prometheus.Metric, name string, value uint64, deviceName string, port string, valueType prometheus.ValueType) { ch <- prometheus.MustNewConstMetric(c.metricDescs[name], valueType, float64(value), deviceName, port) } func (c *infinibandCollector) pushCounter(ch chan<- prometheus.Metric, name string, value *uint64, deviceName string, port string) { if value != nil { c.pushMetric(ch, name, *value, deviceName, port, prometheus.CounterValue) } } func (c *infinibandCollector) Update(ch chan<- prometheus.Metric) error { devices, err := c.fs.InfiniBandClass() if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "infiniband statistics not found, skipping") return ErrNoData } return fmt.Errorf("error obtaining InfiniBand class info: %w", err) } for _, device := range devices { infoDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "info"), "Non-numeric data from /sys/class/infiniband/, value is always 1.", []string{"device", "board_id", "firmware_version", "hca_type"}, nil, ) infoValue := 1.0 ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, device.Name, device.BoardID, device.FirmwareVersion, device.HCAType) for _, port := range device.Ports { portStr := strconv.FormatUint(uint64(port.Port), 10) c.pushMetric(ch, "state_id", uint64(port.StateID), port.Name, portStr, prometheus.GaugeValue) c.pushMetric(ch, "physical_state_id", uint64(port.PhysStateID), port.Name, portStr, prometheus.GaugeValue) c.pushMetric(ch, "rate_bytes_per_second", port.Rate, port.Name, portStr, prometheus.GaugeValue) c.pushCounter(ch, "legacy_multicast_packets_received_total", port.Counters.LegacyPortMulticastRcvPackets, port.Name, portStr) c.pushCounter(ch, "legacy_multicast_packets_transmitted_total", port.Counters.LegacyPortMulticastXmitPackets, port.Name, portStr) c.pushCounter(ch, "legacy_data_received_bytes_total", port.Counters.LegacyPortRcvData64, port.Name, portStr) c.pushCounter(ch, "legacy_packets_received_total", port.Counters.LegacyPortRcvPackets64, port.Name, portStr) c.pushCounter(ch, "legacy_unicast_packets_received_total", port.Counters.LegacyPortUnicastRcvPackets, port.Name, portStr) c.pushCounter(ch, "legacy_unicast_packets_transmitted_total", port.Counters.LegacyPortUnicastXmitPackets, port.Name, portStr) c.pushCounter(ch, "legacy_data_transmitted_bytes_total", port.Counters.LegacyPortXmitData64, port.Name, portStr) c.pushCounter(ch, "legacy_packets_transmitted_total", port.Counters.LegacyPortXmitPackets64, port.Name, portStr) c.pushCounter(ch, "excessive_buffer_overrun_errors_total", port.Counters.ExcessiveBufferOverrunErrors, port.Name, portStr) c.pushCounter(ch, "link_downed_total", port.Counters.LinkDowned, port.Name, portStr) c.pushCounter(ch, "link_error_recovery_total", port.Counters.LinkErrorRecovery, port.Name, portStr) c.pushCounter(ch, "local_link_integrity_errors_total", port.Counters.LocalLinkIntegrityErrors, port.Name, portStr) c.pushCounter(ch, "multicast_packets_received_total", port.Counters.MulticastRcvPackets, port.Name, portStr) c.pushCounter(ch, "multicast_packets_transmitted_total", port.Counters.MulticastXmitPackets, port.Name, portStr) c.pushCounter(ch, "port_constraint_errors_received_total", 
port.Counters.PortRcvConstraintErrors, port.Name, portStr) c.pushCounter(ch, "port_constraint_errors_transmitted_total", port.Counters.PortXmitConstraintErrors, port.Name, portStr) c.pushCounter(ch, "port_data_received_bytes_total", port.Counters.PortRcvData, port.Name, portStr) c.pushCounter(ch, "port_data_transmitted_bytes_total", port.Counters.PortXmitData, port.Name, portStr) c.pushCounter(ch, "port_discards_received_total", port.Counters.PortRcvDiscards, port.Name, portStr) c.pushCounter(ch, "port_discards_transmitted_total", port.Counters.PortXmitDiscards, port.Name, portStr) c.pushCounter(ch, "port_errors_received_total", port.Counters.PortRcvErrors, port.Name, portStr) c.pushCounter(ch, "port_packets_received_total", port.Counters.PortRcvPackets, port.Name, portStr) c.pushCounter(ch, "port_packets_transmitted_total", port.Counters.PortXmitPackets, port.Name, portStr) c.pushCounter(ch, "port_transmit_wait_total", port.Counters.PortXmitWait, port.Name, portStr) c.pushCounter(ch, "unicast_packets_received_total", port.Counters.UnicastRcvPackets, port.Name, portStr) c.pushCounter(ch, "unicast_packets_transmitted_total", port.Counters.UnicastXmitPackets, port.Name, portStr) c.pushCounter(ch, "port_receive_remote_physical_errors_total", port.Counters.PortRcvRemotePhysicalErrors, port.Name, portStr) c.pushCounter(ch, "port_receive_switch_relay_errors_total", port.Counters.PortRcvSwitchRelayErrors, port.Name, portStr) c.pushCounter(ch, "symbol_error_total", port.Counters.SymbolError, port.Name, portStr) c.pushCounter(ch, "vl15_dropped_total", port.Counters.VL15Dropped, port.Name, portStr) } } return nil } node_exporter-1.7.0/collector/interrupts_common.go000066400000000000000000000024501452426057600225140ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (linux || openbsd) && !nointerrupts // +build linux openbsd // +build !nointerrupts package collector import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) type interruptsCollector struct { desc typedDesc logger log.Logger } func init() { registerCollector("interrupts", defaultDisabled, NewInterruptsCollector) } // NewInterruptsCollector returns a new Collector exposing interrupts stats. func NewInterruptsCollector(logger log.Logger) (Collector, error) { return &interruptsCollector{ desc: typedDesc{prometheus.NewDesc( namespace+"_interrupts_total", "Interrupt details.", interruptLabelNames, nil, ), prometheus.CounterValue}, logger: logger, }, nil } node_exporter-1.7.0/collector/interrupts_linux.go000066400000000000000000000053431452426057600223670ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nointerrupts // +build !nointerrupts package collector import ( "bufio" "errors" "fmt" "io" "os" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" ) var ( interruptLabelNames = []string{"cpu", "type", "info", "devices"} ) func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) (err error) { interrupts, err := getInterrupts() if err != nil { return fmt.Errorf("couldn't get interrupts: %w", err) } for name, interrupt := range interrupts { for cpuNo, value := range interrupt.values { fv, err := strconv.ParseFloat(value, 64) if err != nil { return fmt.Errorf("invalid value %s in interrupts: %w", value, err) } ch <- c.desc.mustNewConstMetric(fv, strconv.Itoa(cpuNo), name, interrupt.info, interrupt.devices) } } return err } type interrupt struct { info string devices string values []string } func getInterrupts() (map[string]interrupt, error) { file, err := os.Open(procFilePath("interrupts")) if err != nil { return nil, err } defer file.Close() return parseInterrupts(file) } func parseInterrupts(r io.Reader) (map[string]interrupt, error) { var ( interrupts = map[string]interrupt{} scanner = bufio.NewScanner(r) ) if !scanner.Scan() { return nil, errors.New("interrupts empty") } cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu for scanner.Scan() { // On aarch64 there can be zero space between the name/label // and the values, so we need to split on `:` before using // strings.Fields() to split on fields. group := strings.SplitN(scanner.Text(), ":", 2) if len(group) > 1 { parts := strings.Fields(group[1]) if len(parts) < cpuNum+1 { // irq + one column per cpu + details, continue // we ignore ERR and MIS for now } intName := strings.TrimLeft(group[0], " ") intr := interrupt{ values: parts[0:cpuNum], } if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt intr.info = parts[cpuNum] intr.devices = strings.Join(parts[cpuNum+1:], " ") } else { intr.info = strings.Join(parts[cpuNum:], " ") } interrupts[intName] = intr } } return interrupts, scanner.Err() } node_exporter-1.7.0/collector/interrupts_linux_test.go000066400000000000000000000036361452426057600234310ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nointerrupts // +build !nointerrupts package collector import ( "os" "testing" ) func TestInterrupts(t *testing.T) { file, err := os.Open("fixtures/proc/interrupts") if err != nil { t.Fatal(err) } defer file.Close() interrupts, err := parseInterrupts(file) if err != nil { t.Fatal(err) } if want, got := "5031", interrupts["NMI"].values[1]; want != got { t.Errorf("want interrupts value %s, got %s", want, got) } if want, got := "4968", interrupts["NMI"].values[3]; want != got { t.Errorf("want interrupts value %s, got %s", want, got) } if want, got := "IR-IO-APIC-edge", interrupts["12"].info; want != got { t.Errorf("want interrupts info %s, got %s", want, got) } if want, got := "i8042", interrupts["12"].devices; want != got { t.Errorf("want interrupts devices %s, got %s", want, got) } } // https://github.com/prometheus/node_exporter/issues/2557 // On aarch64 the interrupts file can have zero spaces between the label of // the row and the first value if the value is large func TestInterruptsArm(t *testing.T) { file, err := os.Open("fixtures/proc/interrupts_aarch64") if err != nil { t.Fatal(err) } defer file.Close() interrupts, err := parseInterrupts(file) if err != nil { t.Fatal(err) } if _, ok := interrupts["IPI0"]; !ok { t.Errorf("IPI0 label not found in interrupts") } } node_exporter-1.7.0/collector/interrupts_openbsd.go000066400000000000000000000056171452426057600226660ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nointerrupts && !amd64 // +build !nointerrupts,!amd64 package collector import ( "fmt" "strconv" "github.com/prometheus/client_golang/prometheus" ) /* #include #include #include #include #include struct intr { int vector; char device[128]; u_int64_t count; }; int sysctl_nintr(void) { int nintr, mib[4]; size_t siz; mib[0] = CTL_KERN; mib[1] = KERN_INTRCNT; mib[2] = KERN_INTRCNT_NUM; siz = sizeof(nintr); if (sysctl(mib, 3, &nintr, &siz, NULL, 0) < 0) { return -1; } return nintr; } int sysctl_intr(struct intr *intr, int idx) { int mib[4]; size_t siz; u_quad_t cnt; mib[0] = CTL_KERN; mib[1] = KERN_INTRCNT; mib[2] = KERN_INTRCNT_NAME; mib[3] = idx; siz = sizeof intr->device; if (sysctl(mib, 4, intr->device, &siz, NULL, 0) < 0) { return -1; } mib[0] = CTL_KERN; mib[1] = KERN_INTRCNT; mib[2] = KERN_INTRCNT_VECTOR; mib[3] = idx; siz = sizeof intr->vector; if (sysctl(mib, 4, &intr->vector, &siz, NULL, 0) < 0) { return -1; } mib[0] = CTL_KERN; mib[1] = KERN_INTRCNT; mib[2] = KERN_INTRCNT_CNT; mib[3] = idx; siz = sizeof(cnt); if (sysctl(mib, 4, &cnt, &siz, NULL, 0) < 0) { return -1; } intr->count = cnt; return 1; } */ import "C" var ( interruptLabelNames = []string{"cpu", "type", "devices"} ) func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error { interrupts, err := getInterrupts() if err != nil { return fmt.Errorf("couldn't get interrupts: %w", err) } for dev, interrupt := range interrupts { for cpuNo, value := range interrupt.values { ch <- c.desc.mustNewConstMetric( value, strconv.Itoa(cpuNo), strconv.Itoa(interrupt.vector), dev, ) } } return nil } type interrupt struct { vector int device string values []float64 } func getInterrupts() (map[string]interrupt, error) { var ( cintr C.struct_intr interrupts = map[string]interrupt{} ) nintr := C.sysctl_nintr() for i := C.int(0); i < nintr; i++ { _, err := C.sysctl_intr(&cintr, i) if err != nil { return nil, err } dev := C.GoString(&cintr.device[0]) interrupts[dev] = interrupt{ vector: int(cintr.vector), device: dev, // XXX: openbsd appears to only handle interrupts on cpu 0. values: []float64{float64(cintr.count)}, } } return interrupts, nil } node_exporter-1.7.0/collector/interrupts_openbsd_amd64.go000066400000000000000000000047071452426057600236600ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nointerrupts // +build !nointerrupts package collector import ( "fmt" "strconv" "unsafe" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) const ( KERN_INTRCNT = 63 KERN_INTRCNT_NUM = 1 KERN_INTRCNT_CNT = 2 KERN_INTRCNT_NAME = 3 KERN_INTRCNT_VECTOR = 4 ) func nintr() _C_int { mib := [3]_C_int{unix.CTL_KERN, KERN_INTRCNT, KERN_INTRCNT_NUM} buf, err := sysctl(mib[:]) if err != nil { return 0 } return *(*_C_int)(unsafe.Pointer(&buf[0])) } func intr(idx _C_int) (itr interrupt, err error) { mib := [4]_C_int{unix.CTL_KERN, KERN_INTRCNT, KERN_INTRCNT_NAME, idx} buf, err := sysctl(mib[:]) if err != nil { return } dev := *(*[128]byte)(unsafe.Pointer(&buf[0])) itr.device = string(dev[:]) mib[2] = KERN_INTRCNT_VECTOR buf, err = sysctl(mib[:]) if err != nil { return } itr.vector = *(*int)(unsafe.Pointer(&buf[0])) mib[2] = KERN_INTRCNT_CNT buf, err = sysctl(mib[:]) if err != nil { return } count := *(*uint64)(unsafe.Pointer(&buf[0])) itr.values = []float64{float64(count)} return } var interruptLabelNames = []string{"cpu", "type", "devices"} func (c *interruptsCollector) Update(ch chan<- prometheus.Metric) error { interrupts, err := getInterrupts() if err != nil { return fmt.Errorf("couldn't get interrupts: %s", err) } for dev, interrupt := range interrupts { for cpuNo, value := range interrupt.values { ch <- c.desc.mustNewConstMetric( value, strconv.Itoa(cpuNo), fmt.Sprintf("%d", interrupt.vector), dev, ) } } return nil } type interrupt struct { vector int device string values []float64 } func getInterrupts() (map[string]interrupt, error) { var interrupts = map[string]interrupt{} n := nintr() for i := _C_int(0); i < n; i++ { itr, err := intr(i) if err != nil { return nil, err } interrupts[itr.device] = itr } return interrupts, nil } node_exporter-1.7.0/collector/ipvs_linux.go000066400000000000000000000165751452426057600211420ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !noipvs // +build !noipvs package collector import ( "errors" "fmt" "os" "sort" "strconv" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type ipvsCollector struct { Collector fs procfs.FS backendLabels []string backendConnectionsActive, backendConnectionsInact, backendWeight typedDesc connections, incomingPackets, outgoingPackets, incomingBytes, outgoingBytes typedDesc logger log.Logger } type ipvsBackendStatus struct { ActiveConn uint64 InactConn uint64 Weight uint64 } const ( ipvsLabelLocalAddress = "local_address" ipvsLabelLocalPort = "local_port" ipvsLabelRemoteAddress = "remote_address" ipvsLabelRemotePort = "remote_port" ipvsLabelProto = "proto" ipvsLabelLocalMark = "local_mark" ) var ( fullIpvsBackendLabels = []string{ ipvsLabelLocalAddress, ipvsLabelLocalPort, ipvsLabelRemoteAddress, ipvsLabelRemotePort, ipvsLabelProto, ipvsLabelLocalMark, } ipvsLabels = kingpin.Flag("collector.ipvs.backend-labels", "Comma separated list for IPVS backend stats labels.").Default(strings.Join(fullIpvsBackendLabels, ",")).String() ) func init() { registerCollector("ipvs", defaultEnabled, NewIPVSCollector) } // NewIPVSCollector sets up a new collector for IPVS metrics. It accepts the // "procfs" config parameter to override the default proc location (/proc). func NewIPVSCollector(logger log.Logger) (Collector, error) { return newIPVSCollector(logger) } func newIPVSCollector(logger log.Logger) (*ipvsCollector, error) { var ( c ipvsCollector err error subsystem = "ipvs" ) if c.backendLabels, err = c.parseIpvsLabels(*ipvsLabels); err != nil { return nil, err } c.logger = logger c.fs, err = procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } c.connections = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "connections_total"), "The total number of connections made.", nil, nil, ), prometheus.CounterValue} c.incomingPackets = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "incoming_packets_total"), "The total number of incoming packets.", nil, nil, ), prometheus.CounterValue} c.outgoingPackets = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "outgoing_packets_total"), "The total number of outgoing packets.", nil, nil, ), prometheus.CounterValue} c.incomingBytes = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "incoming_bytes_total"), "The total amount of incoming data.", nil, nil, ), prometheus.CounterValue} c.outgoingBytes = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "outgoing_bytes_total"), "The total amount of outgoing data.", nil, nil, ), prometheus.CounterValue} c.backendConnectionsActive = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "backend_connections_active"), "The current active connections by local and remote address.", c.backendLabels, nil, ), prometheus.GaugeValue} c.backendConnectionsInact = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "backend_connections_inactive"), "The current inactive connections by local and remote address.", c.backendLabels, nil, ), prometheus.GaugeValue} c.backendWeight = typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "backend_weight"), "The current backend weight by local and remote address.", c.backendLabels, nil, ), prometheus.GaugeValue} return &c, 
nil } func (c *ipvsCollector) Update(ch chan<- prometheus.Metric) error { ipvsStats, err := c.fs.IPVSStats() if err != nil { // Cannot access ipvs metrics, report no error. if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "ipvs collector metrics are not available for this system") return ErrNoData } return fmt.Errorf("could not get IPVS stats: %w", err) } ch <- c.connections.mustNewConstMetric(float64(ipvsStats.Connections)) ch <- c.incomingPackets.mustNewConstMetric(float64(ipvsStats.IncomingPackets)) ch <- c.outgoingPackets.mustNewConstMetric(float64(ipvsStats.OutgoingPackets)) ch <- c.incomingBytes.mustNewConstMetric(float64(ipvsStats.IncomingBytes)) ch <- c.outgoingBytes.mustNewConstMetric(float64(ipvsStats.OutgoingBytes)) backendStats, err := c.fs.IPVSBackendStatus() if err != nil { return fmt.Errorf("could not get backend status: %w", err) } sums := map[string]ipvsBackendStatus{} labelValues := map[string][]string{} for _, backend := range backendStats { localAddress := "" if backend.LocalAddress.String() != "" { localAddress = backend.LocalAddress.String() } kv := make([]string, len(c.backendLabels)) for i, label := range c.backendLabels { var labelValue string switch label { case ipvsLabelLocalAddress: labelValue = localAddress case ipvsLabelLocalPort: labelValue = strconv.FormatUint(uint64(backend.LocalPort), 10) case ipvsLabelRemoteAddress: labelValue = backend.RemoteAddress.String() case ipvsLabelRemotePort: labelValue = strconv.FormatUint(uint64(backend.RemotePort), 10) case ipvsLabelProto: labelValue = backend.Proto case ipvsLabelLocalMark: labelValue = backend.LocalMark } kv[i] = labelValue } key := strings.Join(kv, "-") status := sums[key] status.ActiveConn += backend.ActiveConn status.InactConn += backend.InactConn status.Weight += backend.Weight sums[key] = status labelValues[key] = kv } for key, status := range sums { kv := labelValues[key] ch <- c.backendConnectionsActive.mustNewConstMetric(float64(status.ActiveConn), kv...) ch <- c.backendConnectionsInact.mustNewConstMetric(float64(status.InactConn), kv...) ch <- c.backendWeight.mustNewConstMetric(float64(status.Weight), kv...) } return nil } func (c *ipvsCollector) parseIpvsLabels(labelString string) ([]string, error) { labels := strings.Split(labelString, ",") labelSet := make(map[string]bool, len(labels)) results := make([]string, 0, len(labels)) for _, label := range labels { if label != "" { labelSet[label] = true } } for _, label := range fullIpvsBackendLabels { if labelSet[label] { results = append(results, label) } delete(labelSet, label) } if len(labelSet) > 0 { keys := make([]string, 0, len(labelSet)) for label := range labelSet { keys = append(keys, label) } sort.Strings(keys) return nil, fmt.Errorf("unknown IPVS backend labels: %q", strings.Join(keys, ", ")) } return results, nil } node_exporter-1.7.0/collector/ipvs_linux_test.go000066400000000000000000000223501452426057600221650ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !noipvs // +build !noipvs package collector import ( "errors" "fmt" "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/go-kit/log" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) func TestIPVSCollector(t *testing.T) { testcases := []struct { labels string expects []string err error }{ { "", []string{ prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto", "local_mark"}, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto", "local_mark"}, nil).String(), prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto", "local_mark"}, nil).String(), }, nil, }, { "", []string{ prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", nil, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", nil, nil).String(), prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", nil, nil).String(), }, nil, }, { "local_port", []string{ prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_port"}, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local 
and remote address.", []string{"local_port"}, nil).String(), prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_port"}, nil).String(), }, nil, }, { "local_address,local_port", []string{ prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(), prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_address", "local_port"}, nil).String(), prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_address", "local_port"}, nil).String(), prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_address", "local_port"}, nil).String(), }, nil, }, { "invalid_label", nil, errors.New(`unknown IPVS backend labels: "invalid_label"`), }, { "invalid_label,bad_label", nil, errors.New(`unknown IPVS backend labels: "bad_label, invalid_label"`), }, } for _, test := range testcases { t.Run(test.labels, func(t *testing.T) { args := []string{"--path.procfs", "fixtures/proc"} if test.labels != "" { args = append(args, "--collector.ipvs.backend-labels="+test.labels) } if _, err := kingpin.CommandLine.Parse(args); err != nil { t.Fatal(err) } collector, err := newIPVSCollector(log.NewNopLogger()) if err != nil { if test.err == nil { t.Fatal(err) } if !strings.Contains(err.Error(), test.err.Error()) { t.Fatalf("expect error: %v contains %v", err, test.err) } return } if test.err != nil { t.Fatalf("expect error: %v but got no error", test.err) } sink := make(chan prometheus.Metric) go func() { err = collector.Update(sink) if err != nil { panic(fmt.Sprintf("failed to update collector: %v", err)) } }() for _, expected := range test.expects { got := (<-sink).Desc().String() if expected != got { t.Fatalf("Expected '%s' but got '%s'", expected, got) } } }) } } // mock collector type miniCollector struct { c Collector } func (c miniCollector) Collect(ch chan<- prometheus.Metric) { c.c.Update(ch) } func (c miniCollector) Describe(ch chan<- *prometheus.Desc) { prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "fake", Subsystem: "fake", Name: "fake", Help: "fake", }).Describe(ch) } func TestIPVSCollectorResponse(t *testing.T) { testcases := []struct { labels string metricsFile string }{ {"", "fixtures/ip_vs_result.txt"}, {"", "fixtures/ip_vs_result_lbs_none.txt"}, {"local_port", "fixtures/ip_vs_result_lbs_local_port.txt"}, {"local_address,local_port", "fixtures/ip_vs_result_lbs_local_address_local_port.txt"}, } for _, test := range testcases { t.Run(test.labels, func(t *testing.T) { args := []string{"--path.procfs", "fixtures/proc"} if test.labels != "" { args = append(args, "--collector.ipvs.backend-labels="+test.labels) } if _, err := kingpin.CommandLine.Parse(args); err != nil { t.Fatal(err) } collector, err := NewIPVSCollector(log.NewNopLogger()) if err != nil { t.Fatal(err) } registry := prometheus.NewRegistry() 
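// Wrap the collector in miniCollector so the registry can drive its Update method, then scrape it through promhttp below.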
registry.MustRegister(miniCollector{c: collector}) rw := httptest.NewRecorder() promhttp.InstrumentMetricHandler(registry, promhttp.HandlerFor(registry, promhttp.HandlerOpts{})).ServeHTTP(rw, &http.Request{}) wantMetrics, err := os.ReadFile(test.metricsFile) if err != nil { t.Fatalf("unable to read input test file %s: %s", test.metricsFile, err) } wantLines := strings.Split(string(wantMetrics), "\n") gotLines := strings.Split(string(rw.Body.String()), "\n") gotLinesIdx := 0 // Until the Prometheus Go client library offers better testability // (https://github.com/prometheus/client_golang/issues/58), we simply compare // verbatim text-format metrics outputs, but ignore any lines we don't have // in the fixture. Put differently, we are only testing that each line from // the fixture is present, in the order given. wantLoop: for _, want := range wantLines { for _, got := range gotLines[gotLinesIdx:] { if want == got { // this is a line we are interested in, and it is correct continue wantLoop } gotLinesIdx++ } // if this point is reached, the line we want was missing t.Fatalf("Missing expected output line(s), first missing line is %s", want) } }) } } node_exporter-1.7.0/collector/ksmd_linux.go000066400000000000000000000043651452426057600211110ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noksmd // +build !noksmd package collector import ( "fmt" "path/filepath" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) var ( ksmdFiles = []string{"full_scans", "merge_across_nodes", "pages_shared", "pages_sharing", "pages_to_scan", "pages_unshared", "pages_volatile", "run", "sleep_millisecs"} ) type ksmdCollector struct { metricDescs map[string]*prometheus.Desc logger log.Logger } func init() { registerCollector("ksmd", defaultDisabled, NewKsmdCollector) } func getCanonicalMetricName(filename string) string { switch filename { case "full_scans": return filename + "_total" case "sleep_millisecs": return "sleep_seconds" default: return filename } } // NewKsmdCollector returns a new Collector exposing kernel/system statistics. func NewKsmdCollector(logger log.Logger) (Collector, error) { subsystem := "ksmd" descs := make(map[string]*prometheus.Desc) for _, n := range ksmdFiles { descs[n] = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, getCanonicalMetricName(n)), fmt.Sprintf("ksmd '%s' file.", n), nil, nil) } return &ksmdCollector{descs, logger}, nil } // Update implements Collector and exposes kernel and system statistics. 
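// Values are read from files under sysfs kernel/mm/ksm; full_scans is exposed as a counter and sleep_millisecs is converted to seconds.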
func (c *ksmdCollector) Update(ch chan<- prometheus.Metric) error { for _, n := range ksmdFiles { val, err := readUintFromFile(sysFilePath(filepath.Join("kernel/mm/ksm", n))) if err != nil { return err } t := prometheus.GaugeValue v := float64(val) switch n { case "full_scans": t = prometheus.CounterValue case "sleep_millisecs": v /= 1000 } ch <- prometheus.MustNewConstMetric(c.metricDescs[n], t, v) } return nil } node_exporter-1.7.0/collector/kvm_bsd.c000066400000000000000000000022641452426057600201720ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build !nomeminfo // +build freebsd dragonfly #include #include #include #include #include int _kvm_swap_used_pages(uint64_t *out) { const int total_only = 1; // from kvm_getswapinfo(3) kvm_t *kd; struct kvm_swap current; kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, NULL); if (kd == NULL) { return -1; } if (kvm_getswapinfo(kd, ¤t, total_only, 0) == -1) { goto error1; } if (kvm_close(kd) != 0) { return -1; } kd = NULL; *out = current.ksw_used; return 0; error1: if (kd != NULL) { kvm_close(kd); } return -1; } node_exporter-1.7.0/collector/kvm_bsd.go000066400000000000000000000020731452426057600203530ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nokvm && (freebsd || dragonfly) // +build !nokvm // +build freebsd dragonfly package collector import ( "fmt" "sync" ) // #cgo LDFLAGS: -lkvm // #include "kvm_bsd.h" import "C" type kvm struct { mu sync.Mutex hasErr bool } func (k *kvm) SwapUsedPages() (value uint64, err error) { k.mu.Lock() defer k.mu.Unlock() if C._kvm_swap_used_pages((*C.uint64_t)(&value)) == -1 { k.hasErr = true return 0, fmt.Errorf("couldn't get kvm stats") } return value, nil } node_exporter-1.7.0/collector/kvm_bsd.h000066400000000000000000000013041452426057600201710ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// +build !nomeminfo // +build freebsd dragonfly #include int _kvm_swap_used_pages(uint64_t *out); node_exporter-1.7.0/collector/lnstat_linux.go000066400000000000000000000035271452426057600214570ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nolnstat // +build !nolnstat package collector import ( "fmt" "strconv" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type lnstatCollector struct { logger log.Logger } func init() { registerCollector("lnstat", defaultDisabled, NewLnstatCollector) } func NewLnstatCollector(logger log.Logger) (Collector, error) { return &lnstatCollector{logger}, nil } func (c *lnstatCollector) Update(ch chan<- prometheus.Metric) error { const ( subsystem = "lnstat" ) fs, err := procfs.NewFS(*procPath) if err != nil { return fmt.Errorf("failed to open procfs: %w", err) } netStats, err := fs.NetStat() if err != nil { return fmt.Errorf("lnstat error: %s", err) } for _, netStatFile := range netStats { labelNames := []string{"subsystem", "cpu"} for header, stats := range netStatFile.Stats { for cpu, value := range stats { labelValues := []string{netStatFile.Filename, strconv.Itoa(cpu)} ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, header+"_total"), "linux network cache stats", labelNames, nil, ), prometheus.CounterValue, float64(value), labelValues..., ) } } } return nil } node_exporter-1.7.0/collector/loadavg.go000066400000000000000000000036171452426057600203500ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !noloadavg // +build darwin dragonfly freebsd linux netbsd openbsd solaris // +build !noloadavg package collector import ( "fmt" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) type loadavgCollector struct { metric []typedDesc logger log.Logger } func init() { registerCollector("loadavg", defaultEnabled, NewLoadavgCollector) } // NewLoadavgCollector returns a new Collector exposing load average stats. 
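// The node_load1, node_load5 and node_load15 gauges are filled from the platform-specific getLoad implementation.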
func NewLoadavgCollector(logger log.Logger) (Collector, error) { return &loadavgCollector{ metric: []typedDesc{ {prometheus.NewDesc(namespace+"_load1", "1m load average.", nil, nil), prometheus.GaugeValue}, {prometheus.NewDesc(namespace+"_load5", "5m load average.", nil, nil), prometheus.GaugeValue}, {prometheus.NewDesc(namespace+"_load15", "15m load average.", nil, nil), prometheus.GaugeValue}, }, logger: logger, }, nil } func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) error { loads, err := getLoad() if err != nil { return fmt.Errorf("couldn't get load: %w", err) } for i, load := range loads { level.Debug(c.logger).Log("msg", "return load", "index", i, "load", load) ch <- c.metric[i].mustNewConstMetric(load) } return err } node_exporter-1.7.0/collector/loadavg_bsd.go000066400000000000000000000022411452426057600211700ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && !noloadavg // +build darwin dragonfly freebsd netbsd openbsd // +build !noloadavg package collector import ( "unsafe" "golang.org/x/sys/unix" ) func getLoad() ([]float64, error) { type loadavg struct { load [3]uint32 scale int } b, err := unix.SysctlRaw("vm.loadavg") if err != nil { return nil, err } load := *(*loadavg)(unsafe.Pointer((&b[0]))) scale := float64(load.scale) return []float64{ float64(load.load[0]) / scale, float64(load.load[1]) / scale, float64(load.load[2]) / scale, }, nil } node_exporter-1.7.0/collector/loadavg_linux.go000066400000000000000000000026301452426057600215610ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noloadavg // +build !noloadavg package collector import ( "fmt" "os" "strconv" "strings" ) // Read loadavg from /proc. func getLoad() (loads []float64, err error) { data, err := os.ReadFile(procFilePath("loadavg")) if err != nil { return nil, err } loads, err = parseLoad(string(data)) if err != nil { return nil, err } return loads, nil } // Parse /proc loadavg and return 1m, 5m and 15m. 
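// Only the first three whitespace-separated fields are used; the process counts and last PID at the end of the line are ignored.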
func parseLoad(data string) (loads []float64, err error) { loads = make([]float64, 3) parts := strings.Fields(data) if len(parts) < 3 { return nil, fmt.Errorf("unexpected content in %s", procFilePath("loadavg")) } for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { return nil, fmt.Errorf("could not parse load '%s': %w", load, err) } } return loads, nil } node_exporter-1.7.0/collector/loadavg_linux_test.go000066400000000000000000000016611452426057600226230ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noloadavg // +build !noloadavg package collector import "testing" func TestLoad(t *testing.T) { want := []float64{0.21, 0.37, 0.39} loads, err := parseLoad("0.21 0.37 0.39 1/719 19737") if err != nil { t.Fatal(err) } for i, load := range loads { if want[i] != load { t.Fatalf("want load %f, got %f", want[i], load) } } } node_exporter-1.7.0/collector/loadavg_solaris.go000066400000000000000000000027041452426057600221000ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noloadavg // +build !noloadavg package collector import ( "fmt" "strconv" "github.com/illumos/go-kstat" ) // #include import "C" func kstatToFloat(ks *kstat.KStat, kstatKey string) float64 { kstatValue, err := ks.GetNamed(kstatKey) if err != nil { panic(err) } kstatLoadavg, err := strconv.ParseFloat( fmt.Sprintf("%.2f", float64(kstatValue.UintVal)/C.FSCALE), 64) if err != nil { panic(err) } return kstatLoadavg } func getLoad() ([]float64, error) { tok, err := kstat.Open() if err != nil { panic(err) } defer tok.Close() ks, err := tok.Lookup("unix", 0, "system_misc") if err != nil { panic(err) } loadavg1Min := kstatToFloat(ks, "avenrun_1min") loadavg5Min := kstatToFloat(ks, "avenrun_5min") loadavg15Min := kstatToFloat(ks, "avenrun_15min") return []float64{loadavg1Min, loadavg5Min, loadavg15Min}, nil } node_exporter-1.7.0/collector/logind_linux.go000066400000000000000000000144121452426057600214210ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nologind // +build !nologind package collector import ( "fmt" "os" "strconv" "github.com/go-kit/log" "github.com/godbus/dbus/v5" "github.com/prometheus/client_golang/prometheus" ) const ( logindSubsystem = "logind" dbusObject = "org.freedesktop.login1" dbusPath = "/org/freedesktop/login1" ) var ( // Taken from logind as of systemd v229. // "other" is the fallback value for unknown values (in case logind gets extended in the future). attrRemoteValues = []string{"true", "false"} attrTypeValues = []string{"other", "unspecified", "tty", "x11", "wayland", "mir", "web"} attrClassValues = []string{"other", "user", "greeter", "lock-screen", "background"} sessionsDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, logindSubsystem, "sessions"), "Number of sessions registered in logind.", []string{"seat", "remote", "type", "class"}, nil, ) ) type logindCollector struct { logger log.Logger } type logindDbus struct { conn *dbus.Conn object dbus.BusObject } type logindInterface interface { listSeats() ([]string, error) listSessions() ([]logindSessionEntry, error) getSession(logindSessionEntry) *logindSession } type logindSession struct { seat string remote string sessionType string class string } // Struct elements must be public for the reflection magic of godbus to work. type logindSessionEntry struct { SessionID string UserID uint32 UserName string SeatID string SessionObjectPath dbus.ObjectPath } type logindSeatEntry struct { SeatID string SeatObjectPath dbus.ObjectPath } func init() { registerCollector("logind", defaultDisabled, NewLogindCollector) } // NewLogindCollector returns a new Collector exposing logind statistics. 
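// Session counts are read over the system D-Bus from org.freedesktop.login1 and broken down by seat, remote, type and class.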
func NewLogindCollector(logger log.Logger) (Collector, error) { return &logindCollector{logger}, nil } func (lc *logindCollector) Update(ch chan<- prometheus.Metric) error { c, err := newDbus() if err != nil { return fmt.Errorf("unable to connect to dbus: %w", err) } defer c.conn.Close() return collectMetrics(ch, c) } func collectMetrics(ch chan<- prometheus.Metric, c logindInterface) error { seats, err := c.listSeats() if err != nil { return fmt.Errorf("unable to get seats: %w", err) } sessionList, err := c.listSessions() if err != nil { return fmt.Errorf("unable to get sessions: %w", err) } sessions := make(map[logindSession]float64) for _, s := range sessionList { session := c.getSession(s) if session != nil { sessions[*session]++ } } for _, remote := range attrRemoteValues { for _, sessionType := range attrTypeValues { for _, class := range attrClassValues { for _, seat := range seats { count := sessions[logindSession{seat, remote, sessionType, class}] ch <- prometheus.MustNewConstMetric( sessionsDesc, prometheus.GaugeValue, count, seat, remote, sessionType, class) } } } } return nil } func knownStringOrOther(value string, known []string) string { for i := range known { if value == known[i] { return value } } return "other" } func newDbus() (*logindDbus, error) { conn, err := dbus.SystemBusPrivate() if err != nil { return nil, err } methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} err = conn.Auth(methods) if err != nil { conn.Close() return nil, err } err = conn.Hello() if err != nil { conn.Close() return nil, err } object := conn.Object(dbusObject, dbus.ObjectPath(dbusPath)) return &logindDbus{ conn: conn, object: object, }, nil } func (c *logindDbus) listSeats() ([]string, error) { var result [][]interface{} err := c.object.Call(dbusObject+".Manager.ListSeats", 0).Store(&result) if err != nil { return nil, err } resultInterface := make([]interface{}, len(result)) for i := range result { resultInterface[i] = result[i] } seats := make([]logindSeatEntry, len(result)) seatsInterface := make([]interface{}, len(seats)) for i := range seats { seatsInterface[i] = &seats[i] } err = dbus.Store(resultInterface, seatsInterface...) if err != nil { return nil, err } ret := make([]string, len(seats)+1) for i := range seats { ret[i] = seats[i].SeatID } // Always add the empty seat, which is used for remote sessions like SSH ret[len(seats)] = "" return ret, nil } func (c *logindDbus) listSessions() ([]logindSessionEntry, error) { var result [][]interface{} err := c.object.Call(dbusObject+".Manager.ListSessions", 0).Store(&result) if err != nil { return nil, err } resultInterface := make([]interface{}, len(result)) for i := range result { resultInterface[i] = result[i] } sessions := make([]logindSessionEntry, len(result)) sessionsInterface := make([]interface{}, len(sessions)) for i := range sessions { sessionsInterface[i] = &sessions[i] } err = dbus.Store(resultInterface, sessionsInterface...) 
if err != nil { return nil, err } return sessions, nil } func (c *logindDbus) getSession(session logindSessionEntry) *logindSession { object := c.conn.Object(dbusObject, session.SessionObjectPath) remote, err := object.GetProperty(dbusObject + ".Session.Remote") if err != nil { return nil } sessionType, err := object.GetProperty(dbusObject + ".Session.Type") if err != nil { return nil } sessionTypeStr, ok := sessionType.Value().(string) if !ok { return nil } class, err := object.GetProperty(dbusObject + ".Session.Class") if err != nil { return nil } classStr, ok := class.Value().(string) if !ok { return nil } return &logindSession{ seat: session.SeatID, remote: remote.String(), sessionType: knownStringOrOther(sessionTypeStr, attrTypeValues), class: knownStringOrOther(classStr, attrClassValues), } } node_exporter-1.7.0/collector/logind_linux_test.go000066400000000000000000000056431452426057600224660ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nologind // +build !nologind package collector import ( "testing" "github.com/godbus/dbus/v5" "github.com/prometheus/client_golang/prometheus" ) type testLogindInterface struct{} var testSeats = []string{"seat0", ""} func (c *testLogindInterface) listSeats() ([]string, error) { return testSeats, nil } func (c *testLogindInterface) listSessions() ([]logindSessionEntry, error) { return []logindSessionEntry{ { SessionID: "1", UserID: 0, UserName: "", SeatID: "", SessionObjectPath: dbus.ObjectPath("/org/freedesktop/login1/session/1"), }, { SessionID: "2", UserID: 0, UserName: "", SeatID: "seat0", SessionObjectPath: dbus.ObjectPath("/org/freedesktop/login1/session/2"), }, }, nil } func (c *testLogindInterface) getSession(session logindSessionEntry) *logindSession { sessions := map[dbus.ObjectPath]*logindSession{ dbus.ObjectPath("/org/freedesktop/login1/session/1"): { seat: session.SeatID, remote: "true", sessionType: knownStringOrOther("tty", attrTypeValues), class: knownStringOrOther("user", attrClassValues), }, dbus.ObjectPath("/org/freedesktop/login1/session/2"): { seat: session.SeatID, remote: "false", sessionType: knownStringOrOther("x11", attrTypeValues), class: knownStringOrOther("greeter", attrClassValues), }, } return sessions[session.SessionObjectPath] } func TestLogindCollectorKnownStringOrOther(t *testing.T) { known := []string{"foo", "bar"} actual := knownStringOrOther("foo", known) expected := "foo" if actual != expected { t.Errorf("knownStringOrOther failed: got %q, expected %q.", actual, expected) } actual = knownStringOrOther("baz", known) expected = "other" if actual != expected { t.Errorf("knownStringOrOther failed: got %q, expected %q.", actual, expected) } } func TestLogindCollectorCollectMetrics(t *testing.T) { ch := make(chan prometheus.Metric) go func() { collectMetrics(ch, &testLogindInterface{}) close(ch) }() count := 0 for range ch { count++ } expected := len(testSeats) * len(attrRemoteValues) * len(attrTypeValues) * len(attrClassValues) if count != 
expected { t.Errorf("collectMetrics did not generate the expected number of metrics: got %d, expected %d.", count, expected) } } node_exporter-1.7.0/collector/mdadm_linux.go000066400000000000000000000115171452426057600212320ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nomdadm // +build !nomdadm package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type mdadmCollector struct { logger log.Logger } func init() { registerCollector("mdadm", defaultEnabled, NewMdadmCollector) } // NewMdadmCollector returns a new Collector exposing raid statistics. func NewMdadmCollector(logger log.Logger) (Collector, error) { return &mdadmCollector{logger}, nil } var ( activeDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "state"), "Indicates the state of md-device.", []string{"device"}, prometheus.Labels{"state": "active"}, ) inActiveDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "state"), "Indicates the state of md-device.", []string{"device"}, prometheus.Labels{"state": "inactive"}, ) recoveringDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "state"), "Indicates the state of md-device.", []string{"device"}, prometheus.Labels{"state": "recovering"}, ) resyncDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "state"), "Indicates the state of md-device.", []string{"device"}, prometheus.Labels{"state": "resync"}, ) checkDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "state"), "Indicates the state of md-device.", []string{"device"}, prometheus.Labels{"state": "check"}, ) disksDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "disks"), "Number of active/failed/spare disks of device.", []string{"device", "state"}, nil, ) disksTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "disks_required"), "Total number of disks of device.", []string{"device"}, nil, ) blocksTotalDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "blocks"), "Total number of blocks on device.", []string{"device"}, nil, ) blocksSyncedDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "md", "blocks_synced"), "Number of blocks synced on device.", []string{"device"}, nil, ) ) func (c *mdadmCollector) Update(ch chan<- prometheus.Metric) error { fs, err := procfs.NewFS(*procPath) if err != nil { return fmt.Errorf("failed to open procfs: %w", err) } mdStats, err := fs.MDStat() if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "Not collecting mdstat, file does not exist", "file", *procPath) return ErrNoData } return fmt.Errorf("error parsing mdstatus: %w", err) } for _, mdStat := range mdStats { level.Debug(c.logger).Log("msg", "collecting metrics for device", "device", mdStat.Name) stateVals := make(map[string]float64) stateVals[mdStat.ActivityState] = 1 ch <- 
prometheus.MustNewConstMetric( disksTotalDesc, prometheus.GaugeValue, float64(mdStat.DisksTotal), mdStat.Name, ) ch <- prometheus.MustNewConstMetric( disksDesc, prometheus.GaugeValue, float64(mdStat.DisksActive), mdStat.Name, "active", ) ch <- prometheus.MustNewConstMetric( disksDesc, prometheus.GaugeValue, float64(mdStat.DisksFailed), mdStat.Name, "failed", ) ch <- prometheus.MustNewConstMetric( disksDesc, prometheus.GaugeValue, float64(mdStat.DisksSpare), mdStat.Name, "spare", ) ch <- prometheus.MustNewConstMetric( activeDesc, prometheus.GaugeValue, stateVals["active"], mdStat.Name, ) ch <- prometheus.MustNewConstMetric( inActiveDesc, prometheus.GaugeValue, stateVals["inactive"], mdStat.Name, ) ch <- prometheus.MustNewConstMetric( recoveringDesc, prometheus.GaugeValue, stateVals["recovering"], mdStat.Name, ) ch <- prometheus.MustNewConstMetric( resyncDesc, prometheus.GaugeValue, stateVals["resyncing"], mdStat.Name, ) ch <- prometheus.MustNewConstMetric( checkDesc, prometheus.GaugeValue, stateVals["checking"], mdStat.Name, ) ch <- prometheus.MustNewConstMetric( blocksTotalDesc, prometheus.GaugeValue, float64(mdStat.BlocksTotal), mdStat.Name, ) ch <- prometheus.MustNewConstMetric( blocksSyncedDesc, prometheus.GaugeValue, float64(mdStat.BlocksSynced), mdStat.Name, ) } return nil } node_exporter-1.7.0/collector/meminfo.go000066400000000000000000000037161452426057600203650ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (darwin || linux || openbsd || netbsd) && !nomeminfo // +build darwin linux openbsd netbsd // +build !nomeminfo package collector import ( "fmt" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const ( memInfoSubsystem = "memory" ) type meminfoCollector struct { logger log.Logger } func init() { registerCollector("meminfo", defaultEnabled, NewMeminfoCollector) } // NewMeminfoCollector returns a new Collector exposing memory stats. func NewMeminfoCollector(logger log.Logger) (Collector, error) { return &meminfoCollector{logger}, nil } // Update calls (*meminfoCollector).getMemInfo to get the platform specific // memory metrics. 
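// Fields whose name ends in _total are exported as counters; all other fields are exported as gauges.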
func (c *meminfoCollector) Update(ch chan<- prometheus.Metric) error { var metricType prometheus.ValueType memInfo, err := c.getMemInfo() if err != nil { return fmt.Errorf("couldn't get meminfo: %w", err) } level.Debug(c.logger).Log("msg", "Set node_mem", "memInfo", memInfo) for k, v := range memInfo { if strings.HasSuffix(k, "_total") { metricType = prometheus.CounterValue } else { metricType = prometheus.GaugeValue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, memInfoSubsystem, k), fmt.Sprintf("Memory information field %s.", k), nil, nil, ), metricType, v, ) } return nil } node_exporter-1.7.0/collector/meminfo_darwin.go000066400000000000000000000047231452426057600217300ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nomeminfo // +build !nomeminfo package collector // #include // #include // typedef struct xsw_usage xsw_usage_t; import "C" import ( "encoding/binary" "fmt" "unsafe" "golang.org/x/sys/unix" ) func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { host := C.mach_host_self() infoCount := C.mach_msg_type_number_t(C.HOST_VM_INFO64_COUNT) vmstat := C.vm_statistics64_data_t{} ret := C.host_statistics64( C.host_t(host), C.HOST_VM_INFO64, C.host_info_t(unsafe.Pointer(&vmstat)), &infoCount, ) if ret != C.KERN_SUCCESS { return nil, fmt.Errorf("Couldn't get memory statistics, host_statistics returned %d", ret) } totalb, err := unix.Sysctl("hw.memsize") if err != nil { return nil, err } swapraw, err := unix.SysctlRaw("vm.swapusage") if err != nil { return nil, err } swap := (*C.xsw_usage_t)(unsafe.Pointer(&swapraw[0])) // Syscall removes terminating NUL which we need to cast to uint64 total := binary.LittleEndian.Uint64([]byte(totalb + "\x00")) var pageSize C.vm_size_t C.host_page_size(C.host_t(host), &pageSize) ps := float64(pageSize) return map[string]float64{ "active_bytes": ps * float64(vmstat.active_count), "compressed_bytes": ps * float64(vmstat.compressor_page_count), "inactive_bytes": ps * float64(vmstat.inactive_count), "wired_bytes": ps * float64(vmstat.wire_count), "free_bytes": ps * float64(vmstat.free_count), "swapped_in_bytes_total": ps * float64(vmstat.pageins), "swapped_out_bytes_total": ps * float64(vmstat.pageouts), "internal_bytes": ps * float64(vmstat.internal_page_count), "purgeable_bytes": ps * float64(vmstat.purgeable_count), "total_bytes": float64(total), "swap_used_bytes": float64(swap.xsu_used), "swap_total_bytes": float64(swap.xsu_total), }, nil } node_exporter-1.7.0/collector/meminfo_linux.go000066400000000000000000000034751452426057600216060ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nomeminfo // +build !nomeminfo package collector import ( "bufio" "fmt" "io" "os" "regexp" "strconv" "strings" ) var ( reParens = regexp.MustCompile(`\((.*)\)`) ) func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { file, err := os.Open(procFilePath("meminfo")) if err != nil { return nil, err } defer file.Close() return parseMemInfo(file) } func parseMemInfo(r io.Reader) (map[string]float64, error) { var ( memInfo = map[string]float64{} scanner = bufio.NewScanner(r) ) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(line) // Workaround for empty lines occasionally occur in CentOS 6.2 kernel 3.10.90. if len(parts) == 0 { continue } fv, err := strconv.ParseFloat(parts[1], 64) if err != nil { return nil, fmt.Errorf("invalid value in meminfo: %w", err) } key := parts[0][:len(parts[0])-1] // remove trailing : from key // Active(anon) -> Active_anon key = reParens.ReplaceAllString(key, "_${1}") switch len(parts) { case 2: // no unit case 3: // has unit, we presume kB fv *= 1024 key = key + "_bytes" default: return nil, fmt.Errorf("invalid line in meminfo: %s", line) } memInfo[key] = fv } return memInfo, scanner.Err() } node_exporter-1.7.0/collector/meminfo_linux_test.go000066400000000000000000000022131452426057600226320ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nomeminfo // +build !nomeminfo package collector import ( "os" "testing" ) func TestMemInfo(t *testing.T) { file, err := os.Open("fixtures/proc/meminfo") if err != nil { t.Fatal(err) } defer file.Close() memInfo, err := parseMemInfo(file) if err != nil { t.Fatal(err) } if want, got := 3831959552.0, memInfo["MemTotal_bytes"]; want != got { t.Errorf("want memory total %f, got %f", want, got) } if want, got := 3787456512.0, memInfo["DirectMap2M_bytes"]; want != got { t.Errorf("want memory directMap2M %f, got %f", want, got) } } node_exporter-1.7.0/collector/meminfo_netbsd.go000066400000000000000000000027601452426057600217220ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. //go:build !nomeminfo // +build !nomeminfo package collector import ( "golang.org/x/sys/unix" ) func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") if err != nil { return nil, err } ps := float64(uvmexp.Pagesize) // see uvm(9) return map[string]float64{ "active_bytes": ps * float64(uvmexp.Active), "free_bytes": ps * float64(uvmexp.Free), "inactive_bytes": ps * float64(uvmexp.Inactive), "size_bytes": ps * float64(uvmexp.Npages), "swap_size_bytes": ps * float64(uvmexp.Swpages), "swap_used_bytes": ps * float64(uvmexp.Swpginuse), "swapped_in_pages_bytes_total": ps * float64(uvmexp.Pgswapin), "swapped_out_pages_bytes_total": ps * float64(uvmexp.Pgswapout), "wired_bytes": ps * float64(uvmexp.Wired), }, nil } node_exporter-1.7.0/collector/meminfo_numa_linux.go000066400000000000000000000110571452426057600226210ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nomeminfo_numa // +build !nomeminfo_numa package collector import ( "bufio" "fmt" "io" "os" "path/filepath" "regexp" "strconv" "strings" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( memInfoNumaSubsystem = "memory_numa" ) var meminfoNodeRE = regexp.MustCompile(`.*devices/system/node/node([0-9]*)`) type meminfoMetric struct { metricName string metricType prometheus.ValueType numaNode string value float64 } type meminfoNumaCollector struct { metricDescs map[string]*prometheus.Desc logger log.Logger } func init() { registerCollector("meminfo_numa", defaultDisabled, NewMeminfoNumaCollector) } // NewMeminfoNumaCollector returns a new Collector exposing memory stats. func NewMeminfoNumaCollector(logger log.Logger) (Collector, error) { return &meminfoNumaCollector{ metricDescs: map[string]*prometheus.Desc{}, logger: logger, }, nil } func (c *meminfoNumaCollector) Update(ch chan<- prometheus.Metric) error { metrics, err := getMemInfoNuma() if err != nil { return fmt.Errorf("couldn't get NUMA meminfo: %w", err) } for _, v := range metrics { desc, ok := c.metricDescs[v.metricName] if !ok { desc = prometheus.NewDesc( prometheus.BuildFQName(namespace, memInfoNumaSubsystem, v.metricName), fmt.Sprintf("Memory information field %s.", v.metricName), []string{"node"}, nil) c.metricDescs[v.metricName] = desc } ch <- prometheus.MustNewConstMetric(desc, v.metricType, v.value, v.numaNode) } return nil } func getMemInfoNuma() ([]meminfoMetric, error) { var ( metrics []meminfoMetric ) nodes, err := filepath.Glob(sysFilePath("devices/system/node/node[0-9]*")) if err != nil { return nil, err } for _, node := range nodes { meminfoFile, err := os.Open(filepath.Join(node, "meminfo")) if err != nil { return nil, err } defer meminfoFile.Close() numaInfo, err := parseMemInfoNuma(meminfoFile) if err != nil { return nil, err } metrics = append(metrics, numaInfo...) 
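		// Each node directory also exposes a numastat file with per-node
		// allocation counters; parseMemInfoNumaStat below turns every
		// "<name> <value>" pair into a counter suffixed with "_total" for
		// this node. Abridged, illustrative contents (the two values shown
		// match the node0 test fixture in meminfo_numa_linux_test.go; other
		// fields omitted):
		//
		//	numa_hit 193460335812
		//	local_node 193454780853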
numastatFile, err := os.Open(filepath.Join(node, "numastat")) if err != nil { return nil, err } defer numastatFile.Close() nodeNumber := meminfoNodeRE.FindStringSubmatch(node) if nodeNumber == nil { return nil, fmt.Errorf("device node string didn't match regexp: %s", node) } numaStat, err := parseMemInfoNumaStat(numastatFile, nodeNumber[1]) if err != nil { return nil, err } metrics = append(metrics, numaStat...) } return metrics, nil } func parseMemInfoNuma(r io.Reader) ([]meminfoMetric, error) { var ( memInfo []meminfoMetric scanner = bufio.NewScanner(r) re = regexp.MustCompile(`\((.*)\)`) ) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) if line == "" { continue } parts := strings.Fields(line) fv, err := strconv.ParseFloat(parts[3], 64) if err != nil { return nil, fmt.Errorf("invalid value in meminfo: %w", err) } switch l := len(parts); { case l == 4: // no unit case l == 5 && parts[4] == "kB": // has unit fv *= 1024 default: return nil, fmt.Errorf("invalid line in meminfo: %s", line) } metric := strings.TrimRight(parts[2], ":") // Active(anon) -> Active_anon metric = re.ReplaceAllString(metric, "_${1}") memInfo = append(memInfo, meminfoMetric{metric, prometheus.GaugeValue, parts[1], fv}) } return memInfo, scanner.Err() } func parseMemInfoNumaStat(r io.Reader, nodeNumber string) ([]meminfoMetric, error) { var ( numaStat []meminfoMetric scanner = bufio.NewScanner(r) ) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) if line == "" { continue } parts := strings.Fields(line) if len(parts) != 2 { return nil, fmt.Errorf("line scan did not return 2 fields: %s", line) } fv, err := strconv.ParseFloat(parts[1], 64) if err != nil { return nil, fmt.Errorf("invalid value in numastat: %w", err) } numaStat = append(numaStat, meminfoMetric{parts[0] + "_total", prometheus.CounterValue, nodeNumber, fv}) } return numaStat, scanner.Err() } node_exporter-1.7.0/collector/meminfo_numa_linux_test.go000066400000000000000000000056061452426057600236630ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nomeminfo_numa // +build !nomeminfo_numa package collector import ( "os" "testing" ) func TestMemInfoNuma(t *testing.T) { file, err := os.Open("fixtures/sys/devices/system/node/node0/meminfo") if err != nil { t.Fatal(err) } defer file.Close() memInfo, err := parseMemInfoNuma(file) if err != nil { t.Fatal(err) } if want, got := 707915776.0, memInfo[5].value; want != got { t.Errorf("want memory Active(anon) value %f, got %f", want, got) } if want, got := "Active_anon", memInfo[5].metricName; want != got { t.Errorf("want metric Active(anon) metricName %s, got %s", want, got) } if want, got := 150994944.0, memInfo[25].value; want != got { t.Errorf("want memory AnonHugePages %f, got %f", want, got) } file, err = os.Open("fixtures/sys/devices/system/node/node1/meminfo") if err != nil { t.Fatal(err) } defer file.Close() memInfo, err = parseMemInfoNuma(file) if err != nil { t.Fatal(err) } if want, got := 291930112.0, memInfo[6].value; want != got { t.Errorf("want memory Inactive(anon) %f, got %f", want, got) } if want, got := 85585088512.0, memInfo[13].value; want != got { t.Errorf("want memory FilePages %f, got %f", want, got) } } func TestMemInfoNumaStat(t *testing.T) { file, err := os.Open("fixtures/sys/devices/system/node/node0/numastat") if err != nil { t.Fatal(err) } defer file.Close() numaStat, err := parseMemInfoNumaStat(file, "0") if err != nil { t.Fatal(err) } if want, got := 193460335812.0, numaStat[0].value; want != got { t.Errorf("want numa stat numa_hit value %f, got %f", want, got) } if want, got := "numa_hit_total", numaStat[0].metricName; want != got { t.Errorf("want numa stat numa_hit metricName %s, got %s", want, got) } if want, got := 193454780853.0, numaStat[4].value; want != got { t.Errorf("want numa stat local_node %f, got %f", want, got) } file, err = os.Open("fixtures/sys/devices/system/node/node1/numastat") if err != nil { t.Fatal(err) } defer file.Close() numaStat, err = parseMemInfoNumaStat(file, "1") if err != nil { t.Fatal(err) } if want, got := 59858626709.0, numaStat[1].value; want != got { t.Errorf("want numa stat numa_miss %f, got %f", want, got) } if want, got := 59860526920.0, numaStat[5].value; want != got { t.Errorf("want numa stat other_node %f, got %f", want, got) } } node_exporter-1.7.0/collector/meminfo_openbsd.go000066400000000000000000000046631452426057600221010ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nomeminfo && !amd64 // +build !nomeminfo,!amd64 package collector import ( "fmt" ) /* #include #include #include #include int sysctl_uvmexp(struct uvmexp *uvmexp) { static int uvmexp_mib[] = {CTL_VM, VM_UVMEXP}; size_t sz = sizeof(struct uvmexp); if(sysctl(uvmexp_mib, 2, uvmexp, &sz, NULL, 0) < 0) return -1; return 0; } int sysctl_bcstats(struct bcachestats *bcstats) { static int bcstats_mib[] = {CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT}; size_t sz = sizeof(struct bcachestats); if(sysctl(bcstats_mib, 3, bcstats, &sz, NULL, 0) < 0) return -1; return 0; } */ import "C" func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { var uvmexp C.struct_uvmexp var bcstats C.struct_bcachestats if _, err := C.sysctl_uvmexp(&uvmexp); err != nil { return nil, fmt.Errorf("sysctl CTL_VM VM_UVMEXP failed: %w", err) } if _, err := C.sysctl_bcstats(&bcstats); err != nil { return nil, fmt.Errorf("sysctl CTL_VFS VFS_GENERIC VFS_BCACHESTAT failed: %w", err) } ps := float64(uvmexp.pagesize) // see uvm(9) return map[string]float64{ "active_bytes": ps * float64(uvmexp.active), "cache_bytes": ps * float64(bcstats.numbufpages), "free_bytes": ps * float64(uvmexp.free), "inactive_bytes": ps * float64(uvmexp.inactive), "size_bytes": ps * float64(uvmexp.npages), "swap_size_bytes": ps * float64(uvmexp.swpages), "swap_used_bytes": ps * float64(uvmexp.swpginuse), "swapped_in_pages_bytes_total": ps * float64(uvmexp.pgswapin), "swapped_out_pages_bytes_total": ps * float64(uvmexp.pgswapout), "wired_bytes": ps * float64(uvmexp.wired), }, nil } node_exporter-1.7.0/collector/meminfo_openbsd_amd64.go000066400000000000000000000044401452426057600230650ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License") // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nomeminfo // +build !nomeminfo package collector import ( "golang.org/x/sys/unix" "unsafe" ) const ( CTL_VFS = 10 VFS_GENERIC = 0 VFS_BCACHESTAT = 3 ) type bcachestats struct { Numbufs int64 Numbufpages int64 Numdirtypages int64 Numcleanpages int64 Pendingwrites int64 Pendingreads int64 Numwrites int64 Numreads int64 Cachehits int64 Busymapped int64 Dmapages int64 Highpages int64 Delwribufs int64 Kvaslots int64 Kvaslots_avail int64 Highflips int64 Highflops int64 Dmaflips int64 } func (c *meminfoCollector) getMemInfo() (map[string]float64, error) { uvmexpb, err := unix.SysctlRaw("vm.uvmexp") if err != nil { return nil, err } mib := [3]_C_int{CTL_VFS, VFS_GENERIC, VFS_BCACHESTAT} bcstatsb, err := sysctl(mib[:]) if err != nil { return nil, err } uvmexp := *(*unix.Uvmexp)(unsafe.Pointer(&uvmexpb[0])) ps := float64(uvmexp.Pagesize) bcstats := *(*bcachestats)(unsafe.Pointer(&bcstatsb[0])) // see uvm(9) return map[string]float64{ "active_bytes": ps * float64(uvmexp.Active), "cache_bytes": ps * float64(bcstats.Numbufpages), "free_bytes": ps * float64(uvmexp.Free), "inactive_bytes": ps * float64(uvmexp.Inactive), "size_bytes": ps * float64(uvmexp.Npages), "swap_size_bytes": ps * float64(uvmexp.Swpages), "swap_used_bytes": ps * float64(uvmexp.Swpginuse), "swapped_in_pages_bytes_total": ps * float64(uvmexp.Pgswapin), "swapped_out_pages_bytes_total": ps * float64(uvmexp.Pgswapout), "wired_bytes": ps * float64(uvmexp.Wired), }, nil } node_exporter-1.7.0/collector/memory_bsd.go000066400000000000000000000122271452426057600210700ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (freebsd || dragonfly) && !nomeminfo // +build freebsd dragonfly // +build !nomeminfo package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) const ( memorySubsystem = "memory" ) type memoryCollector struct { pageSize uint64 sysctls []bsdSysctl kvm kvm logger log.Logger } func init() { registerCollector("meminfo", defaultEnabled, NewMemoryCollector) } // NewMemoryCollector returns a new Collector exposing memory stats. func NewMemoryCollector(logger log.Logger) (Collector, error) { tmp32, err := unix.SysctlUint32("vm.stats.vm.v_page_size") if err != nil { return nil, fmt.Errorf("sysctl(vm.stats.vm.v_page_size) failed: %w", err) } size := float64(tmp32) mibSwapTotal := "vm.swap_total" /* swap_total is FreeBSD specific. Fall back to Dfly specific mib if not present. 
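	   The probe below tries the FreeBSD name first and, only if that sysctl
	   lookup fails, switches to the DragonFly BSD name. An illustrative
	   restatement of the lines that follow (no additional logic is implied):

	     mib := "vm.swap_total"              // FreeBSD
	     if _, err := unix.SysctlUint64(mib); err != nil {
	         mib = "vm.swap_size"            // DragonFly BSD
	     }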
*/ _, err = unix.SysctlUint64(mibSwapTotal) if err != nil { mibSwapTotal = "vm.swap_size" } fromPage := func(v float64) float64 { return v * size } return &memoryCollector{ logger: logger, pageSize: uint64(tmp32), sysctls: []bsdSysctl{ // Descriptions via: https://wiki.freebsd.org/Memory { name: "active_bytes", description: "Recently used by userland", mib: "vm.stats.vm.v_active_count", conversion: fromPage, labels: nil, }, { name: "inactive_bytes", description: "Not recently used by userland", mib: "vm.stats.vm.v_inactive_count", conversion: fromPage, labels: nil, }, { name: "wired_bytes", description: "Locked in memory by kernel, mlock, etc", mib: "vm.stats.vm.v_wire_count", conversion: fromPage, labels: nil, }, { name: "user_wired_bytes", description: "Locked in memory by user, mlock, etc", mib: "vm.stats.vm.v_user_wire_count", conversion: fromPage, dataType: bsdSysctlTypeCLong, labels: nil, }, { name: "cache_bytes", description: "Almost free, backed by swap or files, available for re-allocation", mib: "vm.stats.vm.v_cache_count", conversion: fromPage, labels: nil, }, { name: "buffer_bytes", description: "Disk IO Cache entries for non ZFS filesystems, only usable by kernel", mib: "vfs.bufspace", dataType: bsdSysctlTypeCLong, labels: nil, }, { name: "free_bytes", description: "Unallocated, available for allocation", mib: "vm.stats.vm.v_free_count", conversion: fromPage, labels: nil, }, { name: "laundry_bytes", description: "Dirty not recently used by userland", mib: "vm.stats.vm.v_laundry_count", conversion: fromPage, labels: nil, }, { name: "size_bytes", description: "Total physical memory size", mib: "vm.stats.vm.v_page_count", conversion: fromPage, labels: nil, }, { name: "swap_size_bytes", description: "Total swap memory size", mib: mibSwapTotal, dataType: bsdSysctlTypeUint64, labels: nil, }, // Descriptions via: top(1) { name: "swap_in_bytes_total", description: "Bytes paged in from swap devices", mib: "vm.stats.vm.v_swappgsin", valueType: prometheus.CounterValue, conversion: fromPage, labels: nil, }, { name: "swap_out_bytes_total", description: "Bytes paged out to swap devices", mib: "vm.stats.vm.v_swappgsout", valueType: prometheus.CounterValue, conversion: fromPage, labels: nil, }, }, }, nil } // Update checks relevant sysctls for current memory usage, and kvm for swap // usage. func (c *memoryCollector) Update(ch chan<- prometheus.Metric) error { for _, m := range c.sysctls { v, err := m.Value() if err != nil { return fmt.Errorf("couldn't get memory: %w", err) } // Most are gauges. if m.valueType == 0 { m.valueType = prometheus.GaugeValue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, memorySubsystem, m.name), m.description, nil, nil, ), m.valueType, v) } swapUsed, err := c.kvm.SwapUsedPages() if err != nil { return fmt.Errorf("couldn't get kvm: %w", err) } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, memorySubsystem, "swap_used_bytes"), "Currently allocated swap", nil, nil, ), prometheus.GaugeValue, float64(swapUsed*c.pageSize)) return nil } node_exporter-1.7.0/collector/mountstats_linux.go000066400000000000000000000633621452426057600223760ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nomountstats // +build !nomountstats package collector import ( "fmt" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) var ( // 64-bit float mantissa: https://en.wikipedia.org/wiki/Double-precision_floating-point_format float64Mantissa uint64 = 9007199254740992 ) type mountStatsCollector struct { // General statistics NFSAgeSecondsTotal *prometheus.Desc // Byte statistics NFSReadBytesTotal *prometheus.Desc NFSWriteBytesTotal *prometheus.Desc NFSDirectReadBytesTotal *prometheus.Desc NFSDirectWriteBytesTotal *prometheus.Desc NFSTotalReadBytesTotal *prometheus.Desc NFSTotalWriteBytesTotal *prometheus.Desc NFSReadPagesTotal *prometheus.Desc NFSWritePagesTotal *prometheus.Desc // Per-operation statistics NFSOperationsRequestsTotal *prometheus.Desc NFSOperationsTransmissionsTotal *prometheus.Desc NFSOperationsMajorTimeoutsTotal *prometheus.Desc NFSOperationsSentBytesTotal *prometheus.Desc NFSOperationsReceivedBytesTotal *prometheus.Desc NFSOperationsQueueTimeSecondsTotal *prometheus.Desc NFSOperationsResponseTimeSecondsTotal *prometheus.Desc NFSOperationsRequestTimeSecondsTotal *prometheus.Desc // Transport statistics NFSTransportBindTotal *prometheus.Desc NFSTransportConnectTotal *prometheus.Desc NFSTransportIdleTimeSeconds *prometheus.Desc NFSTransportSendsTotal *prometheus.Desc NFSTransportReceivesTotal *prometheus.Desc NFSTransportBadTransactionIDsTotal *prometheus.Desc NFSTransportBacklogQueueTotal *prometheus.Desc NFSTransportMaximumRPCSlots *prometheus.Desc NFSTransportSendingQueueTotal *prometheus.Desc NFSTransportPendingQueueTotal *prometheus.Desc // Event statistics NFSEventInodeRevalidateTotal *prometheus.Desc NFSEventDnodeRevalidateTotal *prometheus.Desc NFSEventDataInvalidateTotal *prometheus.Desc NFSEventAttributeInvalidateTotal *prometheus.Desc NFSEventVFSOpenTotal *prometheus.Desc NFSEventVFSLookupTotal *prometheus.Desc NFSEventVFSAccessTotal *prometheus.Desc NFSEventVFSUpdatePageTotal *prometheus.Desc NFSEventVFSReadPageTotal *prometheus.Desc NFSEventVFSReadPagesTotal *prometheus.Desc NFSEventVFSWritePageTotal *prometheus.Desc NFSEventVFSWritePagesTotal *prometheus.Desc NFSEventVFSGetdentsTotal *prometheus.Desc NFSEventVFSSetattrTotal *prometheus.Desc NFSEventVFSFlushTotal *prometheus.Desc NFSEventVFSFsyncTotal *prometheus.Desc NFSEventVFSLockTotal *prometheus.Desc NFSEventVFSFileReleaseTotal *prometheus.Desc NFSEventTruncationTotal *prometheus.Desc NFSEventWriteExtensionTotal *prometheus.Desc NFSEventSillyRenameTotal *prometheus.Desc NFSEventShortReadTotal *prometheus.Desc NFSEventShortWriteTotal *prometheus.Desc NFSEventJukeboxDelayTotal *prometheus.Desc NFSEventPNFSReadTotal *prometheus.Desc NFSEventPNFSWriteTotal *prometheus.Desc proc procfs.Proc logger log.Logger } // used to uniquely identify an NFS mount to prevent duplicates type nfsDeviceIdentifier struct { Device string Protocol string MountAddress string } func init() { registerCollector("mountstats", defaultDisabled, NewMountStatsCollector) } // NewMountStatsCollector returns a new Collector exposing NFS 
statistics. func NewMountStatsCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } proc, err := fs.Self() if err != nil { return nil, fmt.Errorf("failed to open /proc/self: %w", err) } const ( // For the time being, only NFS statistics are available via this mechanism. subsystem = "mountstats_nfs" ) var ( labels = []string{"export", "protocol", "mountaddr"} opLabels = []string{"export", "protocol", "mountaddr", "operation"} ) return &mountStatsCollector{ NFSAgeSecondsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "age_seconds_total"), "The age of the NFS mount in seconds.", labels, nil, ), NFSReadBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "read_bytes_total"), "Number of bytes read using the read() syscall.", labels, nil, ), NFSWriteBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "write_bytes_total"), "Number of bytes written using the write() syscall.", labels, nil, ), NFSDirectReadBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "direct_read_bytes_total"), "Number of bytes read using the read() syscall in O_DIRECT mode.", labels, nil, ), NFSDirectWriteBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "direct_write_bytes_total"), "Number of bytes written using the write() syscall in O_DIRECT mode.", labels, nil, ), NFSTotalReadBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "total_read_bytes_total"), "Number of bytes read from the NFS server, in total.", labels, nil, ), NFSTotalWriteBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "total_write_bytes_total"), "Number of bytes written to the NFS server, in total.", labels, nil, ), NFSReadPagesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "read_pages_total"), "Number of pages read directly via mmap()'d files.", labels, nil, ), NFSWritePagesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "write_pages_total"), "Number of pages written directly via mmap()'d files.", labels, nil, ), NFSTransportBindTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_bind_total"), "Number of times the client has had to establish a connection from scratch to the NFS server.", labels, nil, ), NFSTransportConnectTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_connect_total"), "Number of times the client has made a TCP connection to the NFS server.", labels, nil, ), NFSTransportIdleTimeSeconds: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_idle_time_seconds"), "Duration since the NFS mount last saw any RPC traffic, in seconds.", labels, nil, ), NFSTransportSendsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_sends_total"), "Number of RPC requests for this mount sent to the NFS server.", labels, nil, ), NFSTransportReceivesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_receives_total"), "Number of RPC responses for this mount received from the NFS server.", labels, nil, ), NFSTransportBadTransactionIDsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_bad_transaction_ids_total"), "Number of times the NFS server sent a response with a transaction ID unknown to this client.", labels, nil, ), NFSTransportBacklogQueueTotal: 
prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_backlog_queue_total"), "Total number of items added to the RPC backlog queue.", labels, nil, ), NFSTransportMaximumRPCSlots: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_maximum_rpc_slots"), "Maximum number of simultaneously active RPC requests ever used.", labels, nil, ), NFSTransportSendingQueueTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_sending_queue_total"), "Total number of items added to the RPC transmission sending queue.", labels, nil, ), NFSTransportPendingQueueTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transport_pending_queue_total"), "Total number of items added to the RPC transmission pending queue.", labels, nil, ), NFSOperationsRequestsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_requests_total"), "Number of requests performed for a given operation.", opLabels, nil, ), NFSOperationsTransmissionsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_transmissions_total"), "Number of times an actual RPC request has been transmitted for a given operation.", opLabels, nil, ), NFSOperationsMajorTimeoutsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_major_timeouts_total"), "Number of times a request has had a major timeout for a given operation.", opLabels, nil, ), NFSOperationsSentBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_sent_bytes_total"), "Number of bytes sent for a given operation, including RPC headers and payload.", opLabels, nil, ), NFSOperationsReceivedBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_received_bytes_total"), "Number of bytes received for a given operation, including RPC headers and payload.", opLabels, nil, ), NFSOperationsQueueTimeSecondsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_queue_time_seconds_total"), "Duration all requests spent queued for transmission for a given operation before they were sent, in seconds.", opLabels, nil, ), NFSOperationsResponseTimeSecondsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_response_time_seconds_total"), "Duration all requests took to get a reply back after a request for a given operation was transmitted, in seconds.", opLabels, nil, ), NFSOperationsRequestTimeSecondsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "operations_request_time_seconds_total"), "Duration all requests took from when a request was enqueued to when it was completely handled for a given operation, in seconds.", opLabels, nil, ), NFSEventInodeRevalidateTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_inode_revalidate_total"), "Number of times cached inode attributes are re-validated from the server.", labels, nil, ), NFSEventDnodeRevalidateTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_dnode_revalidate_total"), "Number of times cached dentry nodes are re-validated from the server.", labels, nil, ), NFSEventDataInvalidateTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_data_invalidate_total"), "Number of times an inode cache is cleared.", labels, nil, ), NFSEventAttributeInvalidateTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_attribute_invalidate_total"), "Number 
of times cached inode attributes are invalidated.", labels, nil, ), NFSEventVFSOpenTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_open_total"), "Number of times cached inode attributes are invalidated.", labels, nil, ), NFSEventVFSLookupTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_lookup_total"), "Number of times a directory lookup has occurred.", labels, nil, ), NFSEventVFSAccessTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_access_total"), "Number of times permissions have been checked.", labels, nil, ), NFSEventVFSUpdatePageTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_update_page_total"), "Number of updates (and potential writes) to pages.", labels, nil, ), NFSEventVFSReadPageTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_read_page_total"), "Number of pages read directly via mmap()'d files.", labels, nil, ), NFSEventVFSReadPagesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_read_pages_total"), "Number of times a group of pages have been read.", labels, nil, ), NFSEventVFSWritePageTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_write_page_total"), "Number of pages written directly via mmap()'d files.", labels, nil, ), NFSEventVFSWritePagesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_write_pages_total"), "Number of times a group of pages have been written.", labels, nil, ), NFSEventVFSGetdentsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_getdents_total"), "Number of times directory entries have been read with getdents().", labels, nil, ), NFSEventVFSSetattrTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_setattr_total"), "Number of times directory entries have been read with getdents().", labels, nil, ), NFSEventVFSFlushTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_flush_total"), "Number of pending writes that have been forcefully flushed to the server.", labels, nil, ), NFSEventVFSFsyncTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_fsync_total"), "Number of times fsync() has been called on directories and files.", labels, nil, ), NFSEventVFSLockTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_lock_total"), "Number of times locking has been attempted on a file.", labels, nil, ), NFSEventVFSFileReleaseTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_vfs_file_release_total"), "Number of times files have been closed and released.", labels, nil, ), NFSEventTruncationTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_truncation_total"), "Number of times files have been truncated.", labels, nil, ), NFSEventWriteExtensionTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_write_extension_total"), "Number of times a file has been grown due to writes beyond its existing end.", labels, nil, ), NFSEventSillyRenameTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_silly_rename_total"), "Number of times a file was removed while still open by another process.", labels, nil, ), NFSEventShortReadTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_short_read_total"), "Number of times the NFS server gave 
less data than expected while reading.", labels, nil, ), NFSEventShortWriteTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_short_write_total"), "Number of times the NFS server wrote less data than expected while writing.", labels, nil, ), NFSEventJukeboxDelayTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_jukebox_delay_total"), "Number of times the NFS server indicated EJUKEBOX; retrieving data from offline storage.", labels, nil, ), NFSEventPNFSReadTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_pnfs_read_total"), "Number of NFS v4.1+ pNFS reads.", labels, nil, ), NFSEventPNFSWriteTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "event_pnfs_write_total"), "Number of NFS v4.1+ pNFS writes.", labels, nil, ), proc: proc, logger: logger, }, nil } func (c *mountStatsCollector) Update(ch chan<- prometheus.Metric) error { mounts, err := c.proc.MountStats() if err != nil { return fmt.Errorf("failed to parse mountstats: %w", err) } mountsInfo, err := c.proc.MountInfo() if err != nil { return fmt.Errorf("failed to parse mountinfo: %w", err) } // store all seen nfsDeviceIdentifiers for deduplication deviceList := make(map[nfsDeviceIdentifier]bool) for idx, m := range mounts { // For the time being, only NFS statistics are available via this mechanism stats, ok := m.Stats.(*procfs.MountStatsNFS) if !ok { continue } var mountAddress string if idx < len(mountsInfo) { // The mount entry order in the /proc/self/mountstats and /proc/self/mountinfo is the same. miStats := mountsInfo[idx] mountAddress = miStats.SuperOptions["addr"] } deviceIdentifier := nfsDeviceIdentifier{m.Device, stats.Transport.Protocol, mountAddress} i := deviceList[deviceIdentifier] if i { level.Debug(c.logger).Log("msg", "Skipping duplicate device entry", "device", deviceIdentifier) continue } deviceList[deviceIdentifier] = true c.updateNFSStats(ch, stats, m.Device, stats.Transport.Protocol, mountAddress) } return nil } func (c *mountStatsCollector) updateNFSStats(ch chan<- prometheus.Metric, s *procfs.MountStatsNFS, export, protocol, mountAddress string) { labelValues := []string{export, protocol, mountAddress} ch <- prometheus.MustNewConstMetric( c.NFSAgeSecondsTotal, prometheus.CounterValue, s.Age.Seconds(), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSReadBytesTotal, prometheus.CounterValue, float64(s.Bytes.Read), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSWriteBytesTotal, prometheus.CounterValue, float64(s.Bytes.Write), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSDirectReadBytesTotal, prometheus.CounterValue, float64(s.Bytes.DirectRead), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSDirectWriteBytesTotal, prometheus.CounterValue, float64(s.Bytes.DirectWrite), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTotalReadBytesTotal, prometheus.CounterValue, float64(s.Bytes.ReadTotal), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTotalWriteBytesTotal, prometheus.CounterValue, float64(s.Bytes.WriteTotal), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSReadPagesTotal, prometheus.CounterValue, float64(s.Bytes.ReadPages), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSWritePagesTotal, prometheus.CounterValue, float64(s.Bytes.WritePages), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportBindTotal, prometheus.CounterValue, float64(s.Transport.Bind), labelValues..., ) ch <- 
prometheus.MustNewConstMetric( c.NFSTransportConnectTotal, prometheus.CounterValue, float64(s.Transport.Connect), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportIdleTimeSeconds, prometheus.GaugeValue, float64(s.Transport.IdleTimeSeconds%float64Mantissa), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportSendsTotal, prometheus.CounterValue, float64(s.Transport.Sends), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportReceivesTotal, prometheus.CounterValue, float64(s.Transport.Receives), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportBadTransactionIDsTotal, prometheus.CounterValue, float64(s.Transport.BadTransactionIDs), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportBacklogQueueTotal, prometheus.CounterValue, float64(s.Transport.CumulativeBacklog), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportMaximumRPCSlots, prometheus.GaugeValue, float64(s.Transport.MaximumRPCSlotsUsed), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportSendingQueueTotal, prometheus.CounterValue, float64(s.Transport.CumulativeSendingQueue), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSTransportPendingQueueTotal, prometheus.CounterValue, float64(s.Transport.CumulativePendingQueue), labelValues..., ) for _, op := range s.Operations { opLabelValues := []string{export, protocol, mountAddress, op.Operation} ch <- prometheus.MustNewConstMetric( c.NFSOperationsRequestsTotal, prometheus.CounterValue, float64(op.Requests), opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsTransmissionsTotal, prometheus.CounterValue, float64(op.Transmissions), opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsMajorTimeoutsTotal, prometheus.CounterValue, float64(op.MajorTimeouts), opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsSentBytesTotal, prometheus.CounterValue, float64(op.BytesSent), opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsReceivedBytesTotal, prometheus.CounterValue, float64(op.BytesReceived), opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsQueueTimeSecondsTotal, prometheus.CounterValue, float64(op.CumulativeQueueMilliseconds%float64Mantissa)/1000.0, opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsResponseTimeSecondsTotal, prometheus.CounterValue, float64(op.CumulativeTotalResponseMilliseconds%float64Mantissa)/1000.0, opLabelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSOperationsRequestTimeSecondsTotal, prometheus.CounterValue, float64(op.CumulativeTotalRequestMilliseconds%float64Mantissa)/1000.0, opLabelValues..., ) } ch <- prometheus.MustNewConstMetric( c.NFSEventInodeRevalidateTotal, prometheus.CounterValue, float64(s.Events.InodeRevalidate), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventDnodeRevalidateTotal, prometheus.CounterValue, float64(s.Events.DnodeRevalidate), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventDataInvalidateTotal, prometheus.CounterValue, float64(s.Events.DataInvalidate), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventAttributeInvalidateTotal, prometheus.CounterValue, float64(s.Events.AttributeInvalidate), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSOpenTotal, prometheus.CounterValue, float64(s.Events.VFSOpen), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSLookupTotal, prometheus.CounterValue, 
float64(s.Events.VFSLookup), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSAccessTotal, prometheus.CounterValue, float64(s.Events.VFSAccess), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSUpdatePageTotal, prometheus.CounterValue, float64(s.Events.VFSUpdatePage), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSReadPageTotal, prometheus.CounterValue, float64(s.Events.VFSReadPage), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSReadPagesTotal, prometheus.CounterValue, float64(s.Events.VFSReadPages), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSWritePageTotal, prometheus.CounterValue, float64(s.Events.VFSWritePage), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSWritePagesTotal, prometheus.CounterValue, float64(s.Events.VFSWritePages), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSGetdentsTotal, prometheus.CounterValue, float64(s.Events.VFSGetdents), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSSetattrTotal, prometheus.CounterValue, float64(s.Events.VFSSetattr), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSFlushTotal, prometheus.CounterValue, float64(s.Events.VFSFlush), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSFsyncTotal, prometheus.CounterValue, float64(s.Events.VFSFsync), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSLockTotal, prometheus.CounterValue, float64(s.Events.VFSLock), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventVFSFileReleaseTotal, prometheus.CounterValue, float64(s.Events.VFSFileRelease), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventTruncationTotal, prometheus.CounterValue, float64(s.Events.Truncation), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventWriteExtensionTotal, prometheus.CounterValue, float64(s.Events.WriteExtension), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventSillyRenameTotal, prometheus.CounterValue, float64(s.Events.SillyRename), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventShortReadTotal, prometheus.CounterValue, float64(s.Events.ShortRead), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventShortWriteTotal, prometheus.CounterValue, float64(s.Events.ShortWrite), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventJukeboxDelayTotal, prometheus.CounterValue, float64(s.Events.JukeboxDelay), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventPNFSReadTotal, prometheus.CounterValue, float64(s.Events.PNFSRead), labelValues..., ) ch <- prometheus.MustNewConstMetric( c.NFSEventPNFSWriteTotal, prometheus.CounterValue, float64(s.Events.PNFSWrite), labelValues..., ) } node_exporter-1.7.0/collector/netclass_linux.go000066400000000000000000000156721452426057600217720ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nonetclass && linux // +build !nonetclass,linux package collector import ( "errors" "fmt" "net" "os" "regexp" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) var ( netclassIgnoredDevices = kingpin.Flag("collector.netclass.ignored-devices", "Regexp of net devices to ignore for netclass collector.").Default("^$").String() netclassInvalidSpeed = kingpin.Flag("collector.netclass.ignore-invalid-speed", "Ignore devices where the speed is invalid. This will be the default behavior in 2.x.").Bool() netclassNetlink = kingpin.Flag("collector.netclass.netlink", "Use netlink to gather stats instead of /proc/net/dev.").Default("false").Bool() ) type netClassCollector struct { fs sysfs.FS subsystem string ignoredDevicesPattern *regexp.Regexp metricDescs map[string]*prometheus.Desc logger log.Logger } func init() { registerCollector("netclass", defaultEnabled, NewNetClassCollector) } // NewNetClassCollector returns a new Collector exposing network class stats. func NewNetClassCollector(logger log.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } pattern := regexp.MustCompile(*netclassIgnoredDevices) return &netClassCollector{ fs: fs, subsystem: "network", ignoredDevicesPattern: pattern, metricDescs: map[string]*prometheus.Desc{}, logger: logger, }, nil } func (c *netClassCollector) Update(ch chan<- prometheus.Metric) error { if *netclassNetlink { return c.netClassRTNLUpdate(ch) } return c.netClassSysfsUpdate(ch) } func (c *netClassCollector) netClassSysfsUpdate(ch chan<- prometheus.Metric) error { netClass, err := c.getNetClassInfo() if err != nil { if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) { level.Debug(c.logger).Log("msg", "Could not read netclass file", "err", err) return ErrNoData } return fmt.Errorf("could not get net class info: %w", err) } for _, ifaceInfo := range netClass { upDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "up"), "Value is 1 if operstate is 'up', 0 otherwise.", []string{"device"}, nil, ) upValue := 0.0 if ifaceInfo.OperState == "up" { upValue = 1.0 } ch <- prometheus.MustNewConstMetric(upDesc, prometheus.GaugeValue, upValue, ifaceInfo.Name) infoDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "info"), "Non-numeric data from /sys/class/net/, value is always 1.", []string{"device", "address", "broadcast", "duplex", "operstate", "adminstate", "ifalias"}, nil, ) infoValue := 1.0 ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, ifaceInfo.Name, ifaceInfo.Address, ifaceInfo.Broadcast, ifaceInfo.Duplex, ifaceInfo.OperState, getAdminState(ifaceInfo.Flags), ifaceInfo.IfAlias) pushMetric(ch, c.getFieldDesc("address_assign_type"), "address_assign_type", ifaceInfo.AddrAssignType, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("carrier"), "carrier", ifaceInfo.Carrier, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("carrier_changes_total"), "carrier_changes_total", ifaceInfo.CarrierChanges, prometheus.CounterValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("carrier_up_changes_total"), "carrier_up_changes_total", ifaceInfo.CarrierUpCount, prometheus.CounterValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("carrier_down_changes_total"), "carrier_down_changes_total", ifaceInfo.CarrierDownCount, 
prometheus.CounterValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("device_id"), "device_id", ifaceInfo.DevID, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("dormant"), "dormant", ifaceInfo.Dormant, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("flags"), "flags", ifaceInfo.Flags, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("iface_id"), "iface_id", ifaceInfo.IfIndex, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("iface_link"), "iface_link", ifaceInfo.IfLink, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("iface_link_mode"), "iface_link_mode", ifaceInfo.LinkMode, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("mtu_bytes"), "mtu_bytes", ifaceInfo.MTU, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("name_assign_type"), "name_assign_type", ifaceInfo.NameAssignType, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("net_dev_group"), "net_dev_group", ifaceInfo.NetDevGroup, prometheus.GaugeValue, ifaceInfo.Name) if ifaceInfo.Speed != nil { // Some devices return -1 if the speed is unknown. if *ifaceInfo.Speed >= 0 || !*netclassInvalidSpeed { speedBytes := int64(*ifaceInfo.Speed * 1000 * 1000 / 8) pushMetric(ch, c.getFieldDesc("speed_bytes"), "speed_bytes", speedBytes, prometheus.GaugeValue, ifaceInfo.Name) } } pushMetric(ch, c.getFieldDesc("transmit_queue_length"), "transmit_queue_length", ifaceInfo.TxQueueLen, prometheus.GaugeValue, ifaceInfo.Name) pushMetric(ch, c.getFieldDesc("protocol_type"), "protocol_type", ifaceInfo.Type, prometheus.GaugeValue, ifaceInfo.Name) } return nil } func (c *netClassCollector) getFieldDesc(name string) *prometheus.Desc { fieldDesc, exists := c.metricDescs[name] if !exists { fieldDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, name), fmt.Sprintf("Network device property: %s", name), []string{"device"}, nil, ) c.metricDescs[name] = fieldDesc } return fieldDesc } func (c *netClassCollector) getNetClassInfo() (sysfs.NetClass, error) { netClass := sysfs.NetClass{} netDevices, err := c.fs.NetClassDevices() if err != nil { return netClass, err } for _, device := range netDevices { if c.ignoredDevicesPattern.MatchString(device) { continue } interfaceClass, err := c.fs.NetClassByIface(device) if err != nil { return netClass, err } netClass[device] = *interfaceClass } return netClass, nil } func getAdminState(flags *int64) string { if flags == nil { return "unknown" } if *flags&int64(net.FlagUp) == 1 { return "up" } return "down" } node_exporter-1.7.0/collector/netclass_rtnl_linux.go000066400000000000000000000274531452426057600230310ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nonetclass && linux // +build !nonetclass,linux package collector import ( "errors" "fmt" "io/fs" "path/filepath" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log/level" "github.com/jsimonetti/rtnetlink" "github.com/mdlayher/ethtool" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) var ( netclassRTNLWithStats = kingpin.Flag("collector.netclass_rtnl.with-stats", "Expose the statistics for each network device, replacing netdev collector.").Bool() operstateStr = []string{ "unknown", "notpresent", "down", "lowerlayerdown", "testing", "dormant", "up", } ) func (c *netClassCollector) netClassRTNLUpdate(ch chan<- prometheus.Metric) error { linkModes := make(map[string]*ethtool.LinkMode) lms, err := c.getLinkModes() if err != nil { if !errors.Is(errors.Unwrap(err), fs.ErrNotExist) { return fmt.Errorf("could not get link modes: %w", err) } level.Info(c.logger).Log("msg", "ETHTOOL netlink interface unavailable, duplex and linkspeed are not scraped.") } else { for _, lm := range lms { if c.ignoredDevicesPattern.MatchString(lm.Interface.Name) { continue } if lm.SpeedMegabits >= 0 { speedBytes := uint64(lm.SpeedMegabits * 1000 * 1000 / 8) pushMetric(ch, c.getFieldDesc("speed_bytes"), "speed_bytes", speedBytes, prometheus.GaugeValue, lm.Interface.Name) } linkModes[lm.Interface.Name] = lm } } // Get most attributes from Netlink lMsgs, err := c.getNetClassInfoRTNL() if err != nil { return fmt.Errorf("could not get net class info: %w", err) } relevantLinks := make([]rtnetlink.LinkMessage, 0, len(lMsgs)) for _, msg := range lMsgs { if !c.ignoredDevicesPattern.MatchString(msg.Attributes.Name) { relevantLinks = append(relevantLinks, msg) } } // Read sysfs for attributes that Netlink doesn't expose sysfsAttrs, err := getSysfsAttributes(relevantLinks) if err != nil { return fmt.Errorf("could not get sysfs device info: %w", err) } // Parse all the info and update metrics for _, msg := range relevantLinks { upDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "up"), "Value is 1 if operstate is 'up', 0 otherwise.", []string{"device"}, nil, ) upValue := 0.0 if msg.Attributes.OperationalState == rtnetlink.OperStateUp { upValue = 1.0 } ch <- prometheus.MustNewConstMetric(upDesc, prometheus.GaugeValue, upValue, msg.Attributes.Name) infoDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "info"), "Non-numeric data of , value is always 1.", []string{"device", "address", "broadcast", "duplex", "operstate", "ifalias"}, nil, ) infoValue := 1.0 var ifalias = "" if msg.Attributes.Alias != nil { ifalias = *msg.Attributes.Alias } duplex := "" lm, lmExists := linkModes[msg.Attributes.Name] if lmExists { duplex = lm.Duplex.String() } ifaceInfo := sysfsAttrs[msg.Attributes.Name] ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, msg.Attributes.Name, msg.Attributes.Address.String(), msg.Attributes.Broadcast.String(), duplex, operstateStr[int(msg.Attributes.OperationalState)], ifalias) pushMetric(ch, c.getFieldDesc("address_assign_type"), "address_assign_type", ifaceInfo.AddrAssignType, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("carrier"), "carrier", msg.Attributes.Carrier, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("carrier_changes_total"), "carrier_changes_total", msg.Attributes.CarrierChanges, prometheus.CounterValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("carrier_up_changes_total"), "carrier_up_changes_total", 
msg.Attributes.CarrierUpCount, prometheus.CounterValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("carrier_down_changes_total"), "carrier_down_changes_total", msg.Attributes.CarrierDownCount, prometheus.CounterValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("device_id"), "device_id", ifaceInfo.DevID, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("flags"), "flags", msg.Flags, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("iface_id"), "iface_id", msg.Index, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("iface_link_mode"), "iface_link_mode", msg.Attributes.LinkMode, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("dormant"), "dormant", msg.Attributes.LinkMode, prometheus.GaugeValue, msg.Attributes.Name) // kernel logic: IFLA_LINK attribute will be ignore when ifindex is the same as iflink // (dev->ifindex != dev_get_iflink(dev) && nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) // As interface ID is never 0, we assume msg.Attributes.Type 0 means iflink is omitted in RTM_GETLINK response. if msg.Attributes.Type > 0 { pushMetric(ch, c.getFieldDesc("iface_link"), "iface_link", msg.Attributes.Type, prometheus.GaugeValue, msg.Attributes.Name) } else { pushMetric(ch, c.getFieldDesc("iface_link"), "iface_link", msg.Index, prometheus.GaugeValue, msg.Attributes.Name) } pushMetric(ch, c.getFieldDesc("mtu_bytes"), "mtu_bytes", msg.Attributes.MTU, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("name_assign_type"), "name_assign_type", ifaceInfo.NameAssignType, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("net_dev_group"), "net_dev_group", msg.Attributes.NetDevGroup, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_queue_length"), "transmit_queue_length", msg.Attributes.TxQueueLen, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("protocol_type"), "protocol_type", msg.Type, prometheus.GaugeValue, msg.Attributes.Name) // Skip statistics if argument collector.netclass_rtnl.with-stats is false or statistics are unavailable. 
if netclassRTNLWithStats == nil || !*netclassRTNLWithStats || msg.Attributes.Stats64 == nil { continue } pushMetric(ch, c.getFieldDesc("receive_packets_total"), "receive_packets_total", msg.Attributes.Stats64.RXPackets, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_packets_total"), "transmit_packets_total", msg.Attributes.Stats64.TXPackets, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_bytes_total"), "receive_bytes_total", msg.Attributes.Stats64.RXBytes, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_bytes_total"), "transmit_bytes_total", msg.Attributes.Stats64.TXBytes, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_errors_total"), "receive_errors_total", msg.Attributes.Stats64.RXErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_errors_total"), "transmit_errors_total", msg.Attributes.Stats64.TXErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_dropped_total"), "receive_dropped_total", msg.Attributes.Stats64.RXDropped, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_dropped_total"), "transmit_dropped_total", msg.Attributes.Stats64.TXDropped, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("multicast_total"), "multicast_total", msg.Attributes.Stats64.Multicast, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("collisions_total"), "collisions_total", msg.Attributes.Stats64.Collisions, prometheus.GaugeValue, msg.Attributes.Name) // Detailed rx_errors. pushMetric(ch, c.getFieldDesc("receive_length_errors_total"), "receive_length_errors_total", msg.Attributes.Stats64.RXLengthErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_over_errors_total"), "receive_over_errors_total", msg.Attributes.Stats64.RXOverErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_crc_errors_total"), "receive_crc_errors_total", msg.Attributes.Stats64.RXCRCErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_frame_errors_total"), "receive_frame_errors_total", msg.Attributes.Stats64.RXFrameErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_fifo_errors_total"), "receive_fifo_errors_total", msg.Attributes.Stats64.RXFIFOErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_missed_errors_total"), "receive_missed_errors_total", msg.Attributes.Stats64.RXMissedErrors, prometheus.GaugeValue, msg.Attributes.Name) // Detailed tx_errors. 
pushMetric(ch, c.getFieldDesc("transmit_aborted_errors_total"), "transmit_aborted_errors_total", msg.Attributes.Stats64.TXAbortedErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_carrier_errors_total"), "transmit_carrier_errors_total", msg.Attributes.Stats64.TXCarrierErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_fifo_errors_total"), "transmit_fifo_errors_total", msg.Attributes.Stats64.TXFIFOErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_heartbeat_errors_total"), "transmit_heartbeat_errors_total", msg.Attributes.Stats64.TXHeartbeatErrors, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_window_errors_total"), "transmit_window_errors_total", msg.Attributes.Stats64.TXWindowErrors, prometheus.GaugeValue, msg.Attributes.Name) // For cslip, etc. pushMetric(ch, c.getFieldDesc("receive_compressed_total"), "receive_compressed_total", msg.Attributes.Stats64.RXCompressed, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("transmit_compressed_total"), "transmit_compressed_total", msg.Attributes.Stats64.TXCompressed, prometheus.GaugeValue, msg.Attributes.Name) pushMetric(ch, c.getFieldDesc("receive_nohandler_total"), "receive_nohandler_total", msg.Attributes.Stats64.RXNoHandler, prometheus.GaugeValue, msg.Attributes.Name) } return nil } func (c *netClassCollector) getNetClassInfoRTNL() ([]rtnetlink.LinkMessage, error) { conn, err := rtnetlink.Dial(nil) if err != nil { return nil, err } defer conn.Close() lMsgs, err := conn.Link.List() return lMsgs, err } func (c *netClassCollector) getLinkModes() ([]*ethtool.LinkMode, error) { conn, err := ethtool.New() if err != nil { return nil, err } defer conn.Close() lms, err := conn.LinkModes() return lms, err } // getSysfsAttributes reads attributes that are absent from netlink but provided // by sysfs. func getSysfsAttributes(links []rtnetlink.LinkMessage) (sysfs.NetClass, error) { netClass := sysfs.NetClass{} for _, msg := range links { interfaceClass := sysfs.NetClassIface{} ifName := msg.Attributes.Name devPath := filepath.Join("/sys", "class", "net", ifName) // These three attributes hold a device-specific lock when // accessed, not the RTNL lock, so they are much less impactful // than reading most of the other attributes from sysfs. for _, attr := range []string{"addr_assign_type", "dev_id", "name_assign_type"} { if err := sysfs.ParseNetClassAttribute(devPath, attr, &interfaceClass); err != nil { return nil, err } } netClass[ifName] = interfaceClass } return netClass, nil } node_exporter-1.7.0/collector/netdev_bsd.go000066400000000000000000000040261452426057600210430ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nonetdev && (freebsd || dragonfly) // +build !nonetdev // +build freebsd dragonfly package collector import ( "errors" "github.com/go-kit/log" "github.com/go-kit/log/level" ) /* #cgo CFLAGS: -D_IFI_OQDROPS #include <stdio.h> #include <sys/types.h> #include <sys/socket.h> #include <ifaddrs.h> #include <net/if.h> */ import "C" func getNetDevStats(filter *deviceFilter, logger log.Logger) (netDevStats, error) { netDev := netDevStats{} var ifap, ifa *C.struct_ifaddrs if C.getifaddrs(&ifap) == -1 { return nil, errors.New("getifaddrs() failed") } defer C.freeifaddrs(ifap) for ifa = ifap; ifa != nil; ifa = ifa.ifa_next { if ifa.ifa_addr.sa_family != C.AF_LINK { continue } dev := C.GoString(ifa.ifa_name) if filter.ignored(dev) { level.Debug(logger).Log("msg", "Ignoring device", "device", dev) continue } data := (*C.struct_if_data)(ifa.ifa_data) netDev[dev] = map[string]uint64{ "receive_packets": uint64(data.ifi_ipackets), "transmit_packets": uint64(data.ifi_opackets), "receive_bytes": uint64(data.ifi_ibytes), "transmit_bytes": uint64(data.ifi_obytes), "receive_errors": uint64(data.ifi_ierrors), "transmit_errors": uint64(data.ifi_oerrors), "receive_dropped": uint64(data.ifi_iqdrops), "transmit_dropped": uint64(data.ifi_oqdrops), "receive_multicast": uint64(data.ifi_imcasts), "transmit_multicast": uint64(data.ifi_omcasts), } } return netDev, nil } node_exporter-1.7.0/collector/netdev_common.go000066400000000000000000000172301452426057600215640ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
//go:build !nonetdev && (linux || freebsd || openbsd || dragonfly || darwin) // +build !nonetdev // +build linux freebsd openbsd dragonfly darwin package collector import ( "errors" "fmt" "net" "strconv" "sync" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) var ( netdevDeviceInclude = kingpin.Flag("collector.netdev.device-include", "Regexp of net devices to include (mutually exclusive to device-exclude).").String() oldNetdevDeviceInclude = kingpin.Flag("collector.netdev.device-whitelist", "DEPRECATED: Use collector.netdev.device-include").Hidden().String() netdevDeviceExclude = kingpin.Flag("collector.netdev.device-exclude", "Regexp of net devices to exclude (mutually exclusive to device-include).").String() oldNetdevDeviceExclude = kingpin.Flag("collector.netdev.device-blacklist", "DEPRECATED: Use collector.netdev.device-exclude").Hidden().String() netdevAddressInfo = kingpin.Flag("collector.netdev.address-info", "Collect address-info for every device").Bool() netdevDetailedMetrics = kingpin.Flag("collector.netdev.enable-detailed-metrics", "Use (incompatible) metric names that provide more detailed stats on Linux").Bool() ) type netDevCollector struct { subsystem string deviceFilter deviceFilter metricDescsMutex sync.Mutex metricDescs map[string]*prometheus.Desc logger log.Logger } type netDevStats map[string]map[string]uint64 func init() { registerCollector("netdev", defaultEnabled, NewNetDevCollector) } // NewNetDevCollector returns a new Collector exposing network device stats. func NewNetDevCollector(logger log.Logger) (Collector, error) { if *oldNetdevDeviceInclude != "" { if *netdevDeviceInclude == "" { level.Warn(logger).Log("msg", "--collector.netdev.device-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-include") *netdevDeviceInclude = *oldNetdevDeviceInclude } else { return nil, errors.New("--collector.netdev.device-whitelist and --collector.netdev.device-include are mutually exclusive") } } if *oldNetdevDeviceExclude != "" { if *netdevDeviceExclude == "" { level.Warn(logger).Log("msg", "--collector.netdev.device-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.netdev.device-exclude") *netdevDeviceExclude = *oldNetdevDeviceExclude } else { return nil, errors.New("--collector.netdev.device-blacklist and --collector.netdev.device-exclude are mutually exclusive") } } if *netdevDeviceExclude != "" && *netdevDeviceInclude != "" { return nil, errors.New("device-exclude & device-include are mutually exclusive") } if *netdevDeviceExclude != "" { level.Info(logger).Log("msg", "Parsed flag --collector.netdev.device-exclude", "flag", *netdevDeviceExclude) } if *netdevDeviceInclude != "" { level.Info(logger).Log("msg", "Parsed Flag --collector.netdev.device-include", "flag", *netdevDeviceInclude) } return &netDevCollector{ subsystem: "network", deviceFilter: newDeviceFilter(*netdevDeviceExclude, *netdevDeviceInclude), metricDescs: map[string]*prometheus.Desc{}, logger: logger, }, nil } func (c *netDevCollector) metricDesc(key string) *prometheus.Desc { c.metricDescsMutex.Lock() defer c.metricDescsMutex.Unlock() if _, ok := c.metricDescs[key]; !ok { c.metricDescs[key] = prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, key+"_total"), fmt.Sprintf("Network device statistic %s.", key), []string{"device"}, nil, ) } return c.metricDescs[key] } func (c *netDevCollector) Update(ch chan<- prometheus.Metric) error { netDev, err := 
getNetDevStats(&c.deviceFilter, c.logger) if err != nil { return fmt.Errorf("couldn't get netstats: %w", err) } for dev, devStats := range netDev { if !*netdevDetailedMetrics { legacy(devStats) } for key, value := range devStats { desc := c.metricDesc(key) ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, float64(value), dev) } } if *netdevAddressInfo { interfaces, err := net.Interfaces() if err != nil { return fmt.Errorf("could not get network interfaces: %w", err) } desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, "network_address", "info"), "node network address by device", []string{"device", "address", "netmask", "scope"}, nil) for _, addr := range getAddrsInfo(interfaces) { ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1, addr.device, addr.addr, addr.netmask, addr.scope) } } return nil } type addrInfo struct { device string addr string scope string netmask string } func scope(ip net.IP) string { if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() { return "link-local" } if ip.IsInterfaceLocalMulticast() { return "interface-local" } if ip.IsGlobalUnicast() { return "global" } return "" } // getAddrsInfo returns interface name, address, scope and netmask for all interfaces. func getAddrsInfo(interfaces []net.Interface) []addrInfo { var res []addrInfo for _, ifs := range interfaces { addrs, _ := ifs.Addrs() for _, addr := range addrs { ip, ipNet, err := net.ParseCIDR(addr.String()) if err != nil { continue } size, _ := ipNet.Mask.Size() res = append(res, addrInfo{ device: ifs.Name, addr: ip.String(), scope: scope(ip), netmask: strconv.Itoa(size), }) } } return res } // https://github.com/torvalds/linux/blob/master/net/core/net-procfs.c#L75-L97 func legacy(metrics map[string]uint64) { if metric, ok := pop(metrics, "receive_errors"); ok { metrics["receive_errs"] = metric } if metric, ok := pop(metrics, "receive_dropped"); ok { metrics["receive_drop"] = metric + popz(metrics, "receive_missed_errors") } if metric, ok := pop(metrics, "receive_fifo_errors"); ok { metrics["receive_fifo"] = metric } if metric, ok := pop(metrics, "receive_frame_errors"); ok { metrics["receive_frame"] = metric + popz(metrics, "receive_length_errors") + popz(metrics, "receive_over_errors") + popz(metrics, "receive_crc_errors") } if metric, ok := pop(metrics, "multicast"); ok { metrics["receive_multicast"] = metric } if metric, ok := pop(metrics, "transmit_errors"); ok { metrics["transmit_errs"] = metric } if metric, ok := pop(metrics, "transmit_dropped"); ok { metrics["transmit_drop"] = metric } if metric, ok := pop(metrics, "transmit_fifo_errors"); ok { metrics["transmit_fifo"] = metric } if metric, ok := pop(metrics, "multicast"); ok { metrics["receive_multicast"] = metric } if metric, ok := pop(metrics, "collisions"); ok { metrics["transmit_colls"] = metric } if metric, ok := pop(metrics, "transmit_carrier_errors"); ok { metrics["transmit_carrier"] = metric + popz(metrics, "transmit_aborted_errors") + popz(metrics, "transmit_heartbeat_errors") + popz(metrics, "transmit_window_errors") } } func pop(m map[string]uint64, key string) (uint64, bool) { value, ok := m[key] delete(m, key) return value, ok } func popz(m map[string]uint64, key string) uint64 { if value, ok := m[key]; ok { delete(m, key) return value } return 0 } node_exporter-1.7.0/collector/netdev_darwin.go000066400000000000000000000060021452426057600215530ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the 
"License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nonetdev // +build !nonetdev package collector import ( "bytes" "encoding/binary" "fmt" "net" "github.com/go-kit/log" "github.com/go-kit/log/level" "golang.org/x/sys/unix" ) func getNetDevStats(filter *deviceFilter, logger log.Logger) (netDevStats, error) { netDev := netDevStats{} ifs, err := net.Interfaces() if err != nil { return nil, fmt.Errorf("net.Interfaces() failed: %w", err) } for _, iface := range ifs { if filter.ignored(iface.Name) { level.Debug(logger).Log("msg", "Ignoring device", "device", iface.Name) continue } ifaceData, err := getIfaceData(iface.Index) if err != nil { level.Debug(logger).Log("msg", "failed to load data for interface", "device", iface.Name, "err", err) continue } netDev[iface.Name] = map[string]uint64{ "receive_packets": ifaceData.Data.Ipackets, "transmit_packets": ifaceData.Data.Opackets, "receive_bytes": ifaceData.Data.Ibytes, "transmit_bytes": ifaceData.Data.Obytes, "receive_errors": ifaceData.Data.Ierrors, "transmit_errors": ifaceData.Data.Oerrors, "receive_dropped": ifaceData.Data.Iqdrops, "receive_multicast": ifaceData.Data.Imcasts, "transmit_multicast": ifaceData.Data.Omcasts, "collisions": ifaceData.Data.Collisions, "noproto": ifaceData.Data.Noproto, } } return netDev, nil } func getIfaceData(index int) (*ifMsghdr2, error) { var data ifMsghdr2 rawData, err := unix.SysctlRaw("net", unix.AF_ROUTE, 0, 0, unix.NET_RT_IFLIST2, index) if err != nil { return nil, err } err = binary.Read(bytes.NewReader(rawData), binary.LittleEndian, &data) return &data, err } type ifMsghdr2 struct { Msglen uint16 Version uint8 Type uint8 Addrs int32 Flags int32 Index uint16 _ [2]byte SndLen int32 SndMaxlen int32 SndDrops int32 Timer int32 Data ifData64 } // https://github.com/apple/darwin-xnu/blob/main/bsd/net/if_var.h#L199-L231 type ifData64 struct { Type uint8 Typelen uint8 Physical uint8 Addrlen uint8 Hdrlen uint8 Recvquota uint8 Xmitquota uint8 Unused1 uint8 Mtu uint32 Metric uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 Opackets uint64 Oerrors uint64 Collisions uint64 Ibytes uint64 Obytes uint64 Imcasts uint64 Omcasts uint64 Iqdrops uint64 Noproto uint64 Recvtiming uint32 Xmittiming uint32 Lastchange unix.Timeval32 } node_exporter-1.7.0/collector/netdev_linux.go000066400000000000000000000137451452426057600214420ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nonetdev // +build !nonetdev package collector import ( "fmt" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/jsimonetti/rtnetlink" "github.com/prometheus/procfs" ) var ( netDevNetlink = kingpin.Flag("collector.netdev.netlink", "Use netlink to gather stats instead of /proc/net/dev.").Default("true").Bool() ) func getNetDevStats(filter *deviceFilter, logger log.Logger) (netDevStats, error) { if *netDevNetlink { return netlinkStats(filter, logger) } return procNetDevStats(filter, logger) } func netlinkStats(filter *deviceFilter, logger log.Logger) (netDevStats, error) { conn, err := rtnetlink.Dial(nil) if err != nil { return nil, err } defer conn.Close() links, err := conn.Link.List() if err != nil { return nil, err } return parseNetlinkStats(links, filter, logger), nil } func parseNetlinkStats(links []rtnetlink.LinkMessage, filter *deviceFilter, logger log.Logger) netDevStats { metrics := netDevStats{} for _, msg := range links { if msg.Attributes == nil { level.Debug(logger).Log("msg", "No netlink attributes, skipping") continue } name := msg.Attributes.Name stats := msg.Attributes.Stats64 if stats32 := msg.Attributes.Stats; stats == nil && stats32 != nil { stats = &rtnetlink.LinkStats64{ RXPackets: uint64(stats32.RXPackets), TXPackets: uint64(stats32.TXPackets), RXBytes: uint64(stats32.RXBytes), TXBytes: uint64(stats32.TXBytes), RXErrors: uint64(stats32.RXErrors), TXErrors: uint64(stats32.TXErrors), RXDropped: uint64(stats32.RXDropped), TXDropped: uint64(stats32.TXDropped), Multicast: uint64(stats32.Multicast), Collisions: uint64(stats32.Collisions), RXLengthErrors: uint64(stats32.RXLengthErrors), RXOverErrors: uint64(stats32.RXOverErrors), RXCRCErrors: uint64(stats32.RXCRCErrors), RXFrameErrors: uint64(stats32.RXFrameErrors), RXFIFOErrors: uint64(stats32.RXFIFOErrors), RXMissedErrors: uint64(stats32.RXMissedErrors), TXAbortedErrors: uint64(stats32.TXAbortedErrors), TXCarrierErrors: uint64(stats32.TXCarrierErrors), TXFIFOErrors: uint64(stats32.TXFIFOErrors), TXHeartbeatErrors: uint64(stats32.TXHeartbeatErrors), TXWindowErrors: uint64(stats32.TXWindowErrors), RXCompressed: uint64(stats32.RXCompressed), TXCompressed: uint64(stats32.TXCompressed), RXNoHandler: uint64(stats32.RXNoHandler), RXOtherhostDropped: 0, } } if filter.ignored(name) { level.Debug(logger).Log("msg", "Ignoring device", "device", name) continue } // Make sure we don't panic when accessing `stats` attributes below. 
if stats == nil { level.Debug(logger).Log("msg", "No netlink stats, skipping") continue } // https://github.com/torvalds/linux/blob/master/include/uapi/linux/if_link.h#L42-L246 metrics[name] = map[string]uint64{ "receive_packets": stats.RXPackets, "transmit_packets": stats.TXPackets, "receive_bytes": stats.RXBytes, "transmit_bytes": stats.TXBytes, "receive_errors": stats.RXErrors, "transmit_errors": stats.TXErrors, "receive_dropped": stats.RXDropped, "transmit_dropped": stats.TXDropped, "multicast": stats.Multicast, "collisions": stats.Collisions, // detailed rx_errors "receive_length_errors": stats.RXLengthErrors, "receive_over_errors": stats.RXOverErrors, "receive_crc_errors": stats.RXCRCErrors, "receive_frame_errors": stats.RXFrameErrors, "receive_fifo_errors": stats.RXFIFOErrors, "receive_missed_errors": stats.RXMissedErrors, // detailed tx_errors "transmit_aborted_errors": stats.TXAbortedErrors, "transmit_carrier_errors": stats.TXCarrierErrors, "transmit_fifo_errors": stats.TXFIFOErrors, "transmit_heartbeat_errors": stats.TXHeartbeatErrors, "transmit_window_errors": stats.TXWindowErrors, // for cslip etc "receive_compressed": stats.RXCompressed, "transmit_compressed": stats.TXCompressed, "receive_nohandler": stats.RXNoHandler, } } return metrics } func procNetDevStats(filter *deviceFilter, logger log.Logger) (netDevStats, error) { metrics := netDevStats{} fs, err := procfs.NewFS(*procPath) if err != nil { return metrics, fmt.Errorf("failed to open procfs: %w", err) } netDev, err := fs.NetDev() if err != nil { return metrics, fmt.Errorf("failed to parse /proc/net/dev: %w", err) } for _, stats := range netDev { name := stats.Name if filter.ignored(name) { level.Debug(logger).Log("msg", "Ignoring device", "device", name) continue } metrics[name] = map[string]uint64{ "receive_bytes": stats.RxBytes, "receive_packets": stats.RxPackets, "receive_errors": stats.RxErrors, "receive_dropped": stats.RxDropped, "receive_fifo": stats.RxFIFO, "receive_frame": stats.RxFrame, "receive_compressed": stats.RxCompressed, "receive_multicast": stats.RxMulticast, "transmit_bytes": stats.TxBytes, "transmit_packets": stats.TxPackets, "transmit_errors": stats.TxErrors, "transmit_dropped": stats.TxDropped, "transmit_fifo": stats.TxFIFO, "transmit_colls": stats.TxCollisions, "transmit_carrier": stats.TxCarrier, "transmit_compressed": stats.TxCompressed, } } return metrics, nil } node_exporter-1.7.0/collector/netdev_linux_test.go000066400000000000000000000210141452426057600224650ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nonetdev // +build !nonetdev package collector import ( "testing" "github.com/go-kit/log" "github.com/jsimonetti/rtnetlink" ) var links = []rtnetlink.LinkMessage{ { Attributes: &rtnetlink.LinkAttributes{ Name: "tun0", Stats64: &rtnetlink.LinkStats64{ RXPackets: 24, TXPackets: 934, RXBytes: 1888, TXBytes: 67120, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "veth4B09XN", Stats64: &rtnetlink.LinkStats64{ RXPackets: 8, TXPackets: 10640, RXBytes: 648, TXBytes: 1943284, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "lo", Stats64: &rtnetlink.LinkStats64{ RXPackets: 1832522, TXPackets: 1832522, RXBytes: 435303245, TXBytes: 435303245, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "eth0", Stats64: &rtnetlink.LinkStats64{ RXPackets: 520993275, TXPackets: 43451486, RXBytes: 68210035552, TXBytes: 9315587528, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "lxcbr0", Stats64: &rtnetlink.LinkStats64{ TXPackets: 28339, TXBytes: 2630299, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "wlan0", Stats64: &rtnetlink.LinkStats64{ RXPackets: 13899359, TXPackets: 11726200, RXBytes: 10437182923, TXBytes: 2851649360, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "docker0", Stats64: &rtnetlink.LinkStats64{ RXPackets: 1065585, TXPackets: 1929779, RXBytes: 64910168, TXBytes: 2681662018, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "ibr10:30", Stats64: &rtnetlink.LinkStats64{}, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "flannel.1", Stats64: &rtnetlink.LinkStats64{ RXPackets: 228499337, TXPackets: 258369223, RXBytes: 18144009813, TXBytes: 20758990068, TXDropped: 64, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "💩0", Stats64: &rtnetlink.LinkStats64{ RXPackets: 105557, TXPackets: 304261, RXBytes: 57750104, TXBytes: 404570255, Multicast: 72, }, }, }, { Attributes: &rtnetlink.LinkAttributes{ Name: "enp0s0f0", Stats64: &rtnetlink.LinkStats64{ RXPackets: 226, TXPackets: 803, RXBytes: 231424, TXBytes: 822272, RXErrors: 14, TXErrors: 2, RXDropped: 10, TXDropped: 17, Multicast: 285, Collisions: 30, RXLengthErrors: 5, RXOverErrors: 3, RXCRCErrors: 1, RXFrameErrors: 4, RXFIFOErrors: 6, RXMissedErrors: 21, TXAbortedErrors: 22, TXCarrierErrors: 7, TXFIFOErrors: 24, TXHeartbeatErrors: 9, TXWindowErrors: 19, RXCompressed: 23, TXCompressed: 20, RXNoHandler: 62, }, }, }, } func TestNetDevStatsIgnore(t *testing.T) { filter := newDeviceFilter("^veth", "") netStats := parseNetlinkStats(links, &filter, log.NewNopLogger()) if want, got := uint64(10437182923), netStats["wlan0"]["receive_bytes"]; want != got { t.Errorf("want netstat wlan0 bytes %v, got %v", want, got) } if want, got := uint64(68210035552), netStats["eth0"]["receive_bytes"]; want != got { t.Errorf("want netstat eth0 bytes %v, got %v", want, got) } if want, got := uint64(934), netStats["tun0"]["transmit_packets"]; want != got { t.Errorf("want netstat tun0 packets %v, got %v", want, got) } if want, got := 10, len(netStats); want != got { t.Errorf("want count of devices to be %d, got %d", want, got) } if _, ok := netStats["veth4B09XN"]["transmit_bytes"]; ok { t.Error("want fixture interface veth4B09XN to not exist, but it does") } if want, got := uint64(0), netStats["ibr10:30"]["receive_fifo"]; want != got { t.Error("want fixture interface ibr10:30 to exist, but it does not") } if want, got := uint64(72), netStats["💩0"]["multicast"]; want != got { t.Error("want fixture interface 💩0 to exist, but it does not") } } func TestNetDevStatsAccept(t *testing.T) { filter := 
newDeviceFilter("", "^💩0$") netStats := parseNetlinkStats(links, &filter, log.NewNopLogger()) if want, got := 1, len(netStats); want != got { t.Errorf("want count of devices to be %d, got %d", want, got) } if want, got := uint64(72), netStats["💩0"]["multicast"]; want != got { t.Error("want fixture interface 💩0 to exist, but it does not") } } func TestNetDevLegacyMetricNames(t *testing.T) { expected := []string{ "receive_packets", "transmit_packets", "receive_bytes", "transmit_bytes", "receive_errs", "transmit_errs", "receive_drop", "transmit_drop", "receive_multicast", "transmit_colls", "receive_frame", "receive_fifo", "transmit_carrier", "transmit_fifo", "receive_compressed", "transmit_compressed", } filter := newDeviceFilter("", "") netStats := parseNetlinkStats(links, &filter, log.NewNopLogger()) for dev, devStats := range netStats { legacy(devStats) for _, name := range expected { if _, ok := devStats[name]; !ok { t.Errorf("metric %s should be defined on interface %s", name, dev) } } } } func TestNetDevLegacyMetricValues(t *testing.T) { expected := map[string]uint64{ "receive_packets": 226, "transmit_packets": 803, "receive_bytes": 231424, "transmit_bytes": 822272, "receive_errs": 14, "transmit_errs": 2, "receive_drop": 10 + 21, "transmit_drop": 17, "receive_multicast": 285, "transmit_colls": 30, "receive_frame": 5 + 3 + 1 + 4, "receive_fifo": 6, "transmit_carrier": 22 + 7 + 9 + 19, "transmit_fifo": 24, "receive_compressed": 23, "transmit_compressed": 20, } filter := newDeviceFilter("", "^enp0s0f0$") netStats := parseNetlinkStats(links, &filter, log.NewNopLogger()) metrics, ok := netStats["enp0s0f0"] if !ok { t.Error("expected stats for interface enp0s0f0") } legacy(metrics) for name, want := range expected { got, ok := metrics[name] if !ok { t.Errorf("metric %s should be defined on interface enp0s0f0", name) continue } if want != got { t.Errorf("want %s %d, got %d", name, want, got) } } } func TestNetDevMetricValues(t *testing.T) { filter := newDeviceFilter("", "") netStats := parseNetlinkStats(links, &filter, log.NewNopLogger()) for _, msg := range links { device := msg.Attributes.Name stats := msg.Attributes.Stats64 expected := map[string]uint64{ "receive_packets": stats.RXPackets, "transmit_packets": stats.TXPackets, "receive_bytes": stats.RXBytes, "transmit_bytes": stats.TXBytes, "receive_errors": stats.RXErrors, "transmit_errors": stats.TXErrors, "receive_dropped": stats.RXDropped, "transmit_dropped": stats.TXDropped, "multicast": stats.Multicast, "collisions": stats.Collisions, // detailed rx_errors "receive_length_errors": stats.RXLengthErrors, "receive_over_errors": stats.RXOverErrors, "receive_crc_errors": stats.RXCRCErrors, "receive_frame_errors": stats.RXFrameErrors, "receive_fifo_errors": stats.RXFIFOErrors, "receive_missed_errors": stats.RXMissedErrors, // detailed tx_errors "transmit_aborted_errors": stats.TXAbortedErrors, "transmit_carrier_errors": stats.TXCarrierErrors, "transmit_fifo_errors": stats.TXFIFOErrors, "transmit_heartbeat_errors": stats.TXHeartbeatErrors, "transmit_window_errors": stats.TXWindowErrors, // for cslip etc "receive_compressed": stats.RXCompressed, "transmit_compressed": stats.TXCompressed, "receive_nohandler": stats.RXNoHandler, } for name, want := range expected { devStats, ok := netStats[device] if !ok { t.Errorf("expected stats for interface %s", device) } got, ok := devStats[name] if !ok { t.Errorf("metric %s should be defined on interface %s", name, device) } if want != got { t.Errorf("want %s %d, got %d", name, want, got) } } } } 
node_exporter-1.7.0/collector/netdev_openbsd.go000066400000000000000000000041621452426057600217260ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nonetdev && !amd64 // +build !nonetdev,!amd64 package collector import ( "errors" "github.com/go-kit/log" "github.com/go-kit/log/level" ) /* #include <sys/types.h> #include <sys/socket.h> #include <ifaddrs.h> #include <net/if.h> */ import "C" func getNetDevStats(filter *deviceFilter, logger log.Logger) (netDevStats, error) { netDev := netDevStats{} var ifap, ifa *C.struct_ifaddrs if C.getifaddrs(&ifap) == -1 { return nil, errors.New("getifaddrs() failed") } defer C.freeifaddrs(ifap) for ifa = ifap; ifa != nil; ifa = ifa.ifa_next { if ifa.ifa_addr.sa_family != C.AF_LINK { continue } dev := C.GoString(ifa.ifa_name) if filter.ignored(dev) { level.Debug(logger).Log("msg", "Ignoring device", "device", dev) continue } data := (*C.struct_if_data)(ifa.ifa_data) // https://github.com/openbsd/src/blob/master/sys/net/if.h#L101-L126 netDev[dev] = map[string]uint64{ "receive_packets": uint64(data.ifi_ipackets), "transmit_packets": uint64(data.ifi_opackets), "receive_bytes": uint64(data.ifi_ibytes), "transmit_bytes": uint64(data.ifi_obytes), "receive_errors": uint64(data.ifi_ierrors), "transmit_errors": uint64(data.ifi_oerrors), "receive_dropped": uint64(data.ifi_iqdrops), "transmit_dropped": uint64(data.ifi_oqdrops), "receive_multicast": uint64(data.ifi_imcasts), "transmit_multicast": uint64(data.ifi_omcasts), "collisions": uint64(data.ifi_collisions), "noproto": uint64(data.ifi_noproto), } } return netDev, nil } node_exporter-1.7.0/collector/netdev_openbsd_amd64.go000066400000000000000000000044571452426057600227300ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
//go:build !nonetdev // +build !nonetdev package collector import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "golang.org/x/sys/unix" "unsafe" ) func getNetDevStats(filter *deviceFilter, logger log.Logger) (netDevStats, error) { netDev := netDevStats{} mib := [6]_C_int{unix.CTL_NET, unix.AF_ROUTE, 0, 0, unix.NET_RT_IFLIST, 0} buf, err := sysctl(mib[:]) if err != nil { return nil, err } n := uintptr(len(buf)) index := uintptr(unsafe.Pointer(&buf[0])) next := uintptr(0) var rtm *unix.RtMsghdr for next = index; next < (index + n); next += uintptr(rtm.Msglen) { rtm = (*unix.RtMsghdr)(unsafe.Pointer(next)) if rtm.Version != unix.RTM_VERSION || rtm.Type != unix.RTM_IFINFO { continue } ifm := (*unix.IfMsghdr)(unsafe.Pointer(next)) if ifm.Addrs&unix.RTA_IFP == 0 { continue } dl := (*unix.RawSockaddrDatalink)(unsafe.Pointer(next + uintptr(rtm.Hdrlen))) if dl.Family != unix.AF_LINK { continue } data := ifm.Data dev := int8ToString(dl.Data[:dl.Nlen]) if filter.ignored(dev) { level.Debug(logger).Log("msg", "Ignoring device", "device", dev) continue } // https://cs.opensource.google/go/x/sys/+/master:unix/ztypes_openbsd_amd64.go;l=292-316 netDev[dev] = map[string]uint64{ "receive_packets": data.Ipackets, "transmit_packets": data.Opackets, "receive_bytes": data.Ibytes, "transmit_bytes": data.Obytes, "receive_errors": data.Ierrors, "transmit_errors": data.Oerrors, "receive_dropped": data.Iqdrops, "transmit_dropped": data.Oqdrops, "receive_multicast": data.Imcasts, "transmit_multicast": data.Omcasts, "collisions": data.Collisions, "noproto": data.Noproto, } } return netDev, nil } node_exporter-1.7.0/collector/netisr_freebsd.go000066400000000000000000000054761452426057600217360ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nonetisr // +build !nonetisr package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) type netisrCollector struct { sysctls []bsdSysctl logger log.Logger } const ( netisrCollectorSubsystem = "netisr" ) func init() { registerCollector("netisr", defaultEnabled, NewNetisrCollector) } func NewNetisrCollector(logger log.Logger) (Collector, error) { return &netisrCollector{ sysctls: []bsdSysctl{ { name: "numthreads", description: "netisr current thread count", mib: "net.isr.numthreads", dataType: bsdSysctlTypeUint32, valueType: prometheus.GaugeValue, labels: nil, }, { name: "maxprot", description: "netisr maximum protocols", mib: "net.isr.maxprot", dataType: bsdSysctlTypeUint32, valueType: prometheus.GaugeValue, labels: nil, }, { name: "defaultqlimit", description: "netisr default queue limit", mib: "net.isr.defaultqlimit", dataType: bsdSysctlTypeUint32, valueType: prometheus.GaugeValue, labels: nil, }, { name: "maxqlimit", description: "netisr maximum queue limit", mib: "net.isr.maxqlimit", dataType: bsdSysctlTypeUint32, valueType: prometheus.GaugeValue, labels: nil, }, { name: "bindthreads", description: "netisr threads bound to CPUs", mib: "net.isr.bindthreads", dataType: bsdSysctlTypeUint32, valueType: prometheus.GaugeValue, labels: nil, }, { name: "maxthreads", description: "netisr maximum thread count", mib: "net.isr.maxthreads", dataType: bsdSysctlTypeUint32, valueType: prometheus.GaugeValue, labels: nil, }, }, logger: logger, }, nil } func (c *netisrCollector) Update(ch chan<- prometheus.Metric) error { for _, m := range c.sysctls { v, err := m.Value() if err != nil { return fmt.Errorf("couldn't get sysctl: %w", err) } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, netisrCollectorSubsystem, m.name), m.description, nil, nil, ), m.valueType, v) } return nil } node_exporter-1.7.0/collector/netstat_linux.go000066400000000000000000000115541452426057600216330ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nonetstat // +build !nonetstat package collector import ( "bufio" "errors" "fmt" "io" "os" "regexp" "strconv" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( netStatsSubsystem = "netstat" ) var ( netStatFields = kingpin.Flag("collector.netstat.fields", "Regexp of fields to return for netstat collector.").Default("^(.*_(InErrors|InErrs)|Ip_Forwarding|Ip(6|Ext)_(InOctets|OutOctets)|Icmp6?_(InMsgs|OutMsgs)|TcpExt_(Listen.*|Syncookies.*|TCPSynRetrans|TCPTimeouts)|Tcp_(ActiveOpens|InSegs|OutSegs|OutRsts|PassiveOpens|RetransSegs|CurrEstab)|Udp6?_(InDatagrams|OutDatagrams|NoPorts|RcvbufErrors|SndbufErrors))$").String() ) type netStatCollector struct { fieldPattern *regexp.Regexp logger log.Logger } func init() { registerCollector("netstat", defaultEnabled, NewNetStatCollector) } // NewNetStatCollector takes and returns // a new Collector exposing network stats. func NewNetStatCollector(logger log.Logger) (Collector, error) { pattern := regexp.MustCompile(*netStatFields) return &netStatCollector{ fieldPattern: pattern, logger: logger, }, nil } func (c *netStatCollector) Update(ch chan<- prometheus.Metric) error { netStats, err := getNetStats(procFilePath("net/netstat")) if err != nil { return fmt.Errorf("couldn't get netstats: %w", err) } snmpStats, err := getNetStats(procFilePath("net/snmp")) if err != nil { return fmt.Errorf("couldn't get SNMP stats: %w", err) } snmp6Stats, err := getSNMP6Stats(procFilePath("net/snmp6")) if err != nil { return fmt.Errorf("couldn't get SNMP6 stats: %w", err) } // Merge the results of snmpStats into netStats (collisions are possible, but // we know that the keys are always unique for the given use case). for k, v := range snmpStats { netStats[k] = v } for k, v := range snmp6Stats { netStats[k] = v } for protocol, protocolStats := range netStats { for name, value := range protocolStats { key := protocol + "_" + name v, err := strconv.ParseFloat(value, 64) if err != nil { return fmt.Errorf("invalid value %s in netstats: %w", value, err) } if !c.fieldPattern.MatchString(key) { continue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, netStatsSubsystem, key), fmt.Sprintf("Statistic %s.", protocol+name), nil, nil, ), prometheus.UntypedValue, v, ) } } return nil } func getNetStats(fileName string) (map[string]map[string]string, error) { file, err := os.Open(fileName) if err != nil { return nil, err } defer file.Close() return parseNetStats(file, fileName) } func parseNetStats(r io.Reader, fileName string) (map[string]map[string]string, error) { var ( netStats = map[string]map[string]string{} scanner = bufio.NewScanner(r) ) for scanner.Scan() { nameParts := strings.Split(scanner.Text(), " ") scanner.Scan() valueParts := strings.Split(scanner.Text(), " ") // Remove trailing :. protocol := nameParts[0][:len(nameParts[0])-1] netStats[protocol] = map[string]string{} if len(nameParts) != len(valueParts) { return nil, fmt.Errorf("field count mismatch in %s: %s", fileName, protocol) } for i := 1; i < len(nameParts); i++ { netStats[protocol][nameParts[i]] = valueParts[i] } } return netStats, scanner.Err() } func getSNMP6Stats(fileName string) (map[string]map[string]string, error) { file, err := os.Open(fileName) if err != nil { // On systems with IPv6 disabled, this file won't exist. // Do nothing.
if errors.Is(err, os.ErrNotExist) { return nil, nil } return nil, err } defer file.Close() return parseSNMP6Stats(file) } func parseSNMP6Stats(r io.Reader) (map[string]map[string]string, error) { var ( netStats = map[string]map[string]string{} scanner = bufio.NewScanner(r) ) for scanner.Scan() { stat := strings.Fields(scanner.Text()) if len(stat) < 2 { continue } // Expect to have "6" in metric name, skip line otherwise if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { protocol := stat[0][:sixIndex+1] name := stat[0][sixIndex+1:] if _, present := netStats[protocol]; !present { netStats[protocol] = map[string]string{} } netStats[protocol][name] = stat[1] } } return netStats, scanner.Err() } node_exporter-1.7.0/collector/netstat_linux_test.go000066400000000000000000000051371452426057600226720ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nonetstat // +build !nonetstat package collector import ( "os" "testing" ) func TestNetStats(t *testing.T) { testNetStats(t, "fixtures/proc/net/netstat") testSNMPStats(t, "fixtures/proc/net/snmp") testSNMP6Stats(t, "fixtures/proc/net/snmp6") } func testNetStats(t *testing.T, fileName string) { file, err := os.Open(fileName) if err != nil { t.Fatal(err) } defer file.Close() netStats, err := parseNetStats(file, fileName) if err != nil { t.Fatal(err) } if want, got := "102471", netStats["TcpExt"]["DelayedACKs"]; want != got { t.Errorf("want netstat TCP DelayedACKs %s, got %s", want, got) } if want, got := "2786264347", netStats["IpExt"]["OutOctets"]; want != got { t.Errorf("want netstat IP OutOctets %s, got %s", want, got) } } func testSNMPStats(t *testing.T, fileName string) { file, err := os.Open(fileName) if err != nil { t.Fatal(err) } defer file.Close() snmpStats, err := parseNetStats(file, fileName) if err != nil { t.Fatal(err) } if want, got := "9", snmpStats["Udp"]["RcvbufErrors"]; want != got { t.Errorf("want netstat Udp RcvbufErrors %s, got %s", want, got) } if want, got := "8", snmpStats["Udp"]["SndbufErrors"]; want != got { t.Errorf("want netstat Udp SndbufErrors %s, got %s", want, got) } } func testSNMP6Stats(t *testing.T, fileName string) { file, err := os.Open(fileName) if err != nil { t.Fatal(err) } defer file.Close() snmp6Stats, err := parseSNMP6Stats(file) if err != nil { t.Fatal(err) } if want, got := "460", snmp6Stats["Ip6"]["InOctets"]; want != got { t.Errorf("want netstat IPv6 InOctets %s, got %s", want, got) } if want, got := "8", snmp6Stats["Icmp6"]["OutMsgs"]; want != got { t.Errorf("want netstat ICPM6 OutMsgs %s, got %s", want, got) } if want, got := "9", snmp6Stats["Udp6"]["RcvbufErrors"]; want != got { t.Errorf("want netstat Udp6 RcvbufErrors %s, got %s", want, got) } if want, got := "8", snmp6Stats["Udp6"]["SndbufErrors"]; want != got { t.Errorf("want netstat Udp6 SndbufErrors %s, got %s", want, got) } } node_exporter-1.7.0/collector/network_route_linux.go000066400000000000000000000122641452426057600230570ustar00rootroot00000000000000// Copyright 
2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nonetworkroute // +build !nonetworkroute package collector import ( "fmt" "golang.org/x/sys/unix" "net" "strconv" "github.com/go-kit/log" "github.com/jsimonetti/rtnetlink" "github.com/prometheus/client_golang/prometheus" ) type networkRouteCollector struct { routeInfoDesc *prometheus.Desc routesDesc *prometheus.Desc logger log.Logger } func init() { registerCollector("network_route", defaultDisabled, NewNetworkRouteCollector) } // NewNetworkRouteCollector returns a new Collector exposing network route statistics. func NewNetworkRouteCollector(logger log.Logger) (Collector, error) { const subsystem = "network" routeInfoDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "route_info"), "network routing table information", []string{"device", "src", "dest", "gw", "priority", "proto", "weight"}, nil, ) routesDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "routes"), "network routes by interface", []string{"device"}, nil, ) return &networkRouteCollector{ routeInfoDesc: routeInfoDesc, routesDesc: routesDesc, logger: logger, }, nil } func (n networkRouteCollector) Update(ch chan<- prometheus.Metric) error { deviceRoutes := make(map[string]int) conn, err := rtnetlink.Dial(nil) if err != nil { return fmt.Errorf("couldn't connect rtnetlink: %w", err) } defer conn.Close() links, err := conn.Link.List() if err != nil { return fmt.Errorf("couldn't get links: %w", err) } routes, err := conn.Route.List() if err != nil { return fmt.Errorf("couldn't get routes: %w", err) } for _, route := range routes { if route.Type != unix.RTA_DST { continue } if len(route.Attributes.Multipath) != 0 { for _, nextHop := range route.Attributes.Multipath { ifName := "" for _, link := range links { if link.Index == nextHop.Hop.IfIndex { ifName = link.Attributes.Name break } } labels := []string{ ifName, // if networkRouteIPToString(route.Attributes.Src), // src networkRouteIPWithPrefixToString(route.Attributes.Dst, route.DstLength), // dest networkRouteIPToString(nextHop.Gateway), // gw strconv.FormatUint(uint64(route.Attributes.Priority), 10), // priority(metrics) networkRouteProtocolToString(route.Protocol), // proto strconv.Itoa(int(nextHop.Hop.Hops) + 1), // weight } ch <- prometheus.MustNewConstMetric(n.routeInfoDesc, prometheus.GaugeValue, 1, labels...) deviceRoutes[ifName]++ } } else { ifName := "" for _, link := range links { if link.Index == route.Attributes.OutIface { ifName = link.Attributes.Name break } } labels := []string{ ifName, // if networkRouteIPToString(route.Attributes.Src), // src networkRouteIPWithPrefixToString(route.Attributes.Dst, route.DstLength), // dest networkRouteIPToString(route.Attributes.Gateway), // gw strconv.FormatUint(uint64(route.Attributes.Priority), 10), // priority(metrics) networkRouteProtocolToString(route.Protocol), // proto "", // weight } ch <- prometheus.MustNewConstMetric(n.routeInfoDesc, prometheus.GaugeValue, 1, labels...)
deviceRoutes[ifName]++ } } for dev, total := range deviceRoutes { ch <- prometheus.MustNewConstMetric(n.routesDesc, prometheus.GaugeValue, float64(total), dev) } return nil } func networkRouteIPWithPrefixToString(ip net.IP, len uint8) string { if len == 0 { return "default" } iplen := net.IPv4len if ip.To4() == nil { iplen = net.IPv6len } network := &net.IPNet{ IP: ip, Mask: net.CIDRMask(int(len), iplen*8), } return network.String() } func networkRouteIPToString(ip net.IP) string { if len(ip) == 0 { return "" } return ip.String() } func networkRouteProtocolToString(protocol uint8) string { // from linux kernel 'include/uapi/linux/rtnetlink.h' switch protocol { case 0: return "unspec" case 1: return "redirect" case 2: return "kernel" case 3: return "boot" case 4: return "static" case 8: return "gated" case 9: return "ra" case 10: return "mrt" case 11: return "zebra" case 12: return "bird" case 13: return "dnrouted" case 14: return "xorp" case 15: return "ntk" case 16: return "dhcp" case 17: return "mrouted" case 42: return "babel" case 186: return "bgp" case 187: return "isis" case 188: return "ospf" case 189: return "rip" case 192: return "eigrp" } return "unknown" } node_exporter-1.7.0/collector/nfs_linux.go000066400000000000000000000131701452426057600207330ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nonfs // +build !nonfs package collector import ( "errors" "fmt" "os" "reflect" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/nfs" ) const ( nfsSubsystem = "nfs" ) type nfsCollector struct { fs nfs.FS nfsNetReadsDesc *prometheus.Desc nfsNetConnectionsDesc *prometheus.Desc nfsRPCOperationsDesc *prometheus.Desc nfsRPCRetransmissionsDesc *prometheus.Desc nfsRPCAuthenticationRefreshesDesc *prometheus.Desc nfsProceduresDesc *prometheus.Desc logger log.Logger } func init() { registerCollector("nfs", defaultEnabled, NewNfsCollector) } // NewNfsCollector returns a new Collector exposing NFS statistics. 
func NewNfsCollector(logger log.Logger) (Collector, error) { fs, err := nfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &nfsCollector{ fs: fs, nfsNetReadsDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsSubsystem, "packets_total"), "Total NFSd network packets (sent+received) by protocol type.", []string{"protocol"}, nil, ), nfsNetConnectionsDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsSubsystem, "connections_total"), "Total number of NFSd TCP connections.", nil, nil, ), nfsRPCOperationsDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsSubsystem, "rpcs_total"), "Total number of RPCs performed.", nil, nil, ), nfsRPCRetransmissionsDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsSubsystem, "rpc_retransmissions_total"), "Number of RPC retransmissions performed.", nil, nil, ), nfsRPCAuthenticationRefreshesDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsSubsystem, "rpc_authentication_refreshes_total"), "Number of RPC authentication refreshes performed.", nil, nil, ), nfsProceduresDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsSubsystem, "requests_total"), "Number of NFS procedures invoked.", []string{"proto", "method"}, nil, ), logger: logger, }, nil } func (c *nfsCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.ClientRPCStats() if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "Not collecting NFS metrics", "err", err) return ErrNoData } return fmt.Errorf("failed to retrieve nfs stats: %w", err) } c.updateNFSNetworkStats(ch, &stats.Network) c.updateNFSClientRPCStats(ch, &stats.ClientRPC) c.updateNFSRequestsv2Stats(ch, &stats.V2Stats) c.updateNFSRequestsv3Stats(ch, &stats.V3Stats) c.updateNFSRequestsv4Stats(ch, &stats.ClientV4Stats) return nil } // updateNFSNetworkStats collects statistics for network packets/connections. func (c *nfsCollector) updateNFSNetworkStats(ch chan<- prometheus.Metric, s *nfs.Network) { ch <- prometheus.MustNewConstMetric(c.nfsNetReadsDesc, prometheus.CounterValue, float64(s.UDPCount), "udp") ch <- prometheus.MustNewConstMetric(c.nfsNetReadsDesc, prometheus.CounterValue, float64(s.TCPCount), "tcp") ch <- prometheus.MustNewConstMetric(c.nfsNetConnectionsDesc, prometheus.CounterValue, float64(s.TCPConnect)) } // updateNFSClientRPCStats collects statistics for kernel client RPCs. func (c *nfsCollector) updateNFSClientRPCStats(ch chan<- prometheus.Metric, s *nfs.ClientRPC) { ch <- prometheus.MustNewConstMetric(c.nfsRPCOperationsDesc, prometheus.CounterValue, float64(s.RPCCount)) ch <- prometheus.MustNewConstMetric(c.nfsRPCRetransmissionsDesc, prometheus.CounterValue, float64(s.Retransmissions)) ch <- prometheus.MustNewConstMetric(c.nfsRPCAuthenticationRefreshesDesc, prometheus.CounterValue, float64(s.AuthRefreshes)) } // updateNFSRequestsv2Stats collects statistics for NFSv2 requests. func (c *nfsCollector) updateNFSRequestsv2Stats(ch chan<- prometheus.Metric, s *nfs.V2Stats) { const proto = "2" v := reflect.ValueOf(s).Elem() for i := 0; i < v.NumField(); i++ { field := v.Field(i) ch <- prometheus.MustNewConstMetric(c.nfsProceduresDesc, prometheus.CounterValue, float64(field.Uint()), proto, v.Type().Field(i).Name) } } // updateNFSRequestsv3Stats collects statistics for NFSv3 requests.
func (c *nfsCollector) updateNFSRequestsv3Stats(ch chan<- prometheus.Metric, s *nfs.V3Stats) { const proto = "3" v := reflect.ValueOf(s).Elem() for i := 0; i < v.NumField(); i++ { field := v.Field(i) ch <- prometheus.MustNewConstMetric(c.nfsProceduresDesc, prometheus.CounterValue, float64(field.Uint()), proto, v.Type().Field(i).Name) } } // updateNFSRequestsv4Stats collects statistics for NFSv4 requests. func (c *nfsCollector) updateNFSRequestsv4Stats(ch chan<- prometheus.Metric, s *nfs.ClientV4Stats) { const proto = "4" v := reflect.ValueOf(s).Elem() for i := 0; i < v.NumField(); i++ { field := v.Field(i) ch <- prometheus.MustNewConstMetric(c.nfsProceduresDesc, prometheus.CounterValue, float64(field.Uint()), proto, v.Type().Field(i).Name) } } node_exporter-1.7.0/collector/nfsd_linux.go000066400000000000000000000407241452426057600211040ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nonfsd // +build !nonfsd package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/nfs" ) // A nfsdCollector is a Collector which gathers metrics from /proc/net/rpc/nfsd. // See: https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ type nfsdCollector struct { fs nfs.FS requestsDesc *prometheus.Desc logger log.Logger } func init() { registerCollector("nfsd", defaultEnabled, NewNFSdCollector) } const ( nfsdSubsystem = "nfsd" ) // NewNFSdCollector returns a new Collector exposing /proc/net/rpc/nfsd statistics. func NewNFSdCollector(logger log.Logger) (Collector, error) { fs, err := nfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &nfsdCollector{ fs: fs, requestsDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "requests_total"), "Total number NFSd Requests by method and protocol.", []string{"proto", "method"}, nil, ), logger: logger, }, nil } // Update implements Collector. func (c *nfsdCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.ServerRPCStats() if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "Not collecting NFSd metrics", "err", err) return ErrNoData } return fmt.Errorf("failed to retrieve nfsd stats: %w", err) } c.updateNFSdReplyCacheStats(ch, &stats.ReplyCache) c.updateNFSdFileHandlesStats(ch, &stats.FileHandles) c.updateNFSdInputOutputStats(ch, &stats.InputOutput) c.updateNFSdThreadsStats(ch, &stats.Threads) c.updateNFSdReadAheadCacheStats(ch, &stats.ReadAheadCache) c.updateNFSdNetworkStats(ch, &stats.Network) c.updateNFSdServerRPCStats(ch, &stats.ServerRPC) c.updateNFSdRequestsv2Stats(ch, &stats.V2Stats) c.updateNFSdRequestsv3Stats(ch, &stats.V3Stats) c.updateNFSdRequestsv4Stats(ch, &stats.V4Ops) return nil } // updateNFSdReplyCacheStats collects statistics for the reply cache. 
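// The descriptors are built inline on every call; assuming the package-level
// namespace constant is "node", the emitted families are
// node_nfsd_reply_cache_hits_total, node_nfsd_reply_cache_misses_total and
// node_nfsd_reply_cache_nocache_total (one unlabeled counter each).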
func (c *nfsdCollector) updateNFSdReplyCacheStats(ch chan<- prometheus.Metric, s *nfs.ReplyCache) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "reply_cache_hits_total"), "Total number of NFSd Reply Cache hits (client lost server response).", nil, nil, ), prometheus.CounterValue, float64(s.Hits)) ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "reply_cache_misses_total"), "Total number of NFSd Reply Cache an operation that requires caching (idempotent).", nil, nil, ), prometheus.CounterValue, float64(s.Misses)) ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "reply_cache_nocache_total"), "Total number of NFSd Reply Cache non-idempotent operations (rename/delete/…).", nil, nil, ), prometheus.CounterValue, float64(s.NoCache)) } // updateNFSdFileHandlesStats collects statistics for the file handles. func (c *nfsdCollector) updateNFSdFileHandlesStats(ch chan<- prometheus.Metric, s *nfs.FileHandles) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "file_handles_stale_total"), "Total number of NFSd stale file handles", nil, nil, ), prometheus.CounterValue, float64(s.Stale)) // NOTE: Other FileHandles entries are unused in the kernel. } // updateNFSdInputOutputStats collects statistics for the bytes in/out. func (c *nfsdCollector) updateNFSdInputOutputStats(ch chan<- prometheus.Metric, s *nfs.InputOutput) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "disk_bytes_read_total"), "Total NFSd bytes read.", nil, nil, ), prometheus.CounterValue, float64(s.Read)) ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "disk_bytes_written_total"), "Total NFSd bytes written.", nil, nil, ), prometheus.CounterValue, float64(s.Write)) } // updateNFSdThreadsStats collects statistics for kernel server threads. func (c *nfsdCollector) updateNFSdThreadsStats(ch chan<- prometheus.Metric, s *nfs.Threads) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "server_threads"), "Total number of NFSd kernel threads that are running.", nil, nil, ), prometheus.GaugeValue, float64(s.Threads)) } // updateNFSdReadAheadCacheStats collects statistics for the read ahead cache. func (c *nfsdCollector) updateNFSdReadAheadCacheStats(ch chan<- prometheus.Metric, s *nfs.ReadAheadCache) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "read_ahead_cache_size_blocks"), "How large the read ahead cache is in blocks.", nil, nil, ), prometheus.GaugeValue, float64(s.CacheSize)) ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "read_ahead_cache_not_found_total"), "Total number of NFSd read ahead cache not found.", nil, nil, ), prometheus.CounterValue, float64(s.NotFound)) } // updateNFSdNetworkStats collects statistics for network packets/connections. 
func (c *nfsdCollector) updateNFSdNetworkStats(ch chan<- prometheus.Metric, s *nfs.Network) { packetDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "packets_total"), "Total NFSd network packets (sent+received) by protocol type.", []string{"proto"}, nil, ) ch <- prometheus.MustNewConstMetric( packetDesc, prometheus.CounterValue, float64(s.UDPCount), "udp") ch <- prometheus.MustNewConstMetric( packetDesc, prometheus.CounterValue, float64(s.TCPCount), "tcp") ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "connections_total"), "Total number of NFSd TCP connections.", nil, nil, ), prometheus.CounterValue, float64(s.TCPConnect)) } // updateNFSdServerRPCStats collects statistics for kernel server RPCs. func (c *nfsdCollector) updateNFSdServerRPCStats(ch chan<- prometheus.Metric, s *nfs.ServerRPC) { badRPCDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "rpc_errors_total"), "Total number of NFSd RPC errors by error type.", []string{"error"}, nil, ) ch <- prometheus.MustNewConstMetric( badRPCDesc, prometheus.CounterValue, float64(s.BadFmt), "fmt") ch <- prometheus.MustNewConstMetric( badRPCDesc, prometheus.CounterValue, float64(s.BadAuth), "auth") ch <- prometheus.MustNewConstMetric( badRPCDesc, prometheus.CounterValue, float64(s.BadcInt), "cInt") ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, nfsdSubsystem, "server_rpcs_total"), "Total number of NFSd RPCs.", nil, nil, ), prometheus.CounterValue, float64(s.RPCCount)) } // updateNFSdRequestsv2Stats collects statistics for NFSv2 requests. func (c *nfsdCollector) updateNFSdRequestsv2Stats(ch chan<- prometheus.Metric, s *nfs.V2Stats) { const proto = "2" ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.GetAttr), proto, "GetAttr") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SetAttr), proto, "SetAttr") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Root), proto, "Root") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Lookup), proto, "Lookup") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.ReadLink), proto, "ReadLink") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Read), proto, "Read") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.WrCache), proto, "WrCache") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Write), proto, "Write") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Create), proto, "Create") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Remove), proto, "Remove") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Rename), proto, "Rename") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Link), proto, "Link") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SymLink), proto, "SymLink") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.MkDir), proto, "MkDir") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.RmDir), proto, "RmDir") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, 
float64(s.ReadDir), proto, "ReadDir") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.FsStat), proto, "FsStat") } // updateNFSdRequestsv3Stats collects statistics for NFSv3 requests. func (c *nfsdCollector) updateNFSdRequestsv3Stats(ch chan<- prometheus.Metric, s *nfs.V3Stats) { const proto = "3" ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.GetAttr), proto, "GetAttr") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SetAttr), proto, "SetAttr") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Lookup), proto, "Lookup") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Access), proto, "Access") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.ReadLink), proto, "ReadLink") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Read), proto, "Read") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Write), proto, "Write") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Create), proto, "Create") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.MkDir), proto, "MkDir") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SymLink), proto, "SymLink") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.MkNod), proto, "MkNod") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Remove), proto, "Remove") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.RmDir), proto, "RmDir") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Rename), proto, "Rename") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Link), proto, "Link") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.ReadDir), proto, "ReadDir") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.ReadDirPlus), proto, "ReadDirPlus") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.FsStat), proto, "FsStat") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.FsInfo), proto, "FsInfo") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.PathConf), proto, "PathConf") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Commit), proto, "Commit") } // updateNFSdRequestsv4Stats collects statistics for NFSv4 requests. 
func (c *nfsdCollector) updateNFSdRequestsv4Stats(ch chan<- prometheus.Metric, s *nfs.V4Ops) { const proto = "4" ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Access), proto, "Access") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Close), proto, "Close") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Commit), proto, "Commit") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Create), proto, "Create") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.DelegPurge), proto, "DelegPurge") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.DelegReturn), proto, "DelegReturn") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.GetAttr), proto, "GetAttr") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.GetFH), proto, "GetFH") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Link), proto, "Link") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Lock), proto, "Lock") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Lockt), proto, "Lockt") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Locku), proto, "Locku") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Lookup), proto, "Lookup") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.LookupRoot), proto, "LookupRoot") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Nverify), proto, "Nverify") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Open), proto, "Open") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.OpenAttr), proto, "OpenAttr") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.OpenConfirm), proto, "OpenConfirm") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.OpenDgrd), proto, "OpenDgrd") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.PutFH), proto, "PutFH") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Read), proto, "Read") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.ReadDir), proto, "ReadDir") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.ReadLink), proto, "ReadLink") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Remove), proto, "Remove") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Rename), proto, "Rename") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Renew), proto, "Renew") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.RestoreFH), proto, "RestoreFH") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SaveFH), proto, "SaveFH") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SecInfo), proto, "SecInfo") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SetAttr), proto, "SetAttr") ch <- 
prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SetClientID), proto, "SetClientID") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.SetClientIDConfirm), proto, "SetClientIDConfirm") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Verify), proto, "Verify") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.Write), proto, "Write") ch <- prometheus.MustNewConstMetric(c.requestsDesc, prometheus.CounterValue, float64(s.RelLockOwner), proto, "RelLockOwner") } node_exporter-1.7.0/collector/ntp.go000066400000000000000000000154471452426057600175400ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nontp // +build !nontp package collector import ( "fmt" "net" "sync" "time" "github.com/alecthomas/kingpin/v2" "github.com/beevik/ntp" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const ( hour24 = 24 * time.Hour // `time` does not export `Day` as Day != 24h because of DST ntpSubsystem = "ntp" ) var ( ntpServer = kingpin.Flag("collector.ntp.server", "NTP server to use for ntp collector").Default("127.0.0.1").String() ntpServerPort = kingpin.Flag("collector.ntp.server-port", "UDP port number to connect to on NTP server").Default("123").Int() ntpProtocolVersion = kingpin.Flag("collector.ntp.protocol-version", "NTP protocol version").Default("4").Int() ntpServerIsLocal = kingpin.Flag("collector.ntp.server-is-local", "Certify that collector.ntp.server address is not a public ntp server").Default("false").Bool() ntpIPTTL = kingpin.Flag("collector.ntp.ip-ttl", "IP TTL to use while sending NTP query").Default("1").Int() // 3.46608s ~ 1.5s + PHI * (1 << maxPoll), where 1.5s is MAXDIST from ntp.org, it is 1.0 in RFC5905 // max-distance option is used as-is without phi*(1< 4 { return nil, fmt.Errorf("invalid NTP protocol version %d; must be 2, 3, or 4", *ntpProtocolVersion) } if *ntpOffsetTolerance < 0 { return nil, fmt.Errorf("offset tolerance must be non-negative") } if *ntpServerPort < 1 || *ntpServerPort > 65535 { return nil, fmt.Errorf("invalid NTP port number %d; must be between 1 and 65535 inclusive", *ntpServerPort) } level.Warn(logger).Log("msg", "This collector is deprecated and will be removed in the next major version release.") return &ntpCollector{ stratum: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "stratum"), "NTPD stratum.", nil, nil, ), prometheus.GaugeValue}, leap: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "leap"), "NTPD leap second indicator, 2 bits.", nil, nil, ), prometheus.GaugeValue}, rtt: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "rtt_seconds"), "RTT to NTPD.", nil, nil, ), prometheus.GaugeValue}, offset: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "offset_seconds"), "ClockOffset 
between NTP and local clock.", nil, nil, ), prometheus.GaugeValue}, reftime: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "reference_timestamp_seconds"), "NTPD ReferenceTime, UNIX timestamp.", nil, nil, ), prometheus.GaugeValue}, rootDelay: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "root_delay_seconds"), "NTPD RootDelay.", nil, nil, ), prometheus.GaugeValue}, rootDispersion: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "root_dispersion_seconds"), "NTPD RootDispersion.", nil, nil, ), prometheus.GaugeValue}, sanity: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, ntpSubsystem, "sanity"), "NTPD sanity according to RFC5905 heuristics and configured limits.", nil, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } func (c *ntpCollector) Update(ch chan<- prometheus.Metric) error { resp, err := ntp.QueryWithOptions(*ntpServer, ntp.QueryOptions{ Version: *ntpProtocolVersion, TTL: *ntpIPTTL, Timeout: time.Second, // default `ntpdate` timeout Port: *ntpServerPort, }) if err != nil { return fmt.Errorf("couldn't get SNTP reply: %w", err) } ch <- c.stratum.mustNewConstMetric(float64(resp.Stratum)) ch <- c.leap.mustNewConstMetric(float64(resp.Leap)) ch <- c.rtt.mustNewConstMetric(resp.RTT.Seconds()) ch <- c.offset.mustNewConstMetric(resp.ClockOffset.Seconds()) if resp.ReferenceTime.Unix() > 0 { // Go Zero is 0001-01-01 00:00:00 UTC // NTP Zero is 1900-01-01 00:00:00 UTC // UNIX Zero is 1970-01-01 00:00:00 UTC // so let's keep ALL ancient `reftime` values as zero ch <- c.reftime.mustNewConstMetric(float64(resp.ReferenceTime.UnixNano()) / 1e9) } else { ch <- c.reftime.mustNewConstMetric(0) } ch <- c.rootDelay.mustNewConstMetric(resp.RootDelay.Seconds()) ch <- c.rootDispersion.mustNewConstMetric(resp.RootDispersion.Seconds()) // Here is SNTP packet sanity check that is exposed to move burden of // configuration from node_exporter user to the developer. maxerr := *ntpOffsetTolerance leapMidnightMutex.Lock() if resp.Leap == ntp.LeapAddSecond || resp.Leap == ntp.LeapDelSecond { // state of leapMidnight is cached as leap flag is dropped right after midnight leapMidnight = resp.Time.Truncate(hour24).Add(hour24) } if leapMidnight.Add(-hour24).Before(resp.Time) && resp.Time.Before(leapMidnight.Add(hour24)) { // tolerate leap smearing maxerr += time.Second } leapMidnightMutex.Unlock() if resp.Validate() == nil && resp.RootDistance <= *ntpMaxDistance && resp.MinError <= maxerr { ch <- c.sanity.mustNewConstMetric(1) } else { ch <- c.sanity.mustNewConstMetric(0) } return nil } node_exporter-1.7.0/collector/nvme_linux.go000066400000000000000000000040451452426057600211130ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
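// NOTE: the collector in this file walks the NVMe devices exposed through sysfs
// (sysfs.FS.NVMeClass) and publishes one constant "info" gauge per device; a
// hypothetical scrape line, assuming the package-level namespace "node", would be
//
//	node_nvme_info{device="nvme0",firmware_revision="1.0",model="ExampleModel",serial="S123",state="live"} 1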
//go:build !nonvme // +build !nonvme package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) type nvmeCollector struct { fs sysfs.FS logger log.Logger } func init() { registerCollector("nvme", defaultEnabled, NewNVMeCollector) } // NewNVMeCollector returns a new Collector exposing NVMe stats. func NewNVMeCollector(logger log.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &nvmeCollector{ fs: fs, logger: logger, }, nil } func (c *nvmeCollector) Update(ch chan<- prometheus.Metric) error { devices, err := c.fs.NVMeClass() if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "nvme statistics not found, skipping") return ErrNoData } return fmt.Errorf("error obtaining NVMe class info: %w", err) } for _, device := range devices { infoDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "nvme", "info"), "Non-numeric data from /sys/class/nvme/, value is always 1.", []string{"device", "firmware_revision", "model", "serial", "state"}, nil, ) infoValue := 1.0 ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, infoValue, device.Name, device.FirmwareRevision, device.Model, device.Serial, device.State) } return nil } node_exporter-1.7.0/collector/os_release.go000066400000000000000000000143311452426057600210470ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "encoding/xml" "errors" "io" "os" "regexp" "strconv" "strings" "sync" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" envparse "github.com/hashicorp/go-envparse" "github.com/prometheus/client_golang/prometheus" ) const ( etcOSRelease = "/etc/os-release" usrLibOSRelease = "/usr/lib/os-release" systemVersionPlist = "/System/Library/CoreServices/SystemVersion.plist" ) var ( versionRegex = regexp.MustCompile(`^[0-9]+\.?[0-9]*`) ) type osRelease struct { Name string ID string IDLike string PrettyName string Variant string VariantID string Version string VersionID string VersionCodename string BuildID string ImageID string ImageVersion string } type osReleaseCollector struct { infoDesc *prometheus.Desc logger log.Logger os *osRelease osFilename string // file name of cached release information osMtime time.Time // mtime of cached release file osMutex sync.RWMutex osReleaseFilenames []string // all os-release file names to check version float64 versionDesc *prometheus.Desc } type Plist struct { Dict Dict `xml:"dict"` } type Dict struct { Key []string `xml:"key"` String []string `xml:"string"` } func init() { registerCollector("os", defaultEnabled, NewOSCollector) } // NewOSCollector returns a new Collector exposing os-release information. 
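// For example, with the Ubuntu fixture used by os_release_test.go the exported
// series look roughly like (namespace assumed to be "node"; label set abbreviated):
//
//	node_os_info{id="ubuntu",id_like="debian",name="Ubuntu",pretty_name="Ubuntu 20.04.2 LTS",version_id="20.04",...} 1
//	node_os_version{id="ubuntu",id_like="debian",name="Ubuntu"} 20.04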
func NewOSCollector(logger log.Logger) (Collector, error) { return &osReleaseCollector{ logger: logger, infoDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "info"), "A metric with a constant '1' value labeled by build_id, id, id_like, image_id, image_version, "+ "name, pretty_name, variant, variant_id, version, version_codename, version_id.", []string{"build_id", "id", "id_like", "image_id", "image_version", "name", "pretty_name", "variant", "variant_id", "version", "version_codename", "version_id"}, nil, ), osReleaseFilenames: []string{etcOSRelease, usrLibOSRelease, systemVersionPlist}, versionDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "os", "version"), "Metric containing the major.minor part of the OS version.", []string{"id", "id_like", "name"}, nil, ), }, nil } func parseOSRelease(r io.Reader) (*osRelease, error) { env, err := envparse.Parse(r) return &osRelease{ Name: env["NAME"], ID: env["ID"], IDLike: env["ID_LIKE"], PrettyName: env["PRETTY_NAME"], Variant: env["VARIANT"], VariantID: env["VARIANT_ID"], Version: env["VERSION"], VersionID: env["VERSION_ID"], VersionCodename: env["VERSION_CODENAME"], BuildID: env["BUILD_ID"], ImageID: env["IMAGE_ID"], ImageVersion: env["IMAGE_VERSION"], }, err } func (c *osReleaseCollector) UpdateStruct(path string) error { releaseFile, err := os.Open(path) if err != nil { return err } defer releaseFile.Close() stat, err := releaseFile.Stat() if err != nil { return err } t := stat.ModTime() c.osMutex.RLock() upToDate := path == c.osFilename && t == c.osMtime c.osMutex.RUnlock() if upToDate { // osReleaseCollector struct is already up-to-date. return nil } // Acquire a lock to update the osReleaseCollector struct. c.osMutex.Lock() defer c.osMutex.Unlock() level.Debug(c.logger).Log("msg", "file modification time has changed", "file", path, "old_value", c.osMtime, "new_value", t) c.osFilename = path c.osMtime = t // SystemVersion.plist is xml file with MacOs version info if strings.Contains(releaseFile.Name(), "SystemVersion.plist") { c.os, err = getMacosProductVersion(releaseFile.Name()) if err != nil { return err } } else { c.os, err = parseOSRelease(releaseFile) if err != nil { return err } } majorMinor := versionRegex.FindString(c.os.VersionID) if majorMinor != "" { c.version, err = strconv.ParseFloat(majorMinor, 64) if err != nil { return err } } else { c.version = 0 } return nil } func (c *osReleaseCollector) Update(ch chan<- prometheus.Metric) error { for i, path := range c.osReleaseFilenames { err := c.UpdateStruct(*rootfsPath + path) if err == nil { break } if errors.Is(err, os.ErrNotExist) { if i >= (len(c.osReleaseFilenames) - 1) { level.Debug(c.logger).Log("msg", "no os-release file found", "files", strings.Join(c.osReleaseFilenames, ",")) return ErrNoData } continue } return err } ch <- prometheus.MustNewConstMetric(c.infoDesc, prometheus.GaugeValue, 1.0, c.os.BuildID, c.os.ID, c.os.IDLike, c.os.ImageID, c.os.ImageVersion, c.os.Name, c.os.PrettyName, c.os.Variant, c.os.VariantID, c.os.Version, c.os.VersionCodename, c.os.VersionID) if c.version > 0 { ch <- prometheus.MustNewConstMetric(c.versionDesc, prometheus.GaugeValue, c.version, c.os.ID, c.os.IDLike, c.os.Name) } return nil } func getMacosProductVersion(filename string) (*osRelease, error) { f, _ := os.Open(filename) bytePlist, _ := io.ReadAll(f) f.Close() var plist Plist err := xml.Unmarshal(bytePlist, &plist) if err != nil { return &osRelease{}, err } var osVersionID, osVersionName, osBuildID string if len(plist.Dict.Key) > 0 { for index, value := 
range plist.Dict.Key { switch value { case "ProductVersion": osVersionID = plist.Dict.String[index] case "ProductName": osVersionName = plist.Dict.String[index] case "ProductBuildVersion": osBuildID = plist.Dict.String[index] } } } return &osRelease{ Name: osVersionName, Version: osVersionID, VersionID: osVersionID, BuildID: osBuildID, }, nil } node_exporter-1.7.0/collector/os_release_test.go000066400000000000000000000053331452426057600221100ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "os" "reflect" "strings" "testing" "github.com/go-kit/log" ) const debianBullseye string = `PRETTY_NAME="Debian GNU/Linux 11 (bullseye)" NAME="Debian GNU/Linux" VERSION_ID="11" VERSION="11 (bullseye)" VERSION_CODENAME=bullseye ID=debian HOME_URL="https://www.debian.org/" SUPPORT_URL="https://www.debian.org/support" BUG_REPORT_URL="https://bugs.debian.org/" ` func TestParseOSRelease(t *testing.T) { want := &osRelease{ Name: "Ubuntu", ID: "ubuntu", IDLike: "debian", PrettyName: "Ubuntu 20.04.2 LTS", Version: "20.04.2 LTS (Focal Fossa)", VersionID: "20.04", VersionCodename: "focal", } osReleaseFile, err := os.Open("fixtures" + usrLibOSRelease) if err != nil { t.Fatal(err) } defer osReleaseFile.Close() got, err := parseOSRelease(osReleaseFile) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(want, got) { t.Fatalf("should have %+v osRelease: got %+v", want, got) } want = &osRelease{ Name: "Debian GNU/Linux", ID: "debian", PrettyName: "Debian GNU/Linux 11 (bullseye)", Version: "11 (bullseye)", VersionID: "11", VersionCodename: "bullseye", } got, err = parseOSRelease(strings.NewReader(debianBullseye)) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(want, got) { t.Fatalf("should have %+v osRelease: got %+v", want, got) } } func TestUpdateStruct(t *testing.T) { wantedOS := &osRelease{ Name: "Ubuntu", ID: "ubuntu", IDLike: "debian", PrettyName: "Ubuntu 20.04.2 LTS", Version: "20.04.2 LTS (Focal Fossa)", VersionID: "20.04", VersionCodename: "focal", } wantedVersion := 20.04 collector, err := NewOSCollector(log.NewNopLogger()) if err != nil { t.Fatal(err) } c := collector.(*osReleaseCollector) err = c.UpdateStruct("fixtures" + usrLibOSRelease) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(wantedOS, c.os) { t.Fatalf("should have %+v osRelease: got %+v", wantedOS, c.os) } if wantedVersion != c.version { t.Errorf("Expected '%v' but got '%v'", wantedVersion, c.version) } } node_exporter-1.7.0/collector/paths.go000066400000000000000000000032261452426057600200460ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "path/filepath" "strings" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/procfs" ) var ( // The path of the proc filesystem. procPath = kingpin.Flag("path.procfs", "procfs mountpoint.").Default(procfs.DefaultMountPoint).String() sysPath = kingpin.Flag("path.sysfs", "sysfs mountpoint.").Default("/sys").String() rootfsPath = kingpin.Flag("path.rootfs", "rootfs mountpoint.").Default("/").String() udevDataPath = kingpin.Flag("path.udev.data", "udev data path.").Default("/run/udev/data").String() ) func procFilePath(name string) string { return filepath.Join(*procPath, name) } func sysFilePath(name string) string { return filepath.Join(*sysPath, name) } func rootfsFilePath(name string) string { return filepath.Join(*rootfsPath, name) } func udevDataFilePath(name string) string { return filepath.Join(*udevDataPath, name) } func rootfsStripPrefix(path string) string { if *rootfsPath == "/" { return path } stripped := strings.TrimPrefix(path, *rootfsPath) if stripped == "" { return "/" } return stripped } node_exporter-1.7.0/collector/paths_test.go000066400000000000000000000045251452426057600211100ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package collector import ( "testing" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/procfs" ) func TestDefaultProcPath(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", procfs.DefaultMountPoint}); err != nil { t.Fatal(err) } if got, want := procFilePath("somefile"), "/proc/somefile"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } if got, want := procFilePath("some/file"), "/proc/some/file"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } } func TestCustomProcPath(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "./../some/./place/"}); err != nil { t.Fatal(err) } if got, want := procFilePath("somefile"), "../some/place/somefile"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } if got, want := procFilePath("some/file"), "../some/place/some/file"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } } func TestDefaultSysPath(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.sysfs", "/sys"}); err != nil { t.Fatal(err) } if got, want := sysFilePath("somefile"), "/sys/somefile"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } if got, want := sysFilePath("some/file"), "/sys/some/file"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } } func TestCustomSysPath(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.sysfs", "./../some/./place/"}); err != nil { t.Fatal(err) } if got, want := sysFilePath("somefile"), "../some/place/somefile"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } if got, want := sysFilePath("some/file"), "../some/place/some/file"; got != want { t.Errorf("Expected: %s, Got: %s", want, got) } } node_exporter-1.7.0/collector/perf_linux.go000066400000000000000000000627071452426057600211130ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
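// NOTE: reading these counters requires permission to call perf_event_open(2),
// which is normally gated by the kernel.perf_event_paranoid sysctl (see
// canTestPerf in perf_linux_test.go, which skips the tests unless
// /proc/sys/kernel/perf_event_paranoid is 0 or lower).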
//go:build !noperf // +build !noperf package collector import ( "fmt" "runtime" "strconv" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/hodgesds/perf-utils" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) const ( perfSubsystem = "perf" ) var ( perfCPUsFlag = kingpin.Flag("collector.perf.cpus", "List of CPUs from which perf metrics should be collected").Default("").String() perfTracepointFlag = kingpin.Flag("collector.perf.tracepoint", "perf tracepoint that should be collected").Strings() perfNoHwProfiler = kingpin.Flag("collector.perf.disable-hardware-profilers", "disable perf hardware profilers").Default("false").Bool() perfHwProfilerFlag = kingpin.Flag("collector.perf.hardware-profilers", "perf hardware profilers that should be collected").Strings() perfNoSwProfiler = kingpin.Flag("collector.perf.disable-software-profilers", "disable perf software profilers").Default("false").Bool() perfSwProfilerFlag = kingpin.Flag("collector.perf.software-profilers", "perf software profilers that should be collected").Strings() perfNoCaProfiler = kingpin.Flag("collector.perf.disable-cache-profilers", "disable perf cache profilers").Default("false").Bool() perfCaProfilerFlag = kingpin.Flag("collector.perf.cache-profilers", "perf cache profilers that should be collected").Strings() ) func init() { registerCollector(perfSubsystem, defaultDisabled, NewPerfCollector) } var ( perfHardwareProfilerMap = map[string]perf.HardwareProfilerType{ "CpuCycles": perf.CpuCyclesProfiler, "CpuInstr": perf.CpuInstrProfiler, "CacheRef": perf.CacheRefProfiler, "CacheMisses": perf.CacheMissesProfiler, "BranchInstr": perf.BranchInstrProfiler, "BranchMisses": perf.BranchMissesProfiler, "StalledCyclesBackend": perf.StalledCyclesBackendProfiler, "StalledCyclesFrontend": perf.StalledCyclesFrontendProfiler, "RefCpuCycles": perf.RefCpuCyclesProfiler, // "BusCycles": perf.BusCyclesProfiler, } perfSoftwareProfilerMap = map[string]perf.SoftwareProfilerType{ "PageFault": perf.PageFaultProfiler, "ContextSwitch": perf.ContextSwitchProfiler, "CpuMigration": perf.CpuMigrationProfiler, "MinorFault": perf.MinorFaultProfiler, "MajorFault": perf.MajorFaultProfiler, // "CpuClock": perf.CpuClockProfiler, // "TaskClock": perf.TaskClockProfiler, // "AlignFault": perf.AlignFaultProfiler, // "EmuFault": perf.EmuFaultProfiler, } perfCacheProfilerMap = map[string]perf.CacheProfilerType{ "L1DataReadHit": perf.L1DataReadHitProfiler, "L1DataReadMiss": perf.L1DataReadMissProfiler, "L1DataWriteHit": perf.L1DataWriteHitProfiler, "L1InstrReadMiss": perf.L1InstrReadMissProfiler, "LLReadHit": perf.LLReadHitProfiler, "LLReadMiss": perf.LLReadMissProfiler, "LLWriteHit": perf.LLWriteHitProfiler, "LLWriteMiss": perf.LLWriteMissProfiler, "InstrTLBReadHit": perf.InstrTLBReadHitProfiler, "InstrTLBReadMiss": perf.InstrTLBReadMissProfiler, "BPUReadHit": perf.BPUReadHitProfiler, "BPUReadMiss": perf.BPUReadMissProfiler, // "L1InstrReadHit": perf.L1InstrReadHitProfiler, // "DataTLBReadHit": perf.DataTLBReadHitProfiler, // "DataTLBReadMiss": perf.DataTLBReadMissProfiler, // "DataTLBWriteHit": perf.DataTLBWriteHitProfiler, // "DataTLBWriteMiss": perf.DataTLBWriteMissProfiler, // "NodeCacheReadHit": perf.NodeCacheReadHitProfiler, // "NodeCacheReadMiss": perf.NodeCacheReadMissProfiler, // "NodeCacheWriteHit": perf.NodeCacheWriteHitProfiler, // "NodeCacheWriteMiss": perf.NodeCacheWriteMissProfiler, } ) // perfTracepointFlagToTracepoints returns the set of configured tracepoints. 
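// Each flag value must have the form "subsystem:event"; for instance the test
// cases in perf_linux_test.go use "sched:sched_kthread_stop", which becomes
// &perfTracepoint{subsystem: "sched", event: "sched_kthread_stop"}. Anything
// that does not split into exactly two colon-separated parts is rejected with
// an error.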
func perfTracepointFlagToTracepoints(tracepointsFlag []string) ([]*perfTracepoint, error) { tracepoints := make([]*perfTracepoint, len(tracepointsFlag)) for i, tracepoint := range tracepointsFlag { split := strings.Split(tracepoint, ":") if len(split) != 2 { return nil, fmt.Errorf("invalid tracepoint config %v", tracepoint) } tracepoints[i] = &perfTracepoint{ subsystem: split[0], event: split[1], } } return tracepoints, nil } // perfCPUFlagToCPUs returns a set of CPUs for the perf collectors to monitor. func perfCPUFlagToCPUs(cpuFlag string) ([]int, error) { var err error cpus := []int{} for _, subset := range strings.Split(cpuFlag, ",") { // First parse a single CPU. if !strings.Contains(subset, "-") { cpu, err := strconv.Atoi(subset) if err != nil { return nil, err } cpus = append(cpus, cpu) continue } stride := 1 // Handle strides, ie 1-10:5 should yield 1,5,10 strideSet := strings.Split(subset, ":") if len(strideSet) == 2 { stride, err = strconv.Atoi(strideSet[1]) if err != nil { return nil, err } } rangeSet := strings.Split(strideSet[0], "-") if len(rangeSet) != 2 { return nil, fmt.Errorf("invalid flag value %q", cpuFlag) } start, err := strconv.Atoi(rangeSet[0]) if err != nil { return nil, err } end, err := strconv.Atoi(rangeSet[1]) if err != nil { return nil, err } for i := start; i <= end; i += stride { cpus = append(cpus, i) } } return cpus, nil } // perfTracepoint is a struct for holding tracepoint information. type perfTracepoint struct { subsystem string event string } // label returns the tracepoint name in the format of subsystem_tracepoint. func (t *perfTracepoint) label() string { return t.subsystem + "_" + t.event } // tracepoint returns the tracepoint name in the format of subsystem:tracepoint. func (t *perfTracepoint) tracepoint() string { return t.subsystem + ":" + t.event } // perfCollector is a Collector that uses the perf subsystem to collect // metrics. It uses perf_event_open an ioctls for profiling. Due to the fact // that the perf subsystem is highly dependent on kernel configuration and // settings not all profiler values may be exposed on the target system at any // given time. type perfCollector struct { hwProfilerCPUMap map[*perf.HardwareProfiler]int swProfilerCPUMap map[*perf.SoftwareProfiler]int cacheProfilerCPUMap map[*perf.CacheProfiler]int perfHwProfilers map[int]*perf.HardwareProfiler perfSwProfilers map[int]*perf.SoftwareProfiler perfCacheProfilers map[int]*perf.CacheProfiler desc map[string]*prometheus.Desc logger log.Logger tracepointCollector *perfTracepointCollector } type perfTracepointCollector struct { // desc is the mapping of subsystem to tracepoint *prometheus.Desc. descs map[string]map[string]*prometheus.Desc // collection order is the sorted configured collection order of the profiler. collectionOrder []string logger log.Logger profilers map[int]perf.GroupProfiler } // update is used collect all tracepoints across all tracepoint profilers. func (c *perfTracepointCollector) update(ch chan<- prometheus.Metric) error { for cpu := range c.profilers { if err := c.updateCPU(cpu, ch); err != nil { return err } } return nil } // updateCPU is used to update metrics per CPU profiler. 
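// The group profile yields one value per configured tracepoint, in the same
// order as collectionOrder, so each value is matched back to its
// subsystem/event descriptor by index and emitted as a counter labeled with
// the CPU number, e.g. (hypothetical sample, namespace assumed to be "node"):
//
//	node_perf_sched_sched_kthread_stop{cpu="0"} 17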
func (c *perfTracepointCollector) updateCPU(cpu int, ch chan<- prometheus.Metric) error { profiler := c.profilers[cpu] p := &perf.GroupProfileValue{} if err := profiler.Profile(p); err != nil { level.Error(c.logger).Log("msg", "Failed to collect tracepoint profile", "err", err) return err } cpuid := strconv.Itoa(cpu) for i, value := range p.Values { // Get the Desc from the ordered group value. descKey := c.collectionOrder[i] descKeySlice := strings.Split(descKey, ":") ch <- prometheus.MustNewConstMetric( c.descs[descKeySlice[0]][descKeySlice[1]], prometheus.CounterValue, float64(value), cpuid, ) } return nil } // newPerfTracepointCollector returns a configured perfTracepointCollector. func newPerfTracepointCollector( logger log.Logger, tracepointsFlag []string, cpus []int, ) (*perfTracepointCollector, error) { tracepoints, err := perfTracepointFlagToTracepoints(tracepointsFlag) if err != nil { return nil, err } collectionOrder := make([]string, len(tracepoints)) descs := map[string]map[string]*prometheus.Desc{} eventAttrs := make([]unix.PerfEventAttr, len(tracepoints)) for i, tracepoint := range tracepoints { eventAttr, err := perf.TracepointEventAttr(tracepoint.subsystem, tracepoint.event) if err != nil { return nil, err } eventAttrs[i] = *eventAttr collectionOrder[i] = tracepoint.tracepoint() if _, ok := descs[tracepoint.subsystem]; !ok { descs[tracepoint.subsystem] = map[string]*prometheus.Desc{} } descs[tracepoint.subsystem][tracepoint.event] = prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, tracepoint.label(), ), "Perf tracepoint "+tracepoint.tracepoint(), []string{"cpu"}, nil, ) } profilers := make(map[int]perf.GroupProfiler, len(cpus)) for _, cpu := range cpus { profiler, err := perf.NewGroupProfiler(-1, cpu, 0, eventAttrs...) if err != nil { return nil, err } profilers[cpu] = profiler } c := &perfTracepointCollector{ descs: descs, collectionOrder: collectionOrder, profilers: profilers, logger: logger, } for _, profiler := range c.profilers { if err := profiler.Start(); err != nil { return nil, err } } return c, nil } // NewPerfCollector returns a new perf based collector, it creates a profiler // per CPU. func NewPerfCollector(logger log.Logger) (Collector, error) { collector := &perfCollector{ perfHwProfilers: map[int]*perf.HardwareProfiler{}, perfSwProfilers: map[int]*perf.SoftwareProfiler{}, perfCacheProfilers: map[int]*perf.CacheProfiler{}, hwProfilerCPUMap: map[*perf.HardwareProfiler]int{}, swProfilerCPUMap: map[*perf.SoftwareProfiler]int{}, cacheProfilerCPUMap: map[*perf.CacheProfiler]int{}, logger: logger, } var ( cpus []int err error ) if perfCPUsFlag != nil && *perfCPUsFlag != "" { cpus, err = perfCPUFlagToCPUs(*perfCPUsFlag) if err != nil { return nil, err } } else { cpus = make([]int, runtime.NumCPU()) for i := range cpus { cpus[i] = i } } // First configure any tracepoints. 
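// (For example, a hypothetical run might pass
// --collector.perf.tracepoint="sched:sched_process_fork" one or more times;
// when the flag is not set this block is skipped and no tracepoint collector
// is created.)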
if *perfTracepointFlag != nil && len(*perfTracepointFlag) > 0 { tracepointCollector, err := newPerfTracepointCollector(logger, *perfTracepointFlag, cpus) if err != nil { return nil, err } collector.tracepointCollector = tracepointCollector } // Configure perf profilers hardwareProfilers := perf.AllHardwareProfilers if *perfHwProfilerFlag != nil && len(*perfHwProfilerFlag) > 0 { // hardwareProfilers = 0 for _, hf := range *perfHwProfilerFlag { if v, ok := perfHardwareProfilerMap[hf]; ok { hardwareProfilers |= v } } } softwareProfilers := perf.AllSoftwareProfilers if *perfSwProfilerFlag != nil && len(*perfSwProfilerFlag) > 0 { // softwareProfilers = 0 for _, sf := range *perfSwProfilerFlag { if v, ok := perfSoftwareProfilerMap[sf]; ok { softwareProfilers |= v } } } cacheProfilers := perf.L1DataReadHitProfiler | perf.L1DataReadMissProfiler | perf.L1DataWriteHitProfiler | perf.L1InstrReadMissProfiler | perf.InstrTLBReadHitProfiler | perf.InstrTLBReadMissProfiler | perf.LLReadHitProfiler | perf.LLReadMissProfiler | perf.LLWriteHitProfiler | perf.LLWriteMissProfiler | perf.BPUReadHitProfiler | perf.BPUReadMissProfiler if *perfCaProfilerFlag != nil && len(*perfCaProfilerFlag) > 0 { cacheProfilers = 0 for _, cf := range *perfCaProfilerFlag { if v, ok := perfCacheProfilerMap[cf]; ok { cacheProfilers |= v } } } // Configure all profilers for the specified CPUs. for _, cpu := range cpus { // Use -1 to profile all processes on the CPU, see: // man perf_event_open if !*perfNoHwProfiler { hwProf, err := perf.NewHardwareProfiler( -1, cpu, hardwareProfilers, ) if err != nil && !hwProf.HasProfilers() { return nil, err } if err := hwProf.Start(); err != nil { return nil, err } collector.perfHwProfilers[cpu] = &hwProf collector.hwProfilerCPUMap[&hwProf] = cpu } if !*perfNoSwProfiler { swProf, err := perf.NewSoftwareProfiler(-1, cpu, softwareProfilers) if err != nil && !swProf.HasProfilers() { return nil, err } if err := swProf.Start(); err != nil { return nil, err } collector.perfSwProfilers[cpu] = &swProf collector.swProfilerCPUMap[&swProf] = cpu } if !*perfNoCaProfiler { cacheProf, err := perf.NewCacheProfiler( -1, cpu, cacheProfilers, ) if err != nil && !cacheProf.HasProfilers() { return nil, err } if err := cacheProf.Start(); err != nil { return nil, err } collector.perfCacheProfilers[cpu] = &cacheProf collector.cacheProfilerCPUMap[&cacheProf] = cpu } } collector.desc = map[string]*prometheus.Desc{ "cpucycles_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cpucycles_total", ), "Number of CPU cycles (frequency scaled)", []string{"cpu"}, nil, ), "instructions_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "instructions_total", ), "Number of CPU instructions", []string{"cpu"}, nil, ), "branch_instructions_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "branch_instructions_total", ), "Number of CPU branch instructions", []string{"cpu"}, nil, ), "branch_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "branch_misses_total", ), "Number of CPU branch misses", []string{"cpu"}, nil, ), "cache_refs_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_refs_total", ), "Number of cache references (non frequency scaled)", []string{"cpu"}, nil, ), "cache_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_misses_total", ), "Number of cache misses", []string{"cpu"}, nil, ), "ref_cpucycles_total": prometheus.NewDesc( 
prometheus.BuildFQName( namespace, perfSubsystem, "ref_cpucycles_total", ), "Number of CPU cycles", []string{"cpu"}, nil, ), "stalled_cycles_backend_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "stalled_cycles_backend_total", ), "Number of stalled backend CPU cycles", []string{"cpu"}, nil, ), "stalled_cycles_frontend_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "stalled_cycles_frontend_total", ), "Number of stalled frontend CPU cycles", []string{"cpu"}, nil, ), "page_faults_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "page_faults_total", ), "Number of page faults", []string{"cpu"}, nil, ), "context_switches_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "context_switches_total", ), "Number of context switches", []string{"cpu"}, nil, ), "cpu_migrations_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cpu_migrations_total", ), "Number of CPU process migrations", []string{"cpu"}, nil, ), "minor_faults_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "minor_faults_total", ), "Number of minor page faults", []string{"cpu"}, nil, ), "major_faults_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "major_faults_total", ), "Number of major page faults", []string{"cpu"}, nil, ), "cache_l1d_read_hits_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_l1d_read_hits_total", ), "Number L1 data cache read hits", []string{"cpu"}, nil, ), "cache_l1d_read_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_l1d_read_misses_total", ), "Number L1 data cache read misses", []string{"cpu"}, nil, ), "cache_l1d_write_hits_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_l1d_write_hits_total", ), "Number L1 data cache write hits", []string{"cpu"}, nil, ), "cache_l1_instr_read_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_l1_instr_read_misses_total", ), "Number instruction L1 instruction read misses", []string{"cpu"}, nil, ), "cache_tlb_instr_read_hits_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_tlb_instr_read_hits_total", ), "Number instruction TLB read hits", []string{"cpu"}, nil, ), "cache_tlb_instr_read_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_tlb_instr_read_misses_total", ), "Number instruction TLB read misses", []string{"cpu"}, nil, ), "cache_ll_read_hits_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_ll_read_hits_total", ), "Number last level read hits", []string{"cpu"}, nil, ), "cache_ll_read_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_ll_read_misses_total", ), "Number last level read misses", []string{"cpu"}, nil, ), "cache_ll_write_hits_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_ll_write_hits_total", ), "Number last level write hits", []string{"cpu"}, nil, ), "cache_ll_write_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_ll_write_misses_total", ), "Number last level write misses", []string{"cpu"}, nil, ), "cache_bpu_read_hits_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_bpu_read_hits_total", ), "Number BPU read hits", []string{"cpu"}, nil, ), 
"cache_bpu_read_misses_total": prometheus.NewDesc( prometheus.BuildFQName( namespace, perfSubsystem, "cache_bpu_read_misses_total", ), "Number BPU read misses", []string{"cpu"}, nil, ), } return collector, nil } // Update implements the Collector interface and will collect metrics per CPU. func (c *perfCollector) Update(ch chan<- prometheus.Metric) error { if err := c.updateHardwareStats(ch); err != nil { return err } if err := c.updateSoftwareStats(ch); err != nil { return err } if err := c.updateCacheStats(ch); err != nil { return err } if c.tracepointCollector != nil { return c.tracepointCollector.update(ch) } return nil } func (c *perfCollector) updateHardwareStats(ch chan<- prometheus.Metric) error { for _, profiler := range c.perfHwProfilers { hwProfile := &perf.HardwareProfile{} if err := (*profiler).Profile(hwProfile); err != nil { return err } cpuid := strconv.Itoa(c.hwProfilerCPUMap[profiler]) if hwProfile.CPUCycles != nil { ch <- prometheus.MustNewConstMetric( c.desc["cpucycles_total"], prometheus.CounterValue, float64(*hwProfile.CPUCycles), cpuid, ) } if hwProfile.Instructions != nil { ch <- prometheus.MustNewConstMetric( c.desc["instructions_total"], prometheus.CounterValue, float64(*hwProfile.Instructions), cpuid, ) } if hwProfile.BranchInstr != nil { ch <- prometheus.MustNewConstMetric( c.desc["branch_instructions_total"], prometheus.CounterValue, float64(*hwProfile.BranchInstr), cpuid, ) } if hwProfile.BranchMisses != nil { ch <- prometheus.MustNewConstMetric( c.desc["branch_misses_total"], prometheus.CounterValue, float64(*hwProfile.BranchMisses), cpuid, ) } if hwProfile.CacheRefs != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_refs_total"], prometheus.CounterValue, float64(*hwProfile.CacheRefs), cpuid, ) } if hwProfile.CacheMisses != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_misses_total"], prometheus.CounterValue, float64(*hwProfile.CacheMisses), cpuid, ) } if hwProfile.RefCPUCycles != nil { ch <- prometheus.MustNewConstMetric( c.desc["ref_cpucycles_total"], prometheus.CounterValue, float64(*hwProfile.RefCPUCycles), cpuid, ) } if hwProfile.StalledCyclesBackend != nil { ch <- prometheus.MustNewConstMetric( c.desc["stalled_cycles_backend_total"], prometheus.CounterValue, float64(*hwProfile.StalledCyclesBackend), cpuid, ) } if hwProfile.StalledCyclesFrontend != nil { ch <- prometheus.MustNewConstMetric( c.desc["stalled_cycles_frontend_total"], prometheus.CounterValue, float64(*hwProfile.StalledCyclesFrontend), cpuid, ) } } return nil } func (c *perfCollector) updateSoftwareStats(ch chan<- prometheus.Metric) error { for _, profiler := range c.perfSwProfilers { swProfile := &perf.SoftwareProfile{} if err := (*profiler).Profile(swProfile); err != nil { return err } cpuid := strconv.Itoa(c.swProfilerCPUMap[profiler]) if swProfile.PageFaults != nil { ch <- prometheus.MustNewConstMetric( c.desc["page_faults_total"], prometheus.CounterValue, float64(*swProfile.PageFaults), cpuid, ) } if swProfile.ContextSwitches != nil { ch <- prometheus.MustNewConstMetric( c.desc["context_switches_total"], prometheus.CounterValue, float64(*swProfile.ContextSwitches), cpuid, ) } if swProfile.CPUMigrations != nil { ch <- prometheus.MustNewConstMetric( c.desc["cpu_migrations_total"], prometheus.CounterValue, float64(*swProfile.CPUMigrations), cpuid, ) } if swProfile.MinorPageFaults != nil { ch <- prometheus.MustNewConstMetric( c.desc["minor_faults_total"], prometheus.CounterValue, float64(*swProfile.MinorPageFaults), cpuid, ) } if swProfile.MajorPageFaults != nil { ch <- 
prometheus.MustNewConstMetric( c.desc["major_faults_total"], prometheus.CounterValue, float64(*swProfile.MajorPageFaults), cpuid, ) } } return nil } func (c *perfCollector) updateCacheStats(ch chan<- prometheus.Metric) error { for _, profiler := range c.perfCacheProfilers { cacheProfile := &perf.CacheProfile{} if err := (*profiler).Profile(cacheProfile); err != nil { return err } cpuid := strconv.Itoa(c.cacheProfilerCPUMap[profiler]) if cacheProfile.L1DataReadHit != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1d_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.L1DataReadHit), cpuid, ) } if cacheProfile.L1DataReadMiss != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1d_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.L1DataReadMiss), cpuid, ) } if cacheProfile.L1DataWriteHit != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1d_write_hits_total"], prometheus.CounterValue, float64(*cacheProfile.L1DataWriteHit), cpuid, ) } if cacheProfile.L1InstrReadMiss != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_l1_instr_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.L1InstrReadMiss), cpuid, ) } if cacheProfile.InstrTLBReadHit != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_tlb_instr_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadHit), cpuid, ) } if cacheProfile.InstrTLBReadMiss != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_tlb_instr_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.InstrTLBReadMiss), cpuid, ) } if cacheProfile.LastLevelReadHit != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelReadHit), cpuid, ) } if cacheProfile.LastLevelReadMiss != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelReadMiss), cpuid, ) } if cacheProfile.LastLevelWriteHit != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_write_hits_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteHit), cpuid, ) } if cacheProfile.LastLevelWriteMiss != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_ll_write_misses_total"], prometheus.CounterValue, float64(*cacheProfile.LastLevelWriteMiss), cpuid, ) } if cacheProfile.BPUReadHit != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_bpu_read_hits_total"], prometheus.CounterValue, float64(*cacheProfile.BPUReadHit), cpuid, ) } if cacheProfile.BPUReadMiss != nil { ch <- prometheus.MustNewConstMetric( c.desc["cache_bpu_read_misses_total"], prometheus.CounterValue, float64(*cacheProfile.BPUReadMiss), cpuid, ) } } return nil } node_exporter-1.7.0/collector/perf_linux_test.go000066400000000000000000000126751452426057600221510ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !noperf // +build !noperf package collector import ( "os" "runtime" "strconv" "strings" "testing" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) func canTestPerf(t *testing.T) { paranoidBytes, err := os.ReadFile("/proc/sys/kernel/perf_event_paranoid") if err != nil { t.Skip("Procfs not mounted, skipping perf tests") } paranoidStr := strings.Replace(string(paranoidBytes), "\n", "", -1) paranoid, err := strconv.Atoi(paranoidStr) if err != nil { t.Fatalf("Expected perf_event_paranoid to be an int, got: %s", paranoidStr) } if paranoid >= 1 { t.Skip("Skipping perf tests, set perf_event_paranoid to 0") } } func TestPerfCollector(t *testing.T) { canTestPerf(t) collector, err := NewPerfCollector(log.NewNopLogger()) if err != nil { t.Fatal(err) } // Setup background goroutine to capture metrics. metrics := make(chan prometheus.Metric) defer close(metrics) go func() { i := 0 for range metrics { i++ } }() if err := collector.Update(metrics); err != nil { t.Fatal(err) } } func TestPerfCollectorStride(t *testing.T) { canTestPerf(t) tests := []struct { name string flag string exCPUs []int }{ { name: "valid single CPU", flag: "1", exCPUs: []int{1}, }, { name: "valid range CPUs", flag: "1-5", exCPUs: []int{1, 2, 3, 4, 5}, }, { name: "valid stride", flag: "1-8:2", exCPUs: []int{1, 3, 5, 7}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ncpu := runtime.NumCPU() for _, cpu := range test.exCPUs { if cpu > ncpu { t.Skipf("Skipping test because runtime.NumCPU < %d", cpu) } } perfCPUsFlag = &test.flag collector, err := NewPerfCollector(log.NewNopLogger()) if err != nil { t.Fatal(err) } c := collector.(*perfCollector) for _, cpu := range test.exCPUs { if _, ok := c.perfHwProfilers[cpu]; !ok { t.Fatalf("Expected CPU %v in hardware profilers", cpu) } if _, ok := c.perfSwProfilers[cpu]; !ok { t.Fatalf("Expected CPU %v in software profilers", cpu) } if _, ok := c.perfCacheProfilers[cpu]; !ok { t.Fatalf("Expected CPU %v in cache profilers", cpu) } } }) } } func TestPerfCPUFlagToCPUs(t *testing.T) { tests := []struct { name string flag string exCpus []int errStr string }{ { name: "valid single CPU", flag: "1", exCpus: []int{1}, }, { name: "valid range CPUs", flag: "1-5", exCpus: []int{1, 2, 3, 4, 5}, }, { name: "valid double digit", flag: "10", exCpus: []int{10}, }, { name: "valid double digit range", flag: "10-12", exCpus: []int{10, 11, 12}, }, { name: "valid double digit stride", flag: "10-20:5", exCpus: []int{10, 15, 20}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { cpus, err := perfCPUFlagToCPUs(test.flag) if test.errStr != "" { if err != nil { t.Fatal("expected error to not be nil") } if test.errStr != err.Error() { t.Fatalf( "expected error %q, got %q", test.errStr, err.Error(), ) } return } if err != nil { t.Fatal(err) } if len(cpus) != len(test.exCpus) { t.Fatalf( "expected CPUs %v, got %v", test.exCpus, cpus, ) } for i := range cpus { if test.exCpus[i] != cpus[i] { t.Fatalf( "expected CPUs %v, got %v", test.exCpus[i], cpus[i], ) } } }) } } func TestPerfTracepointFlagToTracepoints(t *testing.T) { tests := []struct { name string flag []string exTracepoints []*perfTracepoint errStr string }{ { name: "valid single tracepoint", flag: []string{"sched:sched_kthread_stop"}, exTracepoints: []*perfTracepoint{ { subsystem: "sched", event: "sched_kthread_stop", }, }, }, { name: "valid multiple tracepoints", flag: []string{"sched:sched_kthread_stop", "sched:sched_process_fork"}, exTracepoints: []*perfTracepoint{ { subsystem: 
"sched", event: "sched_kthread_stop", }, { subsystem: "sched", event: "sched_process_fork", }, }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { tracepoints, err := perfTracepointFlagToTracepoints(test.flag) if test.errStr != "" { if err != nil { t.Fatal("expected error to not be nil") } if test.errStr != err.Error() { t.Fatalf( "expected error %q, got %q", test.errStr, err.Error(), ) } return } if err != nil { t.Fatal(err) } for i := range tracepoints { if test.exTracepoints[i].event != tracepoints[i].event && test.exTracepoints[i].subsystem != tracepoints[i].subsystem { t.Fatalf( "expected tracepoint %v, got %v", test.exTracepoints[i], tracepoints[i], ) } } }) } } node_exporter-1.7.0/collector/powersupplyclass.go000066400000000000000000000031551452426057600223670ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nopowersupplyclass && (linux || darwin) // +build !nopowersupplyclass // +build linux darwin package collector import ( "regexp" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) var ( powerSupplyClassIgnoredPowerSupplies = kingpin.Flag("collector.powersupply.ignored-supplies", "Regexp of power supplies to ignore for powersupplyclass collector.").Default("^$").String() ) type powerSupplyClassCollector struct { subsystem string ignoredPattern *regexp.Regexp metricDescs map[string]*prometheus.Desc logger log.Logger } func init() { registerCollector("powersupplyclass", defaultEnabled, NewPowerSupplyClassCollector) } func NewPowerSupplyClassCollector(logger log.Logger) (Collector, error) { pattern := regexp.MustCompile(*powerSupplyClassIgnoredPowerSupplies) return &powerSupplyClassCollector{ subsystem: "power_supply", ignoredPattern: pattern, metricDescs: map[string]*prometheus.Desc{}, logger: logger, }, nil } node_exporter-1.7.0/collector/powersupplyclass_darwin.go000066400000000000000000000267431452426057600237430ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nopowersupplyclass // +build !nopowersupplyclass package collector /* #cgo LDFLAGS: -framework IOKit -framework CoreFoundation #include #include #include #include #include // values collected from IOKit Power Source APIs // Functions documentation available at // https://developer.apple.com/documentation/iokit/iopowersources_h // CFDictionary keys definition // https://developer.apple.com/documentation/iokit/iopskeys_h/defines struct macos_powersupply { char *Name; char *PowerSourceState; char *Type; char *TransportType; char *BatteryHealth; char *HardwareSerialNumber; int *PowerSourceID; int *CurrentCapacity; int *MaxCapacity; int *DesignCapacity; int *NominalCapacity; int *TimeToEmpty; int *TimeToFullCharge; int *Voltage; int *Current; int *Temperature; // boolean values int *IsCharged; int *IsCharging; int *InternalFailure; int *IsPresent; }; int *CFDictionaryGetInt(CFDictionaryRef theDict, const void *key) { CFNumberRef tmp; int *value; tmp = CFDictionaryGetValue(theDict, key); if (tmp == NULL) return NULL; value = (int*)malloc(sizeof(int)); if (CFNumberGetValue(tmp, kCFNumberIntType, value)) { return value; } free(value); return NULL; } int *CFDictionaryGetBoolean(CFDictionaryRef theDict, const void *key) { CFBooleanRef tmp; int *value; tmp = CFDictionaryGetValue(theDict, key); if (tmp == NULL) return NULL; value = (int*)malloc(sizeof(int)); if (CFBooleanGetValue(tmp)) { *value = 1; } else { *value = 0; } return value; } char *CFDictionaryGetSring(CFDictionaryRef theDict, const void *key) { CFStringRef tmp; CFIndex size; char *value; tmp = CFDictionaryGetValue(theDict, key); if (tmp == NULL) return NULL; size = CFStringGetLength(tmp) + 1; value = (char*)malloc(size); if(CFStringGetCString(tmp, value, size, kCFStringEncodingUTF8)) { return value; } free(value); return NULL; } struct macos_powersupply* getPowerSupplyInfo(CFDictionaryRef powerSourceInformation) { struct macos_powersupply *ret; if (powerSourceInformation == NULL) return NULL; ret = (struct macos_powersupply*)malloc(sizeof(struct macos_powersupply)); ret->PowerSourceID = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSPowerSourceIDKey)); ret->CurrentCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSCurrentCapacityKey)); ret->MaxCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSMaxCapacityKey)); ret->DesignCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSDesignCapacityKey)); ret->NominalCapacity = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSNominalCapacityKey)); ret->TimeToEmpty = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTimeToEmptyKey)); ret->TimeToFullCharge = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTimeToFullChargeKey)); ret->Voltage = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSVoltageKey)); ret->Current = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSCurrentKey)); ret->Temperature = CFDictionaryGetInt(powerSourceInformation, CFSTR(kIOPSTemperatureKey)); ret->Name = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSNameKey)); ret->PowerSourceState = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSPowerSourceStateKey)); ret->Type = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSTypeKey)); ret->TransportType = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSTransportTypeKey)); ret->BatteryHealth = CFDictionaryGetSring(powerSourceInformation, CFSTR(kIOPSBatteryHealthKey)); ret->HardwareSerialNumber = CFDictionaryGetSring(powerSourceInformation, 
CFSTR(kIOPSHardwareSerialNumberKey)); ret->IsCharged = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsChargedKey)); ret->IsCharging = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsChargingKey)); ret->InternalFailure = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSInternalFailureKey)); ret->IsPresent = CFDictionaryGetBoolean(powerSourceInformation, CFSTR(kIOPSIsPresentKey)); return ret; } void releasePowerSupply(struct macos_powersupply *ps) { free(ps->Name); free(ps->PowerSourceState); free(ps->Type); free(ps->TransportType); free(ps->BatteryHealth); free(ps->HardwareSerialNumber); free(ps->PowerSourceID); free(ps->CurrentCapacity); free(ps->MaxCapacity); free(ps->DesignCapacity); free(ps->NominalCapacity); free(ps->TimeToEmpty); free(ps->TimeToFullCharge); free(ps->Voltage); free(ps->Current); free(ps->Temperature); free(ps->IsCharged); free(ps->IsCharging); free(ps->InternalFailure); free(ps->IsPresent); free(ps); } */ import "C" import ( "fmt" "strconv" "github.com/prometheus/client_golang/prometheus" ) func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { psList, err := getPowerSourceList() if err != nil { return fmt.Errorf("couldn't get IOPPowerSourcesList: %w", err) } for _, info := range psList { labels := getPowerSourceDescriptorLabels(info) powerSupplyName := labels["power_supply"] if c.ignoredPattern.MatchString(powerSupplyName) { continue } for name, value := range getPowerSourceDescriptorMap(info) { if value == nil { continue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, name), fmt.Sprintf("IOKit Power Source information field %s for .", name), []string{"power_supply"}, nil, ), prometheus.GaugeValue, *value, powerSupplyName, ) } pushEnumMetric( ch, getPowerSourceDescriptorState(info), "power_source_state", c.subsystem, powerSupplyName, ) pushEnumMetric( ch, getPowerSourceDescriptorBatteryHealth(info), "battery_health", c.subsystem, powerSupplyName, ) var ( keys []string values []string ) for name, value := range labels { if value != "" { keys = append(keys, name) values = append(values, value) } } fieldDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "info"), "IOKit Power Source information for .", keys, nil, ) ch <- prometheus.MustNewConstMetric(fieldDesc, prometheus.GaugeValue, 1.0, values...) C.releasePowerSupply(info) } return nil } // getPowerSourceList fetches information from IOKit APIs // // Data is provided as opaque CoreFoundation references // C.getPowerSupplyInfo will convert those objects in something // easily manageable in Go. 
// https://developer.apple.com/documentation/iokit/iopowersources_h func getPowerSourceList() ([]*C.struct_macos_powersupply, error) { infos, err := C.IOPSCopyPowerSourcesInfo() if err != nil { return nil, err } defer C.CFRelease(infos) psList, err := C.IOPSCopyPowerSourcesList(infos) if err != nil { return nil, err } if psList == C.CFArrayRef(0) { return nil, nil } defer C.CFRelease(C.CFTypeRef(psList)) size, err := C.CFArrayGetCount(psList) if err != nil { return nil, err } ret := make([]*C.struct_macos_powersupply, size) for i := C.CFIndex(0); i < size; i++ { ps, err := C.CFArrayGetValueAtIndex(psList, i) if err != nil { return nil, err } dict, err := C.IOPSGetPowerSourceDescription(infos, (C.CFTypeRef)(ps)) if err != nil { return nil, err } info, err := C.getPowerSupplyInfo(dict) if err != nil { return nil, err } ret[int(i)] = info } return ret, nil } func getPowerSourceDescriptorMap(info *C.struct_macos_powersupply) map[string]*float64 { return map[string]*float64{ "current_capacity": convertValue(info.CurrentCapacity), "max_capacity": convertValue(info.MaxCapacity), "design_capacity": convertValue(info.DesignCapacity), "nominal_capacity": convertValue(info.NominalCapacity), "time_to_empty_seconds": minutesToSeconds(info.TimeToEmpty), "time_to_full_seconds": minutesToSeconds(info.TimeToFullCharge), "voltage_volt": scaleValue(info.Voltage, 1e3), "current_ampere": scaleValue(info.Current, 1e3), "temp_celsius": convertValue(info.Temperature), "present": convertValue(info.IsPresent), "charging": convertValue(info.IsCharging), "charged": convertValue(info.IsCharged), "internal_failure": convertValue(info.InternalFailure), } } func getPowerSourceDescriptorLabels(info *C.struct_macos_powersupply) map[string]string { return map[string]string{ "id": strconv.FormatInt(int64(*info.PowerSourceID), 10), "power_supply": C.GoString(info.Name), "type": C.GoString(info.Type), "transport_type": C.GoString(info.TransportType), "serial_number": C.GoString(info.HardwareSerialNumber), } } func getPowerSourceDescriptorState(info *C.struct_macos_powersupply) map[string]float64 { stateMap := map[string]float64{ "Off Line": 0, "AC Power": 0, "Battery Power": 0, } // This field is always present // https://developer.apple.com/documentation/iokit/kiopspowersourcestatekey stateMap[C.GoString(info.PowerSourceState)] = 1 return stateMap } func getPowerSourceDescriptorBatteryHealth(info *C.struct_macos_powersupply) map[string]float64 { // This field is optional // https://developer.apple.com/documentation/iokit/kiopsBatteryHealthkey if info.BatteryHealth == nil { return nil } stateMap := map[string]float64{ "Good": 0, "Fair": 0, "Poor": 0, } stateMap[C.GoString(info.BatteryHealth)] = 1 return stateMap } func convertValue(value *C.int) *float64 { if value == nil { return nil } ret := new(float64) *ret = (float64)(*value) return ret } func scaleValue(value *C.int, scale float64) *float64 { ret := convertValue(value) if ret == nil { return nil } *ret /= scale return ret } // minutesToSeconds converts *C.int minutes into *float64 seconds. // // Only positive values will be scaled to seconds, because negative ones // have special meanings. I.e. 
-1 indicates "Still Calculating the Time" func minutesToSeconds(minutes *C.int) *float64 { ret := convertValue(minutes) if ret == nil { return nil } if *ret > 0 { *ret *= 60 } return ret } func pushEnumMetric(ch chan<- prometheus.Metric, values map[string]float64, name, subsystem, powerSupply string) { for state, value := range values { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, name), fmt.Sprintf("IOKit Power Source information field %s for .", name), []string{"power_supply", "state"}, nil, ), prometheus.GaugeValue, value, powerSupply, state, ) } } node_exporter-1.7.0/collector/powersupplyclass_linux.go000066400000000000000000000146651452426057600236160ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nopowersupplyclass // +build !nopowersupplyclass package collector import ( "errors" "fmt" "os" "regexp" "strings" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) func (c *powerSupplyClassCollector) Update(ch chan<- prometheus.Metric) error { powerSupplyClass, err := getPowerSupplyClassInfo(c.ignoredPattern) if err != nil { if errors.Is(err, os.ErrNotExist) { return ErrNoData } return fmt.Errorf("could not get power_supply class info: %w", err) } for _, powerSupply := range powerSupplyClass { for name, value := range map[string]*int64{ "authentic": powerSupply.Authentic, "calibrate": powerSupply.Calibrate, "capacity": powerSupply.Capacity, "capacity_alert_max": powerSupply.CapacityAlertMax, "capacity_alert_min": powerSupply.CapacityAlertMin, "cyclecount": powerSupply.CycleCount, "online": powerSupply.Online, "present": powerSupply.Present, "time_to_empty_seconds": powerSupply.TimeToEmptyNow, "time_to_full_seconds": powerSupply.TimeToFullNow, } { if value != nil { pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value), powerSupply.Name, prometheus.GaugeValue) } } for name, value := range map[string]*int64{ "current_boot": powerSupply.CurrentBoot, "current_max": powerSupply.CurrentMax, "current_ampere": powerSupply.CurrentNow, "energy_empty": powerSupply.EnergyEmpty, "energy_empty_design": powerSupply.EnergyEmptyDesign, "energy_full": powerSupply.EnergyFull, "energy_full_design": powerSupply.EnergyFullDesign, "energy_watthour": powerSupply.EnergyNow, "voltage_boot": powerSupply.VoltageBoot, "voltage_max": powerSupply.VoltageMax, "voltage_max_design": powerSupply.VoltageMaxDesign, "voltage_min": powerSupply.VoltageMin, "voltage_min_design": powerSupply.VoltageMinDesign, "voltage_volt": powerSupply.VoltageNow, "voltage_ocv": powerSupply.VoltageOCV, "charge_control_limit": powerSupply.ChargeControlLimit, "charge_control_limit_max": powerSupply.ChargeControlLimitMax, "charge_counter": powerSupply.ChargeCounter, "charge_empty": powerSupply.ChargeEmpty, "charge_empty_design": powerSupply.ChargeEmptyDesign, "charge_full": powerSupply.ChargeFull, "charge_full_design": powerSupply.ChargeFullDesign, "charge_ampere": 
powerSupply.ChargeNow, "charge_term_current": powerSupply.ChargeTermCurrent, "constant_charge_current": powerSupply.ConstantChargeCurrent, "constant_charge_current_max": powerSupply.ConstantChargeCurrentMax, "constant_charge_voltage": powerSupply.ConstantChargeVoltage, "constant_charge_voltage_max": powerSupply.ConstantChargeVoltageMax, "precharge_current": powerSupply.PrechargeCurrent, "input_current_limit": powerSupply.InputCurrentLimit, "power_watt": powerSupply.PowerNow, } { if value != nil { pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/1e6, powerSupply.Name, prometheus.GaugeValue) } } for name, value := range map[string]*int64{ "temp_celsius": powerSupply.Temp, "temp_alert_max_celsius": powerSupply.TempAlertMax, "temp_alert_min_celsius": powerSupply.TempAlertMin, "temp_ambient_celsius": powerSupply.TempAmbient, "temp_ambient_max_celsius": powerSupply.TempAmbientMax, "temp_ambient_min_celsius": powerSupply.TempAmbientMin, "temp_max_celsius": powerSupply.TempMax, "temp_min_celsius": powerSupply.TempMin, } { if value != nil { pushPowerSupplyMetric(ch, c.subsystem, name, float64(*value)/10.0, powerSupply.Name, prometheus.GaugeValue) } } var ( keys []string values []string ) for name, value := range map[string]string{ "power_supply": powerSupply.Name, "capacity_level": powerSupply.CapacityLevel, "charge_type": powerSupply.ChargeType, "health": powerSupply.Health, "manufacturer": powerSupply.Manufacturer, "model_name": powerSupply.ModelName, "serial_number": powerSupply.SerialNumber, "status": powerSupply.Status, "technology": powerSupply.Technology, "type": powerSupply.Type, "usb_type": powerSupply.UsbType, "scope": powerSupply.Scope, } { if value != "" { keys = append(keys, name) values = append(values, strings.ToValidUTF8(value, "�")) } } fieldDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "info"), "info of /sys/class/power_supply/.", keys, nil, ) ch <- prometheus.MustNewConstMetric(fieldDesc, prometheus.GaugeValue, 1.0, values...) } return nil } func pushPowerSupplyMetric(ch chan<- prometheus.Metric, subsystem string, name string, value float64, powerSupplyName string, valueType prometheus.ValueType) { fieldDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, name), fmt.Sprintf("%s value of /sys/class/power_supply/.", name), []string{"power_supply"}, nil, ) ch <- prometheus.MustNewConstMetric(fieldDesc, valueType, value, powerSupplyName) } func getPowerSupplyClassInfo(ignore *regexp.Regexp) (sysfs.PowerSupplyClass, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, err } powerSupplyClass, err := fs.PowerSupplyClass() if err != nil { return powerSupplyClass, fmt.Errorf("error obtaining power_supply class info: %w", err) } for device := range powerSupplyClass { if ignore.MatchString(device) { delete(powerSupplyClass, device) } } return powerSupplyClass, nil } node_exporter-1.7.0/collector/pressure_linux.go000066400000000000000000000100271452426057600220130ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. //go:build !nopressure // +build !nopressure package collector import ( "errors" "fmt" "os" "syscall" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) var ( psiResources = []string{"cpu", "io", "memory"} ) type pressureStatsCollector struct { cpu *prometheus.Desc io *prometheus.Desc ioFull *prometheus.Desc mem *prometheus.Desc memFull *prometheus.Desc fs procfs.FS logger log.Logger } func init() { registerCollector("pressure", defaultEnabled, NewPressureStatsCollector) } // NewPressureStatsCollector returns a Collector exposing pressure stall information func NewPressureStatsCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &pressureStatsCollector{ cpu: prometheus.NewDesc( prometheus.BuildFQName(namespace, "pressure", "cpu_waiting_seconds_total"), "Total time in seconds that processes have waited for CPU time", nil, nil, ), io: prometheus.NewDesc( prometheus.BuildFQName(namespace, "pressure", "io_waiting_seconds_total"), "Total time in seconds that processes have waited due to IO congestion", nil, nil, ), ioFull: prometheus.NewDesc( prometheus.BuildFQName(namespace, "pressure", "io_stalled_seconds_total"), "Total time in seconds no process could make progress due to IO congestion", nil, nil, ), mem: prometheus.NewDesc( prometheus.BuildFQName(namespace, "pressure", "memory_waiting_seconds_total"), "Total time in seconds that processes have waited for memory", nil, nil, ), memFull: prometheus.NewDesc( prometheus.BuildFQName(namespace, "pressure", "memory_stalled_seconds_total"), "Total time in seconds no process could make progress due to memory congestion", nil, nil, ), fs: fs, logger: logger, }, nil } // Update calls procfs.NewPSIStatsForResource for the different resources and updates the values func (c *pressureStatsCollector) Update(ch chan<- prometheus.Metric) error { for _, res := range psiResources { level.Debug(c.logger).Log("msg", "collecting statistics for resource", "resource", res) vals, err := c.fs.PSIStatsForResource(res) if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "pressure information is unavailable, you need a Linux kernel >= 4.20 and/or CONFIG_PSI enabled for your kernel") return ErrNoData } if errors.Is(err, syscall.ENOTSUP) { level.Debug(c.logger).Log("msg", "pressure information is disabled, add psi=1 kernel command line to enable it") return ErrNoData } return fmt.Errorf("failed to retrieve pressure stats: %w", err) } switch res { case "cpu": ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0) case "io": ch <- prometheus.MustNewConstMetric(c.io, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0) ch <- prometheus.MustNewConstMetric(c.ioFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0) case "memory": ch <- prometheus.MustNewConstMetric(c.mem, prometheus.CounterValue, float64(vals.Some.Total)/1000.0/1000.0) ch <- prometheus.MustNewConstMetric(c.memFull, prometheus.CounterValue, float64(vals.Full.Total)/1000.0/1000.0) default: level.Debug(c.logger).Log("msg", "did not account for resource", "resource", res) } } return nil } 
node_exporter-1.7.0/collector/processes_linux.go000066400000000000000000000144131452426057600221540ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noprocesses // +build !noprocesses package collector import ( "errors" "fmt" "os" "path" "strconv" "strings" "syscall" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type processCollector struct { fs procfs.FS threadAlloc *prometheus.Desc threadLimit *prometheus.Desc threadsState *prometheus.Desc procsState *prometheus.Desc pidUsed *prometheus.Desc pidMax *prometheus.Desc logger log.Logger } func init() { registerCollector("processes", defaultDisabled, NewProcessStatCollector) } // NewProcessStatCollector returns a new Collector exposing process data read from the proc filesystem. func NewProcessStatCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } subsystem := "processes" return &processCollector{ fs: fs, threadAlloc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "threads"), "Allocated threads in system", nil, nil, ), threadLimit: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "max_threads"), "Limit of threads in the system", nil, nil, ), threadsState: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "threads_state"), "Number of threads in each state.", []string{"thread_state"}, nil, ), procsState: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "state"), "Number of processes in each state.", []string{"state"}, nil, ), pidUsed: prometheus.NewDesc(prometheus.BuildFQName(namespace, subsystem, "pids"), "Number of PIDs", nil, nil, ), pidMax: prometheus.NewDesc(prometheus.BuildFQName(namespace, subsystem, "max_processes"), "Number of max PIDs limit", nil, nil, ), logger: logger, }, nil } func (c *processCollector) Update(ch chan<- prometheus.Metric) error { pids, states, threads, threadStates, err := c.getAllocatedThreads() if err != nil { return fmt.Errorf("unable to retrieve number of allocated threads: %w", err) } ch <- prometheus.MustNewConstMetric(c.threadAlloc, prometheus.GaugeValue, float64(threads)) maxThreads, err := readUintFromFile(procFilePath("sys/kernel/threads-max")) if err != nil { return fmt.Errorf("unable to retrieve limit number of threads: %w", err) } ch <- prometheus.MustNewConstMetric(c.threadLimit, prometheus.GaugeValue, float64(maxThreads)) for state := range states { ch <- prometheus.MustNewConstMetric(c.procsState, prometheus.GaugeValue, float64(states[state]), state) } for state := range threadStates { ch <- prometheus.MustNewConstMetric(c.threadsState, prometheus.GaugeValue, float64(threadStates[state]), state) } pidM, err := readUintFromFile(procFilePath("sys/kernel/pid_max")) if err != nil { return fmt.Errorf("unable to retrieve limit number of maximum pids alloved: %w", err) } ch 
<- prometheus.MustNewConstMetric(c.pidUsed, prometheus.GaugeValue, float64(pids)) ch <- prometheus.MustNewConstMetric(c.pidMax, prometheus.GaugeValue, float64(pidM)) return nil } func (c *processCollector) getAllocatedThreads() (int, map[string]int32, int, map[string]int32, error) { p, err := c.fs.AllProcs() if err != nil { return 0, nil, 0, nil, fmt.Errorf("unable to list all processes: %w", err) } pids := 0 thread := 0 procStates := make(map[string]int32) threadStates := make(map[string]int32) for _, pid := range p { stat, err := pid.Stat() if err != nil { // PIDs can vanish between getting the list and getting stats. if c.isIgnoredError(err) { level.Debug(c.logger).Log("msg", "file not found when retrieving stats for pid", "pid", pid.PID, "err", err) continue } level.Debug(c.logger).Log("msg", "error reading stat for pid", "pid", pid.PID, "err", err) return 0, nil, 0, nil, fmt.Errorf("error reading stat for pid %d: %w", pid.PID, err) } pids++ procStates[stat.State]++ thread += stat.NumThreads err = c.getThreadStates(pid.PID, stat, threadStates) if err != nil { return 0, nil, 0, nil, err } } return pids, procStates, thread, threadStates, nil } func (c *processCollector) getThreadStates(pid int, pidStat procfs.ProcStat, threadStates map[string]int32) error { fs, err := procfs.NewFS(procFilePath(path.Join(strconv.Itoa(pid), "task"))) if err != nil { if c.isIgnoredError(err) { level.Debug(c.logger).Log("msg", "file not found when retrieving tasks for pid", "pid", pid, "err", err) return nil } level.Debug(c.logger).Log("msg", "error reading tasks for pid", "pid", pid, "err", err) return fmt.Errorf("error reading task for pid %d: %w", pid, err) } t, err := fs.AllProcs() if err != nil { if c.isIgnoredError(err) { level.Debug(c.logger).Log("msg", "file not found when retrieving tasks for pid", "pid", pid, "err", err) return nil } return fmt.Errorf("unable to list all threads for pid: %d %w", pid, err) } for _, thread := range t { if pid == thread.PID { threadStates[pidStat.State]++ continue } threadStat, err := thread.Stat() if err != nil { if c.isIgnoredError(err) { level.Debug(c.logger).Log("msg", "file not found when retrieving stats for thread", "pid", pid, "threadId", thread.PID, "err", err) continue } level.Debug(c.logger).Log("msg", "error reading stat for thread", "pid", pid, "threadId", thread.PID, "err", err) return fmt.Errorf("error reading stat for pid:%d thread:%d err:%w", pid, thread.PID, err) } threadStates[threadStat.State]++ } return nil } func (c *processCollector) isIgnoredError(err error) bool { if errors.Is(err, os.ErrNotExist) || strings.Contains(err.Error(), syscall.ESRCH.Error()) { return true } return false } node_exporter-1.7.0/collector/processes_linux_test.go000066400000000000000000000033311452426057600232100ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !noprocesses // +build !noprocesses package collector import ( "testing" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/procfs" ) func TestReadProcessStatus(t *testing.T) { if _, err := kingpin.CommandLine.Parse([]string{"--path.procfs", "fixtures/proc"}); err != nil { t.Fatal(err) } want := 1 fs, err := procfs.NewFS(*procPath) if err != nil { t.Errorf("failed to open procfs: %v", err) } c := processCollector{fs: fs, logger: log.NewNopLogger()} pids, states, threads, _, err := c.getAllocatedThreads() if err != nil { t.Fatalf("Cannot retrieve data from procfs getAllocatedThreads function: %v ", err) } if threads < want { t.Fatalf("Current threads: %d Shouldn't be less than wanted %d", threads, want) } if states == nil { t.Fatalf("Process states cannot be nil %v:", states) } maxPid, err := readUintFromFile(procFilePath("sys/kernel/pid_max")) if err != nil { t.Fatalf("Unable to retrieve limit number of maximum pids alloved %v\n", err) } if uint64(pids) > maxPid || pids == 0 { t.Fatalf("Total running pids cannot be greater than %d or equals to 0", maxPid) } } node_exporter-1.7.0/collector/qdisc_linux.go000066400000000000000000000141111452426057600212440ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noqdisc // +build !noqdisc package collector import ( "encoding/json" "fmt" "os" "path/filepath" "github.com/alecthomas/kingpin/v2" "github.com/ema/qdisc" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) type qdiscStatCollector struct { logger log.Logger deviceFilter deviceFilter bytes typedDesc packets typedDesc drops typedDesc requeues typedDesc overlimits typedDesc qlength typedDesc backlog typedDesc } var ( collectorQdisc = kingpin.Flag("collector.qdisc.fixtures", "test fixtures to use for qdisc collector end-to-end testing").Default("").String() collectorQdiscDeviceInclude = kingpin.Flag("collector.qdisc.device-include", "Regexp of qdisc devices to include (mutually exclusive to device-exclude).").String() oldCollectorQdiskDeviceInclude = kingpin.Flag("collector.qdisk.device-include", "DEPRECATED: Use collector.qdisc.device-include").Hidden().String() collectorQdiscDeviceExclude = kingpin.Flag("collector.qdisc.device-exclude", "Regexp of qdisc devices to exclude (mutually exclusive to device-include).").String() oldCollectorQdiskDeviceExclude = kingpin.Flag("collector.qdisk.device-exclude", "DEPRECATED: Use collector.qdisc.device-exclude").Hidden().String() ) func init() { registerCollector("qdisc", defaultDisabled, NewQdiscStatCollector) } // NewQdiscStatCollector returns a new Collector exposing queuing discipline statistics. 
func NewQdiscStatCollector(logger log.Logger) (Collector, error) { if *oldCollectorQdiskDeviceInclude != "" { if *collectorQdiscDeviceInclude == "" { level.Warn(logger).Log("msg", "--collector.qdisk.device-include is DEPRECATED and will be removed in 2.0.0, use --collector.qdisc.device-include") *collectorQdiscDeviceInclude = *oldCollectorQdiskDeviceInclude } else { return nil, fmt.Errorf("--collector.qdisk.device-include and --collector.qdisc.device-include are mutually exclusive") } } if *oldCollectorQdiskDeviceExclude != "" { if *collectorQdiscDeviceExclude == "" { level.Warn(logger).Log("msg", "--collector.qdisk.device-exclude is DEPRECATED and will be removed in 2.0.0, use --collector.qdisc.device-exclude") *collectorQdiscDeviceExclude = *oldCollectorQdiskDeviceExclude } else { return nil, fmt.Errorf("--collector.qdisk.device-exclude and --collector.qdisc.device-exclude are mutually exclusive") } } if *collectorQdiscDeviceExclude != "" && *collectorQdiscDeviceInclude != "" { return nil, fmt.Errorf("collector.qdisc.device-include and collector.qdisc.device-exclude are mutually exclusive") } return &qdiscStatCollector{ bytes: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "bytes_total"), "Number of bytes sent.", []string{"device", "kind"}, nil, ), prometheus.CounterValue}, packets: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "packets_total"), "Number of packets sent.", []string{"device", "kind"}, nil, ), prometheus.CounterValue}, drops: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "drops_total"), "Number of packets dropped.", []string{"device", "kind"}, nil, ), prometheus.CounterValue}, requeues: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "requeues_total"), "Number of packets dequeued, not transmitted, and requeued.", []string{"device", "kind"}, nil, ), prometheus.CounterValue}, overlimits: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "overlimits_total"), "Number of overlimit packets.", []string{"device", "kind"}, nil, ), prometheus.CounterValue}, qlength: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "current_queue_length"), "Number of packets currently in queue to be sent.", []string{"device", "kind"}, nil, ), prometheus.GaugeValue}, backlog: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "qdisc", "backlog"), "Number of bytes currently in queue to be sent.", []string{"device", "kind"}, nil, ), prometheus.GaugeValue}, logger: logger, deviceFilter: newDeviceFilter(*collectorQdiscDeviceExclude, *collectorQdiscDeviceInclude), }, nil } func testQdiscGet(fixtures string) ([]qdisc.QdiscInfo, error) { var res []qdisc.QdiscInfo b, err := os.ReadFile(filepath.Join(fixtures, "results.json")) if err != nil { return res, err } err = json.Unmarshal(b, &res) return res, err } func (c *qdiscStatCollector) Update(ch chan<- prometheus.Metric) error { var msgs []qdisc.QdiscInfo var err error fixtures := *collectorQdisc if fixtures == "" { msgs, err = qdisc.Get() } else { msgs, err = testQdiscGet(fixtures) } if err != nil { return err } for _, msg := range msgs { // Only report root qdisc information.
if msg.Parent != 0 { continue } if c.deviceFilter.ignored(msg.IfaceName) { continue } ch <- c.bytes.mustNewConstMetric(float64(msg.Bytes), msg.IfaceName, msg.Kind) ch <- c.packets.mustNewConstMetric(float64(msg.Packets), msg.IfaceName, msg.Kind) ch <- c.drops.mustNewConstMetric(float64(msg.Drops), msg.IfaceName, msg.Kind) ch <- c.requeues.mustNewConstMetric(float64(msg.Requeues), msg.IfaceName, msg.Kind) ch <- c.overlimits.mustNewConstMetric(float64(msg.Overlimits), msg.IfaceName, msg.Kind) ch <- c.qlength.mustNewConstMetric(float64(msg.Qlen), msg.IfaceName, msg.Kind) ch <- c.backlog.mustNewConstMetric(float64(msg.Backlog), msg.IfaceName, msg.Kind) } return nil } node_exporter-1.7.0/collector/rapl_linux.go000066400000000000000000000070771452426057600211140ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !norapl // +build !norapl package collector import ( "errors" "fmt" "os" "strconv" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) const raplCollectorSubsystem = "rapl" type raplCollector struct { fs sysfs.FS logger log.Logger joulesMetricDesc *prometheus.Desc } func init() { registerCollector(raplCollectorSubsystem, defaultEnabled, NewRaplCollector) } var ( raplZoneLabel = kingpin.Flag("collector.rapl.enable-zone-label", "Enables service unit metric unit_start_time_seconds").Bool() ) // NewRaplCollector returns a new Collector exposing RAPL metrics. func NewRaplCollector(logger log.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, err } joulesMetricDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, raplCollectorSubsystem, "joules_total"), "Current RAPL value in joules", []string{"index", "path", "rapl_zone"}, nil, ) collector := raplCollector{ fs: fs, logger: logger, joulesMetricDesc: joulesMetricDesc, } return &collector, nil } // Update implements Collector and exposes RAPL related metrics. func (c *raplCollector) Update(ch chan<- prometheus.Metric) error { // nil zones are fine when platform doesn't have powercap files present. 
zones, err := sysfs.GetRaplZones(c.fs) if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "Platform doesn't have powercap files present", "err", err) return ErrNoData } if errors.Is(err, os.ErrPermission) { level.Debug(c.logger).Log("msg", "Can't access powercap files", "err", err) return ErrNoData } return fmt.Errorf("failed to retrieve rapl stats: %w", err) } for _, rz := range zones { microJoules, err := rz.GetEnergyMicrojoules() if err != nil { if errors.Is(err, os.ErrPermission) { level.Debug(c.logger).Log("msg", "Can't access energy_uj file", "zone", rz, "err", err) return ErrNoData } return err } joules := float64(microJoules) / 1000000.0 if *raplZoneLabel { ch <- c.joulesMetricWithZoneLabel(rz, joules) } else { ch <- c.joulesMetric(rz, joules) } } return nil } func (c *raplCollector) joulesMetric(z sysfs.RaplZone, v float64) prometheus.Metric { index := strconv.Itoa(z.Index) descriptor := prometheus.NewDesc( prometheus.BuildFQName( namespace, raplCollectorSubsystem, fmt.Sprintf("%s_joules_total", SanitizeMetricName(z.Name)), ), fmt.Sprintf("Current RAPL %s value in joules", z.Name), []string{"index", "path"}, nil, ) return prometheus.MustNewConstMetric( descriptor, prometheus.CounterValue, v, index, z.Path, ) } func (c *raplCollector) joulesMetricWithZoneLabel(z sysfs.RaplZone, v float64) prometheus.Metric { index := strconv.Itoa(z.Index) return prometheus.MustNewConstMetric( c.joulesMetricDesc, prometheus.CounterValue, v, index, z.Path, z.Name, ) } node_exporter-1.7.0/collector/runit.go000066400000000000000000000065711452426057600200760ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !norunit // +build !norunit package collector import ( "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus-community/go-runit/runit" "github.com/prometheus/client_golang/prometheus" ) var runitServiceDir = kingpin.Flag("collector.runit.servicedir", "Path to runit service directory.").Default("/etc/service").String() type runitCollector struct { state typedDesc stateDesired typedDesc stateNormal typedDesc stateTimestamp typedDesc logger log.Logger } func init() { registerCollector("runit", defaultDisabled, NewRunitCollector) } // NewRunitCollector returns a new Collector exposing runit statistics. 
func NewRunitCollector(logger log.Logger) (Collector, error) { var ( subsystem = "service" constLabels = prometheus.Labels{"supervisor": "runit"} labelNames = []string{"service"} ) level.Warn(logger).Log("msg", "This collector is deprecated and will be removed in the next major version release.") return &runitCollector{ state: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "state"), "State of runit service.", labelNames, constLabels, ), prometheus.GaugeValue}, stateDesired: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "desired_state"), "Desired state of runit service.", labelNames, constLabels, ), prometheus.GaugeValue}, stateNormal: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "normal_state"), "Normal state of runit service.", labelNames, constLabels, ), prometheus.GaugeValue}, stateTimestamp: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "state_last_change_timestamp_seconds"), "Unix timestamp of the last runit service state change.", labelNames, constLabels, ), prometheus.GaugeValue}, logger: logger, }, nil } func (c *runitCollector) Update(ch chan<- prometheus.Metric) error { services, err := runit.GetServices(*runitServiceDir) if err != nil { return err } for _, service := range services { status, err := service.Status() if err != nil { level.Debug(c.logger).Log("msg", "Couldn't get status", "service", service.Name, "err", err) continue } level.Debug(c.logger).Log("msg", "duration", "service", service.Name, "status", status.State, "pid", status.Pid, "duration_seconds", status.Duration) ch <- c.state.mustNewConstMetric(float64(status.State), service.Name) ch <- c.stateDesired.mustNewConstMetric(float64(status.Want), service.Name) ch <- c.stateTimestamp.mustNewConstMetric(float64(status.Timestamp.Unix()), service.Name) if status.NormallyUp { ch <- c.stateNormal.mustNewConstMetric(1, service.Name) } else { ch <- c.stateNormal.mustNewConstMetric(0, service.Name) } } return nil } node_exporter-1.7.0/collector/schedstat_linux.go000066400000000000000000000052201452426057600221240ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !noshedstat // +build !noshedstat package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) const nsPerSec = 1e9 var ( runningSecondsTotal = prometheus.NewDesc( prometheus.BuildFQName(namespace, "schedstat", "running_seconds_total"), "Number of seconds CPU spent running a process.", []string{"cpu"}, nil, ) waitingSecondsTotal = prometheus.NewDesc( prometheus.BuildFQName(namespace, "schedstat", "waiting_seconds_total"), "Number of seconds spent by processing waiting for this CPU.", []string{"cpu"}, nil, ) timeslicesTotal = prometheus.NewDesc( prometheus.BuildFQName(namespace, "schedstat", "timeslices_total"), "Number of timeslices executed by CPU.", []string{"cpu"}, nil, ) ) // NewSchedstatCollector returns a new Collector exposing task scheduler statistics func NewSchedstatCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &schedstatCollector{fs, logger}, nil } type schedstatCollector struct { fs procfs.FS logger log.Logger } func init() { registerCollector("schedstat", defaultEnabled, NewSchedstatCollector) } func (c *schedstatCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.Schedstat() if err != nil { if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "schedstat file does not exist") return ErrNoData } return err } for _, cpu := range stats.CPUs { ch <- prometheus.MustNewConstMetric( runningSecondsTotal, prometheus.CounterValue, float64(cpu.RunningNanoseconds)/nsPerSec, cpu.CPUNum, ) ch <- prometheus.MustNewConstMetric( waitingSecondsTotal, prometheus.CounterValue, float64(cpu.WaitingNanoseconds)/nsPerSec, cpu.CPUNum, ) ch <- prometheus.MustNewConstMetric( timeslicesTotal, prometheus.CounterValue, float64(cpu.RunTimeslices), cpu.CPUNum, ) } return nil } node_exporter-1.7.0/collector/selinux_linux.go000066400000000000000000000042761452426057600216430ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noselinux // +build !noselinux package collector import ( "github.com/go-kit/log" "github.com/opencontainers/selinux/go-selinux" "github.com/prometheus/client_golang/prometheus" ) type selinuxCollector struct { configMode *prometheus.Desc currentMode *prometheus.Desc enabled *prometheus.Desc logger log.Logger } func init() { registerCollector("selinux", defaultEnabled, NewSelinuxCollector) } // NewSelinuxCollector returns a new Collector exposing SELinux statistics. 
func NewSelinuxCollector(logger log.Logger) (Collector, error) { const subsystem = "selinux" return &selinuxCollector{ configMode: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "config_mode"), "Configured SELinux enforcement mode", nil, nil, ), currentMode: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "current_mode"), "Current SELinux enforcement mode", nil, nil, ), enabled: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "enabled"), "SELinux is enabled, 1 is true, 0 is false", nil, nil, ), logger: logger, }, nil } func (c *selinuxCollector) Update(ch chan<- prometheus.Metric) error { if !selinux.GetEnabled() { ch <- prometheus.MustNewConstMetric( c.enabled, prometheus.GaugeValue, 0) return nil } ch <- prometheus.MustNewConstMetric( c.enabled, prometheus.GaugeValue, 1) ch <- prometheus.MustNewConstMetric( c.configMode, prometheus.GaugeValue, float64(selinux.DefaultEnforceMode())) ch <- prometheus.MustNewConstMetric( c.currentMode, prometheus.GaugeValue, float64(selinux.EnforceMode())) return nil } node_exporter-1.7.0/collector/slabinfo_linux.go000066400000000000000000000070111452426057600217370ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !noslabinfo // +build !noslabinfo package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type slabinfoCollector struct { fs procfs.FS logger log.Logger subsystem string labels []string } func init() { registerCollector("slabinfo", defaultDisabled, NewSlabinfoCollector) } func NewSlabinfoCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &slabinfoCollector{logger: logger, fs: fs, subsystem: "slabinfo", labels: []string{"slab"}, }, nil } func (c *slabinfoCollector) Update(ch chan<- prometheus.Metric) error { slabinfo, err := c.fs.SlabInfo() if err != nil { return fmt.Errorf("couldn't get %s: %w", c.subsystem, err) } for _, slab := range slabinfo.Slabs { ch <- c.activeObjects(slab.Name, slab.ObjActive) ch <- c.objects(slab.Name, slab.ObjNum) ch <- c.objectSizeBytes(slab.Name, slab.ObjSize) ch <- c.objectsPerSlab(slab.Name, slab.ObjPerSlab) ch <- c.pagesPerSlab(slab.Name, slab.PagesPerSlab) } return nil } func (c *slabinfoCollector) activeObjects(label string, val int64) prometheus.Metric { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "active_objects"), "The number of objects that are currently active (i.e., in use).", c.labels, nil) return prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, float64(val), label, ) } func (c *slabinfoCollector) objects(label string, val int64) prometheus.Metric { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "objects"), "The total number of allocated objects (i.e., objects that are both in use and not in use).", c.labels, nil) return prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, float64(val), label, ) } func (c *slabinfoCollector) objectSizeBytes(label string, val int64) prometheus.Metric { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "object_size_bytes"), "The size of objects in this slab, in bytes.", c.labels, nil) return prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, float64(val), label, ) } func (c *slabinfoCollector) objectsPerSlab(label string, val int64) prometheus.Metric { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "objects_per_slab"), "The number of objects stored in each slab.", c.labels, nil) return prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, float64(val), label, ) } func (c *slabinfoCollector) pagesPerSlab(label string, val int64) prometheus.Metric { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, c.subsystem, "pages_per_slab"), "The number of pages allocated for each slab.", c.labels, nil) return prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, float64(val), label, ) } node_exporter-1.7.0/collector/sockstat_linux.go000066400000000000000000000102231452426057600217740ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. //go:build !nosockstat // +build !nosockstat package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) const ( sockStatSubsystem = "sockstat" ) // Used for calculating the total memory bytes on TCP and UDP. var pageSize = os.Getpagesize() type sockStatCollector struct { logger log.Logger } func init() { registerCollector(sockStatSubsystem, defaultEnabled, NewSockStatCollector) } // NewSockStatCollector returns a new Collector exposing socket stats. func NewSockStatCollector(logger log.Logger) (Collector, error) { return &sockStatCollector{logger}, nil } func (c *sockStatCollector) Update(ch chan<- prometheus.Metric) error { fs, err := procfs.NewFS(*procPath) if err != nil { return fmt.Errorf("failed to open procfs: %w", err) } // If IPv4 and/or IPv6 are disabled on this kernel, handle it gracefully. stat4, err := fs.NetSockstat() switch { case err == nil: case errors.Is(err, os.ErrNotExist): level.Debug(c.logger).Log("msg", "IPv4 sockstat statistics not found, skipping") default: return fmt.Errorf("failed to get IPv4 sockstat data: %w", err) } stat6, err := fs.NetSockstat6() switch { case err == nil: case errors.Is(err, os.ErrNotExist): level.Debug(c.logger).Log("msg", "IPv6 sockstat statistics not found, skipping") default: return fmt.Errorf("failed to get IPv6 sockstat data: %w", err) } stats := []struct { isIPv6 bool stat *procfs.NetSockstat }{ { stat: stat4, }, { isIPv6: true, stat: stat6, }, } for _, s := range stats { c.update(ch, s.isIPv6, s.stat) } return nil } func (c *sockStatCollector) update(ch chan<- prometheus.Metric, isIPv6 bool, s *procfs.NetSockstat) { if s == nil { // IPv6 disabled or similar; nothing to do. return } // If sockstat contains the number of used sockets, export it. if !isIPv6 && s.Used != nil { // TODO: this must be updated if sockstat6 ever exports this data. ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, sockStatSubsystem, "sockets_used"), "Number of IPv4 sockets in use.", nil, nil, ), prometheus.GaugeValue, float64(*s.Used), ) } // A name and optional value for a sockstat metric. type ssPair struct { name string v *int } // Previously these metric names were generated directly from the file output. // In order to keep the same level of compatibility, we must map the fields // to their correct names. for _, p := range s.Protocols { pairs := []ssPair{ { name: "inuse", v: &p.InUse, }, { name: "orphan", v: p.Orphan, }, { name: "tw", v: p.TW, }, { name: "alloc", v: p.Alloc, }, { name: "mem", v: p.Mem, }, { name: "memory", v: p.Memory, }, } // Also export mem_bytes values for sockets which have a mem value // stored in pages. if p.Mem != nil { v := *p.Mem * pageSize pairs = append(pairs, ssPair{ name: "mem_bytes", v: &v, }) } for _, pair := range pairs { if pair.v == nil { // This value is not set for this protocol; nothing to do. 
continue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName( namespace, sockStatSubsystem, fmt.Sprintf("%s_%s", p.Protocol, pair.name), ), fmt.Sprintf("Number of %s sockets in state %s.", p.Protocol, pair.name), nil, nil, ), prometheus.GaugeValue, float64(*pair.v), ) } } } node_exporter-1.7.0/collector/softirq_linux.go000066400000000000000000000043361452426057600216400ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nosoftirqs // +build !nosoftirqs package collector import ( "fmt" "strconv" "github.com/prometheus/client_golang/prometheus" ) var ( softirqLabelNames = []string{"cpu", "type"} ) func (c *softirqsCollector) Update(ch chan<- prometheus.Metric) (err error) { softirqs, err := c.fs.Softirqs() if err != nil { return fmt.Errorf("couldn't get softirqs: %w", err) } for cpuNo, value := range softirqs.Hi { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "HI") } for cpuNo, value := range softirqs.Timer { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "TIMER") } for cpuNo, value := range softirqs.NetTx { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "NET_TX") } for cpuNo, value := range softirqs.NetRx { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "NET_RX") } for cpuNo, value := range softirqs.Block { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "BLOCK") } for cpuNo, value := range softirqs.IRQPoll { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "IRQ_POLL") } for cpuNo, value := range softirqs.Tasklet { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "TASKLET") } for cpuNo, value := range softirqs.Sched { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "SCHED") } for cpuNo, value := range softirqs.HRTimer { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "HRTIMER") } for cpuNo, value := range softirqs.RCU { ch <- c.desc.mustNewConstMetric(float64(value), strconv.Itoa(cpuNo), "RCU") } return err } node_exporter-1.7.0/collector/softirqs_common.go000066400000000000000000000026471452426057600221570ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
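// Summary comment added for clarity: the softirqs collector (this file together with
// softirq_linux.go above) exports node_softirqs_functions_total, a counter of softirq
// invocations parsed from /proc/softirqs via procfs and labelled by "cpu" and softirq
// "type" (HI, TIMER, NET_TX, NET_RX, BLOCK, IRQ_POLL, TASKLET, SCHED, HRTIMER, RCU).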
//go:build linux && !nosoftirqs // +build linux,!nosoftirqs package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type softirqsCollector struct { fs procfs.FS desc typedDesc logger log.Logger } func init() { registerCollector("softirqs", defaultDisabled, NewSoftirqsCollector) } // NewSoftirqsCollector returns a new Collector exposing softirq stats. func NewSoftirqsCollector(logger log.Logger) (Collector, error) { desc := typedDesc{prometheus.NewDesc( namespace+"_softirqs_functions_total", "Softirq counts per CPU.", softirqLabelNames, nil, ), prometheus.CounterValue} fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &softirqsCollector{fs, desc, logger}, nil } node_exporter-1.7.0/collector/softnet_linux.go000066400000000000000000000101631452426057600216260ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nosoftnet // +build !nosoftnet package collector import ( "fmt" "strconv" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type softnetCollector struct { fs procfs.FS processed *prometheus.Desc dropped *prometheus.Desc timeSqueezed *prometheus.Desc cpuCollision *prometheus.Desc receivedRps *prometheus.Desc flowLimitCount *prometheus.Desc softnetBacklogLen *prometheus.Desc logger log.Logger } const ( softnetSubsystem = "softnet" ) func init() { registerCollector("softnet", defaultEnabled, NewSoftnetCollector) } // NewSoftnetCollector returns a new Collector exposing softnet metrics. 
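// Statistics are parsed from /proc/net/softnet_stat via procfs and exported per CPU,
// e.g. (illustrative sample, not actual output):
//
//	node_softnet_processed_total{cpu="0"} 1.234567e+06
//	node_softnet_dropped_total{cpu="0"} 0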
func NewSoftnetCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &softnetCollector{ fs: fs, processed: prometheus.NewDesc( prometheus.BuildFQName(namespace, softnetSubsystem, "processed_total"), "Number of processed packets", []string{"cpu"}, nil, ), dropped: prometheus.NewDesc( prometheus.BuildFQName(namespace, softnetSubsystem, "dropped_total"), "Number of dropped packets", []string{"cpu"}, nil, ), timeSqueezed: prometheus.NewDesc( prometheus.BuildFQName(namespace, softnetSubsystem, "times_squeezed_total"), "Number of times processing packets ran out of quota", []string{"cpu"}, nil, ), cpuCollision: prometheus.NewDesc( prometheus.BuildFQName(namespace, softnetSubsystem, "cpu_collision_total"), "Number of collision occur while obtaining device lock while transmitting", []string{"cpu"}, nil, ), receivedRps: prometheus.NewDesc( prometheus.BuildFQName(namespace, softnetSubsystem, "received_rps_total"), "Number of times cpu woken up received_rps", []string{"cpu"}, nil, ), flowLimitCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, softnetSubsystem, "flow_limit_count_total"), "Number of times flow limit has been reached", []string{"cpu"}, nil, ), softnetBacklogLen: prometheus.NewDesc( prometheus.BuildFQName(namespace, softnetSubsystem, "backlog_len"), "Softnet backlog status", []string{"cpu"}, nil, ), logger: logger, }, nil } // Update gets parsed softnet statistics using procfs. func (c *softnetCollector) Update(ch chan<- prometheus.Metric) error { var cpu string stats, err := c.fs.NetSoftnetStat() if err != nil { return fmt.Errorf("could not get softnet statistics: %w", err) } for _, cpuStats := range stats { cpu = strconv.FormatUint(uint64(cpuStats.Index), 10) ch <- prometheus.MustNewConstMetric( c.processed, prometheus.CounterValue, float64(cpuStats.Processed), cpu, ) ch <- prometheus.MustNewConstMetric( c.dropped, prometheus.CounterValue, float64(cpuStats.Dropped), cpu, ) ch <- prometheus.MustNewConstMetric( c.timeSqueezed, prometheus.CounterValue, float64(cpuStats.TimeSqueezed), cpu, ) ch <- prometheus.MustNewConstMetric( c.cpuCollision, prometheus.CounterValue, float64(cpuStats.CPUCollision), cpu, ) ch <- prometheus.MustNewConstMetric( c.receivedRps, prometheus.CounterValue, float64(cpuStats.ReceivedRps), cpu, ) ch <- prometheus.MustNewConstMetric( c.flowLimitCount, prometheus.CounterValue, float64(cpuStats.FlowLimitCount), cpu, ) ch <- prometheus.MustNewConstMetric( c.softnetBacklogLen, prometheus.GaugeValue, float64(cpuStats.SoftnetBacklogLen), cpu, ) } return nil } node_exporter-1.7.0/collector/stat_linux.go000066400000000000000000000100561452426057600211200ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nostat // +build !nostat package collector import ( "fmt" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type statCollector struct { fs procfs.FS intr *prometheus.Desc ctxt *prometheus.Desc forks *prometheus.Desc btime *prometheus.Desc procsRunning *prometheus.Desc procsBlocked *prometheus.Desc softIRQ *prometheus.Desc logger log.Logger } var statSoftirqFlag = kingpin.Flag("collector.stat.softirq", "Export softirq calls per vector").Default("false").Bool() func init() { registerCollector("stat", defaultEnabled, NewStatCollector) } // NewStatCollector returns a new Collector exposing kernel/system statistics. func NewStatCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &statCollector{ fs: fs, intr: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "intr_total"), "Total number of interrupts serviced.", nil, nil, ), ctxt: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "context_switches_total"), "Total number of context switches.", nil, nil, ), forks: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "forks_total"), "Total number of forks.", nil, nil, ), btime: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "boot_time_seconds"), "Node boot time, in unixtime.", nil, nil, ), procsRunning: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "procs_running"), "Number of processes in runnable state.", nil, nil, ), procsBlocked: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "procs_blocked"), "Number of processes blocked waiting for I/O to complete.", nil, nil, ), softIRQ: prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "softirqs_total"), "Number of softirq calls.", []string{"vector"}, nil, ), logger: logger, }, nil } // Update implements Collector and exposes kernel and system statistics. 
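// All values are read from /proc/stat via procfs. The per-vector softirq breakdown
// (node_softirqs_total{vector=...}) is only emitted when the --collector.stat.softirq
// flag defined above is set.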
func (c *statCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.Stat() if err != nil { return err } ch <- prometheus.MustNewConstMetric(c.intr, prometheus.CounterValue, float64(stats.IRQTotal)) ch <- prometheus.MustNewConstMetric(c.ctxt, prometheus.CounterValue, float64(stats.ContextSwitches)) ch <- prometheus.MustNewConstMetric(c.forks, prometheus.CounterValue, float64(stats.ProcessCreated)) ch <- prometheus.MustNewConstMetric(c.btime, prometheus.GaugeValue, float64(stats.BootTime)) ch <- prometheus.MustNewConstMetric(c.procsRunning, prometheus.GaugeValue, float64(stats.ProcessesRunning)) ch <- prometheus.MustNewConstMetric(c.procsBlocked, prometheus.GaugeValue, float64(stats.ProcessesBlocked)) if *statSoftirqFlag { si := stats.SoftIRQ for _, vec := range []struct { name string value uint64 }{ {name: "hi", value: si.Hi}, {name: "timer", value: si.Timer}, {name: "net_tx", value: si.NetTx}, {name: "net_rx", value: si.NetRx}, {name: "block", value: si.Block}, {name: "block_iopoll", value: si.BlockIoPoll}, {name: "tasklet", value: si.Tasklet}, {name: "sched", value: si.Sched}, {name: "hrtimer", value: si.Hrtimer}, {name: "rcu", value: si.Rcu}, } { ch <- prometheus.MustNewConstMetric(c.softIRQ, prometheus.CounterValue, float64(vec.value), vec.name) } } return nil } node_exporter-1.7.0/collector/supervisord.go000066400000000000000000000122061452426057600213120ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nosupervisord // +build !nosupervisord package collector import ( "context" "fmt" "net" "net/http" "net/url" "time" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/mattn/go-xmlrpc" "github.com/prometheus/client_golang/prometheus" ) var ( supervisordURL = kingpin.Flag("collector.supervisord.url", "XML RPC endpoint.").Default("http://localhost:9001/RPC2").Envar("SUPERVISORD_URL").String() xrpc *xmlrpc.Client ) type supervisordCollector struct { upDesc *prometheus.Desc stateDesc *prometheus.Desc exitStatusDesc *prometheus.Desc startTimeDesc *prometheus.Desc logger log.Logger } func init() { registerCollector("supervisord", defaultDisabled, NewSupervisordCollector) } // NewSupervisordCollector returns a new Collector exposing supervisord statistics. func NewSupervisordCollector(logger log.Logger) (Collector, error) { var ( subsystem = "supervisord" labelNames = []string{"name", "group"} ) if u, err := url.Parse(*supervisordURL); err == nil && u.Scheme == "unix" { // Fake the URI scheme as http, since net/http.*Transport.roundTrip will complain // about a non-http(s) transport. 
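// The custom DialContext below ignores the host/port from the fake URL and instead
// dials the unix socket path taken from the original --collector.supervisord.url value.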
xrpc = xmlrpc.NewClient("http://unix/RPC2") xrpc.HttpClient.Transport = &http.Transport{ DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { d := net.Dialer{Timeout: 10 * time.Second} return d.DialContext(ctx, "unix", u.Path) }, } } else { xrpc = xmlrpc.NewClient(*supervisordURL) } level.Warn(logger).Log("msg", "This collector is deprecated and will be removed in the next major version release.") return &supervisordCollector{ upDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "up"), "Process Up", labelNames, nil, ), stateDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "state"), "Process State", labelNames, nil, ), exitStatusDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "exit_status"), "Process Exit Status", labelNames, nil, ), startTimeDesc: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "start_time_seconds"), "Process start time", labelNames, nil, ), logger: logger, }, nil } func (c *supervisordCollector) isRunning(state int) bool { // http://supervisord.org/subprocess.html#process-states const ( // STOPPED = 0 STARTING = 10 RUNNING = 20 // BACKOFF = 30 STOPPING = 40 // EXITED = 100 // FATAL = 200 // UNKNOWN = 1000 ) switch state { case STARTING, RUNNING, STOPPING: return true } return false } func (c *supervisordCollector) Update(ch chan<- prometheus.Metric) error { var info struct { Name string `xmlrpc:"name"` Group string `xmlrpc:"group"` Start int `xmlrpc:"start"` Stop int `xmlrpc:"stop"` Now int `xmlrpc:"now"` State int `xmlrpc:"state"` StateName string `xmlrpc:"statename"` SpawnErr string `xmlrpc:"spanerr"` ExitStatus int `xmlrpc:"exitstatus"` StdoutLogfile string `xmlrcp:"stdout_logfile"` StderrLogfile string `xmlrcp:"stderr_logfile"` PID int `xmlrpc:"pid"` } res, err := xrpc.Call("supervisor.getAllProcessInfo") if err != nil { return fmt.Errorf("unable to call supervisord: %w", err) } for _, p := range res.(xmlrpc.Array) { for k, v := range p.(xmlrpc.Struct) { switch k { case "name": info.Name = v.(string) case "group": info.Group = v.(string) case "start": info.Start = v.(int) case "stop": info.Stop = v.(int) case "now": info.Now = v.(int) case "state": info.State = v.(int) case "statename": info.StateName = v.(string) case "exitstatus": info.ExitStatus = v.(int) case "pid": info.PID = v.(int) } } labels := []string{info.Name, info.Group} ch <- prometheus.MustNewConstMetric(c.stateDesc, prometheus.GaugeValue, float64(info.State), labels...) ch <- prometheus.MustNewConstMetric(c.exitStatusDesc, prometheus.GaugeValue, float64(info.ExitStatus), labels...) if c.isRunning(info.State) { ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 1, labels...) ch <- prometheus.MustNewConstMetric(c.startTimeDesc, prometheus.CounterValue, float64(info.Start), labels...) } else { ch <- prometheus.MustNewConstMetric(c.upDesc, prometheus.GaugeValue, 0, labels...) } level.Debug(c.logger).Log("msg", "process info", "group", info.Group, "name", info.Name, "state", info.StateName, "pid", info.PID) } return nil } node_exporter-1.7.0/collector/sysctl_bsd.go000066400000000000000000000055401452426057600211010ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (freebsd || dragonfly || openbsd || netbsd || darwin) && cgo // +build freebsd dragonfly openbsd netbsd darwin // +build cgo package collector import ( "fmt" "unsafe" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) // #include import "C" type bsdSysctlType uint8 // BSD-specific sysctl value types. There is an impedience mismatch between // native C types, e.g. int vs long, and the golang unix.Sysctl variables const ( // Default to uint32. bsdSysctlTypeUint32 bsdSysctlType = iota bsdSysctlTypeUint64 bsdSysctlTypeCLong ) // Contains all the info needed to map a single bsd-sysctl to a prometheus // value. type bsdSysctl struct { // Prometheus name name string // Simple prometheus description description string // Prometheus type valueType prometheus.ValueType // Sysctl name mib string // Sysctl data-type dataType bsdSysctlType // Post-retrieval conversion hooks conversion func(float64) float64 // Prometheus labels labels prometheus.Labels } func (b bsdSysctl) Value() (float64, error) { var tmp32 uint32 var tmp64 uint64 var tmpf64 float64 var err error switch b.dataType { case bsdSysctlTypeUint32: tmp32, err = unix.SysctlUint32(b.mib) tmpf64 = float64(tmp32) case bsdSysctlTypeUint64: tmp64, err = unix.SysctlUint64(b.mib) tmpf64 = float64(tmp64) case bsdSysctlTypeCLong: tmpf64, err = b.getCLong() } if err != nil { return 0, err } if b.conversion != nil { return b.conversion(tmpf64), nil } return tmpf64, nil } func (b bsdSysctl) getCLong() (float64, error) { raw, err := unix.SysctlRaw(b.mib) if err != nil { return 0, err } if len(raw) == C.sizeof_long { return float64(*(*C.long)(unsafe.Pointer(&raw[0]))), nil } if len(raw) == C.sizeof_int { // This is valid for at least vfs.bufspace, and the default // long handler - which can clamp longs to 32-bits: // https://github.com/freebsd/freebsd/blob/releng/10.3/sys/kern/vfs_bio.c#L338 // https://github.com/freebsd/freebsd/blob/releng/10.3/sys/kern/kern_sysctl.c#L1062 return float64(*(*C.int)(unsafe.Pointer(&raw[0]))), nil } return 0, fmt.Errorf( "length of bytes received from sysctl (%d) does not match expected bytes (long: %d), (int: %d)", len(raw), C.sizeof_long, C.sizeof_int, ) } node_exporter-1.7.0/collector/sysctl_linux.go000066400000000000000000000127451452426057600214750ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
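// Summary comment added for clarity: the sysctl collector exports values selected with
// --collector.sysctl.include (numeric metrics) and --collector.sysctl.include-info
// (string info metrics). Each entry is either a plain sysctl name, or "name:key1,key2,..."
// to map a multi-value sysctl onto per-key metrics, e.g. (illustrative flag value)
// --collector.sysctl.include=net.ipv4.tcp_rmem:min,default,max.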
package collector import ( "fmt" "strconv" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) var ( sysctlInclude = kingpin.Flag("collector.sysctl.include", "Select sysctl metrics to include").Strings() sysctlIncludeInfo = kingpin.Flag("collector.sysctl.include-info", "Select sysctl metrics to include as info metrics").Strings() sysctlInfoDesc = prometheus.NewDesc(prometheus.BuildFQName(namespace, "sysctl", "info"), "sysctl info", []string{"name", "value", "index"}, nil) ) type sysctlCollector struct { fs procfs.FS logger log.Logger sysctls []*sysctl } func init() { registerCollector("sysctl", defaultDisabled, NewSysctlCollector) } func NewSysctlCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } c := &sysctlCollector{ logger: logger, fs: fs, sysctls: []*sysctl{}, } for _, include := range *sysctlInclude { sysctl, err := newSysctl(include, true) if err != nil { return nil, err } c.sysctls = append(c.sysctls, sysctl) } for _, include := range *sysctlIncludeInfo { sysctl, err := newSysctl(include, false) if err != nil { return nil, err } c.sysctls = append(c.sysctls, sysctl) } return c, nil } func (c *sysctlCollector) Update(ch chan<- prometheus.Metric) error { for _, sysctl := range c.sysctls { metrics, err := c.newMetrics(sysctl) if err != nil { return err } for _, metric := range metrics { ch <- metric } } return nil } func (c *sysctlCollector) newMetrics(s *sysctl) ([]prometheus.Metric, error) { var ( values interface{} length int err error ) if s.numeric { values, err = c.fs.SysctlInts(s.name) if err != nil { return nil, fmt.Errorf("error obtaining sysctl info: %w", err) } length = len(values.([]int)) } else { values, err = c.fs.SysctlStrings(s.name) if err != nil { return nil, fmt.Errorf("error obtaining sysctl info: %w", err) } length = len(values.([]string)) } switch length { case 0: return nil, fmt.Errorf("sysctl %s has no values", s.name) case 1: if len(s.keys) > 0 { return nil, fmt.Errorf("sysctl %s has only one value, but expected %v", s.name, s.keys) } return []prometheus.Metric{s.newConstMetric(values)}, nil default: if len(s.keys) == 0 { return s.newIndexedMetrics(values), nil } if length != len(s.keys) { return nil, fmt.Errorf("sysctl %s has %d keys but only %d defined in f lag", s.name, length, len(s.keys)) } return s.newMappedMetrics(values) } } type sysctl struct { numeric bool name string keys []string } func newSysctl(include string, numeric bool) (*sysctl, error) { parts := strings.SplitN(include, ":", 2) s := &sysctl{ numeric: numeric, name: parts[0], } if len(parts) == 2 { s.keys = strings.Split(parts[1], ",") s.name = parts[0] } return s, nil } func (s *sysctl) metricName() string { return SanitizeMetricName(s.name) } func (s *sysctl) newConstMetric(v interface{}) prometheus.Metric { if s.numeric { return prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, "sysctl", s.metricName()), fmt.Sprintf("sysctl %s", s.name), nil, nil), prometheus.UntypedValue, float64(v.([]int)[0])) } return prometheus.MustNewConstMetric( sysctlInfoDesc, prometheus.GaugeValue, 1.0, s.name, v.([]string)[0], "0", ) } func (s *sysctl) newIndexedMetrics(v interface{}) []prometheus.Metric { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "sysctl", s.metricName()), fmt.Sprintf("sysctl %s", s.name), []string{"index"}, nil, ) switch values := v.(type) 
{ case []int: metrics := make([]prometheus.Metric, len(values)) for i, n := range values { metrics[i] = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(n), strconv.Itoa(i)) } return metrics case []string: metrics := make([]prometheus.Metric, len(values)) for i, str := range values { metrics[i] = prometheus.MustNewConstMetric(sysctlInfoDesc, prometheus.GaugeValue, 1.0, s.name, str, strconv.Itoa(i)) } return metrics default: panic(fmt.Sprintf("unexpected type %T", values)) } } func (s *sysctl) newMappedMetrics(v interface{}) ([]prometheus.Metric, error) { switch values := v.(type) { case []int: metrics := make([]prometheus.Metric, len(values)) for i, n := range values { key := s.keys[i] desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "sysctl", s.metricName()+"_"+key), fmt.Sprintf("sysctl %s, field %d", s.name, i), nil, nil, ) metrics[i] = prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(n)) } return metrics, nil case []string: return nil, fmt.Errorf("mapped sysctl string values not supported") default: return nil, fmt.Errorf("unexpected type %T", values) } } node_exporter-1.7.0/collector/sysctl_openbsd_amd64.go000066400000000000000000000036121452426057600227540ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "golang.org/x/sys/unix" "syscall" "unsafe" ) func int8ToString(a []int8) string { buf := make([]byte, len(a)) for i, v := range a { if byte(v) == 0 { buf = buf[:i] break } buf[i] = byte(v) } return string(buf) } // unix._C_int type _C_int int32 var _zero uintptr func errnoErr(e syscall.Errno) error { switch e { case 0: return nil case unix.EAGAIN: return syscall.EAGAIN case unix.EINVAL: return syscall.EINVAL case unix.ENOENT: return syscall.ENOENT } return e } func _sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { _p0 = unsafe.Pointer(&mib[0]) } else { _p0 = unsafe.Pointer(&_zero) } for { _, _, e1 := unix.Syscall6(unix.SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } if err != unix.EINTR { return } } return } func sysctl(mib []_C_int) ([]byte, error) { n := uintptr(0) if err := _sysctl(mib, nil, &n, nil, 0); err != nil { return nil, err } if n == 0 { return nil, nil } buf := make([]byte, n) if err := _sysctl(mib, &buf[0], &n, nil, 0); err != nil { return nil, err } return buf[:n], nil } node_exporter-1.7.0/collector/systemd_linux.go000066400000000000000000000462731452426057600216470ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nosystemd // +build !nosystemd package collector import ( "context" "errors" "fmt" "math" "regexp" "strconv" "strings" "sync" "time" "github.com/alecthomas/kingpin/v2" "github.com/coreos/go-systemd/v22/dbus" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) const ( // minSystemdVersionSystemState is the minimum SystemD version for availability of // the 'SystemState' manager property and the timer property 'LastTriggerUSec' // https://github.com/prometheus/node_exporter/issues/291 minSystemdVersionSystemState = 212 ) var ( systemdUnitIncludeSet bool systemdUnitInclude = kingpin.Flag("collector.systemd.unit-include", "Regexp of systemd units to include. Units must both match include and not match exclude to be included.").Default(".+").PreAction(func(c *kingpin.ParseContext) error { systemdUnitIncludeSet = true return nil }).String() oldSystemdUnitInclude = kingpin.Flag("collector.systemd.unit-whitelist", "DEPRECATED: Use --collector.systemd.unit-include").Hidden().String() systemdUnitExcludeSet bool systemdUnitExclude = kingpin.Flag("collector.systemd.unit-exclude", "Regexp of systemd units to exclude. Units must both match include and not match exclude to be included.").Default(".+\\.(automount|device|mount|scope|slice)").PreAction(func(c *kingpin.ParseContext) error { systemdUnitExcludeSet = true return nil }).String() oldSystemdUnitExclude = kingpin.Flag("collector.systemd.unit-blacklist", "DEPRECATED: Use collector.systemd.unit-exclude").Hidden().String() systemdPrivate = kingpin.Flag("collector.systemd.private", "Establish a private, direct connection to systemd without dbus (Strongly discouraged since it requires root. 
For testing purposes only).").Hidden().Bool() enableTaskMetrics = kingpin.Flag("collector.systemd.enable-task-metrics", "Enables service unit tasks metrics unit_tasks_current and unit_tasks_max").Bool() enableRestartsMetrics = kingpin.Flag("collector.systemd.enable-restarts-metrics", "Enables service unit metric service_restart_total").Bool() enableStartTimeMetrics = kingpin.Flag("collector.systemd.enable-start-time-metrics", "Enables service unit metric unit_start_time_seconds").Bool() systemdVersionRE = regexp.MustCompile(`[0-9]{3,}(\.[0-9]+)?`) ) type systemdCollector struct { unitDesc *prometheus.Desc unitStartTimeDesc *prometheus.Desc unitTasksCurrentDesc *prometheus.Desc unitTasksMaxDesc *prometheus.Desc systemRunningDesc *prometheus.Desc summaryDesc *prometheus.Desc nRestartsDesc *prometheus.Desc timerLastTriggerDesc *prometheus.Desc socketAcceptedConnectionsDesc *prometheus.Desc socketCurrentConnectionsDesc *prometheus.Desc socketRefusedConnectionsDesc *prometheus.Desc systemdVersionDesc *prometheus.Desc // Use regexps for more flexability than device_filter.go allows systemdUnitIncludePattern *regexp.Regexp systemdUnitExcludePattern *regexp.Regexp logger log.Logger } var unitStatesName = []string{"active", "activating", "deactivating", "inactive", "failed"} func init() { registerCollector("systemd", defaultDisabled, NewSystemdCollector) } // NewSystemdCollector returns a new Collector exposing systemd statistics. func NewSystemdCollector(logger log.Logger) (Collector, error) { const subsystem = "systemd" unitDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "unit_state"), "Systemd unit", []string{"name", "state", "type"}, nil, ) unitStartTimeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "unit_start_time_seconds"), "Start time of the unit since unix epoch in seconds.", []string{"name"}, nil, ) unitTasksCurrentDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "unit_tasks_current"), "Current number of tasks per Systemd unit", []string{"name"}, nil, ) unitTasksMaxDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "unit_tasks_max"), "Maximum number of tasks per Systemd unit", []string{"name"}, nil, ) systemRunningDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "system_running"), "Whether the system is operational (see 'systemctl is-system-running')", nil, nil, ) summaryDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "units"), "Summary of systemd unit states", []string{"state"}, nil) nRestartsDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "service_restart_total"), "Service unit count of Restart triggers", []string{"name"}, nil) timerLastTriggerDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "timer_last_trigger_seconds"), "Seconds since epoch of last trigger.", []string{"name"}, nil) socketAcceptedConnectionsDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "socket_accepted_connections_total"), "Total number of accepted socket connections", []string{"name"}, nil) socketCurrentConnectionsDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "socket_current_connections"), "Current number of socket connections", []string{"name"}, nil) socketRefusedConnectionsDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "socket_refused_connections_total"), "Total number of refused socket connections", []string{"name"}, nil) systemdVersionDesc := 
prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "version"), "Detected systemd version", []string{"version"}, nil) if *oldSystemdUnitExclude != "" { if !systemdUnitExcludeSet { level.Warn(logger).Log("msg", "--collector.systemd.unit-blacklist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-exclude") *systemdUnitExclude = *oldSystemdUnitExclude } else { return nil, errors.New("--collector.systemd.unit-blacklist and --collector.systemd.unit-exclude are mutually exclusive") } } if *oldSystemdUnitInclude != "" { if !systemdUnitIncludeSet { level.Warn(logger).Log("msg", "--collector.systemd.unit-whitelist is DEPRECATED and will be removed in 2.0.0, use --collector.systemd.unit-include") *systemdUnitInclude = *oldSystemdUnitInclude } else { return nil, errors.New("--collector.systemd.unit-whitelist and --collector.systemd.unit-include are mutually exclusive") } } level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-include", "flag", *systemdUnitInclude) systemdUnitIncludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *systemdUnitInclude)) level.Info(logger).Log("msg", "Parsed flag --collector.systemd.unit-exclude", "flag", *systemdUnitExclude) systemdUnitExcludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *systemdUnitExclude)) return &systemdCollector{ unitDesc: unitDesc, unitStartTimeDesc: unitStartTimeDesc, unitTasksCurrentDesc: unitTasksCurrentDesc, unitTasksMaxDesc: unitTasksMaxDesc, systemRunningDesc: systemRunningDesc, summaryDesc: summaryDesc, nRestartsDesc: nRestartsDesc, timerLastTriggerDesc: timerLastTriggerDesc, socketAcceptedConnectionsDesc: socketAcceptedConnectionsDesc, socketCurrentConnectionsDesc: socketCurrentConnectionsDesc, socketRefusedConnectionsDesc: socketRefusedConnectionsDesc, systemdVersionDesc: systemdVersionDesc, systemdUnitIncludePattern: systemdUnitIncludePattern, systemdUnitExcludePattern: systemdUnitExcludePattern, logger: logger, }, nil } // Update gathers metrics from systemd. Dbus collection is done in parallel // to reduce wait time for responses. 
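// Each metric family below (unit state, start time, tasks, timers, sockets) is gathered
// in its own goroutine; the deferred sync.WaitGroup wait ensures they have all finished
// writing to ch before Update returns.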
func (c *systemdCollector) Update(ch chan<- prometheus.Metric) error { begin := time.Now() conn, err := newSystemdDbusConn() if err != nil { return fmt.Errorf("couldn't get dbus connection: %w", err) } defer conn.Close() systemdVersion, systemdVersionFull := c.getSystemdVersion(conn) if systemdVersion < minSystemdVersionSystemState { level.Debug(c.logger).Log("msg", "Detected systemd version is lower than minimum, some systemd state and timer metrics will not be available", "current", systemdVersion, "minimum", minSystemdVersionSystemState) } ch <- prometheus.MustNewConstMetric( c.systemdVersionDesc, prometheus.GaugeValue, systemdVersion, systemdVersionFull, ) allUnits, err := c.getAllUnits(conn) if err != nil { return fmt.Errorf("couldn't get units: %w", err) } level.Debug(c.logger).Log("msg", "getAllUnits took", "duration_seconds", time.Since(begin).Seconds()) begin = time.Now() summary := summarizeUnits(allUnits) c.collectSummaryMetrics(ch, summary) level.Debug(c.logger).Log("msg", "collectSummaryMetrics took", "duration_seconds", time.Since(begin).Seconds()) begin = time.Now() units := filterUnits(allUnits, c.systemdUnitIncludePattern, c.systemdUnitExcludePattern, c.logger) level.Debug(c.logger).Log("msg", "filterUnits took", "duration_seconds", time.Since(begin).Seconds()) var wg sync.WaitGroup defer wg.Wait() wg.Add(1) go func() { defer wg.Done() begin = time.Now() c.collectUnitStatusMetrics(conn, ch, units) level.Debug(c.logger).Log("msg", "collectUnitStatusMetrics took", "duration_seconds", time.Since(begin).Seconds()) }() if *enableStartTimeMetrics { wg.Add(1) go func() { defer wg.Done() begin = time.Now() c.collectUnitStartTimeMetrics(conn, ch, units) level.Debug(c.logger).Log("msg", "collectUnitStartTimeMetrics took", "duration_seconds", time.Since(begin).Seconds()) }() } if *enableTaskMetrics { wg.Add(1) go func() { defer wg.Done() begin = time.Now() c.collectUnitTasksMetrics(conn, ch, units) level.Debug(c.logger).Log("msg", "collectUnitTasksMetrics took", "duration_seconds", time.Since(begin).Seconds()) }() } if systemdVersion >= minSystemdVersionSystemState { wg.Add(1) go func() { defer wg.Done() begin = time.Now() c.collectTimers(conn, ch, units) level.Debug(c.logger).Log("msg", "collectTimers took", "duration_seconds", time.Since(begin).Seconds()) }() } wg.Add(1) go func() { defer wg.Done() begin = time.Now() c.collectSockets(conn, ch, units) level.Debug(c.logger).Log("msg", "collectSockets took", "duration_seconds", time.Since(begin).Seconds()) }() if systemdVersion >= minSystemdVersionSystemState { begin = time.Now() err = c.collectSystemState(conn, ch) level.Debug(c.logger).Log("msg", "collectSystemState took", "duration_seconds", time.Since(begin).Seconds()) } return err } func (c *systemdCollector) collectUnitStatusMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { for _, unit := range units { serviceType := "" if strings.HasSuffix(unit.Name, ".service") { serviceTypeProperty, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "Type") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err) } else { serviceType = serviceTypeProperty.Value.Value().(string) } } else if strings.HasSuffix(unit.Name, ".mount") { serviceTypeProperty, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Mount", "Type") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit type", "unit", unit.Name, "err", err) } else { serviceType = 
serviceTypeProperty.Value.Value().(string) } } for _, stateName := range unitStatesName { isActive := 0.0 if stateName == unit.ActiveState { isActive = 1.0 } ch <- prometheus.MustNewConstMetric( c.unitDesc, prometheus.GaugeValue, isActive, unit.Name, stateName, serviceType) } if *enableRestartsMetrics && strings.HasSuffix(unit.Name, ".service") { // NRestarts wasn't added until systemd 235. restartsCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "NRestarts") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit NRestarts", "unit", unit.Name, "err", err) } else { ch <- prometheus.MustNewConstMetric( c.nRestartsDesc, prometheus.CounterValue, float64(restartsCount.Value.Value().(uint32)), unit.Name) } } } } func (c *systemdCollector) collectSockets(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { for _, unit := range units { if !strings.HasSuffix(unit.Name, ".socket") { continue } acceptedConnectionCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Socket", "NAccepted") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit NAccepted", "unit", unit.Name, "err", err) continue } ch <- prometheus.MustNewConstMetric( c.socketAcceptedConnectionsDesc, prometheus.CounterValue, float64(acceptedConnectionCount.Value.Value().(uint32)), unit.Name) currentConnectionCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Socket", "NConnections") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit NConnections", "unit", unit.Name, "err", err) continue } ch <- prometheus.MustNewConstMetric( c.socketCurrentConnectionsDesc, prometheus.GaugeValue, float64(currentConnectionCount.Value.Value().(uint32)), unit.Name) // NRefused wasn't added until systemd 239. refusedConnectionCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Socket", "NRefused") if err == nil { ch <- prometheus.MustNewConstMetric( c.socketRefusedConnectionsDesc, prometheus.GaugeValue, float64(refusedConnectionCount.Value.Value().(uint32)), unit.Name) } } } func (c *systemdCollector) collectUnitStartTimeMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { var startTimeUsec uint64 for _, unit := range units { if unit.ActiveState != "active" { startTimeUsec = 0 } else { timestampValue, err := conn.GetUnitPropertyContext(context.TODO(), unit.Name, "ActiveEnterTimestamp") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit StartTimeUsec", "unit", unit.Name, "err", err) continue } startTimeUsec = timestampValue.Value.Value().(uint64) } ch <- prometheus.MustNewConstMetric( c.unitStartTimeDesc, prometheus.GaugeValue, float64(startTimeUsec)/1e6, unit.Name) } } func (c *systemdCollector) collectUnitTasksMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { var val uint64 for _, unit := range units { if strings.HasSuffix(unit.Name, ".service") { tasksCurrentCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "TasksCurrent") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit TasksCurrent", "unit", unit.Name, "err", err) } else { val = tasksCurrentCount.Value.Value().(uint64) // Don't set if tasksCurrent if dbus reports MaxUint64. 
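// systemd uses (uint64)(-1), i.e. math.MaxUint64, to represent "infinity"/unset for task
// accounting, so such values are skipped rather than exported as a meaningless gauge.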
if val != math.MaxUint64 { ch <- prometheus.MustNewConstMetric( c.unitTasksCurrentDesc, prometheus.GaugeValue, float64(val), unit.Name) } } tasksMaxCount, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Service", "TasksMax") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit TasksMax", "unit", unit.Name, "err", err) } else { val = tasksMaxCount.Value.Value().(uint64) // Don't set if tasksMax if dbus reports MaxUint64. if val != math.MaxUint64 { ch <- prometheus.MustNewConstMetric( c.unitTasksMaxDesc, prometheus.GaugeValue, float64(val), unit.Name) } } } } } func (c *systemdCollector) collectTimers(conn *dbus.Conn, ch chan<- prometheus.Metric, units []unit) { for _, unit := range units { if !strings.HasSuffix(unit.Name, ".timer") { continue } lastTriggerValue, err := conn.GetUnitTypePropertyContext(context.TODO(), unit.Name, "Timer", "LastTriggerUSec") if err != nil { level.Debug(c.logger).Log("msg", "couldn't get unit LastTriggerUSec", "unit", unit.Name, "err", err) continue } ch <- prometheus.MustNewConstMetric( c.timerLastTriggerDesc, prometheus.GaugeValue, float64(lastTriggerValue.Value.Value().(uint64))/1e6, unit.Name) } } func (c *systemdCollector) collectSummaryMetrics(ch chan<- prometheus.Metric, summary map[string]float64) { for stateName, count := range summary { ch <- prometheus.MustNewConstMetric( c.summaryDesc, prometheus.GaugeValue, count, stateName) } } func (c *systemdCollector) collectSystemState(conn *dbus.Conn, ch chan<- prometheus.Metric) error { systemState, err := conn.GetManagerProperty("SystemState") if err != nil { return fmt.Errorf("couldn't get system state: %w", err) } isSystemRunning := 0.0 if systemState == `"running"` { isSystemRunning = 1.0 } ch <- prometheus.MustNewConstMetric(c.systemRunningDesc, prometheus.GaugeValue, isSystemRunning) return nil } func newSystemdDbusConn() (*dbus.Conn, error) { if *systemdPrivate { return dbus.NewSystemdConnectionContext(context.TODO()) } return dbus.NewWithContext(context.TODO()) } type unit struct { dbus.UnitStatus } func (c *systemdCollector) getAllUnits(conn *dbus.Conn) ([]unit, error) { allUnits, err := conn.ListUnitsContext(context.TODO()) if err != nil { return nil, err } result := make([]unit, 0, len(allUnits)) for _, status := range allUnits { unit := unit{ UnitStatus: status, } result = append(result, unit) } return result, nil } func summarizeUnits(units []unit) map[string]float64 { summarized := make(map[string]float64) for _, unitStateName := range unitStatesName { summarized[unitStateName] = 0.0 } for _, unit := range units { summarized[unit.ActiveState] += 1.0 } return summarized } func filterUnits(units []unit, includePattern, excludePattern *regexp.Regexp, logger log.Logger) []unit { filtered := make([]unit, 0, len(units)) for _, unit := range units { if includePattern.MatchString(unit.Name) && !excludePattern.MatchString(unit.Name) && unit.LoadState == "loaded" { level.Debug(logger).Log("msg", "Adding unit", "unit", unit.Name) filtered = append(filtered, unit) } else { level.Debug(logger).Log("msg", "Ignoring unit", "unit", unit.Name) } } return filtered } func (c *systemdCollector) getSystemdVersion(conn *dbus.Conn) (float64, string) { version, err := conn.GetManagerProperty("Version") if err != nil { level.Debug(c.logger).Log("msg", "Unable to get systemd version property, defaulting to 0") return 0, "" } version = strings.TrimPrefix(strings.TrimSuffix(version, `"`), `"`) level.Debug(c.logger).Log("msg", "Got systemd version", "version", version) parsedVersion := 
systemdVersionRE.FindString(version) v, err := strconv.ParseFloat(parsedVersion, 64) if err != nil { level.Debug(c.logger).Log("msg", "Got invalid systemd version", "version", version) return 0, "" } return v, version } node_exporter-1.7.0/collector/systemd_linux_test.go000066400000000000000000000072621452426057600227010ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nosystemd // +build !nosystemd package collector import ( "regexp" "testing" "github.com/coreos/go-systemd/v22/dbus" "github.com/go-kit/log" ) // Creates mock UnitLists func getUnitListFixtures() [][]unit { fixture1 := []unit{ { UnitStatus: dbus.UnitStatus{ Name: "foo", Description: "foo desc", LoadState: "loaded", ActiveState: "active", SubState: "running", Followed: "", Path: "/org/freedesktop/systemd1/unit/foo", JobId: 0, JobType: "", JobPath: "/", }, }, { UnitStatus: dbus.UnitStatus{ Name: "bar", Description: "bar desc", LoadState: "not-found", ActiveState: "inactive", SubState: "dead", Followed: "", Path: "/org/freedesktop/systemd1/unit/bar", JobId: 0, JobType: "", JobPath: "/", }, }, { UnitStatus: dbus.UnitStatus{ Name: "foobar", Description: "bar desc", LoadState: "not-found", ActiveState: "inactive", SubState: "dead", Followed: "", Path: "/org/freedesktop/systemd1/unit/bar", JobId: 0, JobType: "", JobPath: "/", }, }, { UnitStatus: dbus.UnitStatus{ Name: "baz", Description: "bar desc", LoadState: "not-found", ActiveState: "inactive", SubState: "dead", Followed: "", Path: "/org/freedesktop/systemd1/unit/bar", JobId: 0, JobType: "", JobPath: "/", }, }, } fixture2 := []unit{} return [][]unit{fixture1, fixture2} } func TestSystemdIgnoreFilter(t *testing.T) { fixtures := getUnitListFixtures() includePattern := regexp.MustCompile("^foo$") excludePattern := regexp.MustCompile("^bar$") filtered := filterUnits(fixtures[0], includePattern, excludePattern, log.NewNopLogger()) for _, unit := range filtered { if excludePattern.MatchString(unit.Name) || !includePattern.MatchString(unit.Name) { t.Error(unit.Name, "should not be in the filtered list") } } } func TestSystemdIgnoreFilterDefaultKeepsAll(t *testing.T) { logger := log.NewNopLogger() c, err := NewSystemdCollector(logger) if err != nil { t.Fatal(err) } fixtures := getUnitListFixtures() collector := c.(*systemdCollector) filtered := filterUnits(fixtures[0], collector.systemdUnitIncludePattern, collector.systemdUnitExcludePattern, logger) // Adjust fixtures by 3 "not-found" units. 
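// (bar, foobar and baz carry LoadState "not-found" and are therefore dropped by
// filterUnits, which only keeps "loaded" units.)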
if len(filtered) != len(fixtures[0])-3 { t.Error("Default filters removed units") } } func TestSystemdSummary(t *testing.T) { fixtures := getUnitListFixtures() summary := summarizeUnits(fixtures[0]) for _, state := range unitStatesName { if state == "inactive" { testSummaryHelper(t, state, summary[state], 3.0) } else if state == "active" { testSummaryHelper(t, state, summary[state], 1.0) } else { testSummaryHelper(t, state, summary[state], 0.0) } } } func testSummaryHelper(t *testing.T, state string, actual float64, expected float64) { if actual != expected { t.Errorf("Summary mode didn't count %s jobs correctly. Actual: %f, expected: %f", state, actual, expected) } } node_exporter-1.7.0/collector/tapestats_linux.go000066400000000000000000000147001452426057600221550ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !notapestats // +build !notapestats package collector import ( "fmt" "os" "regexp" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) var ( ignoredTapeDevices = kingpin.Flag("collector.tapestats.ignored-devices", "Regexp of devices to ignore for tapestats.").Default("^$").String() ) type tapestatsCollector struct { ignoredDevicesPattern *regexp.Regexp ioNow *prometheus.Desc ioTimeSeconds *prometheus.Desc othersCompletedTotal *prometheus.Desc readByteTotal *prometheus.Desc readsCompletedTotal *prometheus.Desc readTimeSeconds *prometheus.Desc writtenByteTotal *prometheus.Desc writesCompletedTotal *prometheus.Desc writeTimeSeconds *prometheus.Desc residualTotal *prometheus.Desc fs sysfs.FS logger log.Logger } func init() { registerCollector("tapestats", defaultEnabled, NewTapestatsCollector) } // NewTapestatsCollector returns a new Collector exposing tape device stats. // Docs from https://www.kernel.org/doc/html/latest/scsi/st.html#sysfs-and-statistics-for-tape-devices func NewTapestatsCollector(logger log.Logger) (Collector, error) { var tapeLabelNames = []string{"device"} fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } tapeSubsystem := "tape" return &tapestatsCollector{ ignoredDevicesPattern: regexp.MustCompile(*ignoredTapeDevices), ioNow: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "io_now"), "The number of I/Os currently outstanding to this device.", tapeLabelNames, nil, ), ioTimeSeconds: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "io_time_seconds_total"), "The amount of time spent waiting for all I/O to complete (including read and write). 
This includes tape movement commands such as seeking between file or set marks and implicit tape movement such as when rewind on close tape devices are used.", tapeLabelNames, nil, ), othersCompletedTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "io_others_total"), "The number of I/Os issued to the tape drive other than read or write commands. The time taken to complete these commands uses the following calculation io_time_seconds_total-read_time_seconds_total-write_time_seconds_total", tapeLabelNames, nil, ), readByteTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "read_bytes_total"), "The number of bytes read from the tape drive.", tapeLabelNames, nil, ), readsCompletedTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "reads_completed_total"), "The number of read requests issued to the tape drive.", tapeLabelNames, nil, ), readTimeSeconds: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "read_time_seconds_total"), "The amount of time spent waiting for read requests to complete.", tapeLabelNames, nil, ), writtenByteTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "written_bytes_total"), "The number of bytes written to the tape drive.", tapeLabelNames, nil, ), writesCompletedTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "writes_completed_total"), "The number of write requests issued to the tape drive.", tapeLabelNames, nil, ), writeTimeSeconds: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "write_time_seconds_total"), "The amount of time spent waiting for write requests to complete.", tapeLabelNames, nil, ), residualTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, tapeSubsystem, "residual_total"), "The number of times during a read or write we found the residual amount to be non-zero. This should mean that a program is issuing a read larger thean the block size on tape. 
For write not all data made it to tape.", tapeLabelNames, nil, ), logger: logger, fs: fs, }, nil } func (c *tapestatsCollector) Update(ch chan<- prometheus.Metric) error { tapes, err := c.fs.SCSITapeClass() if err != nil { if os.IsNotExist(err) { level.Debug(c.logger).Log("msg", "scsi_tape stats not found, skipping") return ErrNoData } return fmt.Errorf("error obtaining SCSITape class info: %s", err) } for _, tape := range tapes { if c.ignoredDevicesPattern.MatchString(tape.Name) { level.Debug(c.logger).Log("msg", "Ignoring device", "device", tape.Name) continue } ch <- prometheus.MustNewConstMetric(c.ioNow, prometheus.GaugeValue, float64(tape.Counters.InFlight), tape.Name) ch <- prometheus.MustNewConstMetric(c.ioTimeSeconds, prometheus.CounterValue, float64(tape.Counters.IoNs)*0.000000001, tape.Name) ch <- prometheus.MustNewConstMetric(c.othersCompletedTotal, prometheus.CounterValue, float64(tape.Counters.OtherCnt), tape.Name) ch <- prometheus.MustNewConstMetric(c.readByteTotal, prometheus.CounterValue, float64(tape.Counters.ReadByteCnt), tape.Name) ch <- prometheus.MustNewConstMetric(c.readsCompletedTotal, prometheus.CounterValue, float64(tape.Counters.ReadCnt), tape.Name) ch <- prometheus.MustNewConstMetric(c.readTimeSeconds, prometheus.CounterValue, float64(tape.Counters.ReadNs)*0.000000001, tape.Name) ch <- prometheus.MustNewConstMetric(c.residualTotal, prometheus.CounterValue, float64(tape.Counters.ResidCnt), tape.Name) ch <- prometheus.MustNewConstMetric(c.writtenByteTotal, prometheus.CounterValue, float64(tape.Counters.WriteByteCnt), tape.Name) ch <- prometheus.MustNewConstMetric(c.writesCompletedTotal, prometheus.CounterValue, float64(tape.Counters.WriteCnt), tape.Name) ch <- prometheus.MustNewConstMetric(c.writeTimeSeconds, prometheus.CounterValue, float64(tape.Counters.WriteNs)*0.000000001, tape.Name) } return nil } node_exporter-1.7.0/collector/tcpstat_linux.go000066400000000000000000000121571452426057600216330ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !notcpstat // +build !notcpstat package collector import ( "fmt" "os" "syscall" "unsafe" "github.com/go-kit/log" "github.com/mdlayher/netlink" "github.com/prometheus/client_golang/prometheus" ) type tcpConnectionState int const ( // TCP_ESTABLISHED tcpEstablished tcpConnectionState = iota + 1 // TCP_SYN_SENT tcpSynSent // TCP_SYN_RECV tcpSynRecv // TCP_FIN_WAIT1 tcpFinWait1 // TCP_FIN_WAIT2 tcpFinWait2 // TCP_TIME_WAIT tcpTimeWait // TCP_CLOSE tcpClose // TCP_CLOSE_WAIT tcpCloseWait // TCP_LAST_ACK tcpLastAck // TCP_LISTEN tcpListen // TCP_CLOSING tcpClosing // TCP_RX_BUFFER tcpRxQueuedBytes // TCP_TX_BUFFER tcpTxQueuedBytes ) type tcpStatCollector struct { desc typedDesc logger log.Logger } func init() { registerCollector("tcpstat", defaultDisabled, NewTCPStatCollector) } // NewTCPStatCollector returns a new Collector exposing network stats. 
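// As a rough, illustrative sketch (not captured from a real host), the
// collector below produces a single gauge family keyed by connection state,
// for example:
//
//	node_tcp_connection_states{state="established"} 42
//	node_tcp_connection_states{state="listen"} 7
//	node_tcp_connection_states{state="tx_queued_bytes"} 4096
//
// The metric name follows from prometheus.BuildFQName(namespace, "tcp",
// "connection_states"); the sample values above are made up. Note that this
// collector is registered with defaultDisabled, so it has to be enabled
// explicitly (node_exporter uses --collector.<name> flags for that).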
func NewTCPStatCollector(logger log.Logger) (Collector, error) { return &tcpStatCollector{ desc: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, "tcp", "connection_states"), "Number of connection states.", []string{"state"}, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } // InetDiagSockID (inet_diag_sockid) contains the socket identity. // https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L13 type InetDiagSockID struct { SourcePort [2]byte DestPort [2]byte SourceIP [4][4]byte DestIP [4][4]byte Interface uint32 Cookie [2]uint32 } // InetDiagReqV2 (inet_diag_req_v2) is used to request diagnostic data. // https://github.com/torvalds/linux/blob/v4.0/include/uapi/linux/inet_diag.h#L37 type InetDiagReqV2 struct { Family uint8 Protocol uint8 Ext uint8 Pad uint8 States uint32 ID InetDiagSockID } const sizeOfDiagRequest = 0x38 func (req *InetDiagReqV2) Serialize() []byte { return (*(*[sizeOfDiagRequest]byte)(unsafe.Pointer(req)))[:] } func (req *InetDiagReqV2) Len() int { return sizeOfDiagRequest } type InetDiagMsg struct { Family uint8 State uint8 Timer uint8 Retrans uint8 ID InetDiagSockID Expires uint32 RQueue uint32 WQueue uint32 UID uint32 Inode uint32 } func parseInetDiagMsg(b []byte) *InetDiagMsg { return (*InetDiagMsg)(unsafe.Pointer(&b[0])) } func (c *tcpStatCollector) Update(ch chan<- prometheus.Metric) error { tcpStats, err := getTCPStats(syscall.AF_INET) if err != nil { return fmt.Errorf("couldn't get tcpstats: %w", err) } // if enabled ipv6 system if _, hasIPv6 := os.Stat(procFilePath("net/tcp6")); hasIPv6 == nil { tcp6Stats, err := getTCPStats(syscall.AF_INET6) if err != nil { return fmt.Errorf("couldn't get tcp6stats: %w", err) } for st, value := range tcp6Stats { tcpStats[st] += value } } for st, value := range tcpStats { ch <- c.desc.mustNewConstMetric(value, st.String()) } return nil } func getTCPStats(family uint8) (map[tcpConnectionState]float64, error) { const TCPFAll = 0xFFF const InetDiagInfo = 2 const SockDiagByFamily = 20 conn, err := netlink.Dial(syscall.NETLINK_INET_DIAG, nil) if err != nil { return nil, fmt.Errorf("couldn't connect netlink: %w", err) } defer conn.Close() msg := netlink.Message{ Header: netlink.Header{ Type: SockDiagByFamily, Flags: syscall.NLM_F_REQUEST | syscall.NLM_F_DUMP, }, Data: (&InetDiagReqV2{ Family: family, Protocol: syscall.IPPROTO_TCP, States: TCPFAll, Ext: 0 | 1<<(InetDiagInfo-1), }).Serialize(), } messages, err := conn.Execute(msg) if err != nil { return nil, err } return parseTCPStats(messages) } func parseTCPStats(msgs []netlink.Message) (map[tcpConnectionState]float64, error) { tcpStats := map[tcpConnectionState]float64{} for _, m := range msgs { msg := parseInetDiagMsg(m.Data) tcpStats[tcpTxQueuedBytes] += float64(msg.WQueue) tcpStats[tcpRxQueuedBytes] += float64(msg.RQueue) tcpStats[tcpConnectionState(msg.State)]++ } return tcpStats, nil } func (st tcpConnectionState) String() string { switch st { case tcpEstablished: return "established" case tcpSynSent: return "syn_sent" case tcpSynRecv: return "syn_recv" case tcpFinWait1: return "fin_wait1" case tcpFinWait2: return "fin_wait2" case tcpTimeWait: return "time_wait" case tcpClose: return "close" case tcpCloseWait: return "close_wait" case tcpLastAck: return "last_ack" case tcpListen: return "listen" case tcpClosing: return "closing" case tcpRxQueuedBytes: return "rx_queued_bytes" case tcpTxQueuedBytes: return "tx_queued_bytes" default: return "unknown" } } 
node_exporter-1.7.0/collector/tcpstat_linux_test.go000066400000000000000000000042071452426057600226670ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !notcpstat // +build !notcpstat package collector import ( "bytes" "encoding/binary" "syscall" "testing" "github.com/josharian/native" "github.com/mdlayher/netlink" ) func Test_parseTCPStats(t *testing.T) { encode := func(m InetDiagMsg) []byte { var buf bytes.Buffer err := binary.Write(&buf, native.Endian, m) if err != nil { panic(err) } return buf.Bytes() } msg := []netlink.Message{ { Data: encode(InetDiagMsg{ Family: syscall.AF_INET, State: uint8(tcpEstablished), Timer: 0, Retrans: 0, ID: InetDiagSockID{}, Expires: 0, RQueue: 11, WQueue: 21, UID: 0, Inode: 0, }), }, { Data: encode(InetDiagMsg{ Family: syscall.AF_INET, State: uint8(tcpListen), Timer: 0, Retrans: 0, ID: InetDiagSockID{}, Expires: 0, RQueue: 11, WQueue: 21, UID: 0, Inode: 0, }), }, } tcpStats, err := parseTCPStats(msg) if err != nil { t.Fatal(err) } if want, got := 1, int(tcpStats[tcpEstablished]); want != got { t.Errorf("want tcpstat number of established state %d, got %d", want, got) } if want, got := 1, int(tcpStats[tcpListen]); want != got { t.Errorf("want tcpstat number of listen state %d, got %d", want, got) } if want, got := 42, int(tcpStats[tcpTxQueuedBytes]); want != got { t.Errorf("want tcpstat number of bytes in tx queue %d, got %d", want, got) } if want, got := 22, int(tcpStats[tcpRxQueuedBytes]); want != got { t.Errorf("want tcpstat number of bytes in rx queue %d, got %d", want, got) } } node_exporter-1.7.0/collector/textfile.go000066400000000000000000000201721452426057600205520ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
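// A minimal, hypothetical example of an input file for this collector: the
// file must use the Prometheus text exposition format, live in the directory
// given by --collector.textfile.directory, and carry a .prom suffix. The
// metric name and value below are invented for illustration only.
//
//	# HELP my_batch_job_last_success_unixtime Last successful run of the batch job.
//	# TYPE my_batch_job_last_success_unixtime gauge
//	my_batch_job_last_success_unixtime 1.7e+09
//
// Client-side timestamps are rejected (see hasTimestamps further down), so a
// line of the form "metric value timestamp" causes the whole file to be skipped.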
//go:build !notextfile // +build !notextfile package collector import ( "fmt" "os" "path/filepath" "sort" "strings" "time" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" ) var ( textFileDirectory = kingpin.Flag("collector.textfile.directory", "Directory to read text files with metrics from.").Default("").String() mtimeDesc = prometheus.NewDesc( "node_textfile_mtime_seconds", "Unixtime mtime of textfiles successfully read.", []string{"file"}, nil, ) ) type textFileCollector struct { path string // Only set for testing to get predictable output. mtime *float64 logger log.Logger } func init() { registerCollector("textfile", defaultEnabled, NewTextFileCollector) } // NewTextFileCollector returns a new Collector exposing metrics read from files // in the given textfile directory. func NewTextFileCollector(logger log.Logger) (Collector, error) { c := &textFileCollector{ path: *textFileDirectory, logger: logger, } return c, nil } func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric, logger log.Logger) { var valType prometheus.ValueType var val float64 allLabelNames := map[string]struct{}{} for _, metric := range metricFamily.Metric { labels := metric.GetLabel() for _, label := range labels { if _, ok := allLabelNames[label.GetName()]; !ok { allLabelNames[label.GetName()] = struct{}{} } } } for _, metric := range metricFamily.Metric { if metric.TimestampMs != nil { level.Warn(logger).Log("msg", "Ignoring unsupported custom timestamp on textfile collector metric", "metric", metric) } labels := metric.GetLabel() var names []string var values []string for _, label := range labels { names = append(names, label.GetName()) values = append(values, label.GetValue()) } for k := range allLabelNames { present := false for _, name := range names { if k == name { present = true break } } if !present { names = append(names, k) values = append(values, "") } } metricType := metricFamily.GetType() switch metricType { case dto.MetricType_COUNTER: valType = prometheus.CounterValue val = metric.Counter.GetValue() case dto.MetricType_GAUGE: valType = prometheus.GaugeValue val = metric.Gauge.GetValue() case dto.MetricType_UNTYPED: valType = prometheus.UntypedValue val = metric.Untyped.GetValue() case dto.MetricType_SUMMARY: quantiles := map[float64]float64{} for _, q := range metric.Summary.Quantile { quantiles[q.GetQuantile()] = q.GetValue() } ch <- prometheus.MustNewConstSummary( prometheus.NewDesc( *metricFamily.Name, metricFamily.GetHelp(), names, nil, ), metric.Summary.GetSampleCount(), metric.Summary.GetSampleSum(), quantiles, values..., ) case dto.MetricType_HISTOGRAM: buckets := map[float64]uint64{} for _, b := range metric.Histogram.Bucket { buckets[b.GetUpperBound()] = b.GetCumulativeCount() } ch <- prometheus.MustNewConstHistogram( prometheus.NewDesc( *metricFamily.Name, metricFamily.GetHelp(), names, nil, ), metric.Histogram.GetSampleCount(), metric.Histogram.GetSampleSum(), buckets, values..., ) default: panic("unknown metric type") } if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( *metricFamily.Name, metricFamily.GetHelp(), names, nil, ), valType, val, values..., ) } } } func (c *textFileCollector) exportMTimes(mtimes map[string]time.Time, ch chan<- prometheus.Metric) { if 
len(mtimes) == 0 { return } // Export the mtimes of the successful files. // Sorting is needed for predictable output comparison in tests. filepaths := make([]string, 0, len(mtimes)) for path := range mtimes { filepaths = append(filepaths, path) } sort.Strings(filepaths) for _, path := range filepaths { mtime := float64(mtimes[path].UnixNano() / 1e9) if c.mtime != nil { mtime = *c.mtime } ch <- prometheus.MustNewConstMetric(mtimeDesc, prometheus.GaugeValue, mtime, path) } } // Update implements the Collector interface. func (c *textFileCollector) Update(ch chan<- prometheus.Metric) error { // Iterate over files and accumulate their metrics, but also track any // parsing errors so an error metric can be reported. var errored bool var parsedFamilies []*dto.MetricFamily metricsNamesToFiles := map[string][]string{} paths, err := filepath.Glob(c.path) if err != nil || len(paths) == 0 { // not glob or not accessible path either way assume single // directory and let os.ReadDir handle it paths = []string{c.path} } mtimes := make(map[string]time.Time) for _, path := range paths { files, err := os.ReadDir(path) if err != nil && path != "" { errored = true level.Error(c.logger).Log("msg", "failed to read textfile collector directory", "path", path, "err", err) } for _, f := range files { metricsFilePath := filepath.Join(path, f.Name()) if !strings.HasSuffix(f.Name(), ".prom") { continue } mtime, families, err := c.processFile(path, f.Name(), ch) for _, mf := range families { metricsNamesToFiles[*mf.Name] = append(metricsNamesToFiles[*mf.Name], metricsFilePath) parsedFamilies = append(parsedFamilies, mf) } if err != nil { errored = true level.Error(c.logger).Log("msg", "failed to collect textfile data", "file", f.Name(), "err", err) continue } mtimes[metricsFilePath] = *mtime } } for _, mf := range parsedFamilies { if mf.Help == nil { help := fmt.Sprintf("Metric read from %s", strings.Join(metricsNamesToFiles[*mf.Name], ", ")) mf.Help = &help } } for _, mf := range parsedFamilies { convertMetricFamily(mf, ch, c.logger) } c.exportMTimes(mtimes, ch) // Export if there were errors. var errVal float64 if errored { errVal = 1.0 } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( "node_textfile_scrape_error", "1 if there was an error opening or reading a file, 0 otherwise", nil, nil, ), prometheus.GaugeValue, errVal, ) return nil } // processFile processes a single file, returning its modification time on success. func (c *textFileCollector) processFile(dir, name string, ch chan<- prometheus.Metric) (*time.Time, map[string]*dto.MetricFamily, error) { path := filepath.Join(dir, name) f, err := os.Open(path) if err != nil { return nil, nil, fmt.Errorf("failed to open textfile data file %q: %w", path, err) } defer f.Close() var parser expfmt.TextParser families, err := parser.TextToMetricFamilies(f) if err != nil { return nil, nil, fmt.Errorf("failed to parse textfile data from %q: %w", path, err) } if hasTimestamps(families) { return nil, nil, fmt.Errorf("textfile %q contains unsupported client-side timestamps, skipping entire file", path) } // Only stat the file once it has been parsed and validated, so that // a failure does not appear fresh. stat, err := f.Stat() if err != nil { return nil, families, fmt.Errorf("failed to stat %q: %w", path, err) } t := stat.ModTime() return &t, families, nil } // hasTimestamps returns true when metrics contain unsupported timestamps. 
func hasTimestamps(parsedFamilies map[string]*dto.MetricFamily) bool { for _, mf := range parsedFamilies { for _, m := range mf.Metric { if m.TimestampMs != nil { return true } } } return false } node_exporter-1.7.0/collector/textfile_test.go000066400000000000000000000104251452426057600216110ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !notextfile // +build !notextfile package collector import ( "fmt" "net/http" "net/http/httptest" "os" "testing" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/promlog" "github.com/prometheus/common/promlog/flag" ) type collectorAdapter struct { Collector } // Describe implements the prometheus.Collector interface. func (a collectorAdapter) Describe(ch chan<- *prometheus.Desc) { // We have to send *some* metric in Describe, but we don't know which ones // we're going to get, so just send a dummy metric. ch <- prometheus.NewDesc("dummy_metric", "Dummy metric.", nil, nil) } // Collect implements the prometheus.Collector interface. func (a collectorAdapter) Collect(ch chan<- prometheus.Metric) { if err := a.Update(ch); err != nil { panic(fmt.Sprintf("failed to update collector: %v", err)) } } func TestTextfileCollector(t *testing.T) { tests := []struct { path string out string }{ { path: "fixtures/textfile/no_metric_files", out: "fixtures/textfile/no_metric_files.out", }, { path: "fixtures/textfile/two_metric_files", out: "fixtures/textfile/two_metric_files.out", }, { path: "fixtures/textfile/nonexistent_path", out: "fixtures/textfile/nonexistent_path.out", }, { path: "fixtures/textfile/client_side_timestamp", out: "fixtures/textfile/client_side_timestamp.out", }, { path: "fixtures/textfile/different_metric_types", out: "fixtures/textfile/different_metric_types.out", }, { path: "fixtures/textfile/inconsistent_metrics", out: "fixtures/textfile/inconsistent_metrics.out", }, { path: "fixtures/textfile/histogram", out: "fixtures/textfile/histogram.out", }, { path: "fixtures/textfile/histogram_extra_dimension", out: "fixtures/textfile/histogram_extra_dimension.out", }, { path: "fixtures/textfile/summary", out: "fixtures/textfile/summary.out", }, { path: "fixtures/textfile/summary_extra_dimension", out: "fixtures/textfile/summary_extra_dimension.out", }, { path: "fixtures/textfile/*_extra_dimension", out: "fixtures/textfile/glob_extra_dimension.out", }, { path: "fixtures/textfile/metrics_merge_empty_help", out: "fixtures/textfile/metrics_merge_empty_help.out", }, { path: "fixtures/textfile/metrics_merge_no_help", out: "fixtures/textfile/metrics_merge_no_help.out", }, { path: "fixtures/textfile/metrics_merge_same_help", out: "fixtures/textfile/metrics_merge_same_help.out", }, { path: "fixtures/textfile/metrics_merge_different_help", out: "fixtures/textfile/metrics_merge_different_help.out", }, } for i, test := range tests { mtime := 1.0 c := 
&textFileCollector{ path: test.path, mtime: &mtime, logger: log.NewNopLogger(), } // Suppress a log message about `nonexistent_path` not existing, this is // expected and clutters the test output. promlogConfig := &promlog.Config{} flag.AddFlags(kingpin.CommandLine, promlogConfig) if _, err := kingpin.CommandLine.Parse([]string{"--log.level", "debug"}); err != nil { t.Fatal(err) } registry := prometheus.NewRegistry() registry.MustRegister(collectorAdapter{c}) rw := httptest.NewRecorder() promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}).ServeHTTP(rw, &http.Request{}) got := string(rw.Body.String()) want, err := os.ReadFile(test.out) if err != nil { t.Fatalf("%d. error reading fixture file %s: %s", i, test.out, err) } if string(want) != got { t.Fatalf("%d.%q want:\n\n%s\n\ngot:\n\n%s", i, test.path, string(want), got) } } } node_exporter-1.7.0/collector/thermal_darwin.go000066400000000000000000000124331452426057600217270ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !notherm // +build !notherm package collector /* #cgo LDFLAGS: -framework IOKit -framework CoreFoundation #include #include #include #include #include struct ref_with_ret { CFDictionaryRef ref; IOReturn ret; }; struct ref_with_ret FetchThermal(); struct ref_with_ret FetchThermal() { CFDictionaryRef ref; IOReturn ret; ret = IOPMCopyCPUPowerStatus(&ref); struct ref_with_ret result = { ref, ret, }; return result; } */ import "C" import ( "errors" "fmt" "unsafe" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) type thermCollector struct { cpuSchedulerLimit typedDesc cpuAvailableCPU typedDesc cpuSpeedLimit typedDesc logger log.Logger } const thermal = "thermal" func init() { registerCollector(thermal, defaultEnabled, NewThermCollector) } // NewThermCollector returns a new Collector exposing current CPU power levels. func NewThermCollector(logger log.Logger) (Collector, error) { return &thermCollector{ cpuSchedulerLimit: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, thermal, "cpu_scheduler_limit_ratio"), "Represents the percentage (0-100) of CPU time available. 100% at normal operation. The OS may limit this time for a percentage less than 100%.", nil, nil), valueType: prometheus.GaugeValue, }, cpuAvailableCPU: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, thermal, "cpu_available_cpu"), "Reflects how many, if any, CPUs have been taken offline. Represented as an integer number of CPUs (0 - Max CPUs).", nil, nil, ), valueType: prometheus.GaugeValue, }, cpuSpeedLimit: typedDesc{ desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, thermal, "cpu_speed_limit_ratio"), "Defines the speed & voltage limits placed on the CPU. 
Represented as a percentage (0-100) of maximum CPU speed.", nil, nil, ), valueType: prometheus.GaugeValue, }, logger: logger, }, nil } func (c *thermCollector) Update(ch chan<- prometheus.Metric) error { cpuPowerStatus, err := fetchCPUPowerStatus() if err != nil { return err } if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitSchedulerTimeKey))]; ok { ch <- c.cpuSchedulerLimit.mustNewConstMetric(float64(value) / 100.0) } if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitProcessorCountKey))]; ok { ch <- c.cpuAvailableCPU.mustNewConstMetric(float64(value)) } if value, ok := cpuPowerStatus[(string(C.kIOPMCPUPowerLimitProcessorSpeedKey))]; ok { ch <- c.cpuSpeedLimit.mustNewConstMetric(float64(value) / 100.0) } return nil } func fetchCPUPowerStatus() (map[string]int, error) { cfDictRef, _ := C.FetchThermal() defer func() { if cfDictRef.ref != 0x0 { C.CFRelease(C.CFTypeRef(cfDictRef.ref)) } }() if C.kIOReturnNotFound == cfDictRef.ret { return nil, errors.New("no CPU power status has been recorded") } if C.kIOReturnSuccess != cfDictRef.ret { return nil, fmt.Errorf("no CPU power status with error code 0x%08x", int(cfDictRef.ret)) } // mapping CFDictionary to map cfDict := CFDict(cfDictRef.ref) return mappingCFDictToMap(cfDict), nil } type CFDict uintptr func mappingCFDictToMap(dict CFDict) map[string]int { if C.CFNullRef(dict) == C.kCFNull { return nil } cfDict := C.CFDictionaryRef(dict) var result map[string]int count := C.CFDictionaryGetCount(cfDict) if count > 0 { keys := make([]C.CFTypeRef, count) values := make([]C.CFTypeRef, count) C.CFDictionaryGetKeysAndValues(cfDict, (*unsafe.Pointer)(unsafe.Pointer(&keys[0])), (*unsafe.Pointer)(unsafe.Pointer(&values[0]))) result = make(map[string]int, count) for i := C.CFIndex(0); i < count; i++ { result[mappingCFStringToString(C.CFStringRef(keys[i]))] = mappingCFNumberLongToInt(C.CFNumberRef(values[i])) } } return result } // CFStringToString converts a CFStringRef to a string. func mappingCFStringToString(s C.CFStringRef) string { p := C.CFStringGetCStringPtr(s, C.kCFStringEncodingUTF8) if p != nil { return C.GoString(p) } length := C.CFStringGetLength(s) if length == 0 { return "" } maxBufLen := C.CFStringGetMaximumSizeForEncoding(length, C.kCFStringEncodingUTF8) if maxBufLen == 0 { return "" } buf := make([]byte, maxBufLen) var usedBufLen C.CFIndex _ = C.CFStringGetBytes(s, C.CFRange{0, length}, C.kCFStringEncodingUTF8, C.UInt8(0), C.false, (*C.UInt8)(&buf[0]), maxBufLen, &usedBufLen) return string(buf[:usedBufLen]) } func mappingCFNumberLongToInt(n C.CFNumberRef) int { typ := C.CFNumberGetType(n) var long C.long C.CFNumberGetValue(n, typ, unsafe.Pointer(&long)) return int(long) } node_exporter-1.7.0/collector/thermal_zone_linux.go000066400000000000000000000061311452426057600226330ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
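// As a rough sketch, the collector in this file exports gauges like the
// following (the zone, type and readings are illustrative, not taken from a
// real machine):
//
//	node_thermal_zone_temp{type="x86_pkg_temp",zone="0"} 55
//	node_cooling_device_cur_state{name="0",type="Processor"} 0
//	node_cooling_device_max_state{name="0",type="Processor"} 3
//
// Temperatures are reported in Celsius; sysfs exposes millidegrees, hence the
// division by 1000 in Update below.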
//go:build !nothermalzone // +build !nothermalzone package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) const coolingDevice = "cooling_device" const thermalZone = "thermal_zone" type thermalZoneCollector struct { fs sysfs.FS coolingDeviceCurState *prometheus.Desc coolingDeviceMaxState *prometheus.Desc zoneTemp *prometheus.Desc logger log.Logger } func init() { registerCollector("thermal_zone", defaultEnabled, NewThermalZoneCollector) } // NewThermalZoneCollector returns a new Collector exposing kernel/system statistics. func NewThermalZoneCollector(logger log.Logger) (Collector, error) { fs, err := sysfs.NewFS(*sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &thermalZoneCollector{ fs: fs, zoneTemp: prometheus.NewDesc( prometheus.BuildFQName(namespace, thermalZone, "temp"), "Zone temperature in Celsius", []string{"zone", "type"}, nil, ), coolingDeviceCurState: prometheus.NewDesc( prometheus.BuildFQName(namespace, coolingDevice, "cur_state"), "Current throttle state of the cooling device", []string{"name", "type"}, nil, ), coolingDeviceMaxState: prometheus.NewDesc( prometheus.BuildFQName(namespace, coolingDevice, "max_state"), "Maximum throttle state of the cooling device", []string{"name", "type"}, nil, ), logger: logger, }, nil } func (c *thermalZoneCollector) Update(ch chan<- prometheus.Metric) error { thermalZones, err := c.fs.ClassThermalZoneStats() if err != nil { if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) || errors.Is(err, os.ErrInvalid) { level.Debug(c.logger).Log("msg", "Could not read thermal zone stats", "err", err) return ErrNoData } return err } for _, stats := range thermalZones { ch <- prometheus.MustNewConstMetric( c.zoneTemp, prometheus.GaugeValue, float64(stats.Temp)/1000.0, stats.Name, stats.Type, ) } coolingDevices, err := c.fs.ClassCoolingDeviceStats() if err != nil { return err } for _, stats := range coolingDevices { ch <- prometheus.MustNewConstMetric( c.coolingDeviceCurState, prometheus.GaugeValue, float64(stats.CurState), stats.Name, stats.Type, ) ch <- prometheus.MustNewConstMetric( c.coolingDeviceMaxState, prometheus.GaugeValue, float64(stats.MaxState), stats.Name, stats.Type, ) } return nil } node_exporter-1.7.0/collector/time.go000066400000000000000000000051771452426057600176740ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
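// A small, illustrative sample of what this collector exposes (values are
// made up; the time_zone label reflects the local time zone of the host):
//
//	node_time_seconds 1.7e+09
//	node_time_zone_offset_seconds{time_zone="UTC"} 0
//
// The Linux build additionally reports clocksource info metrics; see
// time_linux.go.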
//go:build !notime // +build !notime package collector import ( "time" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) type timeCollector struct { now typedDesc zone typedDesc clocksourcesAvailable typedDesc clocksourceCurrent typedDesc logger log.Logger } func init() { registerCollector("time", defaultEnabled, NewTimeCollector) } // NewTimeCollector returns a new Collector exposing the current system time in // seconds since epoch. func NewTimeCollector(logger log.Logger) (Collector, error) { const subsystem = "time" return &timeCollector{ now: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "seconds"), "System time in seconds since epoch (1970).", nil, nil, ), prometheus.GaugeValue}, zone: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "zone_offset_seconds"), "System time zone offset in seconds.", []string{"time_zone"}, nil, ), prometheus.GaugeValue}, clocksourcesAvailable: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "clocksource_available_info"), "Available clocksources read from '/sys/devices/system/clocksource'.", []string{"device", "clocksource"}, nil, ), prometheus.GaugeValue}, clocksourceCurrent: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "clocksource_current_info"), "Current clocksource read from '/sys/devices/system/clocksource'.", []string{"device", "clocksource"}, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } func (c *timeCollector) Update(ch chan<- prometheus.Metric) error { now := time.Now() nowSec := float64(now.UnixNano()) / 1e9 zone, zoneOffset := now.Zone() level.Debug(c.logger).Log("msg", "Return time", "now", nowSec) ch <- c.now.mustNewConstMetric(nowSec) level.Debug(c.logger).Log("msg", "Zone offset", "offset", zoneOffset, "time_zone", zone) ch <- c.zone.mustNewConstMetric(float64(zoneOffset), zone) return c.update(ch) } node_exporter-1.7.0/collector/time_linux.go000066400000000000000000000026751452426057600211130ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
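// A hypothetical example of the clocksource info metrics emitted by the
// update method below; the device index and clocksource names are
// illustrative only:
//
//	node_time_clocksource_available_info{clocksource="tsc",device="0"} 1
//	node_time_clocksource_available_info{clocksource="hpet",device="0"} 1
//	node_time_clocksource_current_info{clocksource="tsc",device="0"} 1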
//go:build !notime // +build !notime package collector import ( "fmt" "strconv" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/sysfs" ) func (c *timeCollector) update(ch chan<- prometheus.Metric) error { fs, err := sysfs.NewFS(*sysPath) if err != nil { return fmt.Errorf("failed to open procfs: %w", err) } clocksources, err := fs.ClockSources() if err != nil { return fmt.Errorf("couldn't get clocksources: %w", err) } level.Debug(c.logger).Log("msg", "in Update", "clocksources", fmt.Sprintf("%v", clocksources)) for i, clocksource := range clocksources { is := strconv.Itoa(i) for _, cs := range clocksource.Available { ch <- c.clocksourcesAvailable.mustNewConstMetric(1.0, is, cs) } ch <- c.clocksourceCurrent.mustNewConstMetric(1.0, is, clocksource.Current) } return nil } node_exporter-1.7.0/collector/time_other.go000066400000000000000000000014531452426057600210660ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !linux && !notime // +build !linux,!notime package collector import ( "github.com/prometheus/client_golang/prometheus" ) func (c *timeCollector) update(ch chan<- prometheus.Metric) error { return nil } node_exporter-1.7.0/collector/timex.go000066400000000000000000000152041452426057600200540ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build linux && !notimex // +build linux,!notimex package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) const ( // The system clock is not synchronized to a reliable // server (TIME_ERROR). timeError = 5 // The timex.Status time resolution bit (STA_NANO), // 0 = microsecond, 1 = nanoseconds. staNano = 0x2000 // 1 second in nanoSeconds = 1000000000 microSeconds = 1000000 ) type timexCollector struct { offset, freq, maxerror, esterror, status, constant, tick, ppsfreq, jitter, shift, stabil, jitcnt, calcnt, errcnt, stbcnt, tai, syncStatus typedDesc logger log.Logger } func init() { registerCollector("timex", defaultEnabled, NewTimexCollector) } // NewTimexCollector returns a new Collector exposing adjtime(3) stats. 
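// A short worked example of the scaling applied in Update below, assuming the
// kernel reports values in the units documented in adjtimex(2): timex.Freq is
// a frequency offset in parts per million with a 16-bit fractional part, so a
// raw value of 65536 corresponds to +1 ppm and is exported as a ratio of
// 1 + 65536/(1000000*65536) = 1.000001. Similarly, timex.Offset is divided by
// nanoSeconds or microSeconds depending on whether the STA_NANO status bit is
// set.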
func NewTimexCollector(logger log.Logger) (Collector, error) { const subsystem = "timex" return &timexCollector{ offset: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "offset_seconds"), "Time offset in between local system and reference clock.", nil, nil, ), prometheus.GaugeValue}, freq: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "frequency_adjustment_ratio"), "Local clock frequency adjustment.", nil, nil, ), prometheus.GaugeValue}, maxerror: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "maxerror_seconds"), "Maximum error in seconds.", nil, nil, ), prometheus.GaugeValue}, esterror: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "estimated_error_seconds"), "Estimated error in seconds.", nil, nil, ), prometheus.GaugeValue}, status: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "status"), "Value of the status array bits.", nil, nil, ), prometheus.GaugeValue}, constant: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "loop_time_constant"), "Phase-locked loop time constant.", nil, nil, ), prometheus.GaugeValue}, tick: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "tick_seconds"), "Seconds between clock ticks.", nil, nil, ), prometheus.GaugeValue}, ppsfreq: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_frequency_hertz"), "Pulse per second frequency.", nil, nil, ), prometheus.GaugeValue}, jitter: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_jitter_seconds"), "Pulse per second jitter.", nil, nil, ), prometheus.GaugeValue}, shift: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_shift_seconds"), "Pulse per second interval duration.", nil, nil, ), prometheus.GaugeValue}, stabil: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_stability_hertz"), "Pulse per second stability, average of recent frequency changes.", nil, nil, ), prometheus.GaugeValue}, jitcnt: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_jitter_total"), "Pulse per second count of jitter limit exceeded events.", nil, nil, ), prometheus.CounterValue}, calcnt: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_calibration_total"), "Pulse per second count of calibration intervals.", nil, nil, ), prometheus.CounterValue}, errcnt: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_error_total"), "Pulse per second count of calibration errors.", nil, nil, ), prometheus.CounterValue}, stbcnt: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "pps_stability_exceeded_total"), "Pulse per second count of stability limit exceeded events.", nil, nil, ), prometheus.CounterValue}, tai: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "tai_offset_seconds"), "International Atomic Time (TAI) offset.", nil, nil, ), prometheus.GaugeValue}, syncStatus: typedDesc{prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "sync_status"), "Is clock synchronized to a reliable server (1 = yes, 0 = no).", nil, nil, ), prometheus.GaugeValue}, logger: logger, }, nil } func (c *timexCollector) Update(ch chan<- prometheus.Metric) error { var syncStatus float64 var divisor float64 var timex = new(unix.Timex) status, err := unix.Adjtimex(timex) if err != nil { if errors.Is(err, os.ErrPermission) 
{ level.Debug(c.logger).Log("msg", "Not collecting timex metrics", "err", err) return ErrNoData } return fmt.Errorf("failed to retrieve adjtimex stats: %w", err) } if status == timeError { syncStatus = 0 } else { syncStatus = 1 } if (timex.Status & staNano) != 0 { divisor = nanoSeconds } else { divisor = microSeconds } // See NOTES in adjtimex(2). const ppm16frac = 1000000.0 * 65536.0 ch <- c.syncStatus.mustNewConstMetric(syncStatus) ch <- c.offset.mustNewConstMetric(float64(timex.Offset) / divisor) ch <- c.freq.mustNewConstMetric(1 + float64(timex.Freq)/ppm16frac) ch <- c.maxerror.mustNewConstMetric(float64(timex.Maxerror) / microSeconds) ch <- c.esterror.mustNewConstMetric(float64(timex.Esterror) / microSeconds) ch <- c.status.mustNewConstMetric(float64(timex.Status)) ch <- c.constant.mustNewConstMetric(float64(timex.Constant)) ch <- c.tick.mustNewConstMetric(float64(timex.Tick) / microSeconds) ch <- c.ppsfreq.mustNewConstMetric(float64(timex.Ppsfreq) / ppm16frac) ch <- c.jitter.mustNewConstMetric(float64(timex.Jitter) / divisor) ch <- c.shift.mustNewConstMetric(float64(timex.Shift)) ch <- c.stabil.mustNewConstMetric(float64(timex.Stabil) / ppm16frac) ch <- c.jitcnt.mustNewConstMetric(float64(timex.Jitcnt)) ch <- c.calcnt.mustNewConstMetric(float64(timex.Calcnt)) ch <- c.errcnt.mustNewConstMetric(float64(timex.Errcnt)) ch <- c.stbcnt.mustNewConstMetric(float64(timex.Stbcnt)) ch <- c.tai.mustNewConstMetric(float64(timex.Tai)) return nil } node_exporter-1.7.0/collector/udp_queues_linux.go000066400000000000000000000052431452426057600223260ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noudp_queues // +build !noudp_queues package collector import ( "errors" "fmt" "os" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) type ( udpQueuesCollector struct { fs procfs.FS desc *prometheus.Desc logger log.Logger } ) func init() { registerCollector("udp_queues", defaultEnabled, NewUDPqueuesCollector) } // NewUDPqueuesCollector returns a new Collector exposing network udp queued bytes. 
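// An illustrative sample of the single gauge family produced below (the
// values are invented):
//
//	node_udp_queues{ip="v4",queue="rx"} 0
//	node_udp_queues{ip="v4",queue="tx"} 1280
//	node_udp_queues{ip="v6",queue="rx"} 0
//	node_udp_queues{ip="v6",queue="tx"} 0
//
// The data comes from /proc/net/udp and /proc/net/udp6 via procfs; if neither
// file exists the collector returns ErrNoData.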
func NewUDPqueuesCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &udpQueuesCollector{ fs: fs, desc: prometheus.NewDesc( prometheus.BuildFQName(namespace, "udp", "queues"), "Number of allocated memory in the kernel for UDP datagrams in bytes.", []string{"queue", "ip"}, nil, ), logger: logger, }, nil } func (c *udpQueuesCollector) Update(ch chan<- prometheus.Metric) error { s4, errIPv4 := c.fs.NetUDPSummary() if errIPv4 == nil { ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.TxQueueLength), "tx", "v4") ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s4.RxQueueLength), "rx", "v4") } else { if errors.Is(errIPv4, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "not collecting ipv4 based metrics") } else { return fmt.Errorf("couldn't get udp queued bytes: %w", errIPv4) } } s6, errIPv6 := c.fs.NetUDP6Summary() if errIPv6 == nil { ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.TxQueueLength), "tx", "v6") ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(s6.RxQueueLength), "rx", "v6") } else { if errors.Is(errIPv6, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "not collecting ipv6 based metrics") } else { return fmt.Errorf("couldn't get udp6 queued bytes: %w", errIPv6) } } if errors.Is(errIPv4, os.ErrNotExist) && errors.Is(errIPv6, os.ErrNotExist) { return ErrNoData } return nil } node_exporter-1.7.0/collector/uname.go000066400000000000000000000034701452426057600200350ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (darwin || freebsd || openbsd || netbsd || linux) && !nouname // +build darwin freebsd openbsd netbsd linux // +build !nouname package collector import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) var unameDesc = prometheus.NewDesc( prometheus.BuildFQName(namespace, "uname", "info"), "Labeled system information as provided by the uname system call.", []string{ "sysname", "release", "version", "machine", "nodename", "domainname", }, nil, ) type unameCollector struct { logger log.Logger } type uname struct { SysName string Release string Version string Machine string NodeName string DomainName string } func init() { registerCollector("uname", defaultEnabled, newUnameCollector) } // NewUnameCollector returns new unameCollector. 
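// A hypothetical example of the single info metric this collector emits
// (label values are illustrative):
//
//	node_uname_info{domainname="(none)",machine="x86_64",nodename="example",release="6.1.0",sysname="Linux",version="#1 SMP"} 1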
func newUnameCollector(logger log.Logger) (Collector, error) { return &unameCollector{logger}, nil } func (c *unameCollector) Update(ch chan<- prometheus.Metric) error { uname, err := getUname() if err != nil { return err } ch <- prometheus.MustNewConstMetric(unameDesc, prometheus.GaugeValue, 1, uname.SysName, uname.Release, uname.Version, uname.Machine, uname.NodeName, uname.DomainName, ) return nil } node_exporter-1.7.0/collector/uname_bsd.go000066400000000000000000000037211452426057600206640ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build (darwin || freebsd || openbsd || netbsd) && !nouname // +build darwin freebsd openbsd netbsd // +build !nouname package collector import ( "strings" "golang.org/x/sys/unix" ) func getUname() (uname, error) { var utsname unix.Utsname if err := unix.Uname(&utsname); err != nil { return uname{}, err } nodeName, domainName := parseHostNameAndDomainName(utsname) output := uname{ SysName: unix.ByteSliceToString(utsname.Sysname[:]), Release: unix.ByteSliceToString(utsname.Release[:]), Version: unix.ByteSliceToString(utsname.Version[:]), Machine: unix.ByteSliceToString(utsname.Machine[:]), NodeName: nodeName, DomainName: domainName, } return output, nil } // parseHostNameAndDomainName for FreeBSD,OpenBSD,Darwin. // Attempts to emulate what happens in the Linux uname calls since these OS doesn't have a Domainname. func parseHostNameAndDomainName(utsname unix.Utsname) (hostname string, domainname string) { nodename := unix.ByteSliceToString(utsname.Nodename[:]) split := strings.SplitN(nodename, ".", 2) // We'll always have at least a single element in the array. We assume this // is the hostname. hostname = split[0] // If we have more than one element, we assume this is the domainname. // Otherwise leave it to "(none)" like Linux. domainname = "(none)" if len(split) > 1 { domainname = split[1] } return hostname, domainname } node_exporter-1.7.0/collector/uname_linux.go000066400000000000000000000022731452426057600212540ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nouname // +build !nouname package collector import "golang.org/x/sys/unix" func getUname() (uname, error) { var utsname unix.Utsname if err := unix.Uname(&utsname); err != nil { return uname{}, err } output := uname{ SysName: unix.ByteSliceToString(utsname.Sysname[:]), Release: unix.ByteSliceToString(utsname.Release[:]), Version: unix.ByteSliceToString(utsname.Version[:]), Machine: unix.ByteSliceToString(utsname.Machine[:]), NodeName: unix.ByteSliceToString(utsname.Nodename[:]), DomainName: unix.ByteSliceToString(utsname.Domainname[:]), } return output, nil } node_exporter-1.7.0/collector/vmstat_linux.go000066400000000000000000000041711452426057600214640ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !novmstat // +build !novmstat package collector import ( "bufio" "fmt" "os" "regexp" "strconv" "strings" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" ) const ( vmStatSubsystem = "vmstat" ) var ( vmStatFields = kingpin.Flag("collector.vmstat.fields", "Regexp of fields to return for vmstat collector.").Default("^(oom_kill|pgpg|pswp|pg.*fault).*").String() ) type vmStatCollector struct { fieldPattern *regexp.Regexp logger log.Logger } func init() { registerCollector("vmstat", defaultEnabled, NewvmStatCollector) } // NewvmStatCollector returns a new Collector exposing vmstat stats. func NewvmStatCollector(logger log.Logger) (Collector, error) { pattern := regexp.MustCompile(*vmStatFields) return &vmStatCollector{ fieldPattern: pattern, logger: logger, }, nil } func (c *vmStatCollector) Update(ch chan<- prometheus.Metric) error { file, err := os.Open(procFilePath("vmstat")) if err != nil { return err } defer file.Close() scanner := bufio.NewScanner(file) for scanner.Scan() { parts := strings.Fields(scanner.Text()) value, err := strconv.ParseFloat(parts[1], 64) if err != nil { return err } if !c.fieldPattern.MatchString(parts[0]) { continue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, vmStatSubsystem, parts[0]), fmt.Sprintf("/proc/vmstat information field %s.", parts[0]), nil, nil), prometheus.UntypedValue, value, ) } return scanner.Err() } node_exporter-1.7.0/collector/wifi_linux.go000066400000000000000000000255051452426057600211100ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nowifi // +build !nowifi package collector import ( "encoding/json" "errors" "fmt" "os" "path/filepath" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/mdlayher/wifi" "github.com/prometheus/client_golang/prometheus" ) type wifiCollector struct { interfaceFrequencyHertz *prometheus.Desc stationInfo *prometheus.Desc stationConnectedSecondsTotal *prometheus.Desc stationInactiveSeconds *prometheus.Desc stationReceiveBitsPerSecond *prometheus.Desc stationTransmitBitsPerSecond *prometheus.Desc stationReceiveBytesTotal *prometheus.Desc stationTransmitBytesTotal *prometheus.Desc stationSignalDBM *prometheus.Desc stationTransmitRetriesTotal *prometheus.Desc stationTransmitFailedTotal *prometheus.Desc stationBeaconLossTotal *prometheus.Desc logger log.Logger } var ( collectorWifi = kingpin.Flag("collector.wifi.fixtures", "test fixtures to use for wifi collector metrics").Default("").String() ) func init() { registerCollector("wifi", defaultDisabled, NewWifiCollector) } var _ wifiStater = &wifi.Client{} // wifiStater is an interface used to swap out a *wifi.Client for end to end tests. type wifiStater interface { BSS(ifi *wifi.Interface) (*wifi.BSS, error) Close() error Interfaces() ([]*wifi.Interface, error) StationInfo(ifi *wifi.Interface) ([]*wifi.StationInfo, error) } // NewWifiCollector returns a new Collector exposing Wifi statistics. func NewWifiCollector(logger log.Logger) (Collector, error) { const ( subsystem = "wifi" ) var ( labels = []string{"device", "mac_address"} ) return &wifiCollector{ interfaceFrequencyHertz: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "interface_frequency_hertz"), "The current frequency a WiFi interface is operating at, in hertz.", []string{"device"}, nil, ), stationInfo: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_info"), "Labeled WiFi interface station information as provided by the operating system.", []string{"device", "bssid", "ssid", "mode"}, nil, ), stationConnectedSecondsTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_connected_seconds_total"), "The total number of seconds a station has been connected to an access point.", labels, nil, ), stationInactiveSeconds: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_inactive_seconds"), "The number of seconds since any wireless activity has occurred on a station.", labels, nil, ), stationReceiveBitsPerSecond: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_receive_bits_per_second"), "The current WiFi receive bitrate of a station, in bits per second.", labels, nil, ), stationTransmitBitsPerSecond: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_transmit_bits_per_second"), "The current WiFi transmit bitrate of a station, in bits per second.", labels, nil, ), stationReceiveBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_receive_bytes_total"), "The total number of bytes received by a WiFi station.", labels, nil, ), stationTransmitBytesTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_transmit_bytes_total"), "The total number of bytes transmitted by a WiFi station.", labels, nil, ), stationSignalDBM: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_signal_dbm"), "The current WiFi signal strength, in decibel-milliwatts (dBm).", labels, nil, ), stationTransmitRetriesTotal: prometheus.NewDesc( 
prometheus.BuildFQName(namespace, subsystem, "station_transmit_retries_total"), "The total number of times a station has had to retry while sending a packet.", labels, nil, ), stationTransmitFailedTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_transmit_failed_total"), "The total number of times a station has failed to send a packet.", labels, nil, ), stationBeaconLossTotal: prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "station_beacon_loss_total"), "The total number of times a station has detected a beacon loss.", labels, nil, ), logger: logger, }, nil } func (c *wifiCollector) Update(ch chan<- prometheus.Metric) error { stat, err := newWifiStater(*collectorWifi) if err != nil { // Cannot access wifi metrics, report no error. if errors.Is(err, os.ErrNotExist) { level.Debug(c.logger).Log("msg", "wifi collector metrics are not available for this system") return ErrNoData } if errors.Is(err, os.ErrPermission) { level.Debug(c.logger).Log("msg", "wifi collector got permission denied when accessing metrics") return ErrNoData } return fmt.Errorf("failed to access wifi data: %w", err) } defer stat.Close() ifis, err := stat.Interfaces() if err != nil { return fmt.Errorf("failed to retrieve wifi interfaces: %w", err) } for _, ifi := range ifis { // Some virtual devices have no "name" and should be skipped. if ifi.Name == "" { continue } level.Debug(c.logger).Log("msg", "probing wifi device with type", "wifi", ifi.Name, "type", ifi.Type) ch <- prometheus.MustNewConstMetric( c.interfaceFrequencyHertz, prometheus.GaugeValue, mHzToHz(ifi.Frequency), ifi.Name, ) // When a statistic is not available for a given interface, package wifi // returns a os.ErrNotExist error. We leverage this to only export // metrics which are actually valid for given interface types. bss, err := stat.BSS(ifi) switch { case err == nil: c.updateBSSStats(ch, ifi.Name, bss) case errors.Is(err, os.ErrNotExist): level.Debug(c.logger).Log("msg", "BSS information not found for wifi device", "name", ifi.Name) default: return fmt.Errorf("failed to retrieve BSS for device %s: %v", ifi.Name, err) } stations, err := stat.StationInfo(ifi) switch { case err == nil: for _, station := range stations { c.updateStationStats(ch, ifi.Name, station) } case errors.Is(err, os.ErrNotExist): level.Debug(c.logger).Log("msg", "station information not found for wifi device", "name", ifi.Name) default: return fmt.Errorf("failed to retrieve station info for device %q: %v", ifi.Name, err) } } return nil } func (c *wifiCollector) updateBSSStats(ch chan<- prometheus.Metric, device string, bss *wifi.BSS) { // Synthetic metric which provides wifi station info, such as SSID, BSSID, etc. 
ch <- prometheus.MustNewConstMetric( c.stationInfo, prometheus.GaugeValue, 1, device, bss.BSSID.String(), bss.SSID, bssStatusMode(bss.Status), ) } func (c *wifiCollector) updateStationStats(ch chan<- prometheus.Metric, device string, info *wifi.StationInfo) { ch <- prometheus.MustNewConstMetric( c.stationConnectedSecondsTotal, prometheus.CounterValue, info.Connected.Seconds(), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationInactiveSeconds, prometheus.GaugeValue, info.Inactive.Seconds(), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationReceiveBitsPerSecond, prometheus.GaugeValue, float64(info.ReceiveBitrate), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationTransmitBitsPerSecond, prometheus.GaugeValue, float64(info.TransmitBitrate), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationReceiveBytesTotal, prometheus.CounterValue, float64(info.ReceivedBytes), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationTransmitBytesTotal, prometheus.CounterValue, float64(info.TransmittedBytes), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationSignalDBM, prometheus.GaugeValue, float64(info.Signal), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationTransmitRetriesTotal, prometheus.CounterValue, float64(info.TransmitRetries), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationTransmitFailedTotal, prometheus.CounterValue, float64(info.TransmitFailed), device, info.HardwareAddr.String(), ) ch <- prometheus.MustNewConstMetric( c.stationBeaconLossTotal, prometheus.CounterValue, float64(info.BeaconLoss), device, info.HardwareAddr.String(), ) } func mHzToHz(mHz int) float64 { return float64(mHz) * 1000 * 1000 } func bssStatusMode(status wifi.BSSStatus) string { switch status { case wifi.BSSStatusAuthenticated, wifi.BSSStatusAssociated: return "client" case wifi.BSSStatusIBSSJoined: return "ad-hoc" default: return "unknown" } } // All code below this point is used to assist with end-to-end tests for // the wifi collector, since wifi devices are not available in CI. // newWifiStater determines if mocked test fixtures from files should be used for // collecting wifi metrics, or if package wifi should be used. 
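// A hedged usage sketch for the fixture mode described above. The fixture
// directory path is an assumption that mirrors the mock reader below
// (interfaces.json at the root plus per-interface bss.json / stationinfo.json):
//
//   ./node_exporter --collector.wifi \
//     --collector.wifi.fixtures=collector/fixtures/wifi
//
// Leaving --collector.wifi.fixtures empty (the default) makes newWifiStater
// return a real *wifi.Client via wifi.New().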
func newWifiStater(fixtures string) (wifiStater, error) { if fixtures != "" { return &mockWifiStater{ fixtures: fixtures, }, nil } return wifi.New() } var _ wifiStater = &mockWifiStater{} type mockWifiStater struct { fixtures string } func (s *mockWifiStater) unmarshalJSONFile(filename string, v interface{}) error { b, err := os.ReadFile(filepath.Join(s.fixtures, filename)) if err != nil { return err } return json.Unmarshal(b, v) } func (s *mockWifiStater) Close() error { return nil } func (s *mockWifiStater) BSS(ifi *wifi.Interface) (*wifi.BSS, error) { p := filepath.Join(ifi.Name, "bss.json") var bss wifi.BSS if err := s.unmarshalJSONFile(p, &bss); err != nil { return nil, err } return &bss, nil } func (s *mockWifiStater) Interfaces() ([]*wifi.Interface, error) { var ifis []*wifi.Interface if err := s.unmarshalJSONFile("interfaces.json", &ifis); err != nil { return nil, err } return ifis, nil } func (s *mockWifiStater) StationInfo(ifi *wifi.Interface) ([]*wifi.StationInfo, error) { p := filepath.Join(ifi.Name, "stationinfo.json") var stations []*wifi.StationInfo if err := s.unmarshalJSONFile(p, &stations); err != nil { return nil, err } return stations, nil } node_exporter-1.7.0/collector/xfs_linux.go000066400000000000000000000222211452426057600207420ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !noxfs // +build !noxfs package collector import ( "fmt" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs/xfs" ) // An xfsCollector is a Collector which gathers metrics from XFS filesystems. type xfsCollector struct { fs xfs.FS logger log.Logger } func init() { registerCollector("xfs", defaultEnabled, NewXFSCollector) } // NewXFSCollector returns a new Collector exposing XFS statistics. func NewXFSCollector(logger log.Logger) (Collector, error) { fs, err := xfs.NewFS(*procPath, *sysPath) if err != nil { return nil, fmt.Errorf("failed to open sysfs: %w", err) } return &xfsCollector{ fs: fs, logger: logger, }, nil } // Update implements Collector. func (c *xfsCollector) Update(ch chan<- prometheus.Metric) error { stats, err := c.fs.SysStats() if err != nil { return fmt.Errorf("failed to retrieve XFS stats: %w", err) } for _, s := range stats { c.updateXFSStats(ch, s) } return nil } // updateXFSStats collects statistics for a single XFS filesystem. func (c *xfsCollector) updateXFSStats(ch chan<- prometheus.Metric, s *xfs.Stats) { const ( subsystem = "xfs" ) var ( labels = []string{"device"} ) // Metric names and descriptions are sourced from: // http://xfs.org/index.php/Runtime_Stats. // // Each metric has a name that roughly follows the pattern of // "node_xfs_category_value_total", using the categories and value names // found on the XFS wiki. // // Note that statistics for more than one internal B-tree are measured, // and as such, each one must be differentiated by name. 
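// For orientation only (an assumption about the usual Linux layout, not read
// from this file): the xfs.FS handle opened in NewXFSCollector exposes the
// per-device counters found under sysfs, e.g. /sys/fs/xfs/sda1/stats/stats,
// whose whitespace-separated lines look roughly like
//
//   extent_alloc 4260849 125170297 4618726 131131897
//   dir 1893849 146213 134934 1179566
//
// (device name and numbers are placeholders). The SysStats() call above parses
// those lines into the Stats struct consumed by the metrics table below.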
metrics := []struct { name string desc string value float64 }{ { name: "extent_allocation_extents_allocated_total", desc: "Number of extents allocated for a filesystem.", value: float64(s.ExtentAllocation.ExtentsAllocated), }, { name: "extent_allocation_blocks_allocated_total", desc: "Number of blocks allocated for a filesystem.", value: float64(s.ExtentAllocation.BlocksAllocated), }, { name: "extent_allocation_extents_freed_total", desc: "Number of extents freed for a filesystem.", value: float64(s.ExtentAllocation.ExtentsFreed), }, { name: "extent_allocation_blocks_freed_total", desc: "Number of blocks freed for a filesystem.", value: float64(s.ExtentAllocation.BlocksFreed), }, { name: "allocation_btree_lookups_total", desc: "Number of allocation B-tree lookups for a filesystem.", value: float64(s.AllocationBTree.Lookups), }, { name: "allocation_btree_compares_total", desc: "Number of allocation B-tree compares for a filesystem.", value: float64(s.AllocationBTree.Compares), }, { name: "allocation_btree_records_inserted_total", desc: "Number of allocation B-tree records inserted for a filesystem.", value: float64(s.AllocationBTree.RecordsInserted), }, { name: "allocation_btree_records_deleted_total", desc: "Number of allocation B-tree records deleted for a filesystem.", value: float64(s.AllocationBTree.RecordsDeleted), }, { name: "block_mapping_reads_total", desc: "Number of block map for read operations for a filesystem.", value: float64(s.BlockMapping.Reads), }, { name: "block_mapping_writes_total", desc: "Number of block map for write operations for a filesystem.", value: float64(s.BlockMapping.Writes), }, { name: "block_mapping_unmaps_total", desc: "Number of block unmaps (deletes) for a filesystem.", value: float64(s.BlockMapping.Unmaps), }, { name: "block_mapping_extent_list_insertions_total", desc: "Number of extent list insertions for a filesystem.", value: float64(s.BlockMapping.ExtentListInsertions), }, { name: "block_mapping_extent_list_deletions_total", desc: "Number of extent list deletions for a filesystem.", value: float64(s.BlockMapping.ExtentListDeletions), }, { name: "block_mapping_extent_list_lookups_total", desc: "Number of extent list lookups for a filesystem.", value: float64(s.BlockMapping.ExtentListLookups), }, { name: "block_mapping_extent_list_compares_total", desc: "Number of extent list compares for a filesystem.", value: float64(s.BlockMapping.ExtentListCompares), }, { name: "block_map_btree_lookups_total", desc: "Number of block map B-tree lookups for a filesystem.", value: float64(s.BlockMapBTree.Lookups), }, { name: "block_map_btree_compares_total", desc: "Number of block map B-tree compares for a filesystem.", value: float64(s.BlockMapBTree.Compares), }, { name: "block_map_btree_records_inserted_total", desc: "Number of block map B-tree records inserted for a filesystem.", value: float64(s.BlockMapBTree.RecordsInserted), }, { name: "block_map_btree_records_deleted_total", desc: "Number of block map B-tree records deleted for a filesystem.", value: float64(s.BlockMapBTree.RecordsDeleted), }, { name: "directory_operation_lookup_total", desc: "Number of file name directory lookups which miss the operating systems directory name lookup cache.", value: float64(s.DirectoryOperation.Lookups), }, { name: "directory_operation_create_total", desc: "Number of times a new directory entry was created for a filesystem.", value: float64(s.DirectoryOperation.Creates), }, { name: "directory_operation_remove_total", desc: "Number of times an existing directory entry was 
removed for a filesystem.", value: float64(s.DirectoryOperation.Removes), }, { name: "directory_operation_getdents_total", desc: "Number of times the directory getdents operation was performed for a filesystem.", value: float64(s.DirectoryOperation.Getdents), }, { name: "inode_operation_attempts_total", desc: "Number of times the OS looked for an XFS inode in the inode cache.", value: float64(s.InodeOperation.Attempts), }, { name: "inode_operation_found_total", desc: "Number of times the OS looked for and found an XFS inode in the inode cache.", value: float64(s.InodeOperation.Found), }, { name: "inode_operation_recycled_total", desc: "Number of times the OS found an XFS inode in the cache, but could not use it as it was being recycled.", value: float64(s.InodeOperation.Recycle), }, { name: "inode_operation_missed_total", desc: "Number of times the OS looked for an XFS inode in the cache, but did not find it.", value: float64(s.InodeOperation.Missed), }, { name: "inode_operation_duplicates_total", desc: "Number of times the OS tried to add a missing XFS inode to the inode cache, but found it had already been added by another process.", value: float64(s.InodeOperation.Duplicate), }, { name: "inode_operation_reclaims_total", desc: "Number of times the OS reclaimed an XFS inode from the inode cache to free memory for another purpose.", value: float64(s.InodeOperation.Reclaims), }, { name: "inode_operation_attribute_changes_total", desc: "Number of times the OS explicitly changed the attributes of an XFS inode.", value: float64(s.InodeOperation.AttributeChange), }, { name: "read_calls_total", desc: "Number of read(2) system calls made to files in a filesystem.", value: float64(s.ReadWrite.Read), }, { name: "write_calls_total", desc: "Number of write(2) system calls made to files in a filesystem.", value: float64(s.ReadWrite.Write), }, { name: "vnode_active_total", desc: "Number of vnodes not on free lists for a filesystem.", value: float64(s.Vnode.Active), }, { name: "vnode_allocate_total", desc: "Number of times vn_alloc called for a filesystem.", value: float64(s.Vnode.Allocate), }, { name: "vnode_get_total", desc: "Number of times vn_get called for a filesystem.", value: float64(s.Vnode.Get), }, { name: "vnode_hold_total", desc: "Number of times vn_hold called for a filesystem.", value: float64(s.Vnode.Hold), }, { name: "vnode_release_total", desc: "Number of times vn_rele called for a filesystem.", value: float64(s.Vnode.Release), }, { name: "vnode_reclaim_total", desc: "Number of times vn_reclaim called for a filesystem.", value: float64(s.Vnode.Reclaim), }, { name: "vnode_remove_total", desc: "Number of times vn_remove called for a filesystem.", value: float64(s.Vnode.Remove), }, } for _, m := range metrics { desc := prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, m.name), m.desc, labels, nil, ) ch <- prometheus.MustNewConstMetric( desc, prometheus.CounterValue, m.value, s.Name, ) } } node_exporter-1.7.0/collector/zfs.go000066400000000000000000000103661452426057600175340ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build linux && !nozfs // +build linux,!nozfs package collector import ( "errors" "strings" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) var errZFSNotAvailable = errors.New("ZFS / ZFS statistics are not available") type zfsSysctl string func init() { registerCollector("zfs", defaultEnabled, NewZFSCollector) } type zfsCollector struct { linuxProcpathBase string linuxZpoolIoPath string linuxZpoolObjsetPath string linuxZpoolStatePath string linuxPathMap map[string]string logger log.Logger } // NewZFSCollector returns a new Collector exposing ZFS statistics. func NewZFSCollector(logger log.Logger) (Collector, error) { return &zfsCollector{ linuxProcpathBase: "spl/kstat/zfs", linuxZpoolIoPath: "/*/io", linuxZpoolObjsetPath: "/*/objset-*", linuxZpoolStatePath: "/*/state", linuxPathMap: map[string]string{ "zfs_abd": "abdstats", "zfs_arc": "arcstats", "zfs_dbuf": "dbufstats", "zfs_dmu_tx": "dmu_tx", "zfs_dnode": "dnodestats", "zfs_fm": "fm", "zfs_vdev_cache": "vdev_cache_stats", // vdev_cache is deprecated "zfs_vdev_mirror": "vdev_mirror_stats", "zfs_xuio": "xuio_stats", // no known consumers of the XUIO interface on Linux exist "zfs_zfetch": "zfetchstats", "zfs_zil": "zil", }, logger: logger, }, nil } func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { if _, err := c.openProcFile(c.linuxProcpathBase); err != nil { if err == errZFSNotAvailable { level.Debug(c.logger).Log("err", err) return ErrNoData } } for subsystem := range c.linuxPathMap { if err := c.updateZfsStats(subsystem, ch); err != nil { if err == errZFSNotAvailable { level.Debug(c.logger).Log("err", err) // ZFS /proc files are added as new features to ZFS arrive, it is ok to continue continue } return err } } // Pool stats return c.updatePoolStats(ch) } func (s zfsSysctl) metricName() string { parts := strings.Split(string(s), ".") return strings.Replace(parts[len(parts)-1], "-", "_", -1) } func (c *zfsCollector) constSysctlMetric(subsystem string, sysctl zfsSysctl, value uint64) prometheus.Metric { metricName := sysctl.metricName() return prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, metricName), string(sysctl), nil, nil, ), prometheus.UntypedValue, float64(value), ) } func (c *zfsCollector) constPoolMetric(poolName string, sysctl zfsSysctl, value uint64) prometheus.Metric { metricName := sysctl.metricName() return prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, "zfs_zpool", metricName), string(sysctl), []string{"zpool"}, nil, ), prometheus.UntypedValue, float64(value), poolName, ) } func (c *zfsCollector) constPoolObjsetMetric(poolName string, datasetName string, sysctl zfsSysctl, value uint64) prometheus.Metric { metricName := sysctl.metricName() return prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, "zfs_zpool_dataset", metricName), string(sysctl), []string{"zpool", "dataset"}, nil, ), prometheus.UntypedValue, float64(value), poolName, datasetName, ) } func (c *zfsCollector) constPoolStateMetric(poolName string, stateName 
string, isActive uint64) prometheus.Metric { return prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, "zfs_zpool", "state"), "kstat.zfs.misc.state", []string{"zpool", "state"}, nil, ), prometheus.GaugeValue, float64(isActive), poolName, stateName, ) } node_exporter-1.7.0/collector/zfs_freebsd.go000066400000000000000000000252321452426057600212240ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nozfs // +build !nozfs package collector import ( "fmt" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sys/unix" ) type zfsCollector struct { sysctls []bsdSysctl logger log.Logger } const ( zfsCollectorSubsystem = "zfs" ) func init() { registerCollector("zfs", defaultEnabled, NewZfsCollector) } func NewZfsCollector(logger log.Logger) (Collector, error) { return &zfsCollector{ sysctls: []bsdSysctl{ { name: "abdstats_linear_count_total", description: "ZFS ARC buffer data linear count", mib: "kstat.zfs.misc.abdstats.linear_cnt", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "abdstats_linear_data_bytes", description: "ZFS ARC buffer data linear data size", mib: "kstat.zfs.misc.abdstats.linear_data_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "abdstats_scatter_chunk_waste_bytes", description: "ZFS ARC buffer data scatter chunk waste", mib: "kstat.zfs.misc.abdstats.scatter_chunk_waste", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "abdstats_scatter_count_total", description: "ZFS ARC buffer data scatter count", mib: "kstat.zfs.misc.abdstats.scatter_cnt", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "abdstats_scatter_data_bytes", description: "ZFS ARC buffer data scatter data size", mib: "kstat.zfs.misc.abdstats.scatter_data_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "abdstats_struct_bytes", description: "ZFS ARC buffer data struct size", mib: "kstat.zfs.misc.abdstats.struct_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_anon_bytes", description: "ZFS ARC anon size", mib: "kstat.zfs.misc.arcstats.anon_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_c_bytes", description: "ZFS ARC target size", mib: "kstat.zfs.misc.arcstats.c", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_c_max_bytes", description: "ZFS ARC maximum size", mib: "kstat.zfs.misc.arcstats.c_max", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_c_min_bytes", description: "ZFS ARC minimum size", mib: "kstat.zfs.misc.arcstats.c_min", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: 
"arcstats_data_bytes", description: "ZFS ARC data size", mib: "kstat.zfs.misc.arcstats.data_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_demand_data_hits_total", description: "ZFS ARC demand data hits", mib: "kstat.zfs.misc.arcstats.demand_data_hits", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_demand_data_misses_total", description: "ZFS ARC demand data misses", mib: "kstat.zfs.misc.arcstats.demand_data_misses", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_demand_metadata_hits_total", description: "ZFS ARC demand metadata hits", mib: "kstat.zfs.misc.arcstats.demand_metadata_hits", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_demand_metadata_misses_total", description: "ZFS ARC demand metadata misses", mib: "kstat.zfs.misc.arcstats.demand_metadata_misses", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_hdr_bytes", description: "ZFS ARC header size", mib: "kstat.zfs.misc.arcstats.hdr_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_hits_total", description: "ZFS ARC hits", mib: "kstat.zfs.misc.arcstats.hits", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_misses_total", description: "ZFS ARC misses", mib: "kstat.zfs.misc.arcstats.misses", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_mfu_ghost_hits_total", description: "ZFS ARC MFU ghost hits", mib: "kstat.zfs.misc.arcstats.mfu_ghost_hits", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_mfu_ghost_size", description: "ZFS ARC MFU ghost size", mib: "kstat.zfs.misc.arcstats.mfu_ghost_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_mfu_bytes", description: "ZFS ARC MFU size", mib: "kstat.zfs.misc.arcstats.mfu_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_mru_ghost_hits_total", description: "ZFS ARC MRU ghost hits", mib: "kstat.zfs.misc.arcstats.mru_ghost_hits", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "arcstats_mru_ghost_bytes", description: "ZFS ARC MRU ghost size", mib: "kstat.zfs.misc.arcstats.mru_ghost_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_mru_bytes", description: "ZFS ARC MRU size", mib: "kstat.zfs.misc.arcstats.mru_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_other_bytes", description: "ZFS ARC other size", mib: "kstat.zfs.misc.arcstats.other_size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, // when FreeBSD 14.0+, `meta/pm/pd` install of `p`. 
{ name: "arcstats_p_bytes", description: "ZFS ARC MRU target size", mib: "kstat.zfs.misc.arcstats.p", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "arcstats_meta_bytes", description: "ZFS ARC metadata target frac ", mib: "kstat.zfs.misc.arcstats.meta", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, }, { name: "arcstats_pd_bytes", description: "ZFS ARC data MRU target frac", mib: "kstat.zfs.misc.arcstats.pd", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, }, { name: "arcstats_pm_bytes", description: "ZFS ARC meta MRU target frac", mib: "kstat.zfs.misc.arcstats.pm", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, }, { name: "arcstats_size_bytes", description: "ZFS ARC size", mib: "kstat.zfs.misc.arcstats.size", dataType: bsdSysctlTypeUint64, valueType: prometheus.GaugeValue, labels: nil, }, { name: "zfetchstats_hits_total", description: "ZFS cache fetch hits", mib: "kstat.zfs.misc.zfetchstats.hits", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, { name: "zfetchstats_misses_total", description: "ZFS cache fetch misses", mib: "kstat.zfs.misc.zfetchstats.misses", dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: nil, }, }, logger: logger, }, nil } func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { for _, m := range c.sysctls { v, err := m.Value() if err != nil { // debug logging level.Debug(c.logger).Log("name", m.name, "couldn't get sysctl:", err) continue } ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, m.name), m.description, nil, nil, ), m.valueType, v) } return nil } func (c *zfsCollector) parseFreeBSDPoolObjsetStats() error { sysCtlMetrics := []string{ "nunlinked", "nunlinks", "nread", "reads", "nwritten", "writes", } zfsPoolMibPrefix := "kstat.zfs.pool.dataset" zfsDatasetNames := []string{} zfsDatasets, err := unix.Sysctl(zfsPoolMibPrefix) if err != nil { return fmt.Errorf("couldn't get sysctl: %w", err) } for dataset, _ := range zfsDatasets { if strings.HasSuffix(dataset, ".dataset_name") { zfsDatasetNames = append(zfsDatasetNames, strings.SplitAfter(dataset, ".")[3]) } } for zpoolDataset := range zfsDatasetsNames { zfsDatasetLabels := map[string]string{ "dataset": zpoolDataset, "zpool": strings.SplitAfter(zpoolDataset, "/")[0], } for metric := range sysCtlMetrics { c.sysctls = append(c.sysctls, bsdSysctl{ name: fmt.SprintF("node_zfs_zpool_dataset_%s", metric), description: fmt.SprintF("node_zfs_zpool_dataset_%s", metric), mib: fmt.Sprintf("%s.%s.%s", zfsPoolMibPrefix, poolObj, metric), dataType: bsdSysctlTypeUint64, valueType: prometheus.CounterValue, labels: zfsDatasetLabels, }) } } return nil } node_exporter-1.7.0/collector/zfs_linux.go000066400000000000000000000203521452426057600207470ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !nozfs // +build !nozfs package collector import ( "bufio" "fmt" "io" "os" "path/filepath" "strconv" "strings" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" ) // constants from https://github.com/zfsonlinux/zfs/blob/master/lib/libspl/include/sys/kstat.h // kept as strings for comparison thus avoiding conversion to int const ( // kstatDataChar = "0" // kstatDataInt32 = "1" // kstatDataUint32 = "2" kstatDataInt64 = "3" kstatDataUint64 = "4" // kstatDataLong = "5" // kstatDataUlong = "6" // kstatDataString = "7" ) var zfsPoolStatesName = []string{"online", "degraded", "faulted", "offline", "removed", "unavail", "suspended"} func (c *zfsCollector) openProcFile(path string) (*os.File, error) { file, err := os.Open(procFilePath(path)) if err != nil { // file not found error can occur if: // 1. zfs module is not loaded // 2. zfs version does not have the feature with metrics -- ok to ignore level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", procFilePath(path)) return nil, errZFSNotAvailable } return file, nil } func (c *zfsCollector) updateZfsStats(subsystem string, ch chan<- prometheus.Metric) error { file, err := c.openProcFile(filepath.Join(c.linuxProcpathBase, c.linuxPathMap[subsystem])) if err != nil { return err } defer file.Close() return c.parseProcfsFile(file, c.linuxPathMap[subsystem], func(s zfsSysctl, v uint64) { ch <- c.constSysctlMetric(subsystem, s, v) }) } func (c *zfsCollector) updatePoolStats(ch chan<- prometheus.Metric) error { zpoolPaths, err := filepath.Glob(procFilePath(filepath.Join(c.linuxProcpathBase, c.linuxZpoolIoPath))) if err != nil { return err } for _, zpoolPath := range zpoolPaths { file, err := os.Open(zpoolPath) if err != nil { // this file should exist, but there is a race where an exporting pool can remove the files -- ok to ignore level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) return errZFSNotAvailable } err = c.parsePoolProcfsFile(file, zpoolPath, func(poolName string, s zfsSysctl, v uint64) { ch <- c.constPoolMetric(poolName, s, v) }) file.Close() if err != nil { return err } } zpoolObjsetPaths, err := filepath.Glob(procFilePath(filepath.Join(c.linuxProcpathBase, c.linuxZpoolObjsetPath))) if err != nil { return err } for _, zpoolPath := range zpoolObjsetPaths { file, err := os.Open(zpoolPath) if err != nil { // This file should exist, but there is a race where an exporting pool can remove the files. Ok to ignore. level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) return errZFSNotAvailable } err = c.parseLinuxPoolObjsetFile(file, zpoolPath, func(poolName string, datasetName string, s zfsSysctl, v uint64) { ch <- c.constPoolObjsetMetric(poolName, datasetName, s, v) }) file.Close() if err != nil { return err } } zpoolStatePaths, err := filepath.Glob(procFilePath(filepath.Join(c.linuxProcpathBase, c.linuxZpoolStatePath))) if err != nil { return err } if zpoolStatePaths == nil { level.Debug(c.logger).Log("msg", "No pool state files found") return nil } for _, zpoolPath := range zpoolStatePaths { file, err := os.Open(zpoolPath) if err != nil { // This file should exist, but there is a race where an exporting pool can remove the files. Ok to ignore. 
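// For orientation (paths are illustrative, not taken from this code): the
// three globs built earlier in updatePoolStats typically expand to files such as
//
//   /proc/spl/kstat/zfs/rpool/io
//   /proc/spl/kstat/zfs/rpool/objset-0x36
//   /proc/spl/kstat/zfs/rpool/state
//
// where "rpool" and the objset id are placeholders for a real pool.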
level.Debug(c.logger).Log("msg", "Cannot open file for reading", "path", zpoolPath) return errZFSNotAvailable } err = c.parsePoolStateFile(file, zpoolPath, func(poolName string, stateName string, isActive uint64) { ch <- c.constPoolStateMetric(poolName, stateName, isActive) }) file.Close() if err != nil { return err } } return nil } func (c *zfsCollector) parseProcfsFile(reader io.Reader, fmtExt string, handler func(zfsSysctl, uint64)) error { scanner := bufio.NewScanner(reader) parseLine := false for scanner.Scan() { parts := strings.Fields(scanner.Text()) if !parseLine && len(parts) == 3 && parts[0] == "name" && parts[1] == "type" && parts[2] == "data" { // Start parsing from here. parseLine = true continue } if !parseLine || len(parts) < 3 { continue } // kstat data type (column 2) should be KSTAT_DATA_UINT64, otherwise ignore // TODO: when other KSTAT_DATA_* types arrive, much of this will need to be restructured if parts[1] == kstatDataUint64 || parts[1] == kstatDataInt64 { key := fmt.Sprintf("kstat.zfs.misc.%s.%s", fmtExt, parts[0]) value, err := strconv.ParseUint(parts[2], 10, 64) if err != nil { return fmt.Errorf("could not parse expected integer value for %q", key) } handler(zfsSysctl(key), value) } } if !parseLine { return fmt.Errorf("did not parse a single %q metric", fmtExt) } return scanner.Err() } func (c *zfsCollector) parsePoolProcfsFile(reader io.Reader, zpoolPath string, handler func(string, zfsSysctl, uint64)) error { scanner := bufio.NewScanner(reader) parseLine := false var fields []string for scanner.Scan() { line := strings.Fields(scanner.Text()) if !parseLine && len(line) >= 12 && line[0] == "nread" { //Start parsing from here. parseLine = true fields = make([]string, len(line)) copy(fields, line) continue } if !parseLine { continue } zpoolPathElements := strings.Split(zpoolPath, "/") pathLen := len(zpoolPathElements) if pathLen < 2 { return fmt.Errorf("zpool path did not return at least two elements") } zpoolName := zpoolPathElements[pathLen-2] zpoolFile := zpoolPathElements[pathLen-1] for i, field := range fields { key := fmt.Sprintf("kstat.zfs.misc.%s.%s", zpoolFile, field) value, err := strconv.ParseUint(line[i], 10, 64) if err != nil { return fmt.Errorf("could not parse expected integer value for %q: %w", key, err) } handler(zpoolName, zfsSysctl(key), value) } } return scanner.Err() } func (c *zfsCollector) parseLinuxPoolObjsetFile(reader io.Reader, zpoolPath string, handler func(string, string, zfsSysctl, uint64)) error { scanner := bufio.NewScanner(reader) parseLine := false var zpoolName, datasetName string for scanner.Scan() { parts := strings.Fields(scanner.Text()) if !parseLine && len(parts) == 3 && parts[0] == "name" && parts[1] == "type" && parts[2] == "data" { parseLine = true continue } if !parseLine || len(parts) < 3 { continue } if parts[0] == "dataset_name" { zpoolPathElements := strings.Split(zpoolPath, "/") pathLen := len(zpoolPathElements) zpoolName = zpoolPathElements[pathLen-2] datasetName = parts[2] continue } if parts[1] == kstatDataUint64 { key := fmt.Sprintf("kstat.zfs.misc.objset.%s", parts[0]) value, err := strconv.ParseUint(parts[2], 10, 64) if err != nil { return fmt.Errorf("could not parse expected integer value for %q", key) } handler(zpoolName, datasetName, zfsSysctl(key), value) } } if !parseLine { return fmt.Errorf("did not parse a single %s %s metric", zpoolName, datasetName) } return scanner.Err() } func (c *zfsCollector) parsePoolStateFile(reader io.Reader, zpoolPath string, handler func(string, string, uint64)) error { scanner 
:= bufio.NewScanner(reader) scanner.Scan() actualStateName, err := scanner.Text(), scanner.Err() if err != nil { return err } actualStateName = strings.ToLower(actualStateName) zpoolPathElements := strings.Split(zpoolPath, "/") pathLen := len(zpoolPathElements) if pathLen < 2 { return fmt.Errorf("zpool path did not return at least two elements") } zpoolName := zpoolPathElements[pathLen-2] for _, stateName := range zfsPoolStatesName { isActive := uint64(0) if actualStateName == stateName { isActive = 1 } handler(zpoolName, stateName, isActive) } return nil } node_exporter-1.7.0/collector/zfs_linux_test.go000066400000000000000000000261201452426057600220050ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nozfs // +build !nozfs package collector import ( "os" "path/filepath" "testing" ) func TestArcstatsParsing(t *testing.T) { arcstatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/arcstats") if err != nil { t.Fatal(err) } defer arcstatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(arcstatsFile, "arcstats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.arcstats.hits") { return } handlerCalled = true if v != uint64(8772612) { t.Fatalf("Incorrect value parsed from procfs data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("Arcstats parsing handler was not called for some expected sysctls") } } func TestZfetchstatsParsing(t *testing.T) { zfetchstatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/zfetchstats") if err != nil { t.Fatal(err) } defer zfetchstatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(zfetchstatsFile, "zfetchstats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.zfetchstats.hits") { return } handlerCalled = true if v != uint64(7067992) { t.Fatalf("Incorrect value parsed from procfs data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("Zfetchstats parsing handler was not called for some expected sysctls") } } func TestZilParsing(t *testing.T) { zilFile, err := os.Open("fixtures/proc/spl/kstat/zfs/zil") if err != nil { t.Fatal(err) } defer zilFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(zilFile, "zil", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.zil.zil_commit_count") { return } handlerCalled = true if v != uint64(10) { t.Fatalf("Incorrect value parsed from procfs data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("Zil parsing handler was not called for some expected sysctls") } } func TestVdevCacheStatsParsing(t *testing.T) { vdevCacheStatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/vdev_cache_stats") if err != nil { t.Fatal(err) } defer vdevCacheStatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(vdevCacheStatsFile, 
"vdev_cache_stats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.vdev_cache_stats.delegations") { return } handlerCalled = true if v != uint64(40) { t.Fatalf("Incorrect value parsed from procfs data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("VdevCacheStats parsing handler was not called for some expected sysctls") } } func TestXuioStatsParsing(t *testing.T) { xuioStatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/xuio_stats") if err != nil { t.Fatal(err) } defer xuioStatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(xuioStatsFile, "xuio_stats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.xuio_stats.onloan_read_buf") { return } handlerCalled = true if v != uint64(32) { t.Fatalf("Incorrect value parsed from procfs data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("XuioStats parsing handler was not called for some expected sysctls") } } func TestFmParsing(t *testing.T) { fmFile, err := os.Open("fixtures/proc/spl/kstat/zfs/fm") if err != nil { t.Fatal(err) } defer fmFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(fmFile, "fm", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.fm.erpt-dropped") { return } handlerCalled = true if v != uint64(18) { t.Fatalf("Incorrect value parsed from procfs data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("Fm parsing handler was not called for some expected sysctls") } } func TestDmuTxParsing(t *testing.T) { dmuTxFile, err := os.Open("fixtures/proc/spl/kstat/zfs/dmu_tx") if err != nil { t.Fatal(err) } defer dmuTxFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(dmuTxFile, "dmu_tx", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.dmu_tx.dmu_tx_assigned") { return } handlerCalled = true if v != uint64(3532844) { t.Fatalf("Incorrect value parsed from procfs data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("DmuTx parsing handler was not called for some expected sysctls") } } func TestZpoolParsing(t *testing.T) { zpoolPaths, err := filepath.Glob("fixtures/proc/spl/kstat/zfs/*/io") if err != nil { t.Fatal(err) } c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false for _, zpoolPath := range zpoolPaths { file, err := os.Open(zpoolPath) if err != nil { t.Fatal(err) } err = c.parsePoolProcfsFile(file, zpoolPath, func(poolName string, s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.io.nread") { return } handlerCalled = true if v != uint64(1884160) && v != uint64(2826240) { t.Fatalf("Incorrect value parsed from procfs data %v", v) } }) file.Close() if err != nil { t.Fatal(err) } } if !handlerCalled { t.Fatal("Zpool parsing handler was not called for some expected sysctls") } } func TestZpoolObjsetParsing(t *testing.T) { zpoolPaths, err := filepath.Glob("fixtures/proc/spl/kstat/zfs/*/objset-*") if err != nil { t.Fatal(err) } c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false for _, zpoolPath := range zpoolPaths { file, err := os.Open(zpoolPath) if err != nil { t.Fatal(err) } err = c.parseLinuxPoolObjsetFile(file, zpoolPath, func(poolName string, datasetName string, s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.objset.writes") { return } handlerCalled = true if v != uint64(0) && v != uint64(4) && v != uint64(10) { t.Fatalf("Incorrect value parsed from procfs data 
%v", v) } }) file.Close() if err != nil { t.Fatal(err) } } if !handlerCalled { t.Fatal("Zpool parsing handler was not called for some expected sysctls") } } func TestAbdstatsParsing(t *testing.T) { abdstatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/abdstats") if err != nil { t.Fatal(err) } defer abdstatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(abdstatsFile, "abdstats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.abdstats.linear_data_size") { return } handlerCalled = true if v != uint64(223232) { t.Fatalf("Incorrect value parsed from procfs abdstats data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("ABDStats parsing handler was not called for some expected sysctls") } } func TestDbufstatsParsing(t *testing.T) { dbufstatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/dbufstats") if err != nil { t.Fatal(err) } defer dbufstatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(dbufstatsFile, "dbufstats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.dbufstats.hash_hits") { return } handlerCalled = true if v != uint64(108807) { t.Fatalf("Incorrect value parsed from procfs dbufstats data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("DbufStats parsing handler was not called for some expected sysctls") } } func TestDnodestatsParsing(t *testing.T) { dnodestatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/dnodestats") if err != nil { t.Fatal(err) } defer dnodestatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(dnodestatsFile, "dnodestats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.dnodestats.dnode_hold_alloc_hits") { return } handlerCalled = true if v != uint64(37617) { t.Fatalf("Incorrect value parsed from procfs dnodestats data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("Dnodestats parsing handler was not called for some expected sysctls") } } func TestVdevMirrorstatsParsing(t *testing.T) { vdevMirrorStatsFile, err := os.Open("fixtures/proc/spl/kstat/zfs/vdev_mirror_stats") if err != nil { t.Fatal(err) } defer vdevMirrorStatsFile.Close() c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false err = c.parseProcfsFile(vdevMirrorStatsFile, "vdev_mirror_stats", func(s zfsSysctl, v uint64) { if s != zfsSysctl("kstat.zfs.misc.vdev_mirror_stats.preferred_not_found") { return } handlerCalled = true if v != uint64(94) { t.Fatalf("Incorrect value parsed from procfs vdev_mirror_stats data") } }) if err != nil { t.Fatal(err) } if !handlerCalled { t.Fatal("VdevMirrorStats parsing handler was not called for some expected sysctls") } } func TestPoolStateParsing(t *testing.T) { zpoolPaths, err := filepath.Glob("fixtures/proc/spl/kstat/zfs/*/state") if err != nil { t.Fatal(err) } c := zfsCollector{} if err != nil { t.Fatal(err) } handlerCalled := false for _, zpoolPath := range zpoolPaths { file, err := os.Open(zpoolPath) if err != nil { t.Fatal(err) } err = c.parsePoolStateFile(file, zpoolPath, func(poolName string, stateName string, isActive uint64) { handlerCalled = true if poolName == "pool1" { if isActive != uint64(1) && stateName == "online" { t.Fatalf("Incorrect parsed value for online state") } if isActive != uint64(0) && stateName != "online" { t.Fatalf("Incorrect parsed value for online state") } } if poolName == "poolz1" { if isActive != uint64(1) && stateName 
== "degraded" { t.Fatalf("Incorrect parsed value for degraded state") } if isActive != uint64(0) && stateName != "degraded" { t.Fatalf("Incorrect parsed value for degraded state") } } if poolName == "pool2" { if isActive != uint64(1) && stateName == "suspended" { t.Fatalf("Incorrect parsed value for suspended state") } if isActive != uint64(0) && stateName != "suspended" { t.Fatalf("Incorrect parsed value for suspended state") } } }) file.Close() if err != nil { t.Fatal(err) } } if !handlerCalled { t.Fatal("Zpool parsing handler was not called for some expected sysctls") } } node_exporter-1.7.0/collector/zfs_solaris.go000066400000000000000000000255361452426057600212750ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !nozfs // +build !nozfs package collector import ( "strings" "github.com/go-kit/log" "github.com/illumos/go-kstat" "github.com/prometheus/client_golang/prometheus" ) type zfsCollector struct { abdstatsLinearCount *prometheus.Desc abdstatsLinearDataSize *prometheus.Desc abdstatsScatterChunkWaste *prometheus.Desc abdstatsScatterCount *prometheus.Desc abdstatsScatterDataSize *prometheus.Desc abdstatsStructSize *prometheus.Desc arcstatsAnonSize *prometheus.Desc arcstatsC *prometheus.Desc arcstatsCMax *prometheus.Desc arcstatsCMin *prometheus.Desc arcstatsDataSize *prometheus.Desc arcstatsDemandDataHits *prometheus.Desc arcstatsDemandDataMisses *prometheus.Desc arcstatsDemandMetadataHits *prometheus.Desc arcstatsDemandMetadataMisses *prometheus.Desc arcstatsHeaderSize *prometheus.Desc arcstatsHits *prometheus.Desc arcstatsMisses *prometheus.Desc arcstatsMFUGhostHits *prometheus.Desc arcstatsMFUGhostSize *prometheus.Desc arcstatsMFUSize *prometheus.Desc arcstatsMRUGhostHits *prometheus.Desc arcstatsMRUGhostSize *prometheus.Desc arcstatsMRUSize *prometheus.Desc arcstatsOtherSize *prometheus.Desc arcstatsP *prometheus.Desc arcstatsSize *prometheus.Desc zfetchstatsHits *prometheus.Desc zfetchstatsMisses *prometheus.Desc logger log.Logger } const ( zfsCollectorSubsystem = "zfs" ) func init() { registerCollector("zfs", defaultEnabled, NewZfsCollector) } func NewZfsCollector(logger log.Logger) (Collector, error) { return &zfsCollector{ abdstatsLinearCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_linear_count_total"), "ZFS ARC buffer data linear count", nil, nil, ), abdstatsLinearDataSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_linear_data_bytes"), "ZFS ARC buffer data linear data size", nil, nil, ), abdstatsScatterChunkWaste: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_scatter_chunk_waste_bytes"), "ZFS ARC buffer data scatter chunk waste", nil, nil, ), abdstatsScatterCount: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_scatter_count_total"), "ZFS ARC buffer data scatter count", nil, nil, ), abdstatsScatterDataSize: prometheus.NewDesc( 
prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_scatter_data_bytes"), "ZFS ARC buffer data scatter data size", nil, nil, ), abdstatsStructSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "abdstats_struct_bytes"), "ZFS ARC buffer data struct size", nil, nil, ), arcstatsAnonSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_anon_bytes"), "ZFS ARC anon size", nil, nil, ), arcstatsC: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_c_bytes"), "ZFS ARC target size", nil, nil, ), arcstatsCMax: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_c_max_bytes"), "ZFS ARC maximum size", nil, nil, ), arcstatsCMin: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_c_min_bytes"), "ZFS ARC minimum size", nil, nil, ), arcstatsDataSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_data_bytes"), "ZFS ARC data size", nil, nil, ), arcstatsDemandDataHits: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_data_hits_total"), "ZFS ARC demand data hits", nil, nil, ), arcstatsDemandDataMisses: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_data_misses_total"), "ZFS ARC demand data misses", nil, nil, ), arcstatsDemandMetadataHits: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_metadata_hits_total"), "ZFS ARC demand metadata hits", nil, nil, ), arcstatsDemandMetadataMisses: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_demand_metadata_misses_total"), "ZFS ARC demand metadata misses", nil, nil, ), arcstatsHeaderSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_hdr_bytes"), "ZFS ARC header size", nil, nil, ), arcstatsHits: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_hits_total"), "ZFS ARC hits", nil, nil, ), arcstatsMisses: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_misses_total"), "ZFS ARC misses", nil, nil, ), arcstatsMFUGhostHits: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mfu_ghost_hits_total"), "ZFS ARC MFU ghost hits", nil, nil, ), arcstatsMFUGhostSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mfu_ghost_size"), "ZFS ARC MFU ghost size", nil, nil, ), arcstatsMFUSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mfu_bytes"), "ZFS ARC MFU size", nil, nil, ), arcstatsMRUGhostHits: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mru_ghost_hits_total"), "ZFS ARC MRU ghost hits", nil, nil, ), arcstatsMRUGhostSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mru_ghost_bytes"), "ZFS ARC MRU ghost size", nil, nil, ), arcstatsMRUSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_mru_bytes"), "ZFS ARC MRU size", nil, nil, ), arcstatsOtherSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_other_bytes"), "ZFS ARC other size", nil, nil, ), arcstatsP: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_p_bytes"), "ZFS ARC MRU target size", nil, nil, ), 
arcstatsSize: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "arcstats_size_bytes"), "ZFS ARC size", nil, nil, ), zfetchstatsHits: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "zfetchstats_hits_total"), "ZFS cache fetch hits", nil, nil, ), zfetchstatsMisses: prometheus.NewDesc( prometheus.BuildFQName(namespace, zfsCollectorSubsystem, "zfetchstats_misses_total"), "ZFS cache fetch misses", nil, nil, ), logger: logger, }, nil } func (c *zfsCollector) updateZfsAbdStats(ch chan<- prometheus.Metric) error { var metricType prometheus.ValueType tok, err := kstat.Open() if err != nil { return err } defer tok.Close() ksZFSInfo, err := tok.Lookup("zfs", 0, "abdstats") if err != nil { return err } for k, v := range map[string]*prometheus.Desc{ "linear_cnt": c.abdstatsLinearCount, "linear_data_size": c.abdstatsLinearDataSize, "scatter_chunk_waste": c.abdstatsScatterChunkWaste, "scatter_cnt": c.abdstatsScatterCount, "scatter_data_size": c.abdstatsScatterDataSize, "struct_size": c.abdstatsStructSize, } { ksZFSInfoValue, err := ksZFSInfo.GetNamed(k) if err != nil { return err } if strings.HasSuffix(k, "_cnt") { metricType = prometheus.CounterValue } else { metricType = prometheus.GaugeValue } ch <- prometheus.MustNewConstMetric( v, metricType, float64(ksZFSInfoValue.UintVal), ) } return nil } func (c *zfsCollector) updateZfsArcStats(ch chan<- prometheus.Metric) error { var metricType prometheus.ValueType tok, err := kstat.Open() if err != nil { return err } defer tok.Close() ksZFSInfo, err := tok.Lookup("zfs", 0, "arcstats") if err != nil { return err } for k, v := range map[string]*prometheus.Desc{ "anon_size": c.arcstatsAnonSize, "c": c.arcstatsC, "c_max": c.arcstatsCMax, "c_min": c.arcstatsCMin, "data_size": c.arcstatsDataSize, "demand_data_hits": c.arcstatsDemandDataHits, "demand_data_misses": c.arcstatsDemandDataMisses, "demand_metadata_hits": c.arcstatsDemandMetadataHits, "demand_metadata_misses": c.arcstatsDemandMetadataMisses, "hdr_size": c.arcstatsHeaderSize, "hits": c.arcstatsHits, "misses": c.arcstatsMisses, "mfu_ghost_hits": c.arcstatsMFUGhostHits, "mfu_ghost_size": c.arcstatsMFUGhostSize, "mfu_size": c.arcstatsMFUSize, "mru_ghost_hits": c.arcstatsMRUGhostHits, "mru_ghost_size": c.arcstatsMRUGhostSize, "mru_size": c.arcstatsMRUSize, "other_size": c.arcstatsOtherSize, "p": c.arcstatsP, "size": c.arcstatsSize, } { ksZFSInfoValue, err := ksZFSInfo.GetNamed(k) if err != nil { return err } if strings.HasSuffix(k, "_hits") || strings.HasSuffix(k, "_misses") { metricType = prometheus.CounterValue } else { metricType = prometheus.GaugeValue } ch <- prometheus.MustNewConstMetric( v, metricType, float64(ksZFSInfoValue.UintVal), ) } return nil } func (c *zfsCollector) updateZfsFetchStats(ch chan<- prometheus.Metric) error { tok, err := kstat.Open() if err != nil { return err } defer tok.Close() ksZFSInfo, err := tok.Lookup("zfs", 0, "zfetchstats") if err != nil { return err } for k, v := range map[string]*prometheus.Desc{ "hits": c.zfetchstatsHits, "misses": c.zfetchstatsMisses, } { ksZFSInfoValue, err := ksZFSInfo.GetNamed(k) if err != nil { return err } ch <- prometheus.MustNewConstMetric( v, prometheus.CounterValue, float64(ksZFSInfoValue.UintVal), ) } return nil } func (c *zfsCollector) Update(ch chan<- prometheus.Metric) error { if err := c.updateZfsAbdStats(ch); err != nil { return err } if err := c.updateZfsArcStats(ch); err != nil { return err } if err := c.updateZfsFetchStats(ch); err != nil { return err } return nil } 
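// Hedged, standalone sketch (not part of node_exporter): it demonstrates the
// go-kstat read pattern the Solaris ZFS collector above is built on, using the
// same module/instance/name triple ("zfs", 0, "arcstats") and a statistic
// ("hits") that the collector already reads. Error handling is deliberately
// minimal.
package main

import (
	"fmt"

	"github.com/illumos/go-kstat"
)

func main() {
	tok, err := kstat.Open()
	if err != nil {
		panic(err)
	}
	defer tok.Close()

	// Look up the named kstat zfs:0:arcstats and read one counter from it.
	arcstats, err := tok.Lookup("zfs", 0, "arcstats")
	if err != nil {
		panic(err)
	}
	hits, err := arcstats.GetNamed("hits")
	if err != nil {
		panic(err)
	}
	fmt.Printf("zfs:0:arcstats:hits = %d\n", hits.UintVal)
}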
node_exporter-1.7.0/collector/zoneinfo_linux.go000066400000000000000000000226121452426057600217750ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collector import ( "fmt" "reflect" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/procfs" ) const zoneinfoSubsystem = "zoneinfo" type zoneinfoCollector struct { gaugeMetricDescs map[string]*prometheus.Desc counterMetricDescs map[string]*prometheus.Desc logger log.Logger fs procfs.FS } func init() { registerCollector("zoneinfo", defaultDisabled, NewZoneinfoCollector) } // NewZoneinfoCollector returns a new Collector exposing zone stats. func NewZoneinfoCollector(logger log.Logger) (Collector, error) { fs, err := procfs.NewFS(*procPath) if err != nil { return nil, fmt.Errorf("failed to open procfs: %w", err) } return &zoneinfoCollector{ gaugeMetricDescs: createGaugeMetricDescriptions(), counterMetricDescs: createCounterMetricDescriptions(), logger: logger, fs: fs, }, nil } func (c *zoneinfoCollector) Update(ch chan<- prometheus.Metric) error { metrics, err := c.fs.Zoneinfo() if err != nil { return fmt.Errorf("couldn't get zoneinfo: %w", err) } for _, metric := range metrics { node := metric.Node zone := metric.Zone metricStruct := reflect.ValueOf(metric) typeOfMetricStruct := metricStruct.Type() for i := 0; i < metricStruct.NumField(); i++ { value := reflect.Indirect(metricStruct.Field(i)) if value.Kind() != reflect.Int64 { continue } metricName := typeOfMetricStruct.Field(i).Name desc, ok := c.gaugeMetricDescs[metricName] metricType := prometheus.GaugeValue if !ok { desc = c.counterMetricDescs[metricName] metricType = prometheus.CounterValue } ch <- prometheus.MustNewConstMetric(desc, metricType, float64(reflect.Indirect(metricStruct.Field(i)).Int()), node, zone) } for i, value := range metric.Protection { metricName := fmt.Sprintf("protection_%d", i) desc, ok := c.gaugeMetricDescs[metricName] if !ok { desc = prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, metricName), fmt.Sprintf("Protection array %d. 
field", i), []string{"node", "zone"}, nil) c.gaugeMetricDescs[metricName] = desc } ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(*value), node, zone) } } return nil } func createGaugeMetricDescriptions() map[string]*prometheus.Desc { return map[string]*prometheus.Desc{ "NrFreePages": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_free_pages"), "Total number of free pages in the zone", []string{"node", "zone"}, nil), "Min": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "min_pages"), "Zone watermark pages_min", []string{"node", "zone"}, nil), "Low": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "low_pages"), "Zone watermark pages_low", []string{"node", "zone"}, nil), "High": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "high_pages"), "Zone watermark pages_high", []string{"node", "zone"}, nil), "Scanned": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "scanned_pages"), "Pages scanned since last reclaim", []string{"node", "zone"}, nil), "Spanned": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "spanned_pages"), "Total pages spanned by the zone, including holes", []string{"node", "zone"}, nil), "Present": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "present_pages"), "Physical pages existing within the zone", []string{"node", "zone"}, nil), "Managed": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "managed_pages"), "Present pages managed by the buddy system", []string{"node", "zone"}, nil), "NrActiveAnon": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_active_anon_pages"), "Number of anonymous pages recently more used", []string{"node", "zone"}, nil), "NrInactiveAnon": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_inactive_anon_pages"), "Number of anonymous pages recently less used", []string{"node", "zone"}, nil), "NrIsolatedAnon": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_isolated_anon_pages"), "Temporary isolated pages from anon lru", []string{"node", "zone"}, nil), "NrAnonPages": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_anon_pages"), "Number of anonymous pages currently used by the system", []string{"node", "zone"}, nil), "NrAnonTransparentHugepages": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_anon_transparent_hugepages"), "Number of anonymous transparent huge pages currently used by the system", []string{"node", "zone"}, nil), "NrActiveFile": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_active_file_pages"), "Number of active pages with file-backing", []string{"node", "zone"}, nil), "NrInactiveFile": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_inactive_file_pages"), "Number of inactive pages with file-backing", []string{"node", "zone"}, nil), "NrIsolatedFile": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_isolated_file_pages"), "Temporary isolated pages from file lru", []string{"node", "zone"}, nil), "NrFilePages": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_file_pages"), "Number of file pages", []string{"node", "zone"}, nil), "NrSlabReclaimable": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_slab_reclaimable_pages"), "Number of 
reclaimable slab pages", []string{"node", "zone"}, nil), "NrSlabUnreclaimable": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_slab_unreclaimable_pages"), "Number of unreclaimable slab pages", []string{"node", "zone"}, nil), "NrMlockStack": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_mlock_stack_pages"), "mlock()ed pages found and moved off LRU", []string{"node", "zone"}, nil), "NrKernelStack": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_kernel_stacks"), "Number of kernel stacks", []string{"node", "zone"}, nil), "NrMapped": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_mapped_pages"), "Number of mapped pages", []string{"node", "zone"}, nil), "NrDirty": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_dirty_pages"), "Number of dirty pages", []string{"node", "zone"}, nil), "NrWriteback": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_writeback_pages"), "Number of writeback pages", []string{"node", "zone"}, nil), "NrUnevictable": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_unevictable_pages"), "Number of unevictable pages", []string{"node", "zone"}, nil), "NrShmem": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_shmem_pages"), "Number of shmem pages (included tmpfs/GEM pages)", []string{"node", "zone"}, nil), } } func createCounterMetricDescriptions() map[string]*prometheus.Desc { return map[string]*prometheus.Desc{ "NrDirtied": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_dirtied_total"), "Page dirtyings since bootup", []string{"node", "zone"}, nil), "NrWritten": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "nr_written_total"), "Page writings since bootup", []string{"node", "zone"}, nil), "NumaHit": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_hit_total"), "Allocated in intended node", []string{"node", "zone"}, nil), "NumaMiss": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_miss_total"), "Allocated in non intended node", []string{"node", "zone"}, nil), "NumaForeign": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_foreign_total"), "Was intended here, hit elsewhere", []string{"node", "zone"}, nil), "NumaInterleave": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_interleave_total"), "Interleaver preferred this zone", []string{"node", "zone"}, nil), "NumaLocal": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_local_total"), "Allocation from local node", []string{"node", "zone"}, nil), "NumaOther": prometheus.NewDesc( prometheus.BuildFQName(namespace, zoneinfoSubsystem, "numa_other_total"), "Allocation from other node", []string{"node", "zone"}, nil), } } node_exporter-1.7.0/docs/000077500000000000000000000000001452426057600153375ustar00rootroot00000000000000node_exporter-1.7.0/docs/TIME.md000066400000000000000000000071411452426057600164220ustar00rootroot00000000000000# Monitoring time sync with node_exporter ## `ntp` collector NOTE: This collector is deprecated and will be removed in the next major version release. This collector is intended for usage with local NTP daemons including [ntp.org](http://ntp.org/), [chrony](https://chrony.tuxfamily.org/comparison.html), and [OpenNTPD](http://www.openntpd.org/). 
Note, some chrony packages have a `local stratum 10` configuration value that makes chrony a valid server even when it is unsynchronised. This configuration makes one of the heuristics that derive `node_ntp_sanity` unreliable. Note, OpenNTPD does not listen for SNTP queries by default. Add `listen on 127.0.0.1` to the OpenNTPD configuration when using this collector with that package. ### `node_ntp_stratum` This metric shows the [stratum](https://en.wikipedia.org/wiki/Network_Time_Protocol#Clock_strata) of the local NTP daemon. Stratum `16` means that the clock is unsynchronised. See also the aforementioned note about the default local stratum in chrony. ### `node_ntp_leap` Raw leap flag value. 0 – OK, 1 – add leap second at UTC midnight, 2 – delete leap second at UTC midnight, 3 – unsynchronised. OpenNTPD ignores leap seconds and never sets the leap flag to `1` or `2`. ### `node_ntp_rtt` RTT (round-trip time) from the node_exporter collector to the local NTPD. This value is used in the sanity check as part of the causality violation estimate. ### `node_ntp_offset` [Clock offset](https://en.wikipedia.org/wiki/Network_Time_Protocol#Clock_synchronization_algorithm) between local time and NTPD time. ntp.org always sets NTPD time to the local clock instead of relaying remote NTP time, so this offset is irrelevant for that NTPD. This value is used in the sanity check as part of the causality violation estimate. ### `node_ntp_reference_timestamp_seconds` Reference Time. This field shows the time when the last adjustment was made, but implementation details vary from "**local** wall-clock time" to "Reference Time field in the incoming SNTP packet". `time() - node_ntp_reference_timestamp_seconds` and `node_time_seconds - node_ntp_reference_timestamp_seconds` give some estimate of the "freshness" of synchronization. ### `node_ntp_root_delay` and `node_ntp_root_dispersion` These values are used to calculate the synchronization distance, which is limited by `collector.ntp.max-distance`. ntp.org adds the known local offset to the announced root dispersion and linearly increases dispersion in case of NTP connectivity problems; OpenNTPD does not account for dispersion at all and always reports `0`. ### `node_ntp_sanity` Aggregate NTPD health, including stratum, leap flag, sane freshness, root distance being less than `collector.ntp.max-distance`, and causality violation being less than `collector.ntp.local-offset-tolerance`. Causality violation is a lower-bound estimate of clock error obtained using SNTP; it is calculated as the positive portion of `abs(node_ntp_offset) - node_ntp_rtt / 2`. ## `timex` collector This collector exports the state of the kernel time synchronization flag, which should be maintained by the time-keeping daemon and is eventually raised by the Linux kernel if the time-keeping daemon does not update it regularly. Unfortunately some daemons do not handle this flag properly; e.g. chrony-1.30 from Debian/jessie clears the `STA_UNSYNC` flag during daemon initialisation and does not indicate clock synchronization status using this flag. Modern chrony versions should work better. All chrony versions require the `rtcsync` option to maintain this flag. OpenNTPD did not touch this flag at all until OpenNTPD-5.9p1. On the other hand, the combination of `sync_status` and `offset` exported by the `timex` collector is the way to monitor whether systemd-timesyncd does its job.
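The causality-violation bound used by `node_ntp_sanity` above is simple enough to state as code. Below is a minimal, hedged sketch of that arithmetic (the function name and sample values are illustrative and not part of the collector): an SNTP exchange can only attribute up to half of the round-trip time to network delay, so any offset beyond that is provable clock error. In the collector, this quantity is compared against `collector.ntp.local-offset-tolerance` when deriving `node_ntp_sanity`.

```go
package main

import (
	"fmt"
	"math"
)

// causalityViolation returns the positive portion of |offset| - rtt/2,
// i.e. the lower bound on real clock error that a single SNTP exchange can
// prove. Arguments are in seconds, mirroring node_ntp_offset and node_ntp_rtt.
func causalityViolation(offsetSeconds, rttSeconds float64) float64 {
	return math.Max(0, math.Abs(offsetSeconds)-rttSeconds/2)
}

func main() {
	// A 3 ms offset measured over a 1 ms round trip cannot be explained by
	// network delay alone: at least 2.5 ms of genuine clock error remains.
	fmt.Printf("%.4fs\n", causalityViolation(0.003, 0.001)) // 0.0025s
}
```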
node_exporter-1.7.0/docs/V0_16_UPGRADE_GUIDE.md000066400000000000000000000023231452426057600205400ustar00rootroot00000000000000# Version 0.16.0 Upgrade Guide The `node_exporter` 0.16.0 and newer renamed many metrics in order to conform with Prometheus [naming best practices]. In order to allow easy upgrades, there are several options. ## Update dashboards Grafana users can add multiple queries in order to display both the old and new data simultaneously. ## Use recording rules We have provided a [sample recording rule set that translates old metrics to new ones] and the [one that translates new metrics format to old one] to create duplicate metrics (it translates "old" metrics format to new one). This has a minor disadvantage that it creates a lot of extra data, and re-aligns the timestamps of the data. ## Run both old and new versions simultaneously. It's possible to run both the old and new exporter on different ports, and include an additional scrape job in Prometheus. It's recommended to enable only the collectors that have name changes that you care about. [naming best practices]: https://prometheus.io/docs/practices/naming/ [sample recording rule set that translates old metrics to new ones]: example-16-compatibility-rules.yml [one that translates new metrics format to old one]: example-16-compatibility-rules-new-to-old.yml node_exporter-1.7.0/docs/example-16-compatibility-rules-new-to-old.yml000066400000000000000000000176671452426057600257440ustar00rootroot00000000000000groups: - name: node_exporter-16-bcache rules: - expr: node_bcache_cache_read_races record: node_bcache_cache_read_races_total - name: node_exporter-16-buddyinfo rules: - expr: node_buddyinfo_blocks record: node_buddyinfo_count - name: node_exporter-16-stat rules: - expr: node_boot_time_seconds record: node_boot_time - expr: node_time_seconds record: node_time - expr: node_context_switches_total record: node_context_switches - expr: node_forks_total record: node_forks - expr: node_intr_total record: node_intr - name: node_exporter-16-cpu rules: - expr: label_replace(node_cpu_seconds_total, "cpu", "cpu$1", "cpu", "(.+)") record: node_cpu - name: node_exporter-16-diskstats rules: - expr: node_disk_read_bytes_total record: node_disk_bytes_read - expr: node_disk_written_bytes_total record: node_disk_bytes_written - expr: node_disk_io_time_seconds_total * 1000 record: node_disk_io_time_ms - expr: node_disk_io_time_weighted_seconds_total record: node_disk_io_time_weighted - expr: node_disk_reads_completed_total record: node_disk_reads_completed - expr: node_disk_reads_merged_total record: node_disk_reads_merged - expr: node_disk_read_time_seconds_total * 1000 record: node_disk_read_time_ms - expr: node_disk_writes_completed_total record: node_disk_writes_completed - expr: node_disk_writes_merged_total record: node_disk_writes_merged - expr: node_disk_write_time_seconds_total * 1000 record: node_disk_write_time_ms - name: node_exporter-16-filesystem rules: - expr: node_filesystem_free_bytes record: node_filesystem_free - expr: node_filesystem_avail_bytes record: node_filesystem_avail - expr: node_filesystem_size_bytes record: node_filesystem_size - name: node_exporter-16-infiniband rules: - expr: node_infiniband_port_data_received_bytes_total record: node_infiniband_port_data_received_bytes - expr: node_infiniband_port_data_transmitted_bytes_total record: node_infiniband_port_data_transmitted_bytes - name: node_exporter-16-interrupts rules: - expr: node_interrupts_total record: node_interrupts - name: node_exporter-16-memory 
rules: - expr: node_memory_Active_bytes record: node_memory_Active - expr: node_memory_Active_anon_bytes record: node_memory_Active_anon - expr: node_memory_Active_file_bytes record: node_memory_Active_file - expr: node_memory_AnonHugePages_bytes record: node_memory_AnonHugePages - expr: node_memory_AnonPages_bytes record: node_memory_AnonPages - expr: node_memory_Bounce_bytes record: node_memory_Bounce - expr: node_memory_Buffers_bytes record: node_memory_Buffers - expr: node_memory_Cached_bytes record: node_memory_Cached - expr: node_memory_CommitLimit_bytes record: node_memory_CommitLimit - expr: node_memory_Committed_AS_bytes record: node_memory_Committed_AS - expr: node_memory_DirectMap2M_bytes record: node_memory_DirectMap2M - expr: node_memory_DirectMap4k_bytes record: node_memory_DirectMap4k - expr: node_memory_Dirty_bytes record: node_memory_Dirty - expr: node_memory_HardwareCorrupted_bytes record: node_memory_HardwareCorrupted - expr: node_memory_Hugepagesize_bytes record: node_memory_Hugepagesize - expr: node_memory_Inactive_bytes record: node_memory_Inactive - expr: node_memory_Inactive_anon_bytes record: node_memory_Inactive_anon - expr: node_memory_Inactive_file_bytes record: node_memory_Inactive_file - expr: node_memory_KernelStack_bytes record: node_memory_KernelStack - expr: node_memory_Mapped_bytes record: node_memory_Mapped - expr: node_memory_MemAvailable_bytes record: node_memory_MemAvailable - expr: node_memory_MemFree_bytes record: node_memory_MemFree - expr: node_memory_MemTotal_bytes record: node_memory_MemTotal - expr: node_memory_Mlocked_bytes record: node_memory_Mlocked - expr: node_memory_NFS_Unstable_bytes record: node_memory_NFS_Unstable - expr: node_memory_PageTables_bytes record: node_memory_PageTables - expr: node_memory_Shmem_bytes record: node_memory_Shmem - expr: node_memory_ShmemHugePages_bytes record: node_memory_ShmemHugePages - expr: node_memory_ShmemPmdMapped_bytes record: node_memory_ShmemPmdMapped - expr: node_memory_Slab_bytes record: node_memory_Slab - expr: node_memory_SReclaimable_bytes record: node_memory_SReclaimable - expr: node_memory_SUnreclaim_bytes record: node_memory_SUnreclaim - expr: node_memory_SwapCached_bytes record: node_memory_SwapCached - expr: node_memory_SwapFree_bytes record: node_memory_SwapFree - expr: node_memory_SwapTotal_bytes record: node_memory_SwapTotal - expr: node_memory_Unevictable_bytes record: node_memory_Unevictable - expr: node_memory_VmallocChunk_bytes record: node_memory_VmallocChunk - expr: node_memory_VmallocTotal_bytes record: node_memory_VmallocTotal - expr: node_memory_VmallocUsed_bytes record: node_memory_VmallocUsed - expr: node_memory_Writeback_bytes record: node_memory_Writeback - expr: node_memory_WritebackTmp_bytes record: node_memory_WritebackTmp - name: node_exporter-16-network rules: - expr: node_network_receive_bytes_total record: node_network_receive_bytes - expr: node_network_receive_compressed_total record: node_network_receive_compressed - expr: node_network_receive_drop_total record: node_network_receive_drop - expr: node_network_receive_errs_total record: node_network_receive_errs - expr: node_network_receive_fifo_total record: node_network_receive_fifo - expr: node_network_receive_frame_total record: node_network_receive_frame - expr: node_network_receive_multicast_total record: node_network_receive_multicast - expr: node_network_receive_packets_total record: node_network_receive_packets - expr: node_network_transmit_bytes_total record: node_network_transmit_bytes - expr: 
node_network_transmit_compressed_total record: node_network_transmit_compressed - expr: node_network_transmit_drop_total record: node_network_transmit_drop - expr: node_network_transmit_errs_total record: node_network_transmit_errs - expr: node_network_transmit_fifo_total record: node_network_transmit_fifo - expr: node_network_transmit_frame_total record: node_network_transmit_frame - expr: node_network_transmit_multicast_total record: node_network_transmit_multicast - expr: node_network_transmit_packets_total record: node_network_transmit_packets - name: node_exporter-16-nfs rules: - expr: node_nfs_connections_total record: node_nfs_net_connections - expr: node_nfs_packets_total record: node_nfs_net_reads - expr: label_replace(label_replace(node_nfs_requests_total, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") record: node_nfs_procedures - expr: node_nfs_rpc_authentication_refreshes_total record: node_nfs_rpc_authentication_refreshes - expr: node_nfs_rpcs_total record: node_nfs_rpc_operations - expr: node_nfs_rpc_retransmissions_total record: node_nfs_rpc_retransmissions - name: node_exporter-16-textfile rules: - expr: node_textfile_mtime_seconds record: node_textfile_mtime node_exporter-1.7.0/docs/example-16-compatibility-rules.yml000066400000000000000000000176671452426057600237610ustar00rootroot00000000000000groups: - name: node_exporter-16-bcache rules: - record: node_bcache_cache_read_races expr: node_bcache_cache_read_races_total - name: node_exporter-16-buddyinfo rules: - record: node_buddyinfo_blocks expr: node_buddyinfo_count - name: node_exporter-16-stat rules: - record: node_boot_time_seconds expr: node_boot_time - record: node_time_seconds expr: node_time - record: node_context_switches_total expr: node_context_switches - record: node_forks_total expr: node_forks - record: node_intr_total expr: node_intr - name: node_exporter-16-cpu rules: - record: node_cpu expr: label_replace(node_cpu_seconds_total, "cpu", "$1", "cpu", "cpu(.+)") - name: node_exporter-16-diskstats rules: - record: node_disk_read_bytes_total expr: node_disk_bytes_read - record: node_disk_written_bytes_total expr: node_disk_bytes_written - record: node_disk_io_time_seconds_total expr: node_disk_io_time_ms / 1000 - record: node_disk_io_time_weighted_seconds_total expr: node_disk_io_time_weighted - record: node_disk_reads_completed_total expr: node_disk_reads_completed - record: node_disk_reads_merged_total expr: node_disk_reads_merged - record: node_disk_read_time_seconds_total expr: node_disk_read_time_ms / 1000 - record: node_disk_writes_completed_total expr: node_disk_writes_completed - record: node_disk_writes_merged_total expr: node_disk_writes_merged - record: node_disk_write_time_seconds_total expr: node_disk_write_time_ms / 1000 - name: node_exporter-16-filesystem rules: - record: node_filesystem_free_bytes expr: node_filesystem_free - record: node_filesystem_avail_bytes expr: node_filesystem_avail - record: node_filesystem_size_bytes expr: node_filesystem_size - name: node_exporter-16-infiniband rules: - record: node_infiniband_port_data_received_bytes_total expr: node_infiniband_port_data_received_bytes - record: node_infiniband_port_data_transmitted_bytes_total expr: node_infiniband_port_data_transmitted_bytes - name: node_exporter-16-interrupts rules: - record: node_interrupts_total expr: node_interrupts - name: node_exporter-16-memory rules: - record: node_memory_Active_bytes expr: node_memory_Active - record: node_memory_Active_anon_bytes expr: node_memory_Active_anon - 
record: node_memory_Active_file_bytes expr: node_memory_Active_file - record: node_memory_AnonHugePages_bytes expr: node_memory_AnonHugePages - record: node_memory_AnonPages_bytes expr: node_memory_AnonPages - record: node_memory_Bounce_bytes expr: node_memory_Bounce - record: node_memory_Buffers_bytes expr: node_memory_Buffers - record: node_memory_Cached_bytes expr: node_memory_Cached - record: node_memory_CommitLimit_bytes expr: node_memory_CommitLimit - record: node_memory_Committed_AS_bytes expr: node_memory_Committed_AS - record: node_memory_DirectMap2M_bytes expr: node_memory_DirectMap2M - record: node_memory_DirectMap4k_bytes expr: node_memory_DirectMap4k - record: node_memory_Dirty_bytes expr: node_memory_Dirty - record: node_memory_HardwareCorrupted_bytes expr: node_memory_HardwareCorrupted - record: node_memory_Hugepagesize_bytes expr: node_memory_Hugepagesize - record: node_memory_Inactive_bytes expr: node_memory_Inactive - record: node_memory_Inactive_anon_bytes expr: node_memory_Inactive_anon - record: node_memory_Inactive_file_bytes expr: node_memory_Inactive_file - record: node_memory_KernelStack_bytes expr: node_memory_KernelStack - record: node_memory_Mapped_bytes expr: node_memory_Mapped - record: node_memory_MemAvailable_bytes expr: node_memory_MemAvailable - record: node_memory_MemFree_bytes expr: node_memory_MemFree - record: node_memory_MemTotal_bytes expr: node_memory_MemTotal - record: node_memory_Mlocked_bytes expr: node_memory_Mlocked - record: node_memory_NFS_Unstable_bytes expr: node_memory_NFS_Unstable - record: node_memory_PageTables_bytes expr: node_memory_PageTables - record: node_memory_Shmem_bytes expr: node_memory_Shmem - record: node_memory_ShmemHugePages_bytes expr: node_memory_ShmemHugePages - record: node_memory_ShmemPmdMapped_bytes expr: node_memory_ShmemPmdMapped - record: node_memory_Slab_bytes expr: node_memory_Slab - record: node_memory_SReclaimable_bytes expr: node_memory_SReclaimable - record: node_memory_SUnreclaim_bytes expr: node_memory_SUnreclaim - record: node_memory_SwapCached_bytes expr: node_memory_SwapCached - record: node_memory_SwapFree_bytes expr: node_memory_SwapFree - record: node_memory_SwapTotal_bytes expr: node_memory_SwapTotal - record: node_memory_Unevictable_bytes expr: node_memory_Unevictable - record: node_memory_VmallocChunk_bytes expr: node_memory_VmallocChunk - record: node_memory_VmallocTotal_bytes expr: node_memory_VmallocTotal - record: node_memory_VmallocUsed_bytes expr: node_memory_VmallocUsed - record: node_memory_Writeback_bytes expr: node_memory_Writeback - record: node_memory_WritebackTmp_bytes expr: node_memory_WritebackTmp - name: node_exporter-16-network rules: - record: node_network_receive_bytes_total expr: node_network_receive_bytes - record: node_network_receive_compressed_total expr: node_network_receive_compressed - record: node_network_receive_drop_total expr: node_network_receive_drop - record: node_network_receive_errs_total expr: node_network_receive_errs - record: node_network_receive_fifo_total expr: node_network_receive_fifo - record: node_network_receive_frame_total expr: node_network_receive_frame - record: node_network_receive_multicast_total expr: node_network_receive_multicast - record: node_network_receive_packets_total expr: node_network_receive_packets - record: node_network_transmit_bytes_total expr: node_network_transmit_bytes - record: node_network_transmit_compressed_total expr: node_network_transmit_compressed - record: node_network_transmit_drop_total expr: 
node_network_transmit_drop - record: node_network_transmit_errs_total expr: node_network_transmit_errs - record: node_network_transmit_fifo_total expr: node_network_transmit_fifo - record: node_network_transmit_frame_total expr: node_network_transmit_frame - record: node_network_transmit_multicast_total expr: node_network_transmit_multicast - record: node_network_transmit_packets_total expr: node_network_transmit_packets - name: node_exporter-16-nfs rules: - record: node_nfs_connections_total expr: node_nfs_net_connections - record: node_nfs_packets_total expr: node_nfs_net_reads - record: node_nfs_requests_total expr: label_replace(label_replace(node_nfs_procedures, "proto", "$1", "version", "(.+)"), "method", "$1", "procedure", "(.+)") - record: node_nfs_rpc_authentication_refreshes_total expr: node_nfs_rpc_authentication_refreshes - record: node_nfs_rpcs_total expr: node_nfs_rpc_operations - record: node_nfs_rpc_retransmissions_total expr: node_nfs_rpc_retransmissions - name: node_exporter-16-textfile rules: - record: node_textfile_mtime_seconds expr: node_textfile_mtime node_exporter-1.7.0/docs/example-17-compatibility-rules-new-to-old.yml000066400000000000000000000002351452426057600257240ustar00rootroot00000000000000groups: - name: node_exporter-17-supervisord rules: - record: node_supervisord_start_time_seconds expr: node_supervisord_uptime + time() node_exporter-1.7.0/docs/example-17-compatibility-rules.yml000066400000000000000000000002351452426057600237410ustar00rootroot00000000000000groups: - name: node_exporter-17-supervisord rules: - record: node_supervisord_uptime expr: time() - node_supervisord_start_time_seconds node_exporter-1.7.0/docs/node-mixin/000077500000000000000000000000001452426057600174065ustar00rootroot00000000000000node_exporter-1.7.0/docs/node-mixin/.gitignore000066400000000000000000000000631452426057600213750ustar00rootroot00000000000000jsonnetfile.lock.json vendor *.yaml dashboards_out node_exporter-1.7.0/docs/node-mixin/Makefile000066400000000000000000000017661452426057600210600ustar00rootroot00000000000000JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s all: fmt node_alerts.yaml node_rules.yaml dashboards_out lint fmt: find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ xargs -n 1 -- $(JSONNET_FMT) -i node_alerts.yaml: mixin.libsonnet config.libsonnet $(wildcard alerts/*) jsonnet -S alerts.jsonnet > $@ node_rules.yaml: mixin.libsonnet config.libsonnet $(wildcard rules/*) jsonnet -S rules.jsonnet > $@ dashboards_out: mixin.libsonnet config.libsonnet $(wildcard dashboards/*) @mkdir -p dashboards_out jsonnet -J vendor -m dashboards_out dashboards.jsonnet lint: node_alerts.yaml node_rules.yaml find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \ while read f; do \ $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \ done promtool check rules node_alerts.yaml node_rules.yaml .PHONY: jb_install jb_install: jb install clean: rm -rf dashboards_out node_alerts.yaml node_rules.yaml node_exporter-1.7.0/docs/node-mixin/README.md000066400000000000000000000025011452426057600206630ustar00rootroot00000000000000# Node Mixin _This is a work in progress. We aim for it to become a good role model for alerts and dashboards eventually, but it is not quite there yet._ The Node Mixin is a set of configurable, reusable, and extensible alerts and dashboards based on the metrics exported by the Node Exporter. 
The mixin creates recording and alerting rules for Prometheus and suitable dashboard descriptions for Grafana. To use them, you need to have `jsonnet` (v0.16+) and `jb` installed. If you have a working Go development environment, it's easiest to run the following: ```bash go install github.com/google/go-jsonnet/cmd/jsonnet@latest go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest ``` Next, install the dependencies by running the following command in this directory: ```bash jb install ``` You can then build the Prometheus rules files `node_alerts.yaml` and `node_rules.yaml`: ```bash make node_alerts.yaml node_rules.yaml ``` You can also build a directory `dashboard_out` with the JSON dashboard files for Grafana: ```bash make dashboards_out ``` Note that some of the generated dashboards require recording rules specified in the previously generated `node_rules.yaml`. For more advanced uses of mixins, see . node_exporter-1.7.0/docs/node-mixin/alerts.jsonnet000066400000000000000000000001011452426057600222720ustar00rootroot00000000000000std.manifestYamlDoc((import 'mixin.libsonnet').prometheusAlerts) node_exporter-1.7.0/docs/node-mixin/alerts/000077500000000000000000000000001452426057600207005ustar00rootroot00000000000000node_exporter-1.7.0/docs/node-mixin/alerts/alerts.libsonnet000066400000000000000000000466701452426057600241260ustar00rootroot00000000000000{ prometheusAlerts+:: { groups+: [ { name: 'node-exporter', rules: [ { alert: 'NodeFilesystemSpaceFillingUp', expr: ||| ( node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d and predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 24*60*60) < 0 and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', labels: { severity: 'warning', }, annotations: { summary: 'Filesystem is predicted to run out of space within the next 24 hours.', description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.', }, }, { alert: 'NodeFilesystemSpaceFillingUp', expr: ||| ( node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpCriticalThreshold)d and predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0 and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', labels: { severity: '%(nodeCriticalSeverity)s' % $._config, }, annotations: { summary: 'Filesystem is predicted to run out of space within the next 4 hours.', description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.', }, }, { alert: 'NodeFilesystemAlmostOutOfSpace', expr: ||| ( node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / 
node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceAvailableWarningThreshold)d and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '30m', labels: { severity: 'warning', }, annotations: { summary: 'Filesystem has less than %(fsSpaceAvailableWarningThreshold)d%% space left.' % $._config, description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', }, }, { alert: 'NodeFilesystemAlmostOutOfSpace', expr: ||| ( node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceAvailableCriticalThreshold)d and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '30m', labels: { severity: '%(nodeCriticalSeverity)s' % $._config, }, annotations: { summary: 'Filesystem has less than %(fsSpaceAvailableCriticalThreshold)d%% space left.' % $._config, description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.', }, }, { alert: 'NodeFilesystemFilesFillingUp', expr: ||| ( node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 40 and predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 24*60*60) < 0 and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', labels: { severity: 'warning', }, annotations: { summary: 'Filesystem is predicted to run out of inodes within the next 24 hours.', description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.', }, }, { alert: 'NodeFilesystemFilesFillingUp', expr: ||| ( node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 20 and predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0 and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', labels: { severity: '%(nodeCriticalSeverity)s' % $._config, }, annotations: { summary: 'Filesystem is predicted to run out of inodes within the next 4 hours.', description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.', }, }, { alert: 'NodeFilesystemAlmostOutOfFiles', expr: ||| ( node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 5 and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', labels: { severity: 'warning', }, annotations: { summary: 'Filesystem has less 
than 5% inodes left.', description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', }, }, { alert: 'NodeFilesystemAlmostOutOfFiles', expr: ||| ( node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 3 and node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0 ) ||| % $._config, 'for': '1h', labels: { severity: '%(nodeCriticalSeverity)s' % $._config, }, annotations: { summary: 'Filesystem has less than 3% inodes left.', description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.', }, }, { alert: 'NodeNetworkReceiveErrs', expr: ||| rate(node_network_receive_errs_total{%(nodeExporterSelector)s}[2m]) / rate(node_network_receive_packets_total{%(nodeExporterSelector)s}[2m]) > 0.01 ||| % $._config, 'for': '1h', labels: { severity: 'warning', }, annotations: { summary: 'Network interface is reporting many receive errors.', description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.', }, }, { alert: 'NodeNetworkTransmitErrs', expr: ||| rate(node_network_transmit_errs_total{%(nodeExporterSelector)s}[2m]) / rate(node_network_transmit_packets_total{%(nodeExporterSelector)s}[2m]) > 0.01 ||| % $._config, 'for': '1h', labels: { severity: 'warning', }, annotations: { summary: 'Network interface is reporting many transmit errors.', description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.', }, }, { alert: 'NodeHighNumberConntrackEntriesUsed', expr: ||| (node_nf_conntrack_entries{%(nodeExporterSelector)s} / node_nf_conntrack_entries_limit) > 0.75 ||| % $._config, annotations: { summary: 'Number of conntrack are getting close to the limit.', description: '{{ $value | humanizePercentage }} of conntrack entries are used.', }, labels: { severity: 'warning', }, }, { alert: 'NodeTextFileCollectorScrapeError', expr: ||| node_textfile_scrape_error{%(nodeExporterSelector)s} == 1 ||| % $._config, annotations: { summary: 'Node Exporter text file collector failed to scrape.', description: 'Node Exporter text file collector on {{ $labels.instance }} failed to scrape.', }, labels: { severity: 'warning', }, }, { alert: 'NodeClockSkewDetected', expr: ||| ( node_timex_offset_seconds{%(nodeExporterSelector)s} > 0.05 and deriv(node_timex_offset_seconds{%(nodeExporterSelector)s}[5m]) >= 0 ) or ( node_timex_offset_seconds{%(nodeExporterSelector)s} < -0.05 and deriv(node_timex_offset_seconds{%(nodeExporterSelector)s}[5m]) <= 0 ) ||| % $._config, 'for': '10m', labels: { severity: 'warning', }, annotations: { summary: 'Clock skew detected.', description: 'Clock at {{ $labels.instance }} is out of sync by more than 0.05s. Ensure NTP is configured correctly on this host.', }, }, { alert: 'NodeClockNotSynchronising', expr: ||| min_over_time(node_timex_sync_status{%(nodeExporterSelector)s}[5m]) == 0 and node_timex_maxerror_seconds{%(nodeExporterSelector)s} >= 16 ||| % $._config, 'for': '10m', labels: { severity: 'warning', }, annotations: { summary: 'Clock not synchronising.', description: 'Clock at {{ $labels.instance }} is not synchronising. 
Ensure NTP is configured on this host.', }, }, { alert: 'NodeRAIDDegraded', expr: ||| node_md_disks_required{%(nodeExporterSelector)s,%(diskDeviceSelector)s} - ignoring (state) (node_md_disks{state="active",%(nodeExporterSelector)s,%(diskDeviceSelector)s}) > 0 ||| % $._config, 'for': '15m', labels: { severity: 'critical', }, annotations: { summary: 'RAID Array is degraded.', description: "RAID array '{{ $labels.device }}' at {{ $labels.instance }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.", }, }, { alert: 'NodeRAIDDiskFailure', expr: ||| node_md_disks{state="failed",%(nodeExporterSelector)s,%(diskDeviceSelector)s} > 0 ||| % $._config, labels: { severity: 'warning', }, annotations: { summary: 'Failed device in RAID array.', description: "At least one device in RAID array at {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap.", }, }, { alert: 'NodeFileDescriptorLimit', expr: ||| ( node_filefd_allocated{%(nodeExporterSelector)s} * 100 / node_filefd_maximum{%(nodeExporterSelector)s} > 70 ) ||| % $._config, 'for': '15m', labels: { severity: 'warning', }, annotations: { summary: 'Kernel is predicted to exhaust file descriptors limit soon.', description: 'File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%.', }, }, { alert: 'NodeFileDescriptorLimit', expr: ||| ( node_filefd_allocated{%(nodeExporterSelector)s} * 100 / node_filefd_maximum{%(nodeExporterSelector)s} > 90 ) ||| % $._config, 'for': '15m', labels: { severity: 'critical', }, annotations: { summary: 'Kernel is predicted to exhaust file descriptors limit soon.', description: 'File descriptors limit at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}%.', }, }, { alert: 'NodeCPUHighUsage', expr: ||| sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode!="idle"}[2m]))) * 100 > %(cpuHighUsageThreshold)d ||| % $._config, 'for': '15m', labels: { severity: 'info', }, annotations: { summary: 'High CPU usage.', description: ||| CPU usage at {{ $labels.instance }} has been above %(cpuHighUsageThreshold)d%% for the last 15 minutes, is currently at {{ printf "%%.2f" $value }}%%. ||| % $._config, }, }, { alert: 'NodeSystemSaturation', expr: ||| node_load1{%(nodeExporterSelector)s} / count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle"}) > %(systemSaturationPerCoreThreshold)d ||| % $._config, 'for': '15m', labels: { severity: 'warning', }, annotations: { summary: 'System saturated, load per core is very high.', description: ||| System load per core at {{ $labels.instance }} has been above %(systemSaturationPerCoreThreshold)d for the last 15 minutes, is currently at {{ printf "%%.2f" $value }}. This might indicate this instance resources saturation and can cause it becoming unresponsive. ||| % $._config, }, }, { alert: 'NodeMemoryMajorPagesFaults', expr: ||| rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[5m]) > %(memoryMajorPagesFaultsThreshold)d ||| % $._config, 'for': '15m', labels: { severity: 'warning', }, annotations: { summary: 'Memory major page faults are occurring at very high rate.', description: ||| Memory major pages are occurring at very high rate at {{ $labels.instance }}, %(memoryMajorPagesFaultsThreshold)d major page faults per second for the last 15 minutes, is currently at {{ printf "%%.2f" $value }}. 
Please check that there is enough memory available at this instance. ||| % $._config, }, }, { alert: 'NodeMemoryHighUtilization', expr: ||| 100 - (node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} / node_memory_MemTotal_bytes{%(nodeExporterSelector)s} * 100) > %(memoryHighUtilizationThreshold)d ||| % $._config, 'for': '15m', labels: { severity: 'warning', }, annotations: { summary: 'Host is running out of memory.', description: ||| Memory is filling up at {{ $labels.instance }}, has been above %(memoryHighUtilizationThreshold)d%% for the last 15 minutes, is currently at {{ printf "%%.2f" $value }}%%. ||| % $._config, }, }, { alert: 'NodeDiskIOSaturation', expr: ||| rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[5m]) > %(diskIOSaturationThreshold)d ||| % $._config, 'for': '30m', labels: { severity: 'warning', }, annotations: { summary: 'Disk IO queue is high.', description: ||| Disk IO queue (aqu-sq) is high on {{ $labels.device }} at {{ $labels.instance }}, has been above %(diskIOSaturationThreshold)d for the last 15 minutes, is currently at {{ printf "%%.2f" $value }}. This symptom might indicate disk saturation. ||| % $._config, }, }, { alert: 'NodeSystemdServiceFailed', expr: ||| node_systemd_unit_state{%(nodeExporterSelector)s, state="failed"} == 1 ||| % $._config, 'for': '5m', labels: { severity: 'warning', }, annotations: { summary: 'Systemd service has entered failed state.', description: 'Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }}', }, }, { alert: 'NodeBondingDegraded', expr: ||| (node_bonding_slaves - node_bonding_active) != 0 ||| % $._config, 'for': '5m', labels: { severity: 'warning', }, annotations: { summary: 'Bonding interface is degraded', description: 'Bonding interface {{ $labels.master }} on {{ $labels.instance }} is in degraded state due to one or more slave failures.', }, }, ], }, ], }, } node_exporter-1.7.0/docs/node-mixin/config.libsonnet000066400000000000000000000077771452426057600226140ustar00rootroot00000000000000{ _config+:: { // Selectors are inserted between {} in Prometheus queries. // Select the metrics coming from the node exporter. Note that all // the selected metrics are shown stacked on top of each other in // the 'USE Method / Cluster' dashboard. Consider disabling that // dashboard if mixing up all those metrics in the same dashboard // doesn't make sense (e.g. because they are coming from different // clusters). nodeExporterSelector: 'job="node"', // Select the fstype for filesystem-related queries. If left // empty, all filesystems are selected. If you have unusual // filesystem you don't want to include in dashboards and // alerting, you can exclude them here, e.g. 'fstype!="tmpfs"'. fsSelector: 'fstype!=""', // Select the mountpoint for filesystem-related queries. If left // empty, all mountpoints are selected. For example if you have a // special purpose tmpfs instance that has a fixed size and will // always be 100% full, but you still want alerts and dashboards for // other tmpfs instances, you can exclude those by mountpoint prefix // like so: 'mountpoint!~"/var/lib/foo.*"'. fsMountpointSelector: 'mountpoint!=""', // Select the device for disk-related queries. If left empty, all // devices are selected. If you have unusual devices you don't // want to include in dashboards and alerting, you can exclude // them here, e.g. 'device!="tmpfs"'. 
diskDeviceSelector: 'device!=""', // Some of the alerts are meant to fire if a critical failure of a // node is imminent (e.g. the disk is about to run full). In a // true “cloud native” setup, failures of a single node should be // tolerated. Hence, even imminent failure of a single node is no // reason to create a paging alert. However, in practice there are // still many situations where operators like to get paged in time // before a node runs out of disk space. nodeCriticalSeverity can // be set to the desired severity for this kind of alerts. This // can even be templated to depend on labels of the node, e.g. you // could make this critical for traditional database masters but // just a warning for K8s nodes. nodeCriticalSeverity: 'critical', // CPU utilization (%) on which to trigger the // 'NodeCPUHighUsage' alert. cpuHighUsageThreshold: 90, // Load average 1m (per core) on which to trigger the // 'NodeSystemSaturation' alert. systemSaturationPerCoreThreshold: 2, // Available disk space (%) thresholds on which to trigger the // 'NodeFilesystemSpaceFillingUp' alerts. These alerts fire if the disk // usage grows in a way that it is predicted to run out in 4h or 1d // and if the provided thresholds have been reached right now. // In some cases you'll want to adjust these, e.g. by default Kubernetes // runs the image garbage collection when the disk usage reaches 85% // of its available space. In that case, you'll want to reduce the // critical threshold below to something like 14 or 15, otherwise // the alert could fire under normal node usage. fsSpaceFillingUpWarningThreshold: 40, fsSpaceFillingUpCriticalThreshold: 20, // Available disk space (%) thresholds on which to trigger the // 'NodeFilesystemAlmostOutOfSpace' alerts. fsSpaceAvailableWarningThreshold: 5, fsSpaceAvailableCriticalThreshold: 3, // Memory utilzation (%) level on which to trigger the // 'NodeMemoryHighUtilization' alert. memoryHighUtilizationThreshold: 90, // Threshold for the rate of memory major page faults to trigger // 'NodeMemoryMajorPagesFaults' alert. memoryMajorPagesFaultsThreshold: 500, // Disk IO queue level above which to trigger // 'NodeDiskIOSaturation' alert. diskIOSaturationThreshold: 10, rateInterval: '5m', // Opt-in for multi-cluster support. 
showMultiCluster: false, clusterLabel: 'cluster', dashboardNamePrefix: 'Node Exporter / ', dashboardTags: ['node-exporter-mixin'], }, } node_exporter-1.7.0/docs/node-mixin/dashboards.jsonnet000066400000000000000000000002141452426057600231170ustar00rootroot00000000000000local dashboards = (import 'mixin.libsonnet').grafanaDashboards; { [name]: dashboards[name] for name in std.objectFields(dashboards) } node_exporter-1.7.0/docs/node-mixin/dashboards/000077500000000000000000000000001452426057600215205ustar00rootroot00000000000000node_exporter-1.7.0/docs/node-mixin/dashboards/dashboards.libsonnet000066400000000000000000000000651452426057600255520ustar00rootroot00000000000000(import 'node.libsonnet') + (import 'use.libsonnet') node_exporter-1.7.0/docs/node-mixin/dashboards/node.libsonnet000066400000000000000000000004021452426057600243600ustar00rootroot00000000000000{ local nodemixin = import '../lib/prom-mixin.libsonnet', grafanaDashboards+:: { 'nodes.json': nodemixin.new(config=$._config, platform='Linux').dashboard, 'nodes-darwin.json': nodemixin.new(config=$._config, platform='Darwin').dashboard, }, } node_exporter-1.7.0/docs/node-mixin/dashboards/use.libsonnet000066400000000000000000000605361452426057600242450ustar00rootroot00000000000000local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet'; local dashboard = grafana.dashboard; local row = grafana.row; local prometheus = grafana.prometheus; local template = grafana.template; local graphPanel = grafana.graphPanel; local c = import '../config.libsonnet'; local datasourceTemplate = { current: { text: 'default', value: 'default', }, hide: 0, label: 'Data Source', name: 'datasource', options: [], query: 'prometheus', refresh: 1, regex: '', type: 'datasource', }; local CPUUtilisation = graphPanel.new( 'CPU Utilisation', datasource='$datasource', span=6, format='percentunit', stack=true, fill=10, legend_show=false, ) { tooltip+: { sort: 2 } }; local CPUSaturation = // TODO: Is this a useful panel? At least there should be some explanation how load // average relates to the "CPU saturation" in the title. 
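// Partial answer to the TODO above: load1 counts tasks that are running or
// runnable (plus, on Linux, tasks in uninterruptible sleep), so dividing it
// by the number of CPUs approximates a per-core run-queue length. Values at
// or above 1 mean work is queueing for CPU time, which is the saturation
// signal the USE method looks for.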
graphPanel.new( 'CPU Saturation (Load1 per CPU)', datasource='$datasource', span=6, format='percentunit', stack=true, fill=10, legend_show=false, ) { tooltip+: { sort: 2 } }; local memoryUtilisation = graphPanel.new( 'Memory Utilisation', datasource='$datasource', span=6, format='percentunit', stack=true, fill=10, legend_show=false, ) { tooltip+: { sort: 2 } }; local memorySaturation = graphPanel.new( 'Memory Saturation (Major Page Faults)', datasource='$datasource', span=6, format='rds', stack=true, fill=10, legend_show=false, ) { tooltip+: { sort: 2 } }; local networkUtilisation = graphPanel.new( 'Network Utilisation (Bytes Receive/Transmit)', datasource='$datasource', span=6, format='Bps', stack=true, fill=10, legend_show=false, ) .addSeriesOverride({ alias: '/Receive/', stack: 'A' }) .addSeriesOverride({ alias: '/Transmit/', stack: 'B', transform: 'negative-Y' }) { tooltip+: { sort: 2 } }; local networkSaturation = graphPanel.new( 'Network Saturation (Drops Receive/Transmit)', datasource='$datasource', span=6, format='Bps', stack=true, fill=10, legend_show=false, ) .addSeriesOverride({ alias: '/ Receive/', stack: 'A' }) .addSeriesOverride({ alias: '/ Transmit/', stack: 'B', transform: 'negative-Y' }) { tooltip+: { sort: 2 } }; local diskIOUtilisation = graphPanel.new( 'Disk IO Utilisation', datasource='$datasource', span=6, format='percentunit', stack=true, fill=10, legend_show=false, ) { tooltip+: { sort: 2 } }; local diskIOSaturation = graphPanel.new( 'Disk IO Saturation', datasource='$datasource', span=6, format='percentunit', stack=true, fill=10, legend_show=false, ) { tooltip+: { sort: 2 } }; local diskSpaceUtilisation = graphPanel.new( 'Disk Space Utilisation', datasource='$datasource', span=12, format='percentunit', stack=true, fill=10, legend_show=false, ) { tooltip+: { sort: 2 } }; { _clusterTemplate:: template.new( name='cluster', datasource='$datasource', query='label_values(node_time_seconds, %s)' % $._config.clusterLabel, current='', hide=if $._config.showMultiCluster then '' else '2', refresh=2, includeAll=false, sort=1 ), grafanaDashboards+:: { 'node-rsrc-use.json': dashboard.new( '%sUSE Method / Node' % $._config.dashboardNamePrefix, time_from='now-1h', tags=($._config.dashboardTags), timezone='utc', refresh='30s', graphTooltip='shared_crosshair' ) .addTemplate(datasourceTemplate) .addTemplate($._clusterTemplate) .addTemplate( template.new( 'instance', '$datasource', 'label_values(node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config, refresh='time', sort=1 ) ) .addRow( row.new('CPU') .addPanel(CPUUtilisation.addTarget(prometheus.target('instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation'))) .addPanel(CPUSaturation.addTarget(prometheus.target('instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Saturation'))) ) .addRow( row.new('Memory') .addPanel(memoryUtilisation.addTarget(prometheus.target('instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation'))) .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Major page Faults'))) ) .addRow( 
row.new('Network') .addPanel( networkUtilisation .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive')) .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit')) ) .addPanel( networkSaturation .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive')) .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit')) ) ) .addRow( row.new('Disk IO') .addPanel(diskIOUtilisation.addTarget(prometheus.target('instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}'))) .addPanel(diskIOSaturation.addTarget(prometheus.target('instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}'))) ) .addRow( row.new('Disk Space') .addPanel( diskSpaceUtilisation.addTarget(prometheus.target( ||| sort_desc(1 - ( max without (mountpoint, fstype) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"}) / max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"}) ) != 0 ) ||| % $._config, legendFormat='{{device}}' )) ) ), 'node-cluster-rsrc-use.json': dashboard.new( '%sUSE Method / Cluster' % $._config.dashboardNamePrefix, time_from='now-1h', tags=($._config.dashboardTags), timezone='utc', refresh='30s', graphTooltip='shared_crosshair' ) .addTemplate(datasourceTemplate) .addTemplate($._clusterTemplate) .addRow( row.new('CPU') .addPanel( CPUUtilisation .addTarget(prometheus.target( ||| (( instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} * instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} ) != 0 ) / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) ||| % $._config, legendFormat='{{ instance }}' )) ) .addPanel( CPUSaturation .addTarget(prometheus.target( ||| ( instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) ) != 0 ||| % $._config, legendFormat='{{instance}}' )) ) ) .addRow( row.new('Memory') .addPanel( memoryUtilisation .addTarget(prometheus.target( ||| ( instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) ) != 0 ||| % $._config, legendFormat='{{instance}}', )) ) .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config, 
legendFormat='{{instance}}'))) ) .addRow( row.new('Network') .addPanel( networkUtilisation .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive')) .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit')) ) .addPanel( networkSaturation .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive')) .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit')) ) ) .addRow( row.new('Disk IO') .addPanel( diskIOUtilisation .addTarget(prometheus.target( ||| ( instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) ) != 0 ||| % $._config, legendFormat='{{instance}} {{device}}' )) ) .addPanel( diskIOSaturation .addTarget(prometheus.target( ||| ( instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"})) ) != 0 ||| % $._config, legendFormat='{{instance}} {{device}}' )) ) ) .addRow( row.new('Disk Space') .addPanel( diskSpaceUtilisation .addTarget(prometheus.target( ||| sum without (device) ( max without (fstype, mountpoint) (( node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"} ) != 0) ) / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}))) ||| % $._config, legendFormat='{{instance}}' )) ) ), } + if $._config.showMultiCluster then { 'node-multicluster-rsrc-use.json': dashboard.new( '%sUSE Method / Multi-cluster' % $._config.dashboardNamePrefix, time_from='now-1h', tags=($._config.dashboardTags), timezone='utc', refresh='30s', graphTooltip='shared_crosshair' ) .addTemplate(datasourceTemplate) .addRow( row.new('CPU') .addPanel( CPUUtilisation .addTarget(prometheus.target( ||| sum( (( instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s} * instance:node_num_cpu:sum{%(nodeExporterSelector)s} ) != 0) / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s})) ) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config )) ) .addPanel( CPUSaturation .addTarget(prometheus.target( ||| sum(( instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s} / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s})) ) != 0) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config )) ) ) .addRow( row.new('Memory') .addPanel( memoryUtilisation .addTarget(prometheus.target( ||| sum(( 
instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s} / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s})) ) != 0) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config )) ) .addPanel( memorySaturation .addTarget(prometheus.target( ||| sum(( instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s} ) != 0) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config )) ) ) .addRow( row.new('Network') .addPanel( networkUtilisation .addTarget(prometheus.target( ||| sum(( instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} ) != 0) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config )) .addTarget(prometheus.target( ||| sum(( instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} ) != 0) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config )) ) .addPanel( networkSaturation .addTarget(prometheus.target( ||| sum(( instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} ) != 0) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config )) .addTarget(prometheus.target( ||| sum(( instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s} ) != 0) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config )) ) ) .addRow( row.new('Disk IO') .addPanel( diskIOUtilisation .addTarget(prometheus.target( ||| sum(( instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s} / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s})) ) != 0) by (%(clusterLabel)s, device) ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config )) ) .addPanel( diskIOSaturation .addTarget(prometheus.target( ||| sum(( instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s} / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s})) ) != 0) by (%(clusterLabel)s, device) ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config )) ) ) .addRow( row.new('Disk Space') .addPanel( diskSpaceUtilisation .addTarget(prometheus.target( ||| sum ( sum without (device) ( max without (fstype, mountpoint, instance, pod) (( node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s} ) != 0) ) / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s}))) ) by (%(clusterLabel)s) ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config )) ) ), } else {}, } node_exporter-1.7.0/docs/node-mixin/jsonnetfile.json000066400000000000000000000007131452426057600226220ustar00rootroot00000000000000{ "version": 1, "dependencies": [ { "source": { "git": { "remote": "https://github.com/grafana/grafonnet-lib.git", "subdir": "grafonnet" } }, "version": "master" }, { "source": { "git": { "remote": "https://github.com/grafana/grafonnet-lib.git", "subdir": "grafonnet-7.0" } }, "version": "master" } ], "legacyImports": false } 
node_exporter-1.7.0/docs/node-mixin/lib/000077500000000000000000000000001452426057600201545ustar00rootroot00000000000000node_exporter-1.7.0/docs/node-mixin/lib/prom-mixin.libsonnet000066400000000000000000000355751452426057600242110ustar00rootroot00000000000000local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet'; local dashboard = grafana.dashboard; local row = grafana.row; local prometheus = grafana.prometheus; local template = grafana.template; local graphPanel = grafana.graphPanel; local grafana70 = import 'github.com/grafana/grafonnet-lib/grafonnet-7.0/grafana.libsonnet'; local gaugePanel = grafana70.panel.gauge; local table = grafana70.panel.table; { new(config=null, platform=null):: { local prometheusDatasourceTemplate = { current: { text: 'default', value: 'default', }, hide: 0, label: 'Data Source', name: 'datasource', options: [], query: 'prometheus', refresh: 1, regex: '', type: 'datasource', }, local instanceTemplatePrototype = template.new( 'instance', '$datasource', '', refresh='time', label='Instance', ), local instanceTemplate = if platform == 'Darwin' then instanceTemplatePrototype { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, sysname="Darwin"}, instance)' % config } else instanceTemplatePrototype { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, sysname!="Darwin"}, instance)' % config }, local idleCPU = graphPanel.new( 'CPU Usage', datasource='$datasource', span=6, format='percentunit', max=1, min=0, stack=true, ) .addTarget(prometheus.target( ||| ( (1 - sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal", instance="$instance"}[$__rate_interval]))) / ignoring(cpu) group_left count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle", instance="$instance"}) ) ||| % config, legendFormat='{{cpu}}', intervalFactor=5, )), local systemLoad = graphPanel.new( 'Load Average', datasource='$datasource', span=6, format='short', min=0, fill=0, ) .addTarget(prometheus.target('node_load1{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='1m load average')) .addTarget(prometheus.target('node_load5{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='5m load average')) .addTarget(prometheus.target('node_load15{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='15m load average')) .addTarget(prometheus.target('count(node_cpu_seconds_total{%(nodeExporterSelector)s, instance="$instance", mode="idle"})' % config, legendFormat='logical cores')), local memoryGraphPanelPrototype = graphPanel.new( 'Memory Usage', datasource='$datasource', span=9, format='bytes', min=0, ), local memoryGraph = if platform == 'Linux' then memoryGraphPanelPrototype { stack: true } .addTarget(prometheus.target( ||| ( node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"} - node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"} - node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"} - node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"} ) ||| % config, legendFormat='memory used' )) .addTarget(prometheus.target('node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='memory buffers')) .addTarget(prometheus.target('node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='memory cached')) 
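      // Because the panel is stacked, the four series (used, buffers, cached, and the
      // free series added below) sum back to node_memory_MemTotal_bytes for the instance.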
.addTarget(prometheus.target('node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='memory free')) else if platform == 'Darwin' then // not useful to stack memoryGraphPanelPrototype { stack: false } .addTarget(prometheus.target('node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='Physical Memory')) .addTarget(prometheus.target( ||| ( node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance"} - node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance"} + node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance"} + node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance"} ) ||| % config, legendFormat='Memory Used' )) .addTarget(prometheus.target( ||| ( node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance"} - node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance"} ) ||| % config, legendFormat='App Memory' )) .addTarget(prometheus.target('node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='Wired Memory')) .addTarget(prometheus.target('node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='Compressed')), // NOTE: avg() is used to circumvent a label change caused by a node_exporter rollout. local memoryGaugePanelPrototype = gaugePanel.new( title='Memory Usage', datasource='$datasource', ) .addThresholdStep('rgba(50, 172, 45, 0.97)') .addThresholdStep('rgba(237, 129, 40, 0.89)', 80) .addThresholdStep('rgba(245, 54, 54, 0.9)', 90) .setFieldConfig(max=100, min=0, unit='percent') + { span: 3, }, local memoryGauge = if platform == 'Linux' then memoryGaugePanelPrototype .addTarget(prometheus.target( ||| 100 - ( avg(node_memory_MemAvailable_bytes{%(nodeExporterSelector)s, instance="$instance"}) / avg(node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"}) * 100 ) ||| % config, )) else if platform == 'Darwin' then memoryGaugePanelPrototype .addTarget(prometheus.target( ||| ( ( avg(node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance"}) - avg(node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance"}) + avg(node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance"}) + avg(node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance"}) ) / avg(node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance"}) ) * 100 ||| % config )), local diskIO = graphPanel.new( 'Disk I/O', datasource='$datasource', span=6, min=0, fill=0, ) // TODO: Does it make sense to have those three in the same panel? 
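      // The three targets below mix units: read/written bytes are mapped to the left
      // axis (Bps) and io time to the right axis (percentunit) via the
      // seriesOverrides/yaxes block further down.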
.addTarget(prometheus.target( 'rate(node_disk_read_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % config, legendFormat='{{device}} read', intervalFactor=1, )) .addTarget(prometheus.target( 'rate(node_disk_written_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % config, legendFormat='{{device}} written', intervalFactor=1, )) .addTarget(prometheus.target( 'rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % config, legendFormat='{{device}} io time', intervalFactor=1, )) + { seriesOverrides: [ { alias: '/ read| written/', yaxis: 1, }, { alias: '/ io time/', yaxis: 2, }, ], yaxes: [ self.yaxe(format='Bps'), self.yaxe(format='percentunit'), ], }, local diskSpaceUsage = table.new( title='Disk Space Usage', datasource='$datasource', ) .setFieldConfig(unit='decbytes') .addThresholdStep(color='green', value=null) .addThresholdStep(color='yellow', value=0.8) .addThresholdStep(color='red', value=0.9) .addTarget(prometheus.target( ||| max by (mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s, %(fsMountpointSelector)s}) ||| % config, legendFormat='', instant=true, format='table' )) .addTarget(prometheus.target( ||| max by (mountpoint) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s, %(fsMountpointSelector)s}) ||| % config, legendFormat='', instant=true, format='table' )) .addOverride( matcher={ id: 'byName', options: 'Mounted on', }, properties=[ { id: 'custom.width', value: 260, }, ], ) .addOverride( matcher={ id: 'byName', options: 'Size', }, properties=[ { id: 'custom.width', value: 93, }, ], ) .addOverride( matcher={ id: 'byName', options: 'Used', }, properties=[ { id: 'custom.width', value: 72, }, ], ) .addOverride( matcher={ id: 'byName', options: 'Available', }, properties=[ { id: 'custom.width', value: 88, }, ], ) .addOverride( matcher={ id: 'byName', options: 'Used, %', }, properties=[ { id: 'unit', value: 'percentunit', }, { id: 'custom.displayMode', value: 'gradient-gauge', }, { id: 'max', value: 1, }, { id: 'min', value: 0, }, ] ) + { span: 6 } + { transformations: [ { id: 'groupBy', options: { fields: { 'Value #A': { aggregations: [ 'lastNotNull', ], operation: 'aggregate', }, 'Value #B': { aggregations: [ 'lastNotNull', ], operation: 'aggregate', }, mountpoint: { aggregations: [], operation: 'groupby', }, }, }, }, { id: 'merge', options: {}, }, { id: 'calculateField', options: { alias: 'Used', binary: { left: 'Value #A (lastNotNull)', operator: '-', reducer: 'sum', right: 'Value #B (lastNotNull)', }, mode: 'binary', reduce: { reducer: 'sum', }, }, }, { id: 'calculateField', options: { alias: 'Used, %', binary: { left: 'Used', operator: '/', reducer: 'sum', right: 'Value #A (lastNotNull)', }, mode: 'binary', reduce: { reducer: 'sum', }, }, }, { id: 'organize', options: { excludeByName: {}, indexByName: {}, renameByName: { 'Value #A (lastNotNull)': 'Size', 'Value #B (lastNotNull)': 'Available', mountpoint: 'Mounted on', }, }, }, { id: 'sortBy', options: { fields: {}, sort: [ { field: 'Mounted on', }, ], }, }, ], }, local networkReceived = graphPanel.new( 'Network Received', description='Network received (bits/s)', datasource='$datasource', span=6, format='bps', min=0, fill=0, ) .addTarget(prometheus.target( 'rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, instance="$instance", 
device!="lo"}[$__rate_interval]) * 8' % config, legendFormat='{{device}}', intervalFactor=1, )), local networkTransmitted = graphPanel.new( 'Network Transmitted', description='Network transmitted (bits/s)', datasource='$datasource', span=6, format='bps', min=0, fill=0, ) .addTarget(prometheus.target( 'rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, instance="$instance", device!="lo"}[$__rate_interval]) * 8' % config, legendFormat='{{device}}', intervalFactor=1, )), local cpuRow = row.new('CPU') .addPanel(idleCPU) .addPanel(systemLoad), local memoryRow = row.new('Memory') .addPanel(memoryGraph) .addPanel(memoryGauge), local diskRow = row.new('Disk') .addPanel(diskIO) .addPanel(diskSpaceUsage), local networkRow = row.new('Network') .addPanel(networkReceived) .addPanel(networkTransmitted), local rows = [ cpuRow, memoryRow, diskRow, networkRow, ], local templates = [ prometheusDatasourceTemplate, instanceTemplate, ], dashboard: if platform == 'Linux' then dashboard.new( '%sNodes' % config.dashboardNamePrefix, time_from='now-1h', tags=(config.dashboardTags), timezone='utc', refresh='30s', graphTooltip='shared_crosshair' ) .addTemplates(templates) .addRows(rows) else if platform == 'Darwin' then dashboard.new( '%sMacOS' % config.dashboardNamePrefix, time_from='now-1h', tags=(config.dashboardTags), timezone='utc', refresh='30s', graphTooltip='shared_crosshair' ) .addTemplates(templates) .addRows(rows), }, } node_exporter-1.7.0/docs/node-mixin/mixin.libsonnet000066400000000000000000000002211452426057600224440ustar00rootroot00000000000000(import 'config.libsonnet') + (import 'alerts/alerts.libsonnet') + (import 'dashboards/dashboards.libsonnet') + (import 'rules/rules.libsonnet') node_exporter-1.7.0/docs/node-mixin/rules.jsonnet000066400000000000000000000001001452426057600221310ustar00rootroot00000000000000std.manifestYamlDoc((import 'mixin.libsonnet').prometheusRules) node_exporter-1.7.0/docs/node-mixin/rules/000077500000000000000000000000001452426057600205405ustar00rootroot00000000000000node_exporter-1.7.0/docs/node-mixin/rules/rules.libsonnet000066400000000000000000000113021452426057600236060ustar00rootroot00000000000000{ prometheusRules+:: { groups+: [ { name: 'node-exporter.rules', rules: [ { // This rule gives the number of CPUs per node. record: 'instance:node_num_cpu:sum', expr: ||| count without (cpu, mode) ( node_cpu_seconds_total{%(nodeExporterSelector)s,mode="idle"} ) ||| % $._config, }, { // CPU utilisation is % CPU without {idle,iowait,steal}. record: 'instance:node_cpu_utilisation:rate%(rateInterval)s' % $._config, expr: ||| 1 - avg without (cpu) ( sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal"}[%(rateInterval)s])) ) ||| % $._config, }, { // This is CPU saturation: 1min avg run queue length / number of CPUs. // Can go over 1. // TODO: There are situation where a run queue >1/core is just normal and fine. // We need to clarify how to read this metric and if its usage is helpful at all. record: 'instance:node_load1_per_cpu:ratio', expr: ||| ( node_load1{%(nodeExporterSelector)s} / instance:node_num_cpu:sum{%(nodeExporterSelector)s} ) ||| % $._config, }, { // Memory utilisation (ratio of used memory per instance). 
record: 'instance:node_memory_utilisation:ratio', expr: ||| 1 - ( ( node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} or ( node_memory_Buffers_bytes{%(nodeExporterSelector)s} + node_memory_Cached_bytes{%(nodeExporterSelector)s} + node_memory_MemFree_bytes{%(nodeExporterSelector)s} + node_memory_Slab_bytes{%(nodeExporterSelector)s} ) ) / node_memory_MemTotal_bytes{%(nodeExporterSelector)s} ) ||| % $._config, }, { record: 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s' % $._config, expr: ||| rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[%(rateInterval)s]) ||| % $._config, }, { // Disk utilisation (seconds spent, 1 second rate). record: 'instance_device:node_disk_io_time_seconds:rate%(rateInterval)s' % $._config, expr: ||| rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s]) ||| % $._config, }, { // Disk saturation (weighted seconds spent, 1 second rate). record: 'instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s' % $._config, expr: ||| rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s]) ||| % $._config, }, { record: 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, { record: 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, // TODO: Find out if those drops ever happen on modern switched networks. { record: 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( rate(node_network_receive_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, { record: 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s' % $._config, expr: ||| sum without (device) ( rate(node_network_transmit_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s]) ) ||| % $._config, }, ], }, ], }, } node_exporter-1.7.0/end-to-end-test.sh000077500000000000000000000103651452426057600176620ustar00rootroot00000000000000#!/usr/bin/env bash set -euf -o pipefail enabled_collectors=$(cat << COLLECTORS arp bcache bonding btrfs buddyinfo cgroups conntrack cpu cpufreq cpu_vulnerabilities diskstats dmi drbd edac entropy fibrechannel filefd hwmon infiniband interrupts ipvs ksmd lnstat loadavg mdadm meminfo meminfo_numa mountstats netdev netstat nfs nfsd pressure processes qdisc rapl schedstat selinux slabinfo sockstat softirqs stat sysctl textfile thermal_zone udp_queues vmstat wifi xfs zfs zoneinfo COLLECTORS ) disabled_collectors=$(cat << COLLECTORS filesystem timex uname COLLECTORS ) cd "$(dirname $0)" port="$((10000 + (RANDOM % 10000)))" tmpdir=$(mktemp -d /tmp/node_exporter_e2e_test.XXXXXX) skip_re="^(go_|node_exporter_build_info|node_scrape_collector_duration_seconds|process_|node_textfile_mtime_seconds|node_time_(zone|seconds)|node_network_(receive|transmit)_(bytes|packets)_total)" arch="$(uname -m)" case "${arch}" in aarch64|ppc64le) fixture='collector/fixtures/e2e-64k-page-output.txt' ;; *) fixture='collector/fixtures/e2e-output.txt' ;; esac # Only test CPU info collection on x86_64. 
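# The case below turns on --collector.cpu.info together with bug/flag include
# regexes that match the x86_64 test fixtures; all other architectures run with
# --no-collector.cpu.info and empty regexes.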
case "${arch}" in x86_64) cpu_info_collector='--collector.cpu.info' cpu_info_bugs='^(cpu_meltdown|spectre_.*|mds)$' cpu_info_flags='^(aes|avx.?|constant_tsc)$' ;; *) cpu_info_collector='--no-collector.cpu.info' cpu_info_bugs='' cpu_info_flags='' ;; esac keep=0; update=0; verbose=0 while getopts 'hkuv' opt do case "$opt" in k) keep=1 ;; u) update=1 ;; v) verbose=1 set -x ;; *) echo "Usage: $0 [-k] [-u] [-v]" echo " -k: keep temporary files and leave node_exporter running" echo " -u: update fixture" echo " -v: verbose output" exit 1 ;; esac done if [ ! -x ./node_exporter ] then echo './node_exporter not found. Consider running `go build` first.' >&2 exit 1 fi ./node_exporter \ --path.rootfs="collector/fixtures" \ --path.procfs="collector/fixtures/proc" \ --path.sysfs="collector/fixtures/sys" \ --path.udev.data="collector/fixtures/udev/data" \ $(for c in ${enabled_collectors}; do echo --collector.${c} ; done) \ $(for c in ${disabled_collectors}; do echo --no-collector.${c} ; done) \ --collector.textfile.directory="collector/fixtures/textfile/two_metric_files/" \ --collector.wifi.fixtures="collector/fixtures/wifi" \ --collector.qdisc.fixtures="collector/fixtures/qdisc/" \ --collector.qdisc.device-include="(wlan0|eth0)" \ --collector.arp.device-exclude="nope" \ --no-collector.arp.netlink \ --collector.hwmon.chip-include="(applesmc|coretemp|hwmon4|nct6779)" \ --collector.netclass.ignored-devices="(dmz|int)" \ --collector.netclass.ignore-invalid-speed \ --collector.netdev.device-include="lo" \ --collector.bcache.priorityStats \ "${cpu_info_collector}" \ --collector.cpu.info.bugs-include="${cpu_info_bugs}" \ --collector.cpu.info.flags-include="${cpu_info_flags}" \ --collector.stat.softirq \ --collector.sysctl.include="kernel.threads-max" \ --collector.sysctl.include="fs.file-nr" \ --collector.sysctl.include="fs.file-nr:total,current,max" \ --collector.sysctl.include-info="kernel.seccomp.actions_avail" \ --web.listen-address "127.0.0.1:${port}" \ --log.level="debug" > "${tmpdir}/node_exporter.log" 2>&1 & echo $! > "${tmpdir}/node_exporter.pid" finish() { if [ $? -ne 0 -o ${verbose} -ne 0 ] then cat << EOF >&2 LOG ===================== $(cat "${tmpdir}/node_exporter.log") ========================= EOF fi if [ ${update} -ne 0 ] then cp "${tmpdir}/e2e-output.txt" "${fixture}" fi if [ ${keep} -eq 0 ] then kill -9 "$(cat ${tmpdir}/node_exporter.pid)" # This silences the "Killed" message set +e wait "$(cat ${tmpdir}/node_exporter.pid)" > /dev/null 2>&1 rm -rf "${tmpdir}" fi } trap finish EXIT get() { if command -v curl > /dev/null 2>&1 then curl -s -f "$@" elif command -v wget > /dev/null 2>&1 then wget -O - "$@" else echo "Neither curl nor wget found" exit 1 fi } sleep 1 get "127.0.0.1:${port}/metrics" | grep -E -v "${skip_re}" > "${tmpdir}/e2e-output.txt" diff -u \ "${fixture}" \ "${tmpdir}/e2e-output.txt" node_exporter-1.7.0/example-rules.yml000066400000000000000000000014051452426057600177150ustar00rootroot00000000000000groups: - name: example-node-exporter-rules rules: # The count of CPUs per node, useful for getting CPU time as a percent of total. - record: instance:node_cpus:count expr: count(node_cpu_seconds_total{mode="idle"}) without (cpu,mode) # CPU in use by CPU. - record: instance_cpu:node_cpu_seconds_not_idle:rate5m expr: sum(rate(node_cpu_seconds_total{mode!="idle"}[5m])) without (mode) # CPU in use by mode. - record: instance_mode:node_cpu_seconds:rate5m expr: sum(rate(node_cpu_seconds_total[5m])) without (cpu) # CPU in use ratio. 
- record: instance:node_cpu_utilization:ratio expr: sum(instance_mode:node_cpu_seconds:rate5m{mode!="idle"}) without (mode) / instance:node_cpus:count node_exporter-1.7.0/examples/000077500000000000000000000000001452426057600162255ustar00rootroot00000000000000node_exporter-1.7.0/examples/init.d/000077500000000000000000000000001452426057600174125ustar00rootroot00000000000000node_exporter-1.7.0/examples/init.d/node_exporter000077500000000000000000000017301452426057600222160ustar00rootroot00000000000000#!/bin/bash RETVAL=0 PROG="node_exporter" EXEC="/etc/node_exporter/node_exporter" LOCKFILE="/var/lock/subsys/$PROG" OPTIONS="--web.listen-address=:9100" # Source function library. if [ -f /etc/rc.d/init.d/functions ]; then . /etc/rc.d/init.d/functions else echo "/etc/rc.d/init.d/functions does not exist" exit 0 fi start() { if [ -f $LOCKFILE ] then echo "$PROG is already running!" else echo -n "Starting $PROG: " nohup $EXEC $OPTIONS >/dev/null 2>&1 & RETVAL=$? [ $RETVAL -eq 0 ] && touch $LOCKFILE && success || failure echo return $RETVAL fi } stop() { echo -n "Stopping $PROG: " killproc $EXEC RETVAL=$? [ $RETVAL -eq 0 ] && rm -r $LOCKFILE && success || failure echo } restart () { stop sleep 1 start } case "$1" in start) start ;; stop) stop ;; status) status $PROG ;; restart) restart ;; *) echo "Usage: $0 {start|stop|restart|status}" exit 1 esac exit $RETVAL node_exporter-1.7.0/examples/launchctl/000077500000000000000000000000001452426057600202025ustar00rootroot00000000000000node_exporter-1.7.0/examples/launchctl/README.md000066400000000000000000000015651452426057600214700ustar00rootroot00000000000000# MacOS LaunchDaemon If you're installing through a package manager, you probably don't need to deal with this file. The `plist` file should be put in `/Library/LaunchDaemons/` (user defined daemons), and the binary installed at `/usr/local/bin/node_exporter`. Ex. install globally by sudo cp -n node_exporter /usr/local/bin/ sudo cp -n examples/launchctl/io.prometheus.node_exporter.plist /Library/LaunchDaemons/ sudo launchctl bootstrap system/ /Library/LaunchDaemons/io.prometheus.node_exporter.plist # Optionally configure by dropping CLI arguments in a file echo -- '--web.listen-address=:9101' | sudo tee /usr/local/etc/node_exporter.args # Check it's running sudo launchctl list | grep node_exporter # See full process state sudo launchctl print system/io.prometheus.node_exporter # View logs sudo tail /tmp/node_exporter.log node_exporter-1.7.0/examples/launchctl/io.prometheus.node_exporter.plist000066400000000000000000000020051452426057600267310ustar00rootroot00000000000000 Label io.prometheus.node_exporter ProgramArguments sh -c /usr/local/bin/node_exporter $(< /usr/local/etc/node_exporter.args) UserName nobody GroupName nobody RunAtLoad KeepAlive WorkingDirectory /usr/local StandardErrorPath /tmp/node_exporter.log StandardOutPath /tmp/node_exporter.log HardResourceLimits NumberOfFiles 4096 SoftResourceLimits NumberOfFiles 4096 node_exporter-1.7.0/examples/openbsd-rc.d/000077500000000000000000000000001452426057600205035ustar00rootroot00000000000000node_exporter-1.7.0/examples/openbsd-rc.d/node_exporter000077500000000000000000000001721452426057600233060ustar00rootroot00000000000000#!/bin/ksh # Shawn Craver, 2019-04-02 daemon="/usr/local/bin/node_exporter" . 
/etc/rc.d/rc.subr rc_bg=YES rc_cmd $1 node_exporter-1.7.0/examples/openwrt-init.d/000077500000000000000000000000001452426057600211065ustar00rootroot00000000000000node_exporter-1.7.0/examples/openwrt-init.d/node_exporter000077500000000000000000000003411452426057600237070ustar00rootroot00000000000000#!/bin/sh /etc/rc.common START=99 USE_PROCD=1 PROG="/usr/bin/node_exporter" OPTIONS="--web.listen-address=:9100" start_service() { procd_open_instance procd_set_param command "$PROG" "${OPTIONS}" procd_close_instance } node_exporter-1.7.0/examples/systemd/000077500000000000000000000000001452426057600177155ustar00rootroot00000000000000node_exporter-1.7.0/examples/systemd/README.md000066400000000000000000000011171452426057600211740ustar00rootroot00000000000000# Systemd Unit If you are using distribution packages or the copr repository, you don't need to deal with these files! The unit files (`*.service` and `*.socket`) in this directory are to be put into `/etc/systemd/system`. It needs a user named `node_exporter`, whose shell should be `/sbin/nologin` and should not have any special privileges. It needs a sysconfig file in `/etc/sysconfig/node_exporter`. It needs a directory named `/var/lib/node_exporter/textfile_collector`, whose owner should be `node_exporter`:`node_exporter`. A sample file can be found in `sysconfig.node_exporter`. node_exporter-1.7.0/examples/systemd/node_exporter.service000066400000000000000000000003601452426057600241530ustar00rootroot00000000000000[Unit] Description=Node Exporter Requires=node_exporter.socket [Service] User=node_exporter EnvironmentFile=/etc/sysconfig/node_exporter ExecStart=/usr/sbin/node_exporter --web.systemd-socket $OPTIONS [Install] WantedBy=multi-user.target node_exporter-1.7.0/examples/systemd/node_exporter.socket000066400000000000000000000001401452426057600237770ustar00rootroot00000000000000[Unit] Description=Node Exporter [Socket] ListenStream=9100 [Install] WantedBy=sockets.target node_exporter-1.7.0/examples/systemd/sysconfig.node_exporter000066400000000000000000000001231452426057600245140ustar00rootroot00000000000000OPTIONS="--collector.textfile.directory /var/lib/node_exporter/textfile_collector" node_exporter-1.7.0/go.mod000066400000000000000000000046341452426057600155240ustar00rootroot00000000000000module github.com/prometheus/node_exporter go 1.20 require ( github.com/alecthomas/kingpin/v2 v2.3.2 github.com/beevik/ntp v1.3.0 github.com/coreos/go-systemd/v22 v22.5.0 github.com/dennwc/btrfs v0.0.0-20230312211831-a1f570bd01a1 github.com/ema/qdisc v1.0.0 github.com/go-kit/log v0.2.1 github.com/godbus/dbus/v5 v5.1.0 github.com/hashicorp/go-envparse v0.1.0 github.com/hodgesds/perf-utils v0.7.0 github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 github.com/josharian/native v1.1.0 github.com/jsimonetti/rtnetlink v1.3.5 github.com/lufia/iostat v1.2.1 github.com/mattn/go-xmlrpc v0.0.3 github.com/mdlayher/ethtool v0.1.0 github.com/mdlayher/netlink v1.7.2 github.com/mdlayher/wifi v0.1.0 github.com/opencontainers/selinux v1.11.0 github.com/prometheus-community/go-runit v0.1.0 github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 github.com/prometheus/exporter-toolkit v0.10.0 github.com/prometheus/procfs v0.12.0 github.com/safchain/ethtool v0.3.0 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 golang.org/x/sys v0.13.0 howett.net/plist v1.0.0 ) require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/beorn7/perks v1.0.1 // indirect 
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/ioctl v1.0.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mdlayher/genetlink v1.3.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) node_exporter-1.7.0/go.sum000066400000000000000000000401231452426057600155420ustar00rootroot00000000000000github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/beevik/ntp v1.3.0 h1:/w5VhpW5BGKS37vFm1p9oVk/t4HnnkKZAZIubHM6F7Q= github.com/beevik/ntp v1.3.0/go.mod h1:vD6h1um4kzXpqmLTuu0cCLcC+NfvC0IC+ltmEDA8E78= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/btrfs v0.0.0-20230312211831-a1f570bd01a1 h1:ue4Es4Xzz255hWQ7NAWzZxuXG+YOV7URzzusLLSe0zU= github.com/dennwc/btrfs v0.0.0-20230312211831-a1f570bd01a1/go.mod h1:MYsOV9Dgsec3FFSOjywi0QK5r6TeBbdWxdrMGtiYXHA= github.com/dennwc/ioctl v1.0.0 h1:DsWAAjIxRqNcLn9x6mwfuf2pet3iB7aK90K4tF16rLg= github.com/dennwc/ioctl v1.0.0/go.mod h1:ellh2YB5ldny99SBU/VX7Nq0xiZbHphf1DrtHxxjMk0= github.com/ema/qdisc v1.0.0 h1:EHLG08FVRbWLg8uRICa3xzC9Zm0m7HyMHfXobWFnXYg= github.com/ema/qdisc v1.0.0/go.mod h1:FhIc0fLYi7f+lK5maMsesDqwYojIOh3VfRs8EVd5YJQ= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= 
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-envparse v0.1.0 h1:bE++6bhIsNCPLvgDZkYqo3nA+/PFI51pkrHdmPSDFPY= github.com/hashicorp/go-envparse v0.1.0/go.mod h1:OHheN1GoygLlAkTlXLXvAdnXdZxy8JUweQ1rAXx1xnc= github.com/hodgesds/perf-utils v0.7.0 h1:7KlHGMuig4FRH5fNw68PV6xLmgTe7jKs9hgAcEAbioU= github.com/hodgesds/perf-utils v0.7.0/go.mod h1:LAklqfDadNKpkxoAJNHpD5tkY0rkZEVdnCEWN5k4QJY= github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:hk4LPqXIY/c9XzRbe7dA6qQxaT6Axcbny0L/G5a4owQ= github.com/illumos/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:PoK3ejP3LJkGTzKqRlpvCIFas3ncU02v8zzWDW+g0FY= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v1.3.5 h1:hVlNQNRlLDGZz31gBPicsG7Q53rnlsz1l1Ix/9XlpVA= github.com/jsimonetti/rtnetlink v1.3.5/go.mod h1:0LFedyiTkebnd43tE4YAkWGIq9jQphow4CcwxaT2Y00= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lufia/iostat v1.2.1 h1:tnCdZBIglgxD47RyD55kfWQcJMGzO+1QBziSQfesf2k= github.com/lufia/iostat v1.2.1/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/mattn/go-xmlrpc v0.0.3 h1:Y6WEMLEsqs3RviBrAa1/7qmbGB7DVD3brZIbqMbQdGY= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mdlayher/ethtool v0.1.0 h1:XAWHsmKhyPOo42qq/yTPb0eFBGUKKTR1rE0dVrWVQ0Y= github.com/mdlayher/ethtool v0.1.0/go.mod h1:fBMLn2UhfRGtcH5ZFjr+6GUiHEjZsItFD7fSn7jbZVQ= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= 
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/wifi v0.1.0 h1:y8wYRUXwok5CtUZOXT3egghYesX0O79E3ALl+SIDm9Q= github.com/mdlayher/wifi v0.1.0/go.mod h1:+gBYnZAMcUKHSFzMJXwlz7tLsEHgwDJ9DJCefhJM+gI= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-community/go-runit v0.1.0 h1:uTWEj/Fn2RoLdfg/etSqwzgYNOYPrARx1BHUN052tGA= github.com/prometheus-community/go-runit v0.1.0/go.mod h1:AvJ9Jo3gAFu2lbM4+qfjdpq30FfiLDJZKbQ015u08IQ= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8= github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973 h1:GfSdC6wKfTGcgCS7BtzF5694Amne1pGCSTY252WhlEY= github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= 
node_exporter-1.7.0/node_exporter.go000066400000000000000000000166061452426057600176240ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" stdlog "log" "net/http" _ "net/http/pprof" "os" "os/user" "runtime" "sort" "github.com/prometheus/common/promlog" "github.com/prometheus/common/promlog/flag" "github.com/alecthomas/kingpin/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" promcollectors "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" "github.com/prometheus/exporter-toolkit/web/kingpinflag" "github.com/prometheus/node_exporter/collector" ) // handler wraps an unfiltered http.Handler but uses a filtered handler, // created on the fly, if filtering is requested. Create instances with // newHandler. type handler struct { unfilteredHandler http.Handler // exporterMetricsRegistry is a separate registry for the metrics about // the exporter itself. exporterMetricsRegistry *prometheus.Registry includeExporterMetrics bool maxRequests int logger log.Logger } func newHandler(includeExporterMetrics bool, maxRequests int, logger log.Logger) *handler { h := &handler{ exporterMetricsRegistry: prometheus.NewRegistry(), includeExporterMetrics: includeExporterMetrics, maxRequests: maxRequests, logger: logger, } if h.includeExporterMetrics { h.exporterMetricsRegistry.MustRegister( promcollectors.NewProcessCollector(promcollectors.ProcessCollectorOpts{}), promcollectors.NewGoCollector(), ) } if innerHandler, err := h.innerHandler(); err != nil { panic(fmt.Sprintf("Couldn't create metrics handler: %s", err)) } else { h.unfilteredHandler = innerHandler } return h } // ServeHTTP implements http.Handler. func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { filters := r.URL.Query()["collect[]"] level.Debug(h.logger).Log("msg", "collect query:", "filters", filters) if len(filters) == 0 { // No filters, use the prepared unfiltered handler. h.unfilteredHandler.ServeHTTP(w, r) return } // To serve filtered metrics, we create a filtering handler on the fly. filteredHandler, err := h.innerHandler(filters...) if err != nil { level.Warn(h.logger).Log("msg", "Couldn't create filtered metrics handler:", "err", err) w.WriteHeader(http.StatusBadRequest) w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err))) return } filteredHandler.ServeHTTP(w, r) } // innerHandler is used to create both the one unfiltered http.Handler to be // wrapped by the outer handler and also the filtered handlers created on the // fly. The former is accomplished by calling innerHandler without any arguments // (in which case it will log all the collectors enabled via command-line // flags). 
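// Filtered handlers serve scrape requests that restrict collectors through
// repeated collect[] query parameters, for example
// GET /metrics?collect[]=cpu&collect[]=meminfo (see ServeHTTP above).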
func (h *handler) innerHandler(filters ...string) (http.Handler, error) { nc, err := collector.NewNodeCollector(h.logger, filters...) if err != nil { return nil, fmt.Errorf("couldn't create collector: %s", err) } // Only log the creation of an unfiltered handler, which should happen // only once upon startup. if len(filters) == 0 { level.Info(h.logger).Log("msg", "Enabled collectors") collectors := []string{} for n := range nc.Collectors { collectors = append(collectors, n) } sort.Strings(collectors) for _, c := range collectors { level.Info(h.logger).Log("collector", c) } } r := prometheus.NewRegistry() r.MustRegister(version.NewCollector("node_exporter")) if err := r.Register(nc); err != nil { return nil, fmt.Errorf("couldn't register node collector: %s", err) } var handler http.Handler if h.includeExporterMetrics { handler = promhttp.HandlerFor( prometheus.Gatherers{h.exporterMetricsRegistry, r}, promhttp.HandlerOpts{ ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0), ErrorHandling: promhttp.ContinueOnError, MaxRequestsInFlight: h.maxRequests, Registry: h.exporterMetricsRegistry, }, ) // Note that we have to use h.exporterMetricsRegistry here to // use the same promhttp metrics for all expositions. handler = promhttp.InstrumentMetricHandler( h.exporterMetricsRegistry, handler, ) } else { handler = promhttp.HandlerFor( r, promhttp.HandlerOpts{ ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0), ErrorHandling: promhttp.ContinueOnError, MaxRequestsInFlight: h.maxRequests, }, ) } return handler, nil } func main() { var ( metricsPath = kingpin.Flag( "web.telemetry-path", "Path under which to expose metrics.", ).Default("/metrics").String() disableExporterMetrics = kingpin.Flag( "web.disable-exporter-metrics", "Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).", ).Bool() maxRequests = kingpin.Flag( "web.max-requests", "Maximum number of parallel scrape requests. Use 0 to disable.", ).Default("40").Int() disableDefaultCollectors = kingpin.Flag( "collector.disable-defaults", "Set all collectors to disabled by default.", ).Default("false").Bool() maxProcs = kingpin.Flag( "runtime.gomaxprocs", "The target number of CPUs Go will run on (GOMAXPROCS)", ).Envar("GOMAXPROCS").Default("1").Int() toolkitFlags = kingpinflag.AddFlags(kingpin.CommandLine, ":9100") ) promlogConfig := &promlog.Config{} flag.AddFlags(kingpin.CommandLine, promlogConfig) kingpin.Version(version.Print("node_exporter")) kingpin.CommandLine.UsageWriter(os.Stdout) kingpin.HelpFlag.Short('h') kingpin.Parse() logger := promlog.New(promlogConfig) if *disableDefaultCollectors { collector.DisableDefaultCollectors() } level.Info(logger).Log("msg", "Starting node_exporter", "version", version.Info()) level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext()) if user, err := user.Current(); err == nil && user.Uid == "0" { level.Warn(logger).Log("msg", "Node Exporter is running as root user. 
This exporter is designed to run as unprivileged user, root is not required.") } runtime.GOMAXPROCS(*maxProcs) level.Debug(logger).Log("msg", "Go MAXPROCS", "procs", runtime.GOMAXPROCS(0)) http.Handle(*metricsPath, newHandler(!*disableExporterMetrics, *maxRequests, logger)) if *metricsPath != "/" { landingConfig := web.LandingConfig{ Name: "Node Exporter", Description: "Prometheus Node Exporter", Version: version.Info(), Links: []web.LandingLinks{ { Address: *metricsPath, Text: "Metrics", }, }, } landingPage, err := web.NewLandingPage(landingConfig) if err != nil { level.Error(logger).Log("err", err) os.Exit(1) } http.Handle("/", landingPage) } server := &http.Server{} if err := web.ListenAndServe(server, toolkitFlags, logger); err != nil { level.Error(logger).Log("err", err) os.Exit(1) } } node_exporter-1.7.0/node_exporter_test.go000066400000000000000000000074611452426057600206620ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "io" "net/http" "os" "os/exec" "path/filepath" "testing" "time" "github.com/prometheus/procfs" ) var ( binary = filepath.Join(os.Getenv("GOPATH"), "bin/node_exporter") ) const ( address = "localhost:19100" ) func TestFileDescriptorLeak(t *testing.T) { if _, err := os.Stat(binary); err != nil { t.Skipf("node_exporter binary not available, try to run `make build` first: %s", err) } fs, err := procfs.NewDefaultFS() if err != nil { t.Skipf("proc filesystem is not available, but currently required to read number of open file descriptors: %s", err) } if _, err := fs.Stat(); err != nil { t.Errorf("unable to read process stats: %s", err) } exporter := exec.Command(binary, "--web.listen-address", address) test := func(pid int) error { if err := queryExporter(address); err != nil { return err } proc, err := procfs.NewProc(pid) if err != nil { return err } fdsBefore, err := proc.FileDescriptors() if err != nil { return err } for i := 0; i < 5; i++ { if err := queryExporter(address); err != nil { return err } } fdsAfter, err := proc.FileDescriptors() if err != nil { return err } if want, have := len(fdsBefore), len(fdsAfter); want != have { return fmt.Errorf("want %d open file descriptors after metrics scrape, have %d", want, have) } return nil } if err := runCommandAndTests(exporter, address, test); err != nil { t.Error(err) } } func TestHandlingOfDuplicatedMetrics(t *testing.T) { if _, err := os.Stat(binary); err != nil { t.Skipf("node_exporter binary not available, try to run `make build` first: %s", err) } dir, err := os.MkdirTemp("", "node-exporter") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) content := []byte("dummy_metric 1\n") if err := os.WriteFile(filepath.Join(dir, "a.prom"), content, 0600); err != nil { t.Fatal(err) } if err := os.WriteFile(filepath.Join(dir, "b.prom"), content, 0600); err != nil { t.Fatal(err) } exporter := exec.Command(binary, "--web.listen-address", address, "--collector.textfile.directory", dir) test := func(_ int) error { return 
queryExporter(address) } if err := runCommandAndTests(exporter, address, test); err != nil { t.Error(err) } } func queryExporter(address string) error { resp, err := http.Get(fmt.Sprintf("http://%s/metrics", address)) if err != nil { return err } b, err := io.ReadAll(resp.Body) if err != nil { return err } if err := resp.Body.Close(); err != nil { return err } if want, have := http.StatusOK, resp.StatusCode; want != have { return fmt.Errorf("want /metrics status code %d, have %d. Body:\n%s", want, have, b) } return nil } func runCommandAndTests(cmd *exec.Cmd, address string, fn func(pid int) error) error { if err := cmd.Start(); err != nil { return fmt.Errorf("failed to start command: %s", err) } time.Sleep(50 * time.Millisecond) for i := 0; i < 10; i++ { if err := queryExporter(address); err == nil { break } time.Sleep(500 * time.Millisecond) if cmd.Process == nil || i == 9 { return fmt.Errorf("can't start command") } } errc := make(chan error) go func(pid int) { errc <- fn(pid) }(cmd.Process.Pid) err := <-errc if cmd.Process != nil { cmd.Process.Kill() } return err } node_exporter-1.7.0/staticcheck.conf000066400000000000000000000000331452426057600175370ustar00rootroot00000000000000checks = ["all", "ST1003"] node_exporter-1.7.0/test_image.sh000077500000000000000000000011361452426057600170700ustar00rootroot00000000000000#!/bin/bash set -exo pipefail docker_image=$1 port=$2 container_id='' wait_start() { for in in {1..10}; do if /usr/bin/curl -s -m 5 -f "http://localhost:${port}/metrics" > /dev/null; then docker_cleanup exit 0 else sleep 1 fi done exit 1 } docker_start() { container_id=$(docker run -d -p "${port}":"${port}" "${docker_image}") } docker_cleanup() { docker kill "${container_id}" } if [[ "$#" -ne 2 ]] ; then echo "Usage: $0 quay.io/prometheus/node-exporter:v0.13.0 9100" >&2 exit 1 fi docker_start wait_start node_exporter-1.7.0/text_collector_examples/000077500000000000000000000000001452426057600213375ustar00rootroot00000000000000node_exporter-1.7.0/text_collector_examples/README.md000066400000000000000000000002221452426057600226120ustar00rootroot00000000000000# Text collector example scripts The scripts have been moved to https://github.com/prometheus-community/node-exporter-textfile-collector-scripts node_exporter-1.7.0/tls_config_noAuth.bad.yml000066400000000000000000000000001452426057600213120ustar00rootroot00000000000000node_exporter-1.7.0/ttar000077500000000000000000000254571452426057600153240ustar00rootroot00000000000000#!/usr/bin/env bash # Purpose: plain text tar format # Limitations: - only suitable for text files, directories, and symlinks # - stores only filename, content, and mode # - not designed for untrusted input # # Note: must work with bash version 3.2 (macOS) # Copyright 2017 Roger Luethi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
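# For orientation, a minimal sketch of the record format that the create and
# extract functions below write and read (file names, contents, and modes here
# are purely illustrative):
#
#   Path: fixtures/example.txt
#   Lines: 1
#   hello world
#   Mode: 644
#   # ttar - - - ...            (separator line emitted by the div function)
#   Directory: fixtures/subdir
#   Mode: 755
#
# Null bytes in file contents are stored as the literal token NULLBYTE, and a
# file that does not end in a newline is terminated with a literal EOF marker.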
set -o errexit -o nounset # Sanitize environment (for instance, standard sorting of glob matches) export LC_ALL=C path="" CMD="" ARG_STRING="$*" #------------------------------------------------------------------------------ # Not all sed implementations can work on null bytes. In order to make ttar # work out of the box on macOS, use Python as a stream editor. USE_PYTHON=0 PYTHON_CREATE_FILTER=$(cat << 'PCF' #!/usr/bin/env python import re import sys for line in sys.stdin: line = re.sub(r'EOF', r'\EOF', line) line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) line = re.sub('\x00', r'NULLBYTE', line) sys.stdout.write(line) PCF ) PYTHON_EXTRACT_FILTER=$(cat << 'PEF' #!/usr/bin/env python import re import sys for line in sys.stdin: line = re.sub(r'(?/dev/null; then echo "ERROR Python not found. Aborting." exit 2 fi USE_PYTHON=1 fi } #------------------------------------------------------------------------------ function usage { bname=$(basename "$0") cat << USAGE Usage: $bname [-C ] -c -f (create archive) $bname -t -f (list archive contents) $bname [-C ] -x -f (extract archive) Options: -C (change directory) -v (verbose) Example: Change to sysfs directory, create ttar file from fixtures directory $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ USAGE exit "$1" } function vecho { if [ "${VERBOSE:-}" == "yes" ]; then echo >&7 "$@" fi } function set_cmd { if [ -n "$CMD" ]; then echo "ERROR: more than one command given" echo usage 2 fi CMD=$1 } unset VERBOSE while getopts :cf:htxvC: opt; do case $opt in c) set_cmd "create" ;; f) ARCHIVE=$OPTARG ;; h) usage 0 ;; t) set_cmd "list" ;; x) set_cmd "extract" ;; v) VERBOSE=yes exec 7>&1 ;; C) CDIR=$OPTARG ;; *) echo >&2 "ERROR: invalid option -$OPTARG" echo usage 1 ;; esac done # Remove processed options from arguments shift $(( OPTIND - 1 )); if [ "${CMD:-}" == "" ]; then echo >&2 "ERROR: no command given" echo usage 1 elif [ "${ARCHIVE:-}" == "" ]; then echo >&2 "ERROR: no archive name given" echo usage 1 fi function list { local path="" local size=0 local line_no=0 local ttar_file=$1 if [ -n "${2:-}" ]; then echo >&2 "ERROR: too many arguments." echo usage 1 fi if [ ! -e "$ttar_file" ]; then echo >&2 "ERROR: file not found ($ttar_file)" echo usage 1 fi while read -r line; do line_no=$(( line_no + 1 )) if [ $size -gt 0 ]; then size=$(( size - 1 )) continue fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} elif [[ $line =~ ^Lines:\ (.*)$ ]]; then size=${BASH_REMATCH[1]} echo "$path" elif [[ $line =~ ^Directory:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} echo "$path/" elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then echo "$path -> ${BASH_REMATCH[1]}" fi done < "$ttar_file" } function extract { local path="" local size=0 local line_no=0 local ttar_file=$1 if [ -n "${2:-}" ]; then echo >&2 "ERROR: too many arguments." echo usage 1 fi if [ ! 
-e "$ttar_file" ]; then echo >&2 "ERROR: file not found ($ttar_file)" echo usage 1 fi while IFS= read -r line; do line_no=$(( line_no + 1 )) local eof_without_newline if [ "$size" -gt 0 ]; then if [[ "$line" =~ [^\\]EOF ]]; then # An EOF not preceeded by a backslash indicates that the line # does not end with a newline eof_without_newline=1 else eof_without_newline=0 fi # Replace NULLBYTE with null byte if at beginning of line # Replace NULLBYTE with null byte unless preceeded by backslash # Remove one backslash in front of NULLBYTE (if any) # Remove EOF unless preceeded by backslash # Remove one backslash in front of EOF if [ $USE_PYTHON -eq 1 ]; then echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" else # The repeated pattern makes up for sed's lack of negative # lookbehind assertions (for consecutive null bytes). echo -n "$line" | \ sed -e 's/^NULLBYTE/\x0/g; s/\([^\\]\)NULLBYTE/\1\x0/g; s/\([^\\]\)NULLBYTE/\1\x0/g; s/\\NULLBYTE/NULLBYTE/g; s/\([^\\]\)EOF/\1/g; s/\\EOF/EOF/g; ' >> "$path" fi if [[ "$eof_without_newline" -eq 0 ]]; then echo >> "$path" fi size=$(( size - 1 )) continue fi if [[ $line =~ ^Path:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} if [ -e "$path" ] || [ -L "$path" ]; then rm "$path" fi elif [[ $line =~ ^Lines:\ (.*)$ ]]; then size=${BASH_REMATCH[1]} # Create file even if it is zero-length. touch "$path" vecho " $path" elif [[ $line =~ ^Mode:\ (.*)$ ]]; then mode=${BASH_REMATCH[1]} chmod "$mode" "$path" vecho "$mode" elif [[ $line =~ ^Directory:\ (.*)$ ]]; then path=${BASH_REMATCH[1]} mkdir -p "$path" vecho " $path/" elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then ln -s "${BASH_REMATCH[1]}" "$path" vecho " $path -> ${BASH_REMATCH[1]}" elif [[ $line =~ ^# ]]; then # Ignore comments between files continue else echo >&2 "ERROR: Unknown keyword on line $line_no: $line" exit 1 fi done < "$ttar_file" } function div { echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ "- - - - - -" } function get_mode { local mfile=$1 if [ -z "${STAT_OPTION:-}" ]; then if stat -c '%a' "$mfile" >/dev/null 2>&1; then # GNU stat STAT_OPTION='-c' STAT_FORMAT='%a' else # BSD stat STAT_OPTION='-f' # Octal output, user/group/other (omit file type, sticky bit) STAT_FORMAT='%OLp' fi fi stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" } function _create { shopt -s nullglob local mode local eof_without_newline while (( "$#" )); do file=$1 if [ -L "$file" ]; then echo "Path: $file" symlinkTo=$(readlink "$file") echo "SymlinkTo: $symlinkTo" vecho " $file -> $symlinkTo" div elif [ -d "$file" ]; then # Strip trailing slash (if there is one) file=${file%/} echo "Directory: $file" mode=$(get_mode "$file") echo "Mode: $mode" vecho "$mode $file/" div # Find all files and dirs, including hidden/dot files for x in "$file/"{*,.[^.]*}; do _create "$x" done elif [ -f "$file" ]; then echo "Path: $file" lines=$(wc -l "$file"|awk '{print $1}') eof_without_newline=0 if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then eof_without_newline=1 lines=$((lines+1)) fi echo "Lines: $lines" # Add backslash in front of EOF # Add backslash in front of NULLBYTE # Replace null byte with NULLBYTE if [ $USE_PYTHON -eq 1 ]; then < "$file" python -c "$PYTHON_CREATE_FILTER" else < "$file" \ sed 's/EOF/\\EOF/g; s/NULLBYTE/\\NULLBYTE/g; s/\x0/NULLBYTE/g; ' fi if [[ "$eof_without_newline" -eq 1 ]]; then # Finish line with EOF to indicate that the original line did # not end with a linefeed echo "EOF" fi mode=$(get_mode "$file") echo "Mode: $mode" vecho 
"$mode $file" div else echo >&2 "ERROR: file not found ($file in $(pwd))" exit 2 fi shift done } function create { ttar_file=$1 shift if [ -z "${1:-}" ]; then echo >&2 "ERROR: missing arguments." echo usage 1 fi if [ -e "$ttar_file" ]; then rm "$ttar_file" fi exec > "$ttar_file" echo "# Archive created by ttar $ARG_STRING" _create "$@" } test_environment if [ -n "${CDIR:-}" ]; then if [[ "$ARCHIVE" != /* ]]; then # Relative path: preserve the archive's location before changing # directory ARCHIVE="$(pwd)/$ARCHIVE" fi cd "$CDIR" fi "$CMD" "$ARCHIVE" "$@"