pax_global_header00006660000000000000000000000064147457721110014523gustar00rootroot0000000000000052 comment=0ba969c6574103f9c90134ed77cd8a235309f92d prometheus-frr-exporter-1.4.0/000077500000000000000000000000001474577211100163555ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/.circleci/000077500000000000000000000000001474577211100202105ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/.circleci/config.yml000066400000000000000000000017631474577211100222070ustar00rootroot00000000000000version: 2.1 executors: golang: docker: # Whenever the Go version is updated here, .promu.yml, Dockerfile and line 6 of this file should also be updated. - image: cimg/go:1.23 jobs: test: executor: golang steps: - checkout - run: make test build: executor: golang steps: - checkout - setup_remote_docker - run: make setup_promu - run: ./promu crossbuild - run: ./promu crossbuild tarballs - run: ./promu checksum .tarballs release: executor: golang steps: - checkout - setup_remote_docker - run: make setup_promu - run: ./promu crossbuild - run: ./promu crossbuild tarballs - run: ./promu checksum .tarballs - run: ./promu release .tarballs workflows: version: 2 build_and_release: jobs: - test - build - release: filters: branches: ignore: /.*/ tags: only: /v[0-9]+(\.[0-9]+)*(-.*)*/ prometheus-frr-exporter-1.4.0/.github/000077500000000000000000000000001474577211100177155ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/.github/workflows/000077500000000000000000000000001474577211100217525ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/.github/workflows/docker_upload.yml000066400000000000000000000033031474577211100253070ustar00rootroot00000000000000name: frr_exporter_docker_upload on: push: branches: - master release: types: created jobs: docker: runs-on: ubuntu-20.04 steps: - name: Checkout uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - name: Login to 
DockerHub uses: docker/login-action@v2 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Check + set version tag run: echo "GIT_TAG=$(git describe --candidates=0 --tags 2> /dev/null || echo latest_non_release)" >> $GITHUB_ENV - name: Build and push image uses: docker/build-push-action@v3 with: context: . platforms: linux/amd64,linux/arm64 push: true tags: | tynany/frr_exporter:${{ env.GIT_TAG }} ghcr.io/tynany/frr_exporter:${{ env.GIT_TAG }} # only push latest tag if a release. - name: Build and push image latest tag if: env.GIT_TAG != 'latest_non_release' uses: docker/build-push-action@v3 with: context: . platforms: linux/amd64,linux/arm64 push: true tags: | tynany/frr_exporter:latest ghcr.io/tynany/frr_exporter:latest - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} prometheus-frr-exporter-1.4.0/.github/workflows/golangci-lint.yml000066400000000000000000000005321474577211100252240ustar00rootroot00000000000000name: golangci-lint on: push: pull_request: jobs: golangci: name: lint runs-on: ubuntu-latest steps: - uses: actions/setup-go@v5 with: go-version: stable - uses: actions/checkout@v4 - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: version: v1.61.0 prometheus-frr-exporter-1.4.0/.gitignore000066400000000000000000000007241474577211100203500ustar00rootroot00000000000000 # Created by https://www.gitignore.io/api/go # Edit at https://www.gitignore.io/?templates=go ### Go ### # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib # Test binary, built with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out # Dependency directories (remove the comment below to include it) # vendor/ ### Go Patch ### /vendor/ /Godeps/ frr_exporter # End of 
https://www.gitignore.io/api/go prometheus-frr-exporter-1.4.0/.golangci.yml000066400000000000000000000003061474577211100207400ustar00rootroot00000000000000linters-settings: errcheck: exclude-functions: - (net/http.ResponseWriter).Write linters: enable: - gofmt - gofumpt - goimports - misspell - revive - sloglint prometheus-frr-exporter-1.4.0/.promu.yml000066400000000000000000000016371474577211100203270ustar00rootroot00000000000000go: # Whenever the Go version is updated here, .circle/config.yml and Dockerfile should also be updated. version: 1.23 repository: path: github.com/tynany/frr_exporter build: binaries: - name: frr_exporter flags: -a -tags 'netgo static_build' ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Branch={{.Branch}} -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} crossbuild: platforms: - linux/amd64 - linux/386 - linux/arm - linux/arm64 - darwin/amd64 - darwin/arm - darwin/arm64 - freebsd/amd64 - freebsd/386 - freebsd/arm - freebsd/arm64 prometheus-frr-exporter-1.4.0/Dockerfile000066400000000000000000000006451474577211100203540ustar00rootroot00000000000000# Whenever the Go version is updated here, .circle/config.yml and .promu.yml should also be updated. FROM golang:1.22 WORKDIR /go/src/github.com/tynany/frr_exporter COPY . /go/src/github.com/tynany/frr_exporter RUN make setup_promu RUN ./promu build RUN ls -lah FROM quay.io/frrouting/frr:10.1.2 WORKDIR /app COPY --from=0 /go/src/github.com/tynany/frr_exporter/frr_exporter . 
EXPOSE 9342 ENTRYPOINT [ "./frr_exporter"] prometheus-frr-exporter-1.4.0/LICENSE000066400000000000000000000020461474577211100173640ustar00rootroot00000000000000MIT License Copyright (c) 2018 Tynan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. prometheus-frr-exporter-1.4.0/Makefile000066400000000000000000000004631474577211100200200ustar00rootroot00000000000000PROMU_VERSION := 0.17.0 setup_promu: curl -s -L https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).linux-amd64.tar.gz | tar -xvzf - mv promu-$(PROMU_VERSION).linux-amd64/promu . build: ./promu build --prefix $(PREFIX) $(PROMU_BINARIES) test: go test ./... prometheus-frr-exporter-1.4.0/README.md000066400000000000000000000306151474577211100176410ustar00rootroot00000000000000# Free Range Routing (FRR) Exporter Prometheus exporter for FRR version 3.0+ that collects metrics from the FRR Unix sockets and exposes them via HTTP, ready for collecting by Prometheus. 
## Getting Started To run FRR Exporter: ``` ./frr_exporter [flags] ``` To view metrics on the default port (9342) and path (/metrics): ``` http://device:9342/metrics ``` To view available flags: ``` usage: frr_exporter [] Flags: -h, --[no-]help Show context-sensitive help (also try --help-long and --help-man). --[no-]collector.bgp.peer-types Enable the frr_bgp_peer_types_up metric (default: disabled). --collector.bgp.peer-types.keys=type ... Select the keys from the JSON formatted BGP peer description of which the values will be used with the frr_bgp_peer_types_up metric. Supports multiple values (default: type). --[no-]collector.bgp.peer-descriptions Add the value of the desc key from the JSON formatted BGP peer description as a label to peer metrics. (default: disabled). --[no-]collector.bgp.peer-groups Adds the peer's peer group name as a label. (default: disabled). --[no-]collector.bgp.peer-hostnames Adds the peer's hostname as a label. (default: disabled). --[no-]collector.bgp.peer-descriptions.plain-text Use the full text field of the BGP peer description instead of the value of the JSON formatted desc key (default: disabled). --[no-]collector.bgp.advertised-prefixes Enables the frr_exporter_bgp_prefixes_advertised_count_total metric which exports the number of advertised prefixes to a BGP peer. This is an option for older versions of FRR that don't have PfxSent field (default: disabled). --[no-]collector.bgp.accepted-filtered-prefixes Enable retrieval of accepted and filtered BGP prefix counts (default: disabled). --frr.socket.dir-path="/var/run/frr" Path of of the localstatedir containing each daemon's Unix socket. --frr.socket.timeout=20s Timeout when connecting to the FRR daemon Unix sockets --[no-]frr.vtysh Use vtysh to query FRR instead of each daemon's Unix socket (default: disabled, recommended: disabled). --frr.vtysh.path="/usr/bin/vtysh" Path of vtysh. --frr.vtysh.timeout=20s The timeout when running vtysh commands (default: 20s). 
--[no-]frr.vtysh.sudo Enable sudo when executing vtysh commands. --frr.vtysh.options="" Additional options passed to vtysh. --collector.ospf.instances="" Comma-separated list of instance IDs if using multiple OSPF instances --[no-]collector.bfd Enable the bfd collector (default: enabled, to disable use --no-collector.bfd). --[no-]collector.bgp Enable the bgp collector (default: enabled, to disable use --no-collector.bgp). --[no-]collector.bgp6 Enable the bgp6 collector (default: disabled). --[no-]collector.bgpl2vpn Enable the bgpl2vpn collector (default: disabled). --[no-]collector.ospf Enable the ospf collector (default: enabled, to disable use --no-collector.ospf). --[no-]collector.pim Enable the pim collector (default: disabled). --[no-]collector.vrrp Enable the vrrp collector (default: disabled). --web.telemetry-path="/metrics" Path under which to expose metrics. --web.listen-address=:9342 ... Addresses on which to expose metrics and web interface. Repeatable for multiple addresses. Examples: `:9100` or `[::1]:9100` for http, `vsock://:9100` for vsock --web.config.file="" Path to configuration file that can enable TLS or authentication. See: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md --log.level=info Only log messages with the given severity or above. One of: [debug, info, warn, error] --log.format=logfmt Output format of log messages. One of: [logfmt, json] --[no-]version Show application version. 
``` Promethues configuraiton: ``` scrape_configs: - job_name: frr static_configs: - targets: - device1:9342 - device2:9342 relabel_configs: - source_labels: [__address__] regex: "(.*):\d+" target: instance ``` ## Docker A Docker container is available at: - Docker Hub: [tynany/frr_exporter](https://hub.docker.com/r/tynany/frr_exporter) - GitHub Container Registry: [ghcr.io/tynany/frr_exporter](https://github.com/tynany/frr_exporter/pkgs/container/frr_exporter) ### Example Mount the FRR socket directory (default `/var/run/frr`) inside the container, passing that directory to FRR Exporter via the `--frr.socket.dir-path` flag: ``` docker run --restart unless-stopped -d -p 9342:9342 -v /var/run/frr:/frr_sockets tynany/frr_exporter "--frr.socket.dir-path=/frr_sockets" ``` #### If using the --frr.vtysh flag (not recommended) Mount the FRR config directory (default `/etc/frr`) and FRR socket directory (default `/var/run/frr`) inside the container, passing those directories to vtysh options `--vty_socket` & `--config_dir` via the FRR Exporter flag `--frr.vtysh.options` if using: ``` docker run --restart unless-stopped -d -p 9342:9342 -v /etc/frr:/frr_config -v /var/run/frr:/frr_sockets tynany/frr_exporter "--frr.vtysh --frr.vtysh.options=--vty_socket=/frr_sockets --config_dir=/frr_config" ``` ## Collectors To disable a default collector, use the `--no-collector.$name` flag, or `--collector.$name` to enable it. ### Enabled by Default Name | Description --- | --- BGP | Per VRF and address family (currently support unicast only) BGP metrics:
- RIB entries
- RIB memory usage
- Configured peer count
- Peer memory usage
- Configure peer group count
- Peer group memory usage
- Peer messages in
- Peer messages out
- Peer received prefixes
- Peer advertised prefixes
- Peer state (established/down)
- Peer uptime OSPFv4 | Per VRF OSPF metrics:
- Neighbors
- Neighbor adjacencies BFD | BFD Peer metrics:
- Count of total number of peers
- BFD Peer State (up/down)
- BFD Peer Uptime in seconds ### Disabled by Default Name | Description --- | --- BGP IPv6 | Per VRF and address family (currently support unicast only) BGP IPv6 metrics:
- RIB entries
- RIB memory usage
- Configured peer count
- Peer memory usage
- Configure peer group count
- Peer group memory usage
- Peer messages in
- Peer messages out
- Peer active prfixes
- Peer state (established/down)
- Peer uptime BGP L2VPN | Per VRF and address family (currently support EVPN only) BGP L2VPN EVPN metrics:
- RIB entries
- RIB memory usage
- Configured peer count
- Peer memory usage
- Configure peer group count
- Peer group memory usage
- Peer messages in
- Peer messages out
- Peer active prfixes
- Peer state (established/down)
- Peer uptime VRRP | Per VRRP Interface, VrID and Protocol:
- Rx and TX statistics
- VRRP Status
- VRRP State Transitions
PIM | PIM metrics:
- Neighbor count
- Neighbor uptime ### Sending commands to FRR By default, FRR Exporter sends commands to FRR via the Unix sockets exposed by each FRR daemon (e.g. bgpd, ospfd, etc), usually located in `/var/run/frr`. If the sockets are located in a folder other than `/var/run/frr`, pass that directory to FRR Exporter via the `--frr.socket.dir-path` flag. #### VTYSH If desired, FRR Exporter can interface with FRR via the `vtysh` command by passing the `--frr.vtysh` flag to FRR Exporter. This is not recommended, and is far slower than FRR Exporter's default way of sending commands to FRR via Unix sockets. The default timeout is 20s but can be modified via the `--frr.vtysh.timeout` flag. ### BGP: Peer Description Labels The description of a BGP peer can be added as a label to all peer metrics by passing the `--collector.bgp.peer-descriptions` flag. The peer description must be JSON formatted with a `desc` field. Example configuration: ``` router bgp 64512 neighbor 192.168.0.1 remote-as 64513 neighbor 192.168.0.1 description {"desc":"important peer"} ``` If an unstructured description is preferred, additionally to `--collector.bgp.peer-descriptions` pass the `--collector.bgp.peer-descriptions.plain-text` flag. Example configuration: ``` router bgp 64512 neighbor 192.168.0.1 remote-as 64513 neighbor 192.168.0.1 description important peer ``` Note, it is recommended to leave this feature disabled as peer descriptions can easily change, resulting in a new time series. ### BGP: Advertised Prefixes to a Peer This is an option for older versions of FRR. If your FRR shows the "PfxSnt" field for Peers in the Established state in the output of `show bgp summary json`, you don't need to enable this option. The number of prefixes advertised to a BGP peer can be enabled (i.e. the `frr_exporter_bgp_prefixes_advertised_count_total` metric) by passing the `--collector.bgp.advertised-prefixes` flag. 
Please note, older FRR versions do not expose a summary of prefixes advertised to BGP peers, so each peer needs to be queried individually. For example, if 20 BGP peers are configured, 20 'sh ip bgp neigh X.X.X.X advertised-routes json' commands are sent to the Unix socket (or `vtysh` if the `--frr.vtysh` is used). This can be slow, especially if using the `--frr.vtysh` flag. The commands are run in parallel by FRR Exporter, but FRR executes them in serial. Due to the potential negative performance implications of running `vtysh` for every BGP peer, this metric is disabled by default. ### BGP: frr_bgp_peer_types_up FRR Exporter exposes a special metric, `frr_bgp_peer_types_up`, that can be used in scenarios where you want to create Prometheus queries that report on the number of types of BGP peers that are currently established, such as for Alertmanager. To implement this metric, a JSON formatted description must be configured on your BGP group. FRR Exporter will then use the value from the keys specific by the `--collector.bgp.peer-types.keys` flag (the default is `type`), and aggregates all BGP peers that are currently established and configured with that type. For example, if you want to know how many BGP peers are currently established that provide internet, you'd set the description of all BGP groups that provide internet to `{"type":"internet"}` and query Prometheus with `frr_bgp_peer_types_up{type="internet"})`. Going further, if you want to create an alert when the number of established BGP peers that provide internet is 1 or less, you'd use `sum(frr_bgp_peer_types_up{type="internet"}) <= 1`. To enable `frr_bgp_peer_types_up`, use the `--collector.bgp.peer-types` flag. ### OSPF: Multiple Instance Support [OSPF Mulit-instace](https://docs.frrouting.org/en/latest/ospfd.html#multi-instance-support) is supported by passing a comma-separated list of instances ID to FRR Exporter via the `--collector.ospf.instances` flag. 
For example, if `/etc/frr/daemons` contains the below configuration, FRR Exporter should be run as: `./frr_exporter --collector.ospf.instances=1,5,6`. ``` ... ospfd=yes ospfd_instances=1,5,6 ... ``` Note: FRR Exporter does not support multi-instance when using `vtysh` to interface with FRR via the `--frr.vtysh` flag for the following reasons: * Invalid JSON is returned when OSPF commands are executed by `vtysh`. For example,\ `show ip ospf vrf all interface json` returns the concatenated JSON from each OSPF instance. * Vtysh does not support `vrf` and `instance` in the same commend. For example,\ `show ip ospf 1 vrf all interface json` is an invalid command. ## Development ### Building ``` go get github.com/tynany/frr_exporter cd ${GOPATH}/src/github.com/prometheus/frr_exporter go build ``` This project uses https://golangci-lint.run in GitHub Actions. You can lint your code locally before submitting a PR by following the installation instructions at https://golangci-lint.run/usage/install/ and run prior to submitting changes: ``` golangci-lint run ``` ## TODO - Collector and main tests - OSPF6 - ISIS - Additional BGP SAFI - Feel free to submit a new feature request prometheus-frr-exporter-1.4.0/collector/000077500000000000000000000000001474577211100203435ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/collector/bfd.go000066400000000000000000000057231474577211100214340ustar00rootroot00000000000000package collector import ( "encoding/json" "log/slog" "github.com/prometheus/client_golang/prometheus" ) var bfdSubsystem = "bfd" func init() { registerCollector(bfdSubsystem, enabledByDefault, NewBFDCollector) } type bfdCollector struct { logger *slog.Logger descriptions map[string]*prometheus.Desc } // NewBFDCollector collects BFD metrics, implemented as per the Collector interface. 
func NewBFDCollector(logger *slog.Logger) (Collector, error) { return &bfdCollector{logger: logger, descriptions: getBFDDesc()}, nil } func getBFDDesc() map[string]*prometheus.Desc { countLabels := []string{} peerLabels := []string{"local", "peer"} return map[string]*prometheus.Desc{ "bfdPeerCount": colPromDesc(bfdSubsystem, "peer_count", "Number of peers detected.", countLabels), "bfdPeerUptime": colPromDesc(bfdSubsystem, "peer_uptime", "Uptime of bfd peer in seconds", peerLabels), "bfdPeerState": colPromDesc(bfdSubsystem, "peer_state", "State of the bfd peer (1 = Up, 0 = Down).", peerLabels), } } // Update implemented as per the Collector interface. func (c *bfdCollector) Update(ch chan<- prometheus.Metric) error { cmd := "show bfd peers json" jsonBFDInterface, err := executeBFDCommand(cmd) if err != nil { return err } if err = processBFDPeers(ch, jsonBFDInterface, c.descriptions); err != nil { return cmdOutputProcessError(cmd, string(jsonBFDInterface), err) } return nil } func processBFDPeers(ch chan<- prometheus.Metric, jsonBFDInterface []byte, bfdDesc map[string]*prometheus.Desc) error { var bfdPeers []bfdPeer if err := json.Unmarshal(jsonBFDInterface, &bfdPeers); err != nil { return err } // metric is a count of the number of peers newGauge(ch, bfdDesc["bfdPeerCount"], float64(len(bfdPeers))) for _, p := range bfdPeers { labels := []string{p.Local, p.Peer} // get the uptime of the connection to the peer in seconds newGauge(ch, bfdDesc["bfdPeerUptime"], float64(p.Uptime), labels...) // state of connection to the bfd peer, up or down var bfdState float64 if p.Status == "up" { bfdState = 1 } newGauge(ch, bfdDesc["bfdPeerState"], bfdState, labels...) 
} return nil } type bfdPeer struct { Multihop bool `json:"multihop"` Peer string `json:"peer"` Local string `json:"local"` Vrf string `json:"vrf"` ID uint32 `json:"id"` RemoteID uint32 `json:"remote-id"` Status string `json:"status"` Uptime uint64 `json:"uptime"` Diagnostic string `json:"diagnostic"` RemoteDiagnostic string `json:"remote-diagnostic"` ReceiveInterval uint32 `json:"receive-interval"` TransmitInterval uint32 `json:"transmit-interval"` EchoInterval uint32 `json:"echo-interval"` RemoteReceiveInterval uint32 `json:"remote-receive-interval"` RemoteTransmitInterval uint32 `json:"remote-transmit-interval"` RemoteEchoInterval uint32 `json:"remote-echo-interval"` } prometheus-frr-exporter-1.4.0/collector/bfd_test.go000066400000000000000000000045441474577211100224730ustar00rootroot00000000000000package collector import ( "fmt" "regexp" "strings" "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) var expectedBFDMetrics = map[string]float64{ "frr_bfd_peer_count{}": 3, "frr_bfd_peer_uptime{local=10.10.141.81,peer=10.10.141.61}": 847716, "frr_bfd_peer_state{local=10.10.141.81,peer=10.10.141.61}": 1, "frr_bfd_peer_uptime{local=10.10.141.81,peer=10.10.141.62}": 847595, "frr_bfd_peer_state{local=10.10.141.81,peer=10.10.141.62}": 1, "frr_bfd_peer_uptime{local=10.10.141.81,peer=10.10.141.63}": 847888, "frr_bfd_peer_state{local=10.10.141.81,peer=10.10.141.63}": 0, } func TestProcessBFDPeers(t *testing.T) { ch := make(chan prometheus.Metric, 1024) if err := processBFDPeers(ch, readTestFixture(t, "show_bfd_peers.json"), getBFDDesc()); err != nil { t.Errorf("error calling processBFDPeers ipv4unicast: %s", err) } close(ch) // Create a map of following format: // key: metric_name{labelname:labelvalue,...} // value: metric value gotMetrics := make(map[string]float64) for { msg, more := <-ch if !more { break } metric := &dto.Metric{} if err := msg.Write(metric); err != nil { t.Errorf("error writing metric: %s", err) } var 
labels []string for _, label := range metric.GetLabel() { labels = append(labels, fmt.Sprintf("%s=%s", label.GetName(), label.GetValue())) } var value float64 if metric.GetCounter() != nil { value = metric.GetCounter().GetValue() } else if metric.GetGauge() != nil { value = metric.GetGauge().GetValue() } re, err := regexp.Compile(`.*fqName: "(.*)", help:.*`) if err != nil { t.Errorf("could not compile regex: %s", err) } metricName := re.FindStringSubmatch(msg.Desc().String())[1] gotMetrics[fmt.Sprintf("%s{%s}", metricName, strings.Join(labels, ","))] = value } for metricName, metricVal := range gotMetrics { if expectedMetricVal, ok := expectedBFDMetrics[metricName]; ok { if expectedMetricVal != metricVal { t.Errorf("metric %s expected value %v got %v", metricName, expectedMetricVal, metricVal) } } else { t.Errorf("unexpected metric: %s : %v", metricName, metricVal) } } for expectedMetricName, expectedMetricVal := range expectedBFDMetrics { if _, ok := gotMetrics[expectedMetricName]; !ok { t.Errorf("missing metric: %s value %v", expectedMetricName, expectedMetricVal) } } } prometheus-frr-exporter-1.4.0/collector/bgp.go000066400000000000000000000464071474577211100214550ustar00rootroot00000000000000package collector import ( "encoding/json" "fmt" "log/slog" "strconv" "strings" "sync" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" ) var ( bgpSubsystem = "bgp" bgpPeerTypes = kingpin.Flag("collector.bgp.peer-types", "Enable the frr_bgp_peer_types_up metric (default: disabled).").Default("False").Bool() frrBGPDescKey = kingpin.Flag("collector.bgp.peer-types.keys", "Select the keys from the JSON formatted BGP peer description of which the values will be used with the frr_bgp_peer_types_up metric. Supports multiple values (default: type).").Default("type").Strings() bgpPeerDescs = kingpin.Flag("collector.bgp.peer-descriptions", "Add the value of the desc key from the JSON formatted BGP peer description as a label to peer metrics. 
(default: disabled).").Default("False").Bool() bgpPeerGroups = kingpin.Flag("collector.bgp.peer-groups", "Adds the peer's peer group name as a label. (default: disabled).").Default("False").Bool() bgpPeerHostnames = kingpin.Flag("collector.bgp.peer-hostnames", "Adds the peer's hostname as a label. (default: disabled).").Default("False").Bool() bgpPeerDescsText = kingpin.Flag("collector.bgp.peer-descriptions.plain-text", "Use the full text field of the BGP peer description instead of the value of the JSON formatted desc key (default: disabled).").Default("False").Bool() bgpAdvertisedPrefixes = kingpin.Flag("collector.bgp.advertised-prefixes", "Enables the frr_exporter_bgp_prefixes_advertised_count_total metric which exports the number of advertised prefixes to a BGP peer. This is an option for older versions of FRR that don't have PfxSent field (default: disabled).").Default("False").Bool() bgpAcceptedFilteredPrefixes = kingpin.Flag("collector.bgp.accepted-filtered-prefixes", "Enable retrieval of accepted and filtered BGP prefix counts (default: disabled).").Default("False").Bool() ) func init() { registerCollector(bgpSubsystem, enabledByDefault, NewBGPCollector) registerCollector(bgpSubsystem+"6", disabledByDefault, NewBGP6Collector) registerCollector(bgpSubsystem+"l2vpn", disabledByDefault, NewBGPL2VPNCollector) } type bgpCollector struct { logger *slog.Logger descriptions map[string]*prometheus.Desc afi string } // NewBGPCollector collects BGP metrics, implemented as per the Collector interface. 
func NewBGPCollector(logger *slog.Logger) (Collector, error) { return &bgpCollector{logger: logger, descriptions: getBGPDesc(), afi: "ipv4"}, nil } func getBGPDesc() map[string]*prometheus.Desc { bgpLabels := []string{"vrf", "afi", "safi", "local_as"} bgpPeerTypeLabels := []string{"type", "afi", "safi"} bgpPeerLabels := append(bgpLabels, "peer", "peer_as") if *bgpPeerDescs { bgpPeerLabels = append(bgpPeerLabels, "peer_desc") } if *bgpPeerHostnames { bgpPeerLabels = append(bgpPeerLabels, "peer_hostname") } if *bgpPeerGroups { bgpPeerLabels = append(bgpPeerLabels, "peer_group") } return map[string]*prometheus.Desc{ "ribCount": colPromDesc(bgpSubsystem, "rib_count_total", "Number of routes in the RIB.", bgpLabels), "ribMemory": colPromDesc(bgpSubsystem, "rib_memory_bytes", "Memory consumbed by the RIB.", bgpLabels), "peerCount": colPromDesc(bgpSubsystem, "peers_count_total", "Number peers configured.", bgpLabels), "peerMemory": colPromDesc(bgpSubsystem, "peers_memory_bytes", "Memory consumed by peers.", bgpLabels), "peerGroupCount": colPromDesc(bgpSubsystem, "peer_groups_count_total", "Number of peer groups configured.", bgpLabels), "peerGroupMemory": colPromDesc(bgpSubsystem, "peer_groups_memory_bytes", "Memory consumed by peer groups.", bgpLabels), "msgRcvd": colPromDesc(bgpSubsystem, "peer_message_received_total", "Number of received messages.", bgpPeerLabels), "msgSent": colPromDesc(bgpSubsystem, "peer_message_sent_total", "Number of sent messages.", bgpPeerLabels), "prefixReceivedCount": colPromDesc(bgpSubsystem, "peer_prefixes_received_count_total", "Number of prefixes received.", bgpPeerLabels), "prefixAdvertisedCount": colPromDesc(bgpSubsystem, "peer_prefixes_advertised_count_total", "Number of prefixes advertised.", bgpPeerLabels), "prefixAcceptedCount": colPromDesc(bgpSubsystem, "peer_prefixes_accepted_count_total", "Number of prefixes accepted.", bgpPeerLabels), "prefixFilteredCount": colPromDesc(bgpSubsystem, "peer_prefixes_filtered_count_total", "Number 
of prefixes filtered.", bgpPeerLabels), "state": colPromDesc(bgpSubsystem, "peer_state", "State of the peer (2 = Administratively Down, 1 = Established, 0 = Down).", bgpPeerLabels), "UptimeSec": colPromDesc(bgpSubsystem, "peer_uptime_seconds", "How long has the peer been up.", bgpPeerLabels), "peerTypesUp": colPromDesc(bgpSubsystem, "peer_types_up", "Total Number of Peer Types that are Up.", bgpPeerTypeLabels), } } // Update implemented as per the Collector interface. func (c *bgpCollector) Update(ch chan<- prometheus.Metric) error { return collectBGP(ch, c.afi, c.logger, c.descriptions) } // NewBGP6Collector collects BGPv6 metrics, implemented as per the Collector interface. func NewBGP6Collector(logger *slog.Logger) (Collector, error) { return &bgpCollector{logger: logger, descriptions: getBGPDesc(), afi: "ipv6"}, nil } type bgpL2VPNCollector struct { logger *slog.Logger descriptions map[string]*prometheus.Desc } // NewBGPL2VPNCollector collects BGP L2VPN metrics, implemented as per the Collector interface. func NewBGPL2VPNCollector(logger *slog.Logger) (Collector, error) { return &bgpL2VPNCollector{logger: logger, descriptions: getBGPL2VPNDesc()}, nil } func getBGPL2VPNDesc() map[string]*prometheus.Desc { bgpDesc := getBGPDesc() labels := []string{"vni", "type", "vxlanIf", "tenantVrf"} metricPrefix := "bgp_l2vpn_evpn" bgpDesc["numMacs"] = colPromDesc(metricPrefix, "mac_count_total", "Number of known MAC addresses", labels) bgpDesc["numArpNd"] = colPromDesc(metricPrefix, "arp_nd_count_total", "Number of ARP / ND entries", labels) bgpDesc["numRemoteVteps"] = colPromDesc(metricPrefix, "remote_vtep_count_total", "Number of known remote VTEPs. A value of -1 indicates a non-integer output from FRR, such as n/a.", labels) return bgpDesc } // Update implemented as per the Collector interface. 
func (c *bgpL2VPNCollector) Update(ch chan<- prometheus.Metric) error {
	// Collect the regular BGP summary metrics for the l2vpn/evpn AFI/SAFI first.
	if err := collectBGP(ch, "l2vpn", c.logger, c.descriptions); err != nil {
		return err
	}
	// Then collect the per-VNI EVPN statistics from zebra.
	cmd := "show evpn vni json"
	jsonBGPL2vpnEvpnSum, err := executeZebraCommand(cmd)
	if err != nil {
		return err
	}
	// Empty output means no VNIs to report; not an error.
	if len(jsonBGPL2vpnEvpnSum) == 0 {
		return nil
	}
	if err := processBgpL2vpnEvpnSummary(ch, jsonBGPL2vpnEvpnSum, c.descriptions); err != nil {
		return cmdOutputProcessError(cmd, string(jsonBGPL2vpnEvpnSum), err)
	}
	return nil
}

// vxLanStats mirrors one VNI entry of the "show evpn vni json" output.
type vxLanStats struct {
	Vni            uint32
	VxlanType      string `json:"type"`
	VxlanIf        string
	NumMacs        uint32
	NumArpNd       uint32
	NumRemoteVteps interface{} // it's possible for the numRemoteVteps field to contain non-int values such as "n\/a"
	TenantVrf      string
}

// processBgpL2vpnEvpnSummary parses "show evpn vni json" output (a map keyed
// by VNI) and emits the per-VNI gauges.
func processBgpL2vpnEvpnSummary(ch chan<- prometheus.Metric, jsonBGPL2vpnEvpnSum []byte, bgpL2vpnDesc map[string]*prometheus.Desc) error {
	var jsonMap map[string]vxLanStats
	if err := json.Unmarshal(jsonBGPL2vpnEvpnSum, &jsonMap); err != nil {
		return err
	}
	for _, vxLanStat := range jsonMap {
		// Labels: "vni", "type", "vxlanIf", "tenantVrf".
		bgpL2vpnLabels := []string{strconv.FormatUint(uint64(vxLanStat.Vni), 10), vxLanStat.VxlanType, vxLanStat.VxlanIf, vxLanStat.TenantVrf}
		newGauge(ch, bgpL2vpnDesc["numMacs"], float64(vxLanStat.NumMacs), bgpL2vpnLabels...)
		newGauge(ch, bgpL2vpnDesc["numArpNd"], float64(vxLanStat.NumArpNd), bgpL2vpnLabels...)
		// numRemoteVteps may be a JSON string such as "n/a"; report -1 in that case.
		remoteVteps, ok := vxLanStat.NumRemoteVteps.(float64)
		if !ok {
			remoteVteps = -1
		}
		newGauge(ch, bgpL2vpnDesc["numRemoteVteps"], remoteVteps, bgpL2vpnLabels...)
	}
	return nil
}

// collectBGP runs the BGP summary command for the given AFI (ipv4, ipv6 or
// l2vpn) across all VRFs and forwards the parsed output to processBGPSummary.
func collectBGP(ch chan<- prometheus.Metric, AFI string, logger *slog.Logger, desc map[string]*prometheus.Desc) error {
	// SAFI is only set for l2vpn (evpn); ipv4/ipv6 use the bare summary command.
	SAFI := ""
	if (AFI == "ipv4") || (AFI == "ipv6") {
		SAFI = ""
	} else if AFI == "l2vpn" {
		SAFI = "evpn"
	}

	cmd := fmt.Sprintf("show bgp vrf all %s %s summary json", AFI, SAFI)
	jsonBGPSum, err := executeBGPCommand(cmd)
	if err != nil {
		return err
	}
	if err := processBGPSummary(ch, jsonBGPSum, AFI, SAFI, logger, desc); err != nil {
		return cmdOutputProcessError(cmd, string(jsonBGPSum), err)
	}
	return nil
}

// processBGPSummary parses the JSON output of "show bgp vrf all ... summary
// json" and emits the per-process and per-peer metrics.
func processBGPSummary(ch chan<- prometheus.Metric, jsonBGPSum []byte, AFI string, SAFI string, logger *slog.Logger, bgpDesc map[string]*prometheus.Desc) error {
	var jsonMap map[string]map[string]bgpProcess
	// If we've specified SAFI in the command, we won't have the SAFI layer of array to loop through,
	// so we simulate it here, rather than using a conditional and writing almost the same code twice.
	if AFI == "l2vpn" && SAFI == "evpn" {
		// Since we need to massage the format a bit, unmarshal into a temp variable.
		var tempJSONMap map[string]bgpProcess
		if err := json.Unmarshal(jsonBGPSum, &tempJSONMap); err != nil {
			return err
		}
		jsonMap = map[string]map[string]bgpProcess{}
		for vrfName, vrfData := range tempJSONMap {
			// "xxxx" pads the key so that the safiName[4:] slicing used below
			// yields "evpn", matching the real ipv4/ipv6 key format.
			jsonMap[vrfName] = map[string]bgpProcess{"xxxxevpn": vrfData}
		}
	} else {
		// We have the format we expect; unmarshal directly into jsonMap.
		if err := json.Unmarshal(jsonBGPSum, &jsonMap); err != nil {
			return err
		}
	}

	// Peer descriptions/groups are only fetched when a flag that needs them is set.
	var peerDesc map[string]bgpVRF
	var err error
	if *bgpPeerTypes || *bgpPeerDescs || *bgpPeerGroups {
		peerDesc, err = getBGPPeerDesc()
		if err != nil {
			return err
		}
	}

	// peerTypes accumulates, per SAFI, the count of established peers per "type" label.
	peerTypes := make(map[string]map[string]float64)
	wg := &sync.WaitGroup{}

	for vrfName, vrfData := range jsonMap {
		for safiName, safiData := range vrfData {
			// The labels are "vrf", "afi", "safi", "local_as"
			localAs := strconv.FormatUint(uint64(safiData.AS), 10)
			procLabels := []string{strings.ToLower(vrfName), strings.ToLower(AFI), strings.ToLower(safiName[4:]),
			localAs}
			// No point collecting metrics if no peers configured.
			if safiData.PeerCount != 0 {
				newGauge(ch, bgpDesc["ribCount"], float64(safiData.RIBCount), procLabels...)
				newGauge(ch, bgpDesc["ribMemory"], float64(safiData.RIBMemory), procLabels...)
				newGauge(ch, bgpDesc["peerCount"], float64(safiData.PeerCount), procLabels...)
				newGauge(ch, bgpDesc["peerMemory"], float64(safiData.PeerMemory), procLabels...)
				newGauge(ch, bgpDesc["peerGroupCount"], float64(safiData.PeerGroupCount), procLabels...)
				newGauge(ch, bgpDesc["peerGroupMemory"], float64(safiData.PeerGroupMemory), procLabels...)

				for peerIP, peerData := range safiData.Peers {
					// The labels are "vrf", "afi", "safi", "local_as", "peer", "remote_as"
					peerLabels := []string{strings.ToLower(vrfName), strings.ToLower(AFI), strings.ToLower(safiName[4:]), localAs, peerIP, strconv.FormatUint(uint64(peerData.RemoteAs), 10)}

					if *bgpPeerDescs {
						d := peerDesc[vrfName].BGPNeighbors[peerIP].Desc
						if *bgpPeerDescsText {
							// The labels are "vrf", "afi", "safi", "local_as", "peer", "remote_as", "peer_desc"
							peerLabels = append(peerLabels, d)
						} else {
							// Assume the FRR BGP neighbor description is JSON formatted, and the description is in the "desc" field.
							jsonDesc := struct{ Desc string }{}
							if err := json.Unmarshal([]byte(d), &jsonDesc); err != nil {
								// Don't return an error as unmarshalling is best effort.
								logger.Error("cannot unmarshal bgp description", "description", peerDesc[vrfName].BGPNeighbors[peerIP].Desc, "err", err)
							}
							// The labels are "vrf", "afi", "safi", "local_as", "peer", "remote_as", "peer_desc"
							peerLabels = append(peerLabels, jsonDesc.Desc)
						}
					}
					if *bgpPeerHostnames {
						peerLabels = append(peerLabels, peerData.Hostname)
					}
					if *bgpPeerGroups {
						peerLabels = append(peerLabels, peerDesc[vrfName].BGPNeighbors[peerIP].PeerGroup)
					}

					// Earlier versions of FRR did not expose a summary of advertised prefixes for all peers,
					// but later versions expose it via the PfxSnt field.
					if peerData.PfxSnt != nil {
						newGauge(ch, bgpDesc["prefixAdvertisedCount"], float64(*peerData.PfxSnt), peerLabels...)
					} else if *bgpAdvertisedPrefixes {
						// Fall back to querying each peer's advertised-routes in parallel.
						wg.Add(1)
						go getPeerAdvertisedPrefixes(ch, wg, AFI, safiName[4:], vrfName, peerIP, logger, bgpDesc, peerLabels...)
					}

					newCounter(ch, bgpDesc["msgRcvd"], float64(peerData.MsgRcvd), peerLabels...)
					newCounter(ch, bgpDesc["msgSent"], float64(peerData.MsgSent), peerLabels...)
					// PeerUptimeMsec is in milliseconds; convert to seconds.
					newGauge(ch, bgpDesc["UptimeSec"], float64(peerData.PeerUptimeMsec)*0.001, peerLabels...)

					// In earlier versions of FRR, the prefixReceivedCount JSON element is used for the number of
					// received prefixes, but in later versions it was changed to PfxRcd.
					prefixReceived := 0.0
					if peerData.PrefixReceivedCount != 0 {
						prefixReceived = float64(peerData.PrefixReceivedCount)
					} else if peerData.PfxRcd != 0 {
						prefixReceived = float64(peerData.PfxRcd)
					}
					newGauge(ch, bgpDesc["prefixReceivedCount"], prefixReceived, peerLabels...)

					if *bgpAcceptedFilteredPrefixes {
						wg.Add(1)
						go getPeerAcceptedFilteredRoutes(ch, wg, AFI, safiName[4:], vrfName, peerIP, prefixReceived, logger, bgpDesc, peerLabels...)
					}

					// peerDescTypes holds the parsed JSON neighbor description used for the peer_types_up metric.
					var peerDescTypes map[string]string
					if *bgpPeerTypes {
						if err := json.Unmarshal([]byte(peerDesc[vrfName].BGPNeighbors[peerIP].Desc), &peerDescTypes); err != nil {
							// Don't return an error as unmarshalling is best effort.
							logger.Error("cannot unmarshal bgp description", "description", peerDesc[vrfName].BGPNeighbors[peerIP].Desc, "err", err)
						}
						// add key for this SAFI if it doesn't exist
						if _, exist := peerTypes[strings.ToLower(safiName[4:])]; !exist {
							peerTypes[strings.ToLower(safiName[4:])] = make(map[string]float64)
						}
						// Pre-create a zero entry for every configured description key so the
						// metric is emitted even when no peer of that type is established.
						for _, descKey := range *frrBGPDescKey {
							if peerDescTypes[descKey] != "" {
								if _, exist := peerTypes[strings.ToLower(safiName[4:])][strings.TrimSpace(peerDescTypes[descKey])]; !exist {
									peerTypes[strings.ToLower(safiName[4:])][strings.TrimSpace(peerDescTypes[descKey])] = 0
								}
							}
						}
					}

					// Map the session state string to the numeric peer_state gauge:
					// 1 = Established, 2 = Administratively Down, 0 = anything else.
					peerState := 0.0
					switch peerDataState := strings.ToLower(peerData.State); peerDataState {
					case "established":
						peerState = 1
						if *bgpPeerTypes {
							for _, descKey := range *frrBGPDescKey {
								if peerDescTypes[descKey] != "" {
									peerTypes[strings.ToLower(safiName[4:])][strings.TrimSpace(peerDescTypes[descKey])]++
								}
							}
						}
					case "idle (admin)":
						peerState = 2
					}
					newGauge(ch, bgpDesc["state"], peerState, peerLabels...)
				}
			}
		}
	}
	// Wait for all per-peer goroutines before emitting the aggregated peer-type counts.
	wg.Wait()
	for peerSafi, peerTypesPerSafi := range peerTypes {
		for peerType, count := range peerTypesPerSafi {
			peerTypeLabels := []string{peerType, strings.ToLower(AFI), peerSafi}
			newGauge(ch, bgpDesc["peerTypesUp"], count, peerTypeLabels...)
} } return nil } func getPeerAdvertisedPrefixes(ch chan<- prometheus.Metric, wg *sync.WaitGroup, AFI string, SAFI string, vrfName string, neighbor string, logger *slog.Logger, bgpDesc map[string]*prometheus.Desc, peerLabels ...string) { defer wg.Done() var cmd string if strings.ToLower(vrfName) == "default" { cmd = fmt.Sprintf("show bgp %s %s neighbors %s advertised-routes json", AFI, SAFI, neighbor) } else { cmd = fmt.Sprintf("show bgp vrf %s %s %s neighbors %s advertised-routes json", vrfName, AFI, SAFI, neighbor) } output, err := executeBGPCommand(cmd) if err != nil { logger.Error("get neighbor advertised prefixes failed", "afi", AFI, "safi", SAFI, "vrf", vrfName, "neighbor", neighbor, "err", err) return } var advertisedPrefixes bgpAdvertisedRoutes if err := json.Unmarshal(output, &advertisedPrefixes); err != nil { logger.Error("get neighbor advertised prefixes failed", "afi", AFI, "safi", SAFI, "vrf", vrfName, "neighbor", neighbor, "err", err) return } newGauge(ch, bgpDesc["prefixAdvertisedCount"], float64(advertisedPrefixes.TotalPrefixCounter), peerLabels...) 
}

type bgpRoutes struct {
	// We care only about the routes
	Routes map[string][]json.RawMessage `json:"routes"`
}

// getPeerAcceptedFilteredRoutes queries a single neighbor's accepted routes
// and emits the accepted and filtered (received - accepted) prefix gauges.
// Errors are logged, not returned, as this runs as a best-effort goroutine.
func getPeerAcceptedFilteredRoutes(ch chan<- prometheus.Metric, wg *sync.WaitGroup, AFI string, SAFI string, vrfName string, neighbor string, prefixesReceived float64, logger *slog.Logger, bgpDesc map[string]*prometheus.Desc, peerLabels ...string) {
	defer wg.Done()

	// The default VRF uses the command form without the "vrf <name>" component.
	var cmd string
	if strings.ToLower(vrfName) == "default" {
		cmd = fmt.Sprintf("show bgp %s %s neighbors %s routes json", strings.ToLower(AFI), strings.ToLower(SAFI), neighbor)
	} else {
		cmd = fmt.Sprintf("show bgp vrf %s %s %s neighbors %s routes json", vrfName, strings.ToLower(AFI), strings.ToLower(SAFI), neighbor)
	}

	output, err := executeBGPCommand(cmd)
	if err != nil {
		logger.Error("get neighbor accepted filtered routes failed", "afi", AFI, "safi", SAFI, "vrf", vrfName, "neighbor", neighbor, "err", err)
		return
	}

	var routes bgpRoutes
	if err := json.Unmarshal(output, &routes); err != nil {
		logger.Error("get neighbor accepted filtered routes failed", "afi", AFI, "safi", SAFI, "vrf", vrfName, "neighbor", neighbor, "err", err)
		return
	}

	// Accepted = number of route entries returned; filtered = received - accepted.
	prefixesAccepted := float64(len(routes.Routes))
	newGauge(ch, bgpDesc["prefixAcceptedCount"], prefixesAccepted, peerLabels...)
	newGauge(ch, bgpDesc["prefixFilteredCount"], prefixesReceived-prefixesAccepted, peerLabels...)
}

// bgpProcess mirrors one AFI/SAFI entry of the BGP summary JSON output.
type bgpProcess struct {
	RouterID        string
	AS              uint32
	RIBCount        uint32
	RIBMemory       uint32
	PeerCount       uint32
	PeerMemory      uint32
	PeerGroupCount  uint32
	PeerGroupMemory uint32
	Peers           map[string]*bgpPeerSession
}

// bgpPeerSession mirrors one peer entry of the BGP summary JSON output.
type bgpPeerSession struct {
	State               string
	RemoteAs            uint32
	MsgRcvd             uint32
	MsgSent             uint32
	PeerUptimeMsec      uint64
	PrefixReceivedCount uint32
	PfxRcd              uint32
	// PfxSnt is a pointer so that its absence (older FRR) can be distinguished from zero.
	PfxSnt   *uint32
	Hostname string
}

type bgpAdvertisedRoutes struct {
	TotalPrefixCounter uint32 `json:"totalPrefixCounter"`
}

// getBGPPeerDesc fetches neighbor details (descriptions, peer groups) for all
// VRFs and parses them into a per-VRF map.
func getBGPPeerDesc() (map[string]bgpVRF, error) {
	output, err := executeBGPCommand("show bgp vrf all neighbors json")
	if err != nil {
		return nil, err
	}
	return processBGPPeerDesc(output)
}

// processBGPPeerDesc unmarshals "show bgp vrf all neighbors json" output into
// a map keyed by VRF name; bgpVRF.UnmarshalJSON does the per-VRF parsing.
func processBGPPeerDesc(output []byte) (map[string]bgpVRF, error) {
	vrfMap := make(map[string]bgpVRF)
	if err := json.Unmarshal([]byte(output), &vrfMap); err != nil {
		return nil, err
	}
	return vrfMap, nil
}

// UnmarshalJSON implements custom decoding for bgpVRF: the source JSON mixes
// VRF metadata keys (vrfId, vrfName) with neighbor entries keyed by address,
// so any key that is not metadata is treated as a neighbor.
func (vrf *bgpVRF) UnmarshalJSON(data []byte) error {
	var raw map[string]*json.RawMessage
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	vrf.BGPNeighbors = make(map[string]bgpNeighbor)
	for k, v := range raw {
		switch k {
		case "vrfId":
			if err := json.Unmarshal(*v, &vrf.ID); err != nil {
				return err
			}
		case "vrfName":
			// This is somewhat redundant, since the VRF name is a top-level key in the source JSON.
if err := json.Unmarshal(*v, &vrf.Name); err != nil { return err } default: var neighbor bgpNeighbor if err := json.Unmarshal(*v, &neighbor); err != nil { return err } vrf.BGPNeighbors[k] = neighbor } } return nil } type bgpVRF struct { ID int `json:"vrfId"` Name string `json:"vrfName"` BGPNeighbors map[string]bgpNeighbor `json:"-"` } type bgpNeighbor struct { Desc string `json:"nbrDesc"` PeerGroup string `json:"peerGroup"` } prometheus-frr-exporter-1.4.0/collector/bgp_test.go000066400000000000000000000325021474577211100225030ustar00rootroot00000000000000package collector import ( "fmt" "reflect" "regexp" "strings" "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) var ( expectedBGPMetrics = map[string]float64{ "frr_bgp_peer_groups_count_total{afi=ipv4,local_as=64512,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_groups_count_total{afi=ipv4,local_as=64612,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_groups_count_total{afi=ipv6,local_as=64512,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_groups_count_total{afi=ipv6,local_as=64612,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_groups_memory_bytes{afi=ipv4,local_as=64512,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_groups_memory_bytes{afi=ipv4,local_as=64612,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_groups_memory_bytes{afi=ipv6,local_as=64512,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_groups_memory_bytes{afi=ipv6,local_as=64612,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_message_received_total{afi=ipv4,local_as=64512,peer=192.168.0.2,peer_as=64513,safi=unicast,vrf=default}": 100.0, "frr_bgp_peer_message_received_total{afi=ipv4,local_as=64512,peer=192.168.0.3,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_message_received_total{afi=ipv4,local_as=64612,peer=192.168.1.2,peer_as=64613,safi=unicast,vrf=red}": 100.0, "frr_bgp_peer_message_received_total{afi=ipv4,local_as=64612,peer=192.168.1.3,peer_as=64614,safi=unicast,vrf=red}": 200.0, 
"frr_bgp_peer_message_received_total{afi=ipv6,local_as=64512,peer=fd00::1,peer_as=64513,safi=unicast,vrf=default}": 29285.0, "frr_bgp_peer_message_received_total{afi=ipv6,local_as=64512,peer=fd00::5,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_message_received_total{afi=ipv6,local_as=64612,peer=fd00::101,peer_as=64613,safi=unicast,vrf=red}": 29285.0, "frr_bgp_peer_message_received_total{afi=ipv6,local_as=64612,peer=fd00::105,peer_as=64614,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_message_sent_total{afi=ipv4,local_as=64512,peer=192.168.0.2,peer_as=64513,safi=unicast,vrf=default}": 100.0, "frr_bgp_peer_message_sent_total{afi=ipv4,local_as=64512,peer=192.168.0.3,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_message_sent_total{afi=ipv4,local_as=64612,peer=192.168.1.2,peer_as=64613,safi=unicast,vrf=red}": 100.0, "frr_bgp_peer_message_sent_total{afi=ipv4,local_as=64612,peer=192.168.1.3,peer_as=64614,safi=unicast,vrf=red}": 200.0, "frr_bgp_peer_message_sent_total{afi=ipv6,local_as=64512,peer=fd00::1,peer_as=64513,safi=unicast,vrf=default}": 29285.0, "frr_bgp_peer_message_sent_total{afi=ipv6,local_as=64512,peer=fd00::5,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_message_sent_total{afi=ipv6,local_as=64612,peer=fd00::101,peer_as=64613,safi=unicast,vrf=red}": 29285.0, "frr_bgp_peer_message_sent_total{afi=ipv6,local_as=64612,peer=fd00::105,peer_as=64614,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_prefixes_received_count_total{afi=ipv4,local_as=64512,peer=192.168.0.2,peer_as=64513,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_prefixes_received_count_total{afi=ipv4,local_as=64512,peer=192.168.0.3,peer_as=64514,safi=unicast,vrf=default}": 2.0, "frr_bgp_peer_prefixes_received_count_total{afi=ipv4,local_as=64612,peer=192.168.1.2,peer_as=64613,safi=unicast,vrf=red}": 2.0, "frr_bgp_peer_prefixes_received_count_total{afi=ipv4,local_as=64612,peer=192.168.1.3,peer_as=64614,safi=unicast,vrf=red}": 0.0, 
"frr_bgp_peer_prefixes_received_count_total{afi=ipv6,local_as=64512,peer=fd00::1,peer_as=64513,safi=unicast,vrf=default}": 1.0, "frr_bgp_peer_prefixes_received_count_total{afi=ipv6,local_as=64512,peer=fd00::5,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_prefixes_received_count_total{afi=ipv6,local_as=64612,peer=fd00::101,peer_as=64613,safi=unicast,vrf=red}": 1.0, "frr_bgp_peer_prefixes_received_count_total{afi=ipv6,local_as=64612,peer=fd00::105,peer_as=64614,safi=unicast,vrf=red}": 0.0, "frr_bgp_peers_count_total{afi=ipv4,local_as=64512,safi=unicast,vrf=default}": 2.0, "frr_bgp_peers_count_total{afi=ipv4,local_as=64612,safi=unicast,vrf=red}": 2.0, "frr_bgp_peers_count_total{afi=ipv6,local_as=64512,safi=unicast,vrf=default}": 2.0, "frr_bgp_peers_count_total{afi=ipv6,local_as=64612,safi=unicast,vrf=red}": 2.0, "frr_bgp_peers_memory_bytes{afi=ipv4,local_as=64512,safi=unicast,vrf=default}": 39936.0, "frr_bgp_peers_memory_bytes{afi=ipv4,local_as=64612,safi=unicast,vrf=red}": 39936.0, "frr_bgp_peers_memory_bytes{afi=ipv6,local_as=64512,safi=unicast,vrf=default}": 59904.0, "frr_bgp_peers_memory_bytes{afi=ipv6,local_as=64612,safi=unicast,vrf=red}": 59904.0, "frr_bgp_peer_state{afi=ipv4,local_as=64512,peer=192.168.0.2,peer_as=64513,safi=unicast,vrf=default}": 1.0, "frr_bgp_peer_state{afi=ipv4,local_as=64512,peer=192.168.0.3,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_state{afi=ipv4,local_as=64612,peer=192.168.1.2,peer_as=64613,safi=unicast,vrf=red}": 1.0, "frr_bgp_peer_state{afi=ipv4,local_as=64612,peer=192.168.1.3,peer_as=64614,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_state{afi=ipv6,local_as=64512,peer=fd00::1,peer_as=64513,safi=unicast,vrf=default}": 1.0, "frr_bgp_peer_state{afi=ipv6,local_as=64512,peer=fd00::5,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_state{afi=ipv6,local_as=64612,peer=fd00::101,peer_as=64613,safi=unicast,vrf=red}": 1.0, 
"frr_bgp_peer_state{afi=ipv6,local_as=64612,peer=fd00::105,peer_as=64614,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_uptime_seconds{afi=ipv4,local_as=64512,peer=192.168.0.2,peer_as=64513,safi=unicast,vrf=default}": 10.0, "frr_bgp_peer_uptime_seconds{afi=ipv4,local_as=64512,peer=192.168.0.3,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_uptime_seconds{afi=ipv4,local_as=64612,peer=192.168.1.2,peer_as=64613,safi=unicast,vrf=red}": 20.0, "frr_bgp_peer_uptime_seconds{afi=ipv4,local_as=64612,peer=192.168.1.3,peer_as=64614,safi=unicast,vrf=red}": 0.0, "frr_bgp_peer_uptime_seconds{afi=ipv6,local_as=64512,peer=fd00::1,peer_as=64513,safi=unicast,vrf=default}": 8465643000.0, "frr_bgp_peer_uptime_seconds{afi=ipv6,local_as=64512,peer=fd00::5,peer_as=64514,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_uptime_seconds{afi=ipv6,local_as=64612,peer=fd00::101,peer_as=64613,safi=unicast,vrf=red}": 87873.0, "frr_bgp_peer_uptime_seconds{afi=ipv6,local_as=64612,peer=fd00::105,peer_as=64614,safi=unicast,vrf=red}": 0.0, "frr_bgp_rib_count_total{afi=ipv4,local_as=64512,safi=unicast,vrf=default}": 1.0, "frr_bgp_rib_count_total{afi=ipv4,local_as=64612,safi=unicast,vrf=red}": 0.0, "frr_bgp_rib_count_total{afi=ipv6,local_as=64512,safi=unicast,vrf=default}": 3.0, "frr_bgp_rib_count_total{afi=ipv6,local_as=64612,safi=unicast,vrf=red}": 3.0, "frr_bgp_rib_memory_bytes{afi=ipv4,local_as=64512,safi=unicast,vrf=default}": 64.0, "frr_bgp_rib_memory_bytes{afi=ipv4,local_as=64612,safi=unicast,vrf=red}": 0.0, "frr_bgp_rib_memory_bytes{afi=ipv6,local_as=64512,safi=unicast,vrf=default}": 456.0, "frr_bgp_rib_memory_bytes{afi=ipv6,local_as=64612,safi=unicast,vrf=red}": 456.0, "frr_bgp_peer_state{afi=ipv4,local_as=64512,peer=192.168.0.4,peer_as=64515,safi=unicast,vrf=default}": 2.0, "frr_bgp_peer_message_sent_total{afi=ipv4,local_as=64512,peer=192.168.0.4,peer_as=64515,safi=unicast,vrf=default}": 0.0, 
"frr_bgp_peer_prefixes_received_count_total{afi=ipv4,local_as=64512,peer=192.168.0.4,peer_as=64515,safi=unicast,vrf=default}": 2.0, "frr_bgp_peer_uptime_seconds{afi=ipv4,local_as=64512,peer=192.168.0.4,peer_as=64515,safi=unicast,vrf=default}": 0.0, "frr_bgp_peer_message_received_total{afi=ipv4,local_as=64512,peer=192.168.0.4,peer_as=64515,safi=unicast,vrf=default}": 0.0, } expectedBgpL2vpnMetrics = map[string]float64{ "frr_bgp_l2vpn_evpn_arp_nd_count_total{tenantVrf=default,type=L2,vni=172192,vxlanIf=ONTEP1_172192}": 23.000000, "frr_bgp_l2vpn_evpn_arp_nd_count_total{tenantVrf=default,type=L2,vni=174374,vxlanIf=ONTEP1_174374}": 0.000000, "frr_bgp_l2vpn_evpn_mac_count_total{tenantVrf=default,type=L2,vni=172192,vxlanIf=ONTEP1_172192}": 0.000000, "frr_bgp_l2vpn_evpn_mac_count_total{tenantVrf=default,type=L2,vni=174374,vxlanIf=ONTEP1_174374}": 42.000000, "frr_bgp_l2vpn_evpn_remote_vtep_count_total{tenantVrf=default,type=L2,vni=172192,vxlanIf=ONTEP1_172192}": -1.000000, "frr_bgp_l2vpn_evpn_remote_vtep_count_total{tenantVrf=default,type=L2,vni=174374,vxlanIf=ONTEP1_174374}": 1.000000, } ) func prepareMetrics(ch chan prometheus.Metric, t *testing.T) map[string]float64 { gotMetrics := make(map[string]float64) for { msg, more := <-ch if !more { break } metric := &dto.Metric{} if err := msg.Write(metric); err != nil { t.Errorf("error writing metric: %s", err) } var labels []string for _, label := range metric.GetLabel() { labels = append(labels, fmt.Sprintf("%s=%s", label.GetName(), label.GetValue())) } var value float64 if metric.GetCounter() != nil { value = metric.GetCounter().GetValue() } else if metric.GetGauge() != nil { value = metric.GetGauge().GetValue() } re, err := regexp.Compile(`.*fqName: "(.*)", help:.*`) if err != nil { t.Errorf("could not compile regex: %s", err) } metricName := re.FindStringSubmatch(msg.Desc().String())[1] gotMetrics[fmt.Sprintf("%s{%s}", metricName, strings.Join(labels, ","))] = value } return gotMetrics } func compareMetrics(t *testing.T, 
	gotMetrics map[string]float64, expectedMetrics map[string]float64) {
	// Check every collected metric against the expected set (value mismatch or unexpected metric).
	for metricName, metricVal := range gotMetrics {
		if expectedMetricVal, ok := expectedMetrics[metricName]; ok {
			if expectedMetricVal != metricVal {
				t.Errorf("metric %s expected value %v got %v", metricName, expectedMetricVal, metricVal)
			}
		} else {
			t.Errorf("unexpected metric: %s : %v", metricName, metricVal)
		}
	}
	// Check every expected metric was actually collected.
	for expectedMetricName, expectedMetricVal := range expectedMetrics {
		if _, ok := gotMetrics[expectedMetricName]; !ok {
			t.Errorf("missing metric: %s value %v", expectedMetricName, expectedMetricVal)
		}
	}
}

// TestProcessBGPSummary feeds recorded ipv4/ipv6 summary fixtures through
// processBGPSummary and compares the emitted metrics against expectedBGPMetrics.
func TestProcessBGPSummary(t *testing.T) {
	ch := make(chan prometheus.Metric, 1024)
	if err := processBGPSummary(ch, readTestFixture(t, "show_bgp_vrf_all_ipv4_summary.json"), "ipv4", "", nil, getBGPDesc()); err != nil {
		t.Errorf("error calling processBGPSummary ipv4unicast: %s", err)
	}
	if err := processBGPSummary(ch, readTestFixture(t, "show_bgp_vrf_all_ipv6_summary.json"), "ipv6", "", nil, getBGPDesc()); err != nil {
		t.Errorf("error calling processBGPSummary ipv6unicast: %s", err)
	}
	close(ch)
	gotMetrics := prepareMetrics(ch, t)
	compareMetrics(t, gotMetrics, expectedBGPMetrics)
}

// TestProcessBgpL2vpnEvpnSummary feeds a recorded "show evpn vni" fixture
// through processBgpL2vpnEvpnSummary and compares the emitted metrics.
func TestProcessBgpL2vpnEvpnSummary(t *testing.T) {
	ch := make(chan prometheus.Metric, 1024)
	if err := processBgpL2vpnEvpnSummary(ch, readTestFixture(t, "show_evpn_vni.json"), getBGPL2VPNDesc()); err != nil {
		t.Errorf("error calling processBgpL2vpnEvpnSummary: %s", err)
	}
	close(ch)
	gotMetrics := prepareMetrics(ch, t)
	compareMetrics(t, gotMetrics, expectedBgpL2vpnMetrics)
}

// TestProcessBGPPeerDesc verifies the custom bgpVRF JSON decoding against a
// recorded "show bgp vrf all neighbors" fixture.
func TestProcessBGPPeerDesc(t *testing.T) {
	expectedOutput := map[string]bgpVRF{
		"default": {
			ID:   0,
			Name: "default",
			BGPNeighbors: map[string]bgpNeighbor{
				"10.1.1.10": {Desc: "{\"desc\":\"rt1\"}"},
				"swp2":      {Desc: "{\"desc\":\"fw1\"}"},
			},
		},
		"vrf1": {
			ID:   -1,
			Name: "vrf1",
			BGPNeighbors: map[string]bgpNeighbor{
				"10.2.0.1": {Desc: "{\"desc\":\"remote\"}"},
			},
		},
	}
	peerDesc, err := processBGPPeerDesc(readTestFixture(t,
"show_bgp_vrf_all_neighbors.json")) if err != nil { t.Errorf("error calling processBGPPeerDesc: %s", err) } if !reflect.DeepEqual(peerDesc, expectedOutput) { t.Errorf("error comparing bgp neighbor description output: %v does not match expected %v", peerDesc, expectedOutput) } } prometheus-frr-exporter-1.4.0/collector/collector.go000066400000000000000000000124711474577211100226650ustar00rootroot00000000000000package collector import ( "fmt" "log/slog" "strconv" "sync" "time" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" "github.com/tynany/frr_exporter/internal/frrsockets" ) const ( metricNamespace = "frr" enabledByDefault = true disabledByDefault = false ) var ( socketConn *frrsockets.Connection frrTotalScrapeCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: metricNamespace, Name: "scrapes_total", Help: "Total number of times FRR has been scraped.", }) frrLabels = []string{"collector"} frrDesc = map[string]*prometheus.Desc{ "frrScrapeDuration": promDesc("scrape_duration_seconds", "Time it took for a collector's scrape to complete.", frrLabels), "frrCollectorUp": promDesc("collector_up", "Whether the collector's last scrape was successful (1 = successful, 0 = unsuccessful).", frrLabels), } socketDirPath = kingpin.Flag("frr.socket.dir-path", "Path of of the localstatedir containing each daemon's Unix socket.").Default("/var/run/frr").String() socketTimeout = kingpin.Flag("frr.socket.timeout", "Timeout when connecting to the FRR daemon Unix sockets").Default("20s").Duration() factories = make(map[string]func(logger *slog.Logger) (Collector, error)) initiatedCollectorsMtx = sync.Mutex{} initiatedCollectors = make(map[string]Collector) collectorState = make(map[string]*bool) ) func registerCollector(name string, enabledByDefaultStatus bool, factory func(logger *slog.Logger) (Collector, error)) { defaultState := "disabled" if enabledByDefaultStatus { defaultState = "enabled" } help := fmt.Sprintf("Enable the %s 
collector (default: %s).", name, defaultState) if enabledByDefaultStatus { help = fmt.Sprintf("Enable the %s collector (default: %s, to disable use --no-collector.%s).", name, defaultState, name) } factories[name] = factory collectorState[name] = kingpin.Flag(fmt.Sprintf("collector.%s", name), help).Default(strconv.FormatBool(enabledByDefaultStatus)).Bool() } // Collector is the interface a collector has to implement. type Collector interface { // Update metrics and sends to the Prometheus.Metric channel. Update(ch chan<- prometheus.Metric) error } // Exporter collects all collector metrics, implemented as per the prometheus.Collector interface. type Exporter struct { Collectors map[string]Collector logger *slog.Logger } // NewExporter returns a new Exporter. func NewExporter(logger *slog.Logger) (*Exporter, error) { collectors := make(map[string]Collector) initiatedCollectorsMtx.Lock() defer initiatedCollectorsMtx.Unlock() socketConn = frrsockets.NewConnection(*socketDirPath, *socketTimeout) for name, enabled := range collectorState { if !*enabled { continue } if collector, exists := initiatedCollectors[name]; exists { collectors[name] = collector } else { collector, err := factories[name](logger.With("collector", name)) if err != nil { return nil, err } collectors[name] = collector initiatedCollectors[name] = collector } } return &Exporter{ Collectors: collectors, logger: logger, }, nil } // Collect implemented as per the prometheus.Collector interface. 
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	frrTotalScrapeCount.Inc()
	ch <- frrTotalScrapeCount

	// Run every enabled collector concurrently and wait for all to finish.
	wg := &sync.WaitGroup{}
	wg.Add(len(e.Collectors))
	for name, collector := range e.Collectors {
		go runCollector(ch, name, collector, wg, e.logger)
	}
	wg.Wait()
}

// runCollector executes one collector's Update, timing it and emitting the
// per-collector scrape duration and up (success) gauges.
func runCollector(ch chan<- prometheus.Metric, name string, collector Collector, wg *sync.WaitGroup, logger *slog.Logger) {
	defer wg.Done()
	startTime := time.Now()
	err := collector.Update(ch)
	scrapeDurationSeconds := time.Since(startTime).Seconds()

	ch <- prometheus.MustNewConstMetric(frrDesc["frrScrapeDuration"], prometheus.GaugeValue, float64(scrapeDurationSeconds), name)

	// An Update error marks the collector as down (0); success is 1.
	success := 0.0
	if err != nil {
		logger.Error("collector scrape failed", "name", name, "duration_seconds", scrapeDurationSeconds, "err", err)
	} else {
		logger.Debug("collector succeeded", "name", name, "duration_seconds", scrapeDurationSeconds)
		success = 1
	}
	ch <- prometheus.MustNewConstMetric(frrDesc["frrCollectorUp"], prometheus.GaugeValue, success, name)
}

// Describe implemented as per the prometheus.Collector interface.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	for _, desc := range frrDesc {
		ch <- desc
	}
}

// promDesc builds a description in the exporter's top-level namespace (frr_<name>).
func promDesc(metricName string, metricDescription string, labels []string) *prometheus.Desc {
	return prometheus.NewDesc(metricNamespace+"_"+metricName, metricDescription, labels, nil)
}

// colPromDesc builds a description in a collector subsystem (frr_<subsystem>_<name>).
func colPromDesc(subsystem string, metricName string, metricDescription string, labels []string) *prometheus.Desc {
	return prometheus.NewDesc(prometheus.BuildFQName(metricNamespace, subsystem, metricName), metricDescription, labels, nil)
}

// newGauge sends a gauge sample for desc with the given value and label values.
func newGauge(ch chan<- prometheus.Metric, descName *prometheus.Desc, metric float64, labels ...string) {
	ch <- prometheus.MustNewConstMetric(descName, prometheus.GaugeValue, metric, labels...)
}

// newCounter sends a counter sample for desc with the given value and label values.
func newCounter(ch chan<- prometheus.Metric, descName *prometheus.Desc, metric float64, labels ...string) {
	ch <- prometheus.MustNewConstMetric(descName, prometheus.CounterValue, metric, labels...)
} func cmdOutputProcessError(cmd, output string, err error) error { return fmt.Errorf("cannot process output of %s: %w: command output: %s", cmd, err, output) } prometheus-frr-exporter-1.4.0/collector/collector_test.go000066400000000000000000000004201474577211100237130ustar00rootroot00000000000000package collector import ( "os" "path/filepath" "testing" ) func readTestFixture(t *testing.T, filename string) []byte { data, err := os.ReadFile(filepath.Join("testdata", filename)) if err != nil { t.Fatalf("cannot read test fixture: %v", err) } return data } prometheus-frr-exporter-1.4.0/collector/command.go000066400000000000000000000051161474577211100223130ustar00rootroot00000000000000package collector import ( "bytes" "context" "fmt" "os/exec" "strings" "github.com/alecthomas/kingpin/v2" ) var ( vtyshEnable = kingpin.Flag("frr.vtysh", "Use vtysh to query FRR instead of each daemon's Unix socket (default: disabled, recommended: disabled).").Default("false").Bool() vtyshPath = kingpin.Flag("frr.vtysh.path", "Path of vtysh.").Default("/usr/bin/vtysh").String() vtyshTimeout = kingpin.Flag("frr.vtysh.timeout", "The timeout when running vtysh commands (default: 20s).").Default("20s").Duration() vtyshSudo = kingpin.Flag("frr.vtysh.sudo", "Enable sudo when executing vtysh commands.").Bool() frrVTYSHOptions = kingpin.Flag("frr.vtysh.options", "Additional options passed to vtysh.").Default("").String() ) func executeBFDCommand(cmd string) ([]byte, error) { if *vtyshEnable { return execVtyshCommand(cmd) } return socketConn.ExecBFDCmd(cmd) } func executeBGPCommand(cmd string) ([]byte, error) { if *vtyshEnable { return execVtyshCommand(cmd) } return socketConn.ExecBGPCmd(cmd) } func executeOSPFMultiInstanceCommand(cmd string, instanceID int) ([]byte, error) { return socketConn.ExecOSPFMultiInstanceCmd(cmd, instanceID) } func executeOSPFCommand(cmd string) ([]byte, error) { if *vtyshEnable { return execVtyshCommand(cmd) } return socketConn.ExecOSPFCmd(cmd) } func 
executePIMCommand(cmd string) ([]byte, error) { if *vtyshEnable { return execVtyshCommand(cmd) } return socketConn.ExecPIMCmd(cmd) } func executeZebraCommand(cmd string) ([]byte, error) { if *vtyshEnable { return execVtyshCommand(cmd) } return socketConn.ExecZebraCmd(cmd) } func executeVRRPCommand(cmd string) ([]byte, error) { if *vtyshEnable { return execVtyshCommand(cmd) } return socketConn.ExecVRRPCmd(cmd) } func execVtyshCommand(vtyshCmd string) ([]byte, error) { ctx, cancel := context.WithTimeout(context.Background(), *vtyshTimeout) defer cancel() var a []string var executable string if *vtyshSudo { a = []string{*vtyshPath} executable = "/usr/bin/sudo" } else { a = []string{} executable = *vtyshPath } if *frrVTYSHOptions != "" { frrOptions := strings.Split(*frrVTYSHOptions, " ") a = append(a, frrOptions...) } a = append(a, "-c", vtyshCmd) cmd := exec.CommandContext(ctx, executable, a...) var stdout, stderr bytes.Buffer cmd.Stdout = &stdout cmd.Stderr = &stderr err := cmd.Run() if err != nil { return stdout.Bytes(), fmt.Errorf("command %s failed: %w: stderr: %s: stdout: %s", cmd, err, strings.Replace(stderr.String(), "\n", " ", -1), strings.Replace(stdout.String(), "\n", " ", -1)) } return stdout.Bytes(), nil } prometheus-frr-exporter-1.4.0/collector/ospf.go000066400000000000000000000133141474577211100216430ustar00rootroot00000000000000package collector import ( "encoding/json" "fmt" "log/slog" "strconv" "strings" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" ) var ( ospfSubsystem = "ospf" frrOSPFInstances = kingpin.Flag("collector.ospf.instances", "Comma-separated list of instance IDs if using multiple OSPF instances").Default("").String() ) func init() { registerCollector(ospfSubsystem, enabledByDefault, NewOSPFCollector) } type ospfCollector struct { logger *slog.Logger descriptions map[string]*prometheus.Desc instanceIDs []int } // NewOSPFCollector collects OSPF metrics, implemented as per the Collector interface. 
func NewOSPFCollector(logger *slog.Logger) (Collector, error) { var instanceIDs []int if len(*frrOSPFInstances) > 0 { // FRR Exporter does not support multi-instance when using `vtysh` to interface with FRR // via the `--frr.vtysh` flag for the following reasons: // * Invalid JSON is returned when OSPF commands are executed by `vtysh`. For example, // `show ip ospf vrf all interface json` returns the concatenated JSON from each OSPF instance. // * Vtysh does not support `vrf` and `instance` in the same commend. For example, // `show ip ospf 1 vrf all interface json` is an invalid command. if *vtyshEnable { return nil, fmt.Errorf("cannot use --frr.vtysh with --collector.ospf.instances") } instances := strings.Split(*frrOSPFInstances, ",") for _, id := range instances { i, err := strconv.Atoi(id) if err != nil { return nil, fmt.Errorf("unable to parse instance ID %s: %w", id, err) } instanceIDs = append(instanceIDs, i) } } return &ospfCollector{logger: logger, instanceIDs: instanceIDs, descriptions: getOSPFDesc()}, nil } func getOSPFDesc() map[string]*prometheus.Desc { labels := []string{"vrf", "iface", "area"} if len(*frrOSPFInstances) > 0 { labels = append(labels, "instance") } return map[string]*prometheus.Desc{ "ospfIfaceNeigh": colPromDesc(ospfSubsystem, "neighbors", "Number of neighbors detected.", labels), "ospfIfaceNeighAdj": colPromDesc(ospfSubsystem, "neighbor_adjacencies", "Number of neighbor adjacencies formed.", labels), } } // Update implemented as per the Collector interface. 
func (c *ospfCollector) Update(ch chan<- prometheus.Metric) error { cmd := "show ip ospf vrf all interface json" if len(c.instanceIDs) > 0 { for _, id := range c.instanceIDs { jsonOSPFInterface, err := executeOSPFMultiInstanceCommand(cmd, id) if err != nil { return err } if err = processOSPFInterface(ch, jsonOSPFInterface, c.descriptions, id); err != nil { return cmdOutputProcessError(cmd, string(jsonOSPFInterface), err) } } return nil } jsonOSPFInterface, err := executeOSPFCommand(cmd) if err != nil { return err } if err = processOSPFInterface(ch, jsonOSPFInterface, c.descriptions, 0); err != nil { return cmdOutputProcessError(cmd, string(jsonOSPFInterface), err) } return nil } func processOSPFInterface(ch chan<- prometheus.Metric, jsonOSPFInterface []byte, ospfDesc map[string]*prometheus.Desc, instanceID int) error { // Unfortunately, the 'show ip ospf vrf all interface json' JSON output is poorly structured. Instead // of all interfaces being in a list, each interface is added as a key on the same level of vrfName and // vrfId. As such, we have to loop through each key and apply logic to determine whether the key is an // interface. var jsonMap map[string]json.RawMessage if err := json.Unmarshal(jsonOSPFInterface, &jsonMap); err != nil { return fmt.Errorf("cannot unmarshal ospf interface json: %s", err) } for vrfName, vrfData := range jsonMap { var _tempvrfInstance map[string]json.RawMessage switch vrfName { case "ospfInstance": // Do nothing default: if err := json.Unmarshal(vrfData, &_tempvrfInstance); err != nil { return fmt.Errorf("cannot unmarshal VRF instance json: %s", err) } } for ospfInstanceKey, ospfInstanceVal := range _tempvrfInstance { switch ospfInstanceKey { case "vrfName", "vrfId": // Do nothing as we do not need the value of these keys. 
case "interfaces": var _tempInterfaceInstance map[string]json.RawMessage if err := json.Unmarshal(ospfInstanceVal, &_tempInterfaceInstance); err != nil { return fmt.Errorf("cannot unmarshal VRF instance json: %s", err) } for interfaceKey, interfaceValue := range _tempInterfaceInstance { var newIface ospfIface if err := json.Unmarshal(interfaceValue, &newIface); err != nil { return fmt.Errorf("cannot unmarshal interface json: %s", err) } if !newIface.TimerPassiveIface { // The labels are "vrf", "newIface", "area" labels := []string{strings.ToLower(vrfName), interfaceKey, newIface.Area} ospfMetrics(ch, newIface, labels, ospfDesc, instanceID) } } default: // All other keys are interfaces. var iface ospfIface if err := json.Unmarshal(ospfInstanceVal, &iface); err != nil { return fmt.Errorf("cannot unmarshal interface json: %s", err) } if !iface.TimerPassiveIface { // The labels are "vrf", "iface", "area" labels := []string{strings.ToLower(vrfName), ospfInstanceKey, iface.Area} ospfMetrics(ch, iface, labels, ospfDesc, instanceID) } } } } return nil } func ospfMetrics(ch chan<- prometheus.Metric, iface ospfIface, labels []string, ospfDesc map[string]*prometheus.Desc, instanceID int) { if instanceID != 0 { labels = append(labels, strconv.Itoa(instanceID)) } newGauge(ch, ospfDesc["ospfIfaceNeigh"], float64(iface.NbrCount), labels...) newGauge(ch, ospfDesc["ospfIfaceNeighAdj"], float64(iface.NbrAdjacentCount), labels...) 
} type ospfIface struct { NbrCount uint32 NbrAdjacentCount uint32 Area string TimerPassiveIface bool } prometheus-frr-exporter-1.4.0/collector/ospf_test.go000066400000000000000000000104151474577211100227010ustar00rootroot00000000000000package collector import ( "fmt" "regexp" "strings" "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) var expectedOSPFMetrics = map[string]float64{ "frr_ospf_neighbors{area=0.0.0.0,iface=swp1,vrf=default}": 0, "frr_ospf_neighbors{area=0.0.0.0,iface=swp2,vrf=default}": 1, "frr_ospf_neighbors{area=0.0.0.0,iface=swp3,vrf=red}": 0, "frr_ospf_neighbors{area=0.0.0.0,iface=swp4,vrf=red}": 1, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp1,vrf=default}": 0, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp2,vrf=default}": 1, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp3,vrf=red}": 0, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp4,vrf=red}": 1, "frr_ospf_neighbors{area=0.0.0.0,iface=swp1,instance=1,vrf=default}": 0, "frr_ospf_neighbors{area=0.0.0.0,iface=swp2,instance=1,vrf=default}": 1, "frr_ospf_neighbors{area=0.0.0.0,iface=swp3,instance=1,vrf=red}": 0, "frr_ospf_neighbors{area=0.0.0.0,iface=swp4,instance=1,vrf=red}": 1, "frr_ospf_neighbors{area=0.0.0.0,iface=swp1,instance=2,vrf=default}": 0, "frr_ospf_neighbors{area=0.0.0.0,iface=swp2,instance=2,vrf=default}": 1, "frr_ospf_neighbors{area=0.0.0.0,iface=swp3,instance=2,vrf=red}": 0, "frr_ospf_neighbors{area=0.0.0.0,iface=swp4,instance=2,vrf=red}": 1, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp1,instance=1,vrf=default}": 0, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp2,instance=1,vrf=default}": 1, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp3,instance=1,vrf=red}": 0, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp4,instance=1,vrf=red}": 1, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp1,instance=2,vrf=default}": 0, 
"frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp2,instance=2,vrf=default}": 1, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp3,instance=2,vrf=red}": 0, "frr_ospf_neighbor_adjacencies{area=0.0.0.0,iface=swp4,instance=2,vrf=red}": 1, } func TestProcessOSPFInterface(t *testing.T) { ospfInterfaceSum := readTestFixture(t, "show_ip_ospf_vrf_all_interface.json") ch := make(chan prometheus.Metric, len(expectedOSPFMetrics)) if err := processOSPFInterface(ch, ospfInterfaceSum, getOSPFDesc(), 0); err != nil { t.Errorf("error calling processOSPFInterface ipv4unicast: %s", err) } // test for OSPF multiple instances *frrOSPFInstances = "1,2" for i := 1; i <= 2; i++ { if err := processOSPFInterface(ch, ospfInterfaceSum, getOSPFDesc(), i); err != nil { t.Errorf("error calling processOSPFInterface ipv4unicast: %s", err) } } close(ch) // Create a map of following format: // key: metric_name{labelname:labelvalue,...} // value: metric value gotMetrics := make(map[string]float64) for { msg, more := <-ch if !more { break } metric := &dto.Metric{} if err := msg.Write(metric); err != nil { t.Errorf("error writing metric: %s", err) } var labels []string for _, label := range metric.GetLabel() { labels = append(labels, fmt.Sprintf("%s=%s", label.GetName(), label.GetValue())) } var value float64 if metric.GetCounter() != nil { value = metric.GetCounter().GetValue() } else if metric.GetGauge() != nil { value = metric.GetGauge().GetValue() } re, err := regexp.Compile(`.*fqName: "(.*)", help:.*`) if err != nil { t.Errorf("could not compile regex: %s", err) } metricName := re.FindStringSubmatch(msg.Desc().String())[1] gotMetrics[fmt.Sprintf("%s{%s}", metricName, strings.Join(labels, ","))] = value } for metricName, metricVal := range gotMetrics { if expectedMetricVal, ok := expectedOSPFMetrics[metricName]; ok { if expectedMetricVal != metricVal { t.Errorf("metric %s expected value %v got %v", metricName, expectedMetricVal, metricVal) } } else { t.Errorf("unexpected metric: %s : %v", 
metricName, metricVal) } } for expectedMetricName, expectedMetricVal := range expectedOSPFMetrics { if _, ok := gotMetrics[expectedMetricName]; !ok { t.Errorf("missing metric: %s value %v", expectedMetricName, expectedMetricVal) } } } prometheus-frr-exporter-1.4.0/collector/pim.go000066400000000000000000000057531474577211100214710ustar00rootroot00000000000000package collector import ( "encoding/json" "fmt" "log/slog" "strings" "github.com/prometheus/client_golang/prometheus" ) const pimSubsystem = "pim" func init() { registerCollector(pimSubsystem, disabledByDefault, NewPIMCollector) } type pimCollector struct { logger *slog.Logger descriptions map[string]*prometheus.Desc } // NewPIMCollector collects PIM metrics, implemented as per the Collector interface. func NewPIMCollector(logger *slog.Logger) (Collector, error) { return &pimCollector{logger: logger, descriptions: getPIMDesc()}, nil } func getPIMDesc() map[string]*prometheus.Desc { labels := []string{"vrf"} neighborLabels := append(labels, "iface", "neighbor") return map[string]*prometheus.Desc{ "neighborCount": colPromDesc(pimSubsystem, "neighbor_count_total", "Number of neighbors detected", labels), "upTime": colPromDesc(pimSubsystem, "neighbor_uptime_seconds", "How long has the peer been up.", neighborLabels), } } // Collect implemented as per the Collector interface func (c *pimCollector) Update(ch chan<- prometheus.Metric) error { cmd := "show ip pim vrf all neighbor json" jsonPIMNeighbors, err := executePIMCommand(cmd) if err != nil { return err } if err := processPIMNeighbors(ch, jsonPIMNeighbors, c.logger, c.descriptions); err != nil { return cmdOutputProcessError(cmd, string(jsonPIMNeighbors), err) } return nil } func processPIMNeighbors(ch chan<- prometheus.Metric, jsonPIMNeighbors []byte, logger *slog.Logger, pimDesc map[string]*prometheus.Desc) error { var jsonMap map[string]json.RawMessage if err := json.Unmarshal(jsonPIMNeighbors, &jsonMap); err != nil { return fmt.Errorf("cannot unmarshal pim 
neighbors json: %s", err) } for vrfName, vrfData := range jsonMap { neighborCount := 0.0 var _tempvrfInstance map[string]json.RawMessage if err := json.Unmarshal(vrfData, &_tempvrfInstance); err != nil { return fmt.Errorf("cannot unmarshal VRF instance json: %s", err) } for ifaceName, ifaceData := range _tempvrfInstance { var neighbors map[string]pimNeighbor if err := json.Unmarshal(ifaceData, &neighbors); err != nil { return fmt.Errorf("cannot unmarshal neighbor json: %s", err) } for neighborIP, neighborData := range neighbors { neighborCount++ if uptimeSec, err := parseHMS(neighborData.UpTime); err != nil { logger.Error("cannot parse neighbor uptime", "uptime", neighborData.UpTime, "err", err) } else { // The labels are "vrf", "iface", "neighbor" neighborLabels := []string{strings.ToLower(vrfName), strings.ToLower(ifaceName), neighborIP} newGauge(ch, pimDesc["upTime"], float64(uptimeSec), neighborLabels...) } } } newGauge(ch, pimDesc["neighborCount"], neighborCount, vrfName) } return nil } func parseHMS(st string) (uint64, error) { var h, m, s uint64 n, err := fmt.Sscanf(st, "%d:%d:%d", &h, &m, &s) if err != nil || n != 3 { return 0, err } return h*3600 + m*60 + s, nil } type pimNeighbor struct { Interface string Neighbor string UpTime string } prometheus-frr-exporter-1.4.0/collector/pim_test.go000066400000000000000000000053761474577211100225310ustar00rootroot00000000000000package collector import ( "fmt" "regexp" "strings" "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) var ( expectedPIMMetrics = map[string]float64{ "frr_pim_neighbor_uptime_seconds{iface=eth2,neighbor=192.0.2.227,vrf=red}": 13543, "frr_pim_neighbor_uptime_seconds{iface=eth1,neighbor=192.0.2.45,vrf=blue}": 13545, "frr_pim_neighbor_uptime_seconds{iface=eth0,neighbor=192.0.2.99,vrf=default}": 2745, "frr_pim_neighbor_count_total{vrf=red}": 1, "frr_pim_neighbor_count_total{vrf=blue}": 1, "frr_pim_neighbor_count_total{vrf=default}": 1, } 
parseHMStests = []struct { in string out uint64 }{ {"03:45:43", 13543}, {"00:04:01", 241}, {"10:00:43", 36043}, } ) func TestProcessPIMNeighbors(t *testing.T) { ch := make(chan prometheus.Metric, 1024) if err := processPIMNeighbors(ch, readTestFixture(t, "show_ip_pim_vrf_all_neighbor.json"), nil, getPIMDesc()); err != nil { t.Errorf("error calling processPIMNeighbors: %s", err) } close(ch) gotMetrics := make(map[string]float64) for { msg, more := <-ch if !more { break } metric := &dto.Metric{} if err := msg.Write(metric); err != nil { t.Errorf("error writing metric: %s", err) } var labels []string for _, label := range metric.GetLabel() { labels = append(labels, fmt.Sprintf("%s=%s", label.GetName(), label.GetValue())) } var value float64 if metric.GetCounter() != nil { value = metric.GetCounter().GetValue() } else if metric.GetGauge() != nil { value = metric.GetGauge().GetValue() } re, err := regexp.Compile(`.*fqName: "(.*)", help:.*`) if err != nil { t.Errorf("could not compile regex: %s", err) } metricName := re.FindStringSubmatch(msg.Desc().String())[1] gotMetrics[fmt.Sprintf("%s{%s}", metricName, strings.Join(labels, ","))] = value } for metricName, metricVal := range gotMetrics { if expectedMetricVal, ok := expectedPIMMetrics[metricName]; ok { if expectedMetricVal != metricVal { t.Errorf("metric %s expected value %v got %v", metricName, expectedMetricVal, metricVal) } } else { t.Errorf("unexpected metric: %s : %v", metricName, metricVal) } } for expectedMetricName, expectedMetricVal := range expectedPIMMetrics { if _, ok := gotMetrics[expectedMetricName]; !ok { t.Errorf("missing metric: %s value %v", expectedMetricName, expectedMetricVal) } } } func TestParseHMS(t *testing.T) { for _, tt := range parseHMStests { t.Run(tt.in, func(t *testing.T) { if uptimeSec, err := parseHMS(tt.in); err != nil || uptimeSec != tt.out { t.Errorf("ParseHMS => %s, got %d, wanted %d (err %s)", tt.in, uptimeSec, tt.out, err) } }) } } 
prometheus-frr-exporter-1.4.0/collector/testdata/000077500000000000000000000000001474577211100221545ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/collector/testdata/show_bfd_peers.json000066400000000000000000000025001474577211100260350ustar00rootroot00000000000000[ { "multihop": false, "peer": "10.10.141.61", "local": "10.10.141.81", "vrf": "default", "id": 869087474, "remote-id": 533345668, "status": "up", "uptime": 847716, "diagnostic": "ok", "remote-diagnostic": "ok", "receive-interval": 300, "transmit-interval": 300, "echo-interval": 0, "remote-receive-interval": 300, "remote-transmit-interval": 300, "remote-echo-interval": 300 }, { "multihop": false, "peer": "10.10.141.62", "local": "10.10.141.81", "vrf": "default", "id": 2809641312, "remote-id": 3617154307, "status": "up", "uptime": 847595, "diagnostic": "ok", "remote-diagnostic": "ok", "receive-interval": 300, "transmit-interval": 300, "echo-interval": 0, "remote-receive-interval": 300, "remote-transmit-interval": 300, "remote-echo-interval": 300 }, { "multihop": false, "peer": "10.10.141.63", "local": "10.10.141.81", "vrf": "default", "id": 2809641312, "remote-id": 3617154307, "status": "down", "uptime": 847888, "diagnostic": "ok", "remote-diagnostic": "ok", "receive-interval": 300, "transmit-interval": 300, "echo-interval": 0, "remote-receive-interval": 300, "remote-transmit-interval": 300, "remote-echo-interval": 300 } ] prometheus-frr-exporter-1.4.0/collector/testdata/show_bgp_vrf_all_ipv4_summary.json000066400000000000000000000050671474577211100311130ustar00rootroot00000000000000{ "default": { "ipv4Unicast": { "routerId": "192.168.0.1", "as": 64512, "vrfId": 0, "vrfName": "default", "tableVersion": 0, "ribCount": 1, "ribMemory": 64, "peerCount": 2, "peerMemory": 39936, "peers": { "192.168.0.2": { "remoteAs": 64513, "version": 4, "msgRcvd": 100, "msgSent": 100, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "10000", "peerUptimeMsec": 10000, "prefixReceivedCount": 0, "state": 
"Established", "idType": "ipv4" }, "192.168.0.3": { "remoteAs": 64514, "version": 4, "msgRcvd": 0, "msgSent": 0, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "never", "peerUptimeMsec": 0, "pfxRcd": 2, "state": "Active", "idType": "ipv4" }, "192.168.0.4": { "remoteAs": 64515, "version": 4, "msgRcvd": 0, "msgSent": 0, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "never", "peerUptimeMsec": 0, "pfxRcd": 2, "state": "Idle (Admin)", "idType": "ipv4" } }, "totalPeers": 2, "dynamicPeers": 0, "bestPath": { "multiPathRelax": "false" } } }, "red": { "ipv4Unicast": { "routerId": "192.168.1.1", "as": 64612, "vrfId": 39, "vrfName": "red", "tableVersion": 0, "ribCount": 0, "ribMemory": 0, "peerCount": 2, "peerMemory": 39936, "peers": { "192.168.1.2": { "remoteAs": 64613, "version": 4, "msgRcvd": 100, "msgSent": 100, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "10000", "peerUptimeMsec": 20000, "prefixReceivedCount": 2, "state": "Established", "idType": "ipv4" }, "192.168.1.3": { "remoteAs": 64614, "version": 4, "msgRcvd": 200, "msgSent": 200, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "never", "peerUptimeMsec": 0, "prefixReceivedCount": 0, "state": "Active", "idType": "ipv4" } }, "totalPeers": 2, "dynamicPeers": 0, "bestPath": { "multiPathRelax": "false" } } } }prometheus-frr-exporter-1.4.0/collector/testdata/show_bgp_vrf_all_ipv6_summary.json000066400000000000000000000043571474577211100311160ustar00rootroot00000000000000{ "default": { "ipv6Unicast": { "routerId": "192.168.0.1", "as": 64512, "vrfId": 0, "vrfName": "default", "tableVersion": 6, "ribCount": 3, "ribMemory": 456, "peerCount": 2, "peerMemory": 59904, "peers": { "fd00::1": { "remoteAs": 64513, "version": 4, "msgRcvd": 29285, "msgSent": 29285, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "1d00h24m", "peerUptimeMsec": 8465643000000, "prefixReceivedCount": 1, "state": "Established", "idType": "ipv6" }, "fd00::5": { "remoteAs": 64514, "version": 4, "msgRcvd": 0, "msgSent": 0, 
"tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "never", "peerUptimeMsec": 0, "prefixReceivedCount": 0, "state": "Active", "idType": "ipv6" } }, "totalPeers": 2, "dynamicPeers": 0, "bestPath": { "multiPathRelax": "false" } } }, "red": { "ipv6Unicast": { "routerId": "192.168.1.1", "as": 64612, "vrfId": 0, "vrfName": "default", "tableVersion": 6, "ribCount": 3, "ribMemory": 456, "peerCount": 2, "peerMemory": 59904, "peers": { "fd00::101": { "remoteAs": 64613, "version": 4, "msgRcvd": 29285, "msgSent": 29285, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "1d00h24m", "peerUptimeMsec": 87873000, "prefixReceivedCount": 1, "state": "Established", "idType": "ipv6" }, "fd00::105": { "remoteAs": 64614, "version": 4, "msgRcvd": 0, "msgSent": 0, "tableVersion": 0, "outq": 0, "inq": 0, "peerUptime": "never", "peerUptimeMsec": 0, "prefixReceivedCount": 0, "state": "Active", "idType": "ipv6" } }, "totalPeers": 2, "dynamicPeers": 0, "bestPath": { "multiPathRelax": "false" } } } }prometheus-frr-exporter-1.4.0/collector/testdata/show_bgp_vrf_all_neighbors.json000066400000000000000000000004511474577211100304240ustar00rootroot00000000000000{ "default":{ "vrfId":0, "vrfName":"default", "swp2":{ "nbrDesc":"{\"desc\":\"fw1\"}" }, "10.1.1.10":{ "nbrDesc":"{\"desc\":\"rt1\"}" } }, "vrf1":{ "vrfId":-1, "vrfName":"vrf1", "10.2.0.1":{ "nbrDesc":"{\"desc\":\"remote\"}" } } } prometheus-frr-exporter-1.4.0/collector/testdata/show_evpn_vni.json000066400000000000000000000006661474577211100257430ustar00rootroot00000000000000{ "174374":{ "vni":174374, "type":"L2", "vxlanIf":"ONTEP1_174374", "numMacs":42, "numArpNd":0, "numRemoteVteps":1, "tenantVrf":"default", "remoteVteps":[ "10.0.0.13" ] }, "172192":{ "vni":172192, "type":"L2", "vxlanIf":"ONTEP1_172192", "numMacs":0, "numArpNd":23, "numRemoteVteps":"n\/a", "tenantVrf":"default", "remoteVteps":[ "10.0.0.13" ] } } 
prometheus-frr-exporter-1.4.0/collector/testdata/show_ip_ospf_vrf_all_interface.json000066400000000000000000000074111474577211100312760ustar00rootroot00000000000000{ "default":{ "vrfName":"default", "vrfId":0, "swp1":{ "ifUp":true, "ifIndex":4, "mtuBytes":1500, "bandwidthMbit":4294967295, "ifFlags":"", "ospfEnabled":true, "ipAddress":"192.168.0.1", "ipAddressPrefixlen":24, "area":"0.0.0.0", "routerId":"192.168.255.1", "networkType":"BROADCAST", "cost":1, "transmitDelayMsecs":1000, "state":"DR", "priority":1, "mcastMemberOspfAllRouters":true, "mcastMemberOspfDesignatedRouters":true, "timerMsecs":100, "timerDeadMsecs":25, "timerWaitMsecs":25, "timerRetransmit":200, "timerHelloInMsecs":7769, "nbrCount":0, "nbrAdjacentCount":0 }, "swp2":{ "ifUp":true, "ifIndex":6, "mtuBytes":1500, "bandwidthMbit":4294967295, "ifFlags":"", "ospfEnabled":true, "ipAddress":"192.168.2.1", "ipAddressPrefixlen":24, "area":"0.0.0.0", "routerId":"192.168.255.1", "networkType":"BROADCAST", "cost":1, "transmitDelayMsecs":1000, "state":"DR", "priority":1, "bdrId":"1.1.1.1", "bdrAddress":"192.168.1.2", "networkLsaSequence":2147483717, "mcastMemberOspfAllRouters":true, "mcastMemberOspfDesignatedRouters":true, "timerMsecs":100, "timerDeadMsecs":25, "timerWaitMsecs":25, "timerRetransmit":200, "timerHelloInMsecs":7769, "nbrCount":1, "nbrAdjacentCount":1 } }, "red":{ "vrfName":"red", "vrfId":0, "swp3":{ "ifUp":true, "ifIndex":4, "mtuBytes":1500, "bandwidthMbit":4294967295, "ifFlags":"", "ospfEnabled":true, "ipAddress":"192.168.10.1", "ipAddressPrefixlen":24, "area":"0.0.0.0", "routerId":"192.168.255.1", "networkType":"BROADCAST", "cost":1, "transmitDelayMsecs":1000, "state":"DR", "priority":1, "mcastMemberOspfAllRouters":true, "mcastMemberOspfDesignatedRouters":true, "timerMsecs":100, "timerDeadMsecs":25, "timerWaitMsecs":25, "timerRetransmit":200, "timerHelloInMsecs":7769, "nbrCount":0, "nbrAdjacentCount":0 }, "swp4":{ "ifUp":true, "ifIndex":6, "mtuBytes":1500, "bandwidthMbit":4294967295, 
"ifFlags":"", "ospfEnabled":true, "ipAddress":"192.168.12.1", "ipAddressPrefixlen":24, "area":"0.0.0.0", "routerId":"192.168.255.1", "networkType":"BROADCAST", "cost":1, "transmitDelayMsecs":1000, "state":"DR", "priority":1, "bdrId":"1.1.1.1", "bdrAddress":"192.168.1.2", "networkLsaSequence":2147483717, "mcastMemberOspfAllRouters":true, "mcastMemberOspfDesignatedRouters":true, "timerMsecs":100, "timerDeadMsecs":25, "timerWaitMsecs":25, "timerRetransmit":200, "timerHelloInMsecs":7769, "nbrCount":1, "nbrAdjacentCount":1 }, "peerlink.4094":{ "ifUp":true, "ifIndex":62, "mtuBytes":9000, "bandwidthMbit":2000, "ifFlags":"", "ospfEnabled":true, "ipAddress":"169.254.1.1", "ipAddressPrefixlen":30, "ospfIfType":"Broadcast", "localIfUsed":"169.254.1.3", "area":"0.0.0.75 [Stub]", "routerId":"10.200.1.222", "networkType":"BROADCAST", "cost":50, "transmitDelaySecs":1, "state":"DR", "priority":1, "timerMsecs":10000, "timerDeadSecs":40, "timerWaitSecs":40, "timerRetransmitSecs":5, "timerPassiveIface":true, "nbrCount":0, "nbrAdjacentCount":0 } } } prometheus-frr-exporter-1.4.0/collector/testdata/show_ip_pim_vrf_all_neighbor.json000066400000000000000000000013501474577211100307450ustar00rootroot00000000000000{ "red": { "red":{}, "eth2":{ "192.0.2.227":{ "interface":"eth2", "neighbor":"192.0.2.227", "upTime":"03:45:43", "holdTime":"00:01:43", "holdTimeMax":105, "drPriority":1 } } }, "blue": { "blue":{}, "eth1":{ "192.0.2.45":{ "interface":"eth1", "neighbor":"192.0.2.45", "upTime":"03:45:45", "holdTime":"00:01:34", "holdTimeMax":105, "drPriority":1 } } }, "default": { "eth0":{ "192.0.2.99":{ "interface":"eth1", "neighbor":"192.0.2.99", "upTime":"00:45:45", "holdTime":"00:02:34", "holdTimeMax":105, "drPriority":1 } } } } prometheus-frr-exporter-1.4.0/collector/testdata/show_vrrp.json000066400000000000000000000040401474577211100250760ustar00rootroot00000000000000[ { "vrid":1, "version":3, "autoconfigured":false, "shutdown":false, "preemptMode":true, "acceptMode":true, 
"interface":"gw_extnet", "advertisementInterval":1000, "v4":{ "interface":"extnet_v4_1", "vmac":"00:00:5e:00:01:01", "primaryAddress":"", "status":"Backup", "effectivePriority":100, "masterAdverInterval":1000, "skewTime":600, "masterDownInterval":3600, "stats":{ "adverTx":6, "adverRx":1548196, "garpTx":4, "transitions":9 }, "addresses":[ "192.0.2.1" ] }, "v6":{ "interface":"extnet_v6_1", "vmac":"00:00:5e:00:02:01", "primaryAddress":"::", "status":"Backup", "effectivePriority":100, "masterAdverInterval":1000, "skewTime":600, "masterDownInterval":3600, "stats":{ "adverTx":2, "adverRx":1548195, "neighborAdverTx":5, "transitions":11 }, "addresses":[ "2001:DB8:2c02::1" ] } }, { "vrid":2, "version":3, "autoconfigured":false, "shutdown":false, "preemptMode":true, "acceptMode":true, "interface":"gw_extnet", "advertisementInterval":1000, "v4":{ "interface":"extnet_v4_2", "vmac":"00:00:5e:00:01:02", "primaryAddress":"192.0.2.3", "status":"Master", "effectivePriority":200, "masterAdverInterval":1000, "skewTime":210, "masterDownInterval":3210, "stats":{ "adverTx":1548210, "adverRx":4, "garpTx":1, "transitions":2 }, "addresses":[ "192.0.2.1" ] }, "v6":{ "interface":"", "vmac":"00:00:5e:00:02:02", "primaryAddress":"::", "status":"Initialize", "effectivePriority":200, "masterAdverInterval":0, "skewTime":0, "masterDownInterval":0, "stats":{ "adverTx":0, "adverRx":0, "neighborAdverTx":0, "transitions":0 }, "addresses":[] } } ] prometheus-frr-exporter-1.4.0/collector/vrrp.go000066400000000000000000000101431474577211100216620ustar00rootroot00000000000000package collector import ( "encoding/json" "log/slog" "strconv" "strings" "github.com/prometheus/client_golang/prometheus" ) const ( vrrpStatusInitialize = "Initialize" vrrpStatusBackup = "Backup" vrrpStatusMaster = "Master" ) var ( vrrpSubsystem = "vrrp" vrrpStates = []string{vrrpStatusInitialize, vrrpStatusMaster, vrrpStatusBackup} ) func init() { registerCollector(vrrpSubsystem, disabledByDefault, NewVRRPCollector) } type 
VrrpVrInfo struct { Vrid uint32 Interface string V6Info VrrpInstanceInfo `json:"v6"` V4Info VrrpInstanceInfo `json:"v4"` } type VrrpInstanceInfo struct { Subinterface string `json:"interface"` Status string Statistics VrrpInstanceStats `json:"stats"` } type VrrpInstanceStats struct { AdverTx *uint32 AdverRx *uint32 GarpTx *uint32 NeighborAdverTx *uint32 Transitions *uint32 } type vrrpCollector struct { logger *slog.Logger descriptions map[string]*prometheus.Desc } // NewVRRPCollector collects VRRP metrics, implemented as per the Collector interface. func NewVRRPCollector(logger *slog.Logger) (Collector, error) { return &vrrpCollector{logger: logger, descriptions: getVRRPDesc()}, nil } func getVRRPDesc() map[string]*prometheus.Desc { labels := []string{"proto", "vrid", "interface", "subinterface"} stateLabels := append(labels, "state") return map[string]*prometheus.Desc{ "vrrpState": colPromDesc(vrrpSubsystem, "state", "Status of the VRRP state machine.", stateLabels), "adverTx": colPromDesc(vrrpSubsystem, "advertisements_sent_total", "Advertisements sent total.", labels), "adverRx": colPromDesc(vrrpSubsystem, "advertisements_received_total", "Advertisements received total.", labels), "garpTx": colPromDesc(vrrpSubsystem, "gratuitous_arp_sent_total", "Gratuitous ARP sent total.", labels), "neighborAdverTx": colPromDesc(vrrpSubsystem, "neighbor_advertisements_sent_total", "Neighbor Advertisements sent total.", labels), "transitions": colPromDesc(vrrpSubsystem, "state_transitions_total", "Number of transitions of the VRRP state machine in total.", labels), } } // Update implemented as per the Collector interface. 
func (c *vrrpCollector) Update(ch chan<- prometheus.Metric) error { cmd := "show vrrp json" jsonVRRPInfo, err := executeVRRPCommand(cmd) if err != nil { return err } if err := processVRRPInfo(ch, jsonVRRPInfo, c.descriptions); err != nil { return cmdOutputProcessError(cmd, string(jsonVRRPInfo), err) } return nil } func processVRRPInfo(ch chan<- prometheus.Metric, jsonVRRPInfo []byte, desc map[string]*prometheus.Desc) error { var jsonList []VrrpVrInfo if err := json.Unmarshal(jsonVRRPInfo, &jsonList); err != nil { return err } for _, vrInfo := range jsonList { processInstance(ch, "v4", vrInfo.Vrid, vrInfo.Interface, vrInfo.V4Info, desc) processInstance(ch, "v6", vrInfo.Vrid, vrInfo.Interface, vrInfo.V6Info, desc) } return nil } func processInstance(ch chan<- prometheus.Metric, proto string, vrid uint32, iface string, instance VrrpInstanceInfo, vrrpDesc map[string]*prometheus.Desc) { vrrpLabels := []string{proto, strconv.FormatUint(uint64(vrid), 10), iface, instance.Subinterface} for _, state := range vrrpStates { stateLabels := append(vrrpLabels, state) var value float64 if strings.EqualFold(instance.Status, state) { value = 1 } newGauge(ch, vrrpDesc["vrrpState"], value, stateLabels...) } if instance.Statistics.AdverTx != nil { newCounter(ch, vrrpDesc["adverTx"], float64(*instance.Statistics.AdverTx), vrrpLabels...) } if instance.Statistics.AdverRx != nil { newCounter(ch, vrrpDesc["adverRx"], float64(*instance.Statistics.AdverRx), vrrpLabels...) } if instance.Statistics.GarpTx != nil { newCounter(ch, vrrpDesc["garpTx"], float64(*instance.Statistics.GarpTx), vrrpLabels...) } if instance.Statistics.NeighborAdverTx != nil { newCounter(ch, vrrpDesc["neighborAdverTx"], float64(*instance.Statistics.NeighborAdverTx), vrrpLabels...) } if instance.Statistics.Transitions != nil { newCounter(ch, vrrpDesc["transitions"], float64(*instance.Statistics.Transitions), vrrpLabels...) 
} } prometheus-frr-exporter-1.4.0/collector/vrrp_test.go000066400000000000000000000120311474577211100227170ustar00rootroot00000000000000package collector import ( "fmt" "regexp" "strings" "testing" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) var expectedVRRPMetrics = map[string]float64{ "frr_vrrp_advertisements_received_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_1,vrid=1}": 1548196, "frr_vrrp_advertisements_received_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_2,vrid=2}": 4.0, "frr_vrrp_advertisements_received_total{interface=gw_extnet,proto=v6,subinterface=,vrid=2}": 0.0, "frr_vrrp_advertisements_received_total{interface=gw_extnet,proto=v6,subinterface=extnet_v6_1,vrid=1}": 1548195, "frr_vrrp_advertisements_sent_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_1,vrid=1}": 6, "frr_vrrp_advertisements_sent_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_2,vrid=2}": 1548210, "frr_vrrp_advertisements_sent_total{interface=gw_extnet,proto=v6,subinterface=,vrid=2}": 0, "frr_vrrp_advertisements_sent_total{interface=gw_extnet,proto=v6,subinterface=extnet_v6_1,vrid=1}": 2, "frr_vrrp_gratuitous_arp_sent_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_1,vrid=1}": 4, "frr_vrrp_gratuitous_arp_sent_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_2,vrid=2}": 1, "frr_vrrp_neighbor_advertisements_sent_total{interface=gw_extnet,proto=v6,subinterface=,vrid=2}": 0, "frr_vrrp_neighbor_advertisements_sent_total{interface=gw_extnet,proto=v6,subinterface=extnet_v6_1,vrid=1}": 5, "frr_vrrp_state_transitions_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_1,vrid=1}": 9, "frr_vrrp_state_transitions_total{interface=gw_extnet,proto=v4,subinterface=extnet_v4_2,vrid=2}": 2, "frr_vrrp_state_transitions_total{interface=gw_extnet,proto=v6,subinterface=,vrid=2}": 0, "frr_vrrp_state_transitions_total{interface=gw_extnet,proto=v6,subinterface=extnet_v6_1,vrid=1}": 11, 
"frr_vrrp_state{interface=gw_extnet,proto=v4,state=Backup,subinterface=extnet_v4_1,vrid=1}": 1, "frr_vrrp_state{interface=gw_extnet,proto=v4,state=Backup,subinterface=extnet_v4_2,vrid=2}": 0, "frr_vrrp_state{interface=gw_extnet,proto=v4,state=Initialize,subinterface=extnet_v4_1,vrid=1}": 0, "frr_vrrp_state{interface=gw_extnet,proto=v4,state=Initialize,subinterface=extnet_v4_2,vrid=2}": 0, "frr_vrrp_state{interface=gw_extnet,proto=v4,state=Master,subinterface=extnet_v4_1,vrid=1}": 0, "frr_vrrp_state{interface=gw_extnet,proto=v4,state=Master,subinterface=extnet_v4_2,vrid=2}": 1, "frr_vrrp_state{interface=gw_extnet,proto=v6,state=Backup,subinterface=,vrid=2}": 0, "frr_vrrp_state{interface=gw_extnet,proto=v6,state=Backup,subinterface=extnet_v6_1,vrid=1}": 1, "frr_vrrp_state{interface=gw_extnet,proto=v6,state=Initialize,subinterface=,vrid=2}": 1, "frr_vrrp_state{interface=gw_extnet,proto=v6,state=Initialize,subinterface=extnet_v6_1,vrid=1}": 0, "frr_vrrp_state{interface=gw_extnet,proto=v6,state=Master,subinterface=,vrid=2}": 0, "frr_vrrp_state{interface=gw_extnet,proto=v6,state=Master,subinterface=extnet_v6_1,vrid=1}": 0, } func TestProcessVRRPInfo(t *testing.T) { ch := make(chan prometheus.Metric, 1024) if err := processVRRPInfo(ch, readTestFixture(t, "show_vrrp.json"), getVRRPDesc()); err != nil { t.Errorf("error calling processVRRPInfo: %s", err) } close(ch) // Create a map of following format: // key: metric_name{labelname:labelvalue,...} // value: metric value gotMetrics := make(map[string]float64) for { msg, more := <-ch if !more { break } metric := &dto.Metric{} if err := msg.Write(metric); err != nil { t.Errorf("error writing metric: %s", err) } var labels []string for _, label := range metric.GetLabel() { labels = append(labels, fmt.Sprintf("%s=%s", label.GetName(), label.GetValue())) } var value float64 if metric.GetCounter() != nil { value = metric.GetCounter().GetValue() } else if metric.GetGauge() != nil { value = metric.GetGauge().GetValue() } re, err := 
regexp.Compile(`.*fqName: "(.*)", help:.*`) if err != nil { t.Errorf("could not compile regex: %s", err) } metricName := re.FindStringSubmatch(msg.Desc().String())[1] gotMetrics[fmt.Sprintf("%s{%s}", metricName, strings.Join(labels, ","))] = value } for metricName, metricVal := range gotMetrics { if expectedMetricVal, ok := expectedVRRPMetrics[metricName]; ok { if expectedMetricVal != metricVal { t.Errorf("metric %s expected value %v got %v", metricName, expectedMetricVal, metricVal) } } else { t.Errorf("unexpected metric: %s : %v", metricName, metricVal) } } for expectedMetricName, expectedMetricVal := range expectedVRRPMetrics { if _, ok := gotMetrics[expectedMetricName]; !ok { t.Errorf("missing metric: %s value %v", expectedMetricName, expectedMetricVal) } } } prometheus-frr-exporter-1.4.0/frr_exporter.go000066400000000000000000000037621474577211100214350ustar00rootroot00000000000000package main import ( "fmt" "net/http" _ "net/http/pprof" "os" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/promslog" "github.com/prometheus/common/promslog/flag" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" "github.com/prometheus/exporter-toolkit/web/kingpinflag" "github.com/tynany/frr_exporter/collector" ) var ( telemetryPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String() webFlagConfig = kingpinflag.AddFlags(kingpin.CommandLine, ":9342") ) func main() { promslogConfig := &promslog.Config{} flag.AddFlags(kingpin.CommandLine, promslogConfig) kingpin.Version(version.Print("frr_exporter")) kingpin.HelpFlag.Short('h') kingpin.Parse() logger := promslog.New(promslogConfig) prometheus.MustRegister(versioncollector.NewCollector("frr_exporter")) logger.Info("Starting frr_exporter", 
"version", version.Info()) logger.Info("Build context", "build_context", version.BuildContext()) nc, err := collector.NewExporter(logger) if err != nil { panic(fmt.Errorf("Could not create collector: %w", err)) } prometheus.MustRegister(nc) http.Handle(*telemetryPath, promhttp.Handler()) if *telemetryPath != "/" && *telemetryPath != "" { landingConfig := web.LandingConfig{ Name: "FRR Exporter", Description: "Prometheus Exporter for FRRouting daemon", Version: version.Info(), Links: []web.LandingLinks{ {Address: *telemetryPath, Text: "Metrics"}, }, } landingPage, err := web.NewLandingPage(landingConfig) if err != nil { logger.Error(err.Error()) os.Exit(1) } http.Handle("/", landingPage) } server := &http.Server{} if err := web.ListenAndServe(server, webFlagConfig, logger); err != nil { logger.Error(err.Error()) os.Exit(1) } } prometheus-frr-exporter-1.4.0/go.mod000066400000000000000000000024061474577211100174650ustar00rootroot00000000000000module github.com/tynany/frr_exporter go 1.22 require ( github.com/alecthomas/kingpin/v2 v2.4.0 github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_model v0.6.1 github.com/prometheus/common v0.59.1 github.com/prometheus/exporter-toolkit v0.13.0 ) require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.32.0 // indirect golang.org/x/net v0.34.0 // indirect golang.org/x/oauth2 
v0.22.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) prometheus-frr-exporter-1.4.0/go.sum000066400000000000000000000151161474577211100175140ustar00rootroot00000000000000github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/klauspost/compress v1.17.9 
h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 
github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c= github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys 
v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= prometheus-frr-exporter-1.4.0/internal/000077500000000000000000000000001474577211100201715ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/internal/frrsockets/000077500000000000000000000000001474577211100223565ustar00rootroot00000000000000prometheus-frr-exporter-1.4.0/internal/frrsockets/frrsockets.go000066400000000000000000000044001474577211100250700ustar00rootroot00000000000000package frrsockets import ( "bytes" "fmt" "net" "path/filepath" "time" ) type Connection struct { dirPath string timeout time.Duration } func NewConnection(dirPath string, timeout time.Duration) *Connection { return &Connection{dirPath: dirPath, timeout: timeout} } func (c Connection) ExecBFDCmd(cmd string) ([]byte, error) { return executeCmd(filepath.Join(c.dirPath, "bfdd.vty"), cmd, c.timeout) } func (c Connection) ExecBGPCmd(cmd string) ([]byte, error) { return executeCmd(filepath.Join(c.dirPath, "bgpd.vty"), 
cmd, c.timeout) } func (c Connection) ExecOSPFCmd(cmd string) ([]byte, error) { return executeCmd(filepath.Join(c.dirPath, "ospfd.vty"), cmd, c.timeout) } func (c Connection) ExecOSPFMultiInstanceCmd(cmd string, instanceID int) ([]byte, error) { return executeCmd(filepath.Join(c.dirPath, fmt.Sprintf("ospfd-%d.vty", instanceID)), cmd, c.timeout) } func (c Connection) ExecPIMCmd(cmd string) ([]byte, error) { return executeCmd(filepath.Join(c.dirPath, "pimd.vty"), cmd, c.timeout) } func (c Connection) ExecVRRPCmd(cmd string) ([]byte, error) { return executeCmd(filepath.Join(c.dirPath, "vrrpd.vty"), cmd, c.timeout) } func (c Connection) ExecZebraCmd(cmd string) ([]byte, error) { return executeCmd(filepath.Join(c.dirPath, "zebra.vty"), cmd, c.timeout) } func executeCmd(socketPath, cmd string, timeout time.Duration) ([]byte, error) { var response bytes.Buffer conn, err := net.DialUnix("unix", nil, &net.UnixAddr{Net: "unix", Name: socketPath}) if err != nil { return nil, err } defer conn.Close() if err = conn.SetDeadline(time.Now().Add(timeout)); err != nil { return nil, err } buf := make([]byte, 4096) // Mimic vtysh by switching to 'enable' mode first. Note that commands need to be // null-terminated. if _, err = conn.Write([]byte("enable\x00")); err != nil { return nil, err } if _, err := conn.Read(buf); err != nil { return nil, err } // Send desired command. 
if _, err = conn.Write([]byte(cmd + "\x00")); err != nil { return nil, err } for { n, err := conn.Read(buf) if err != nil { return response.Bytes(), err } response.Write(buf[:n]) // frr signals the end of a response with a null character if n > 0 && buf[n-1] == 0 { return bytes.TrimRight(response.Bytes(), "\x00"), nil } } } prometheus-frr-exporter-1.4.0/internal/frrsockets/frrsockets_test.go000066400000000000000000000040531474577211100261330ustar00rootroot00000000000000package frrsockets import ( "net" "os" "path/filepath" "strings" "testing" "time" ) func TestExecuteCmd(t *testing.T) { socketPath := filepath.Join(os.TempDir(), "zebra_mock.vty") expected := "FRRouting 8.1 (localhost).\n" // Simple mock of FRR Zebra Unix socket go mockSocket(socketPath, expected) // Allow socket listener goroutine to settle time.Sleep(100 * time.Millisecond) if resp, err := executeCmd(socketPath, "show version", time.Second); err != nil { t.Fatalf("executeCmd returned error: %v\n", err) } else if string(resp) != expected { t.Fatalf("executeCmd expected '%s', got '%s'\n", expected, resp) } } // TestExecuteCmdWithLargeOutput tests ExecuteCmd when the command returns // a large amount of output exceeding the hard-coded buffer size of 4096. 
func TestExecuteCmdWithLargeOutput(t *testing.T) { socketPath := filepath.Join(os.TempDir(), "bgp_mock.vty") command := "show a whole lot of data" expected := strings.Repeat("z", 5000) go mockSocket(socketPath, expected) // Allow socket listener goroutine to settle time.Sleep(100 * time.Millisecond) if resp, err := executeCmd(socketPath, command, time.Second); err != nil { t.Fatalf("executeCmd returned error: %v\n", err) } else if string(resp) != expected { t.Fatalf("executeCmd \n expected '%s',\n got '%s'\n", expected, resp) } } func mockSocket(socketPath string, socketData string) { // Simple mock of FRR Unix socket l, err := net.Listen("unix", socketPath) if err != nil { panic(err) } defer os.Remove(socketPath) defer l.Close() conn, err := l.Accept() if err != nil { panic(err) } defer conn.Close() cmd := make([]byte, 1024) if _, err := conn.Read(cmd); err != nil { panic(err) } // If initial command is 'enable', send expected response and wait for next command. if string(cmd[:7]) == "enable\x00" { if _, err := conn.Write([]byte{0, 0, 0, 0}); err != nil { panic(err) } if _, err := conn.Read(cmd); err != nil { panic(err) } } if _, err := conn.Write([]byte(socketData + "\x00")); err != nil { panic(err) } }