pax_global_header00006660000000000000000000000064145503021750014514gustar00rootroot0000000000000052 comment=cd8b8947f18f102875ce64c6d2094ba2be59dc35 pyroute2-0.7.11/000077500000000000000000000000001455030217500133735ustar00rootroot00000000000000pyroute2-0.7.11/.flake8000066400000000000000000000000421455030217500145420ustar00rootroot00000000000000[flake8] ignore = E203,E722,W503 pyroute2-0.7.11/.github/000077500000000000000000000000001455030217500147335ustar00rootroot00000000000000pyroute2-0.7.11/.github/FUNDING.yml000066400000000000000000000000201455030217500165400ustar00rootroot00000000000000github: svinota pyroute2-0.7.11/.github/dependabot.yml000066400000000000000000000001601455030217500175600ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" pyroute2-0.7.11/.github/workflows/000077500000000000000000000000001455030217500167705ustar00rootroot00000000000000pyroute2-0.7.11/.github/workflows/codeql.yaml000066400000000000000000000026311455030217500211250ustar00rootroot00000000000000name: "Code Scanning - Action" on: pull_request: push: jobs: CodeQL-Build: # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest runs-on: ubuntu-latest permissions: # required for all workflows security-events: write steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 # Override language selection by uncommenting this and choosing your languages # with: # languages: go, javascript, csharp, python, cpp, java # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below). - name: Autobuild uses: github/codeql-action/autobuild@v3 # â„šī¸ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun # âœī¸ If the Autobuild fails above, remove it and uncomment the following # three lines and modify them (or add more) to build your code if your # project uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 pyroute2-0.7.11/.github/workflows/depsreview.yaml000066400000000000000000000004501455030217500220300ustar00rootroot00000000000000name: 'Dependency Review' on: [pull_request] permissions: contents: read jobs: dependency-review: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' uses: actions/checkout@v4 - name: 'Dependency Review' uses: actions/dependency-review-action@v3 pyroute2-0.7.11/.github/workflows/pull_request.yml000066400000000000000000000042561455030217500222460ustar00rootroot00000000000000name: Main CI on: pull_request: branches: [ master ] workflow_dispatch: permissions: contents: read jobs: linter: runs-on: code steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox session=linter repo: runs-on: Python3.8 steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox session=repo unit: runs-on: Linux steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox session=unit lab: runs-on: Linux steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox session=lab docs: runs-on: Linux steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox session=docs neutron: runs-on: Python3.8 steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox session=neutron integration: runs-on: Linux steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox 
session=integration minimal: runs-on: Linux steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: sudo make nox session=minimal linux-fedora-38: runs-on: linux-fedora-38 steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: sudo make nox session=linux-3.6 - run: sudo make nox session=linux-3.8 - run: sudo make nox session=linux-3.10 - run: sudo make nox session=linux-3.12 linux-ubuntu-22: runs-on: linux-ubuntu-22 steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: sudo make nox session=linux-3.6 - run: sudo make nox session=linux-3.10 openbsd: runs-on: OpenBSD steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: /home/github/openbsd_runner.sh pyroute2-0.7.11/.github/workflows/push.yml000066400000000000000000000004241455030217500204720ustar00rootroot00000000000000name: Linter on: push: branches: [ master ] workflow_dispatch: permissions: contents: read jobs: build: runs-on: code steps: - run: sudo chown -R $USER:$USER $GITHUB_WORKSPACE - uses: actions/checkout@v4 - run: make nox session=linter pyroute2-0.7.11/.gitignore000066400000000000000000000003651455030217500153670ustar00rootroot00000000000000.idea/ .eggs/ *.swp *.pyc *~ pyroute2/config/version.py build/ dist/ MANIFEST docs/html docs/man docs/doctrees docs/_templates/private.layout.html docs-build.log lab/_build lab/*html lab/_static/conf.js *.egg-info benchmark.log venv .venv .nox* pyroute2-0.7.11/.pre-commit-config.yaml000066400000000000000000000013031455030217500176510ustar00rootroot00000000000000files: '^(noxfile.py|pyroute2|pr2modules|util|examlpes/ndb|docs/conf.py|examples/pyroute2-cli|tests)' repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - repo: https://github.com/pycqa/isort rev: 5.12.0 hooks: - id: isort name: isort (python) args: ['-m', '3', '--tc', '-w', '79', 
'--profile', 'black'] - repo: https://github.com/psf/black rev: 23.1.0 hooks: - id: black args: ['-C', '-S', '-l', '79'] - repo: https://github.com/PyCQA/flake8 rev: 4.0.1 hooks: - id: flake8 files: \.py$ args: ['--config', '.flake8'] pyroute2-0.7.11/CHANGELOG.rst000066400000000000000000000731571455030217500154310ustar00rootroot00000000000000Changelog ========= * 0.7.11 * ethtool: ring support * ndb: fix FDB records index * ndb: fix sources objects counting * 0.7.10 * ss2: fix classful flow data * ci: add Python versions 3.8 and 3.12, update platform versions * l2tp: fix get_tunnel/get_session * ndb: compat fix * ndb: recordset pipes * netns: RISCV64 fix * netns: loongarch support * ethtool: statistics support * ethtool: dynamic ioctl gstrings * NetNS: set_netnsid fix * iproute: nsid allocation * iproute: dump mpls routes * rtnl: CAN support * 0.7.9 * minimal: fix for embedded envs * diag: support CGROUP_ID * iwutil: get/set interface (by ifindex) type * tc: 'duplicate' parameter fix * 0.7.8 * ss2: more fixes * 0.7.7 * ss2: user context patch * ndb: basic altname support * nl80211: decoder improvements * 0.7.6 * setup: static loader * iproute: support altname in link_lookup() * ethtool: fd leaks * 0.7.5 * nlsocket: fix marshal reference * 0.7.4 * rtnl: SRv6 updates * connector: basic implementation * nftables: sets draft * ss2: provide as a module * 0.7.3 * nlsocket: CompileContext support * nlsocket: support for per request parsers (see `IPRoute.get_default_routes()`) * generic: added support for dumping policies * ndb: changed API for reports * conntrack: fix TCP states * ipmock: new component (IPMock) for mock tests * thermal: update * ci: `nlm_generator = True` -- force using generators in the CI * 0.7.2 * iproute: fix neighbours dump * iproute: fix rule add * setup: fix build-backend * 0.7.1 * packaging: back to single package layout * packaging: static version * ci: use nox, finish migration from nose * netlink: universal nlmsg.get() * 0.6.13 * requests: IPv6 
routes default priority * requests: fix wrong family on empty IP targets * ndb: fix NetlinkError escalation routine on apply * 0.6.12 * ndb: allow dict as keys in `__contains__` * ndb: fixed `count()` on nested views * ndb: fix IPv6 addr removal: * ifinfmsg: fixed IFLA_IPTUN_FLAGS: * tc: fix tcm_info setting: * 0.6.11 * ipdb: fix requests compatibility: * ipdb: added deprecation warning * 0.6.10 * ndb: fix DB reload on incomplete requests in the IPRoute API fallbacks: * ndb: basic ipvlan support * ndb: support `ndb.schema.backup("filename")` fro SQLite3 backend * ndb: transactions, CheckProcess, PingAddress, see `test_ndb/test_transaction.py` and docs * ndb: make snapshots only for `commit()` calls, no automatic snapshots for `apply()` * netlink: filter out all the subsequent messages starting from the first error: * protocols: fix arp module export * iproute: deprecate positional arguments to `IPRoute.rule()` * iproute, ndb: use `pyroute2.requests` to filter and transform API call arguments * conntrack: fix exports: * nftables: multiple fixes: * tc: fix em_ipset crash: * tests: integrated pre-commit, github actions and more * tests: support basic OpenBSD tests * pyroute2-cli: parser fixes * 0.6.9 * wireguard peer fix for both IPv4 and IPv6: * netns: avoid extra libc reloads: * 0.6.8 * wireguard: IPv6 fixes: * ndb: support arbitrary IPRoute-compatible source classes * ndb: tolerate source failures * ndb: chaotic tests * general: kernel version parser: * package: static version file: * iproute: support custom link kind classes: * 0.6.7 * ndb: reload of tunnels in the down state * ndb: improved reliability in the state sync, affected multihop and encap routes * ndb: improved neighbours management * 0.6.6 * tuntap: support loongarch * ndb: fix routes cleanup * ndb: support `in` for sources, interfaces and addresses * ndb: support changing tunnel (gre, ipip, ...) 
attributes * netlink: support NLM_F_DUMP_INTR * 0.6.5 * ndb: fix `in` for sources: * core: provide entry_points aliases for legacy code: * packaging: fix missing exports: * 0.6.4 * ndb: memory leak fix: * packaging: fix internal modules import * netlink: DEFAULT_RCVBUF size increased: * rtnl: fix COLLECT_METADATA for geneve devices: * 0.6.3 * ndb: route spec fixes: * packaging: force virtual packages to require same versions * 0.6.2 * core: relocate RemoteIPRoute: * wireguard: fix key parsing: * packaging: fix importlib-metadata deps: * tc: support pfifo qdisc: * 0.6.1 * packaging: new layout: * 0.5.19 * ndb: fix default routes keys: * mptcp: basic support: * netlink: ext_ack support fix: * tc: netem rate setting: * NSPopen: fix fd leak: * 0.5.18 * netlink: support zero length lladdr: * 0.5.17 * license: aligned cli/ss2 * ndb: `del_ip()` improvements * ndb: `wait(timeout=...)` * 0.5.16 * ndb: fix syntax for Python < 3.6 * 0.5.15 * ndb: don't mess with SQL adapters unless really needed * ndb: support more virtual interface types * ndb: support `in` for views and record sets * ndb: fix nested fetches from DB * tests: start migration to pytest * 0.5.14 * iproute: ip neigh get * iproute: link_lookup fix * nftables: missing hooks * netns: fix * 0.5.13 * netns: allow to pass custom libc reference * generic: att L2TP support * iproute: link_lookup() optimization * ndb: basic cluster support * 0.5.12 * rtnl: implement team config pass * ndb.auth: example auth modules for RADIUS and OpenStack / Keystone * cli: syntax fixes * 0.5.11 * ndb.report: filters and transformations * ndb.objects.route: support basic MPLS routes management * ndb.objects.route: support MPLS lwtunnel routes * ndb.schema: reschedule events * 0.5.10 * general: don't use pkg_resources * iproute: fix Windows support * netlink: provide the target field * ndb: use the target field from the netlink header * ndb: multiple SQL fixes, transactions fixed with the PostgreSQL backend * ndb: multiple object cache 
fixes * ndb.schema: drop DB triggers * ndb.objects: fix object management within a netns * ndb.objects.route: support route metrics * ndb.objects.route: fix default route syntax * 0.5.9 * ethtool: fix module setup * 0.5.8 * ethtool: initial support * tc: multimatch support * tc: meta support * tc: cake: add stats_app decoder * conntrack: filter * ndb.objects.interface: reload after setns * ndb.objects.route: create() dst syntax * ndb.objects.route: 'default' syntax * wireguard: basic testing * 0.5.7 * ndb.objects.netns: prototype * ndb: netns management * ndb: netns sources autoconnect (disabled by default) * wireguard: basic support * netns: fix FD leakage * * cli: Python3 fixes * iproute: support `route('append', ...)` * ipdb: fix routes cleanup on link down * * wiset: support "mark" ipset type * 0.5.6 * ndb.objects.route: multipath routes * ndb.objects.rule: basic support * ndb.objects.interface: veth fixed * ndb.source: fix source restart * ndb.log: logging setup * 0.5.5 * nftables: rules expressions * * netns: ns_pids * * ndb: wait() method * ndb: add extra logging, log state transitions * ndb: nested views, e.g. 
`ndb.interfaces['br0'].ports` * cli: port pyroute2-cli to use NDB instead of IPDB * iproute: basic Windows support (proof of concept only) * remote: support mitogen proxy chains, support remote netns * 0.5.4 * iproute: basic SR-IOV support, virtual functions setup * ipdb: shutdown logging fixed * * nftables: fix regression (errata: previously mentioned ipset) * * netns: pushns() / popns() / dropns() calls * * 0.5.3 * bsd: parser improvements * ndb: PostgreSQL support * ndb: transactions commit/rollback * ndb: dependencies rollback * ipdb: IPv6 routes fix * * tcmsg: ematch support * tcmsg: flow filter * tcmsg: stats2 support improvements * ifinfmsg: GRE i/oflags, i/okey format fixed * * cli/ss2: improvements, tests * nlsocket: fix work on kernels < 3.2 * * 0.5.2 * ndb: read-only DB prototype * remote: support communication via stdio * general: fix async keyword -- Python 3.7 compatibility * * * iproute: support monitoring on BSD systems via PF_ROUTE * rtnl: support for SQL schema in message classes * nl80211: improvements * * * * netlink: support generators * 0.5.1 * ipdb: #310 -- route keying fix * ipdb: #483, #484 -- callback internals change * ipdb: #499 -- eventloop interface * ipdb: #500 -- fix non-default :: routes * netns: #448 -- API change: setns() doesn't remove FD * netns: #504 -- fix resource leakage * bsd: initial commits * 0.5.0 * ACHTUNG: ipdb commit logic is changed * ipdb: do not drop failed transactions * ipdb: #388 -- normalize IPv6 addresses * ipdb: #391 -- support both IPv4 and IPv6 default routes * ipdb: #392 -- fix MPLS route key reference * ipdb: #394 -- correctly work with route priorities * ipdb: #408 -- fix IPv6 routes in tables >= 256 * ipdb: #416 -- fix VRF interfaces creation * ipset: multiple improvements * tuntap: #469 -- support s390x arch * nlsocket: #443 -- fix socket methods resolve order for Python2 * netns: non-destructive `netns.create()` * 0.4.18 * ipdb: #379 [critical] -- routes in global commits * ipdb: #380 -- global commit 
with disabled plugins * ipdb: #381 -- exceptions fixed * ipdb: #382 -- manage dependent routes during interface commits * ipdb: #384 -- global `review()` * ipdb: #385 -- global `drop()` * netns: #383 -- support ppc64 * general: public API refactored (same signatures; to be documented) * 0.4.17 * req: #374 [critical] -- mode nla init * iproute: #378 [critical] -- fix `flush_routes()` to respect filters * ifinfmsg: #376 -- fix data plugins API to support pyinstaller * 0.4.16 * ipdb: race fixed: remove port/bridge * ipdb: #280 -- race fixed: port/bridge * ipdb: #302 -- ipaddr views: [ifname].ipaddr.ipv4, [ifname]ipaddr.ipv6 * ipdb: #357 -- allow bridge timings to have some delta * ipdb: #338 -- allow to fix interface objects from failed `create()` * rtnl: #336 -- fix vlan flags * iproute: #342 -- the match method takes any callable * nlsocket: #367 -- increase default SO_SNDBUF * ifinfmsg: support tuntap on armv6l, armv7l platforms * 0.4.15 * req: #365 -- full and short nla notation fixed, critical * iproute: #364 -- new method, `brport()` * ipdb: -- support bridge port options * 0.4.14 * event: new genl protocols set: VFS_DQUOT, acpi_event, thermal_event * ipdb: #310 -- fixed priority change on routes * ipdb: #349 -- fix setting ifalias on interfaces * ipdb: #353 -- mitigate kernel oops during bridge creation * ipdb: #354 -- allow to explicitly choose plugins to load * ipdb: #359 -- provide read-only context managers * rtnl: #336 -- vlan flags support * rtnl: #352 -- support interface type plugins * tc: #344 -- mirred action * tc: #346 -- connmark action * netlink: #358 -- memory optimization * config: #360 -- generic asyncio config * iproute: #362 -- allow to change or replace a qdisc * 0.4.13 * ipset: full rework of the IPSET_ATTR_DATA and IPSET_ATTR_ADT ACHTUNG: this commit may break API compatibility * ipset: hash:mac support * ipset: list:set support * ipdb: throw EEXIST when creates VLAN/VXLAN devs with same ID, but under different names * tests: #329 -- 
include unit tests into the bundle * legal: E/// logo removed * 0.4.12 * ipdb: #314 -- let users choose RTNL groups IPDB listens to * ipdb: #321 -- isolate `net_ns_.*` setup in a separate code block * ipdb: #322 -- IPv6 updates on interfaces in DOWN state * ifinfmsg: allow absolute/relative paths in the net_ns_fd NLA * ipset: #323 -- support setting counters on ipset add * ipset: `headers()` command * ipset: revisions * ipset: #326 -- mark types * 0.4.11 * rtnl: #284 -- support vlan_flags * ipdb: #288 -- do not inore link-local addresses * ipdb: #300 -- sort ip addresses * ipdb: #306 -- support net_ns_pid * ipdb: #307 -- fix IPv6 routes management * ipdb: #311 -- vlan interfaces address loading * iprsocket: #305 -- support NETLINK_LISTEN_ALL_NSID * 0.4.10 * devlink: fix fd leak on broken init * 0.4.9 * sock_diag: initial NETLINK_SOCK_DIAG support * rtnl: fix critical fd leak in the compat code * 0.4.8 * rtnl: compat proxying fix * 0.4.7 * rtnl: compat code is back * netns: custom netns path support * ipset: multiple improvements * 0.4.6 * ipdb: #278 -- fix initial ports mapping * ipset: #277 -- fix ADT attributes parsing * nl80211: #274, #275, #276 -- BSS-related fixes * 0.4.5 * ifinfmsg: GTP interfaces support * generic: devlink protocol support * generic: code cleanup * 0.4.4 * iproute: #262 -- `get_vlans()` fix * iproute: default mask 32 for IPv4 in `addr()` * rtmsg: #260 -- RTA_FLOW support * 0.4.3 * ipdb: #259 -- critical `Interface` class fix * benchmark: initial release * 0.4.2 * ipdb: event modules * ipdb: on-demand views * ipdb: rules management * ipdb: bridge controls * ipdb: #258 -- important Python compatibility fixes * netns: #257 -- pipe leak fix * netlink: support pickling for nlmsg * 0.4.1 * netlink: no buffer copying in the parser * netlink: parse NLA on demand * ipdb: #244 -- lwtunnel multipath fixes * iproute: #235 -- route types * docs updated * 0.4.0 * ACHTUNG: old kernels compatibility code is dropped * ACHTUNG: IPDB uses two separate sockets 
for monitoring and commands * ipdb: #244 -- multipath lwtunnel * ipdb: #242 -- AF_MPLS routes * ipdb: #241, #234 -- fix create(..., reuse=True) * ipdb: #239 -- route encap and metrics fixed * ipdb: #238 -- generic port management * ipdb: #235 -- support route scope and type * ipdb: #230, #232 -- routes GC (work in progress) * rtnl: #245 -- do not fail if `/proc/net/psched` doesn't exist * rtnl: #233 -- support VRF interfaces (requires net-next) * 0.3.21 * ipdb: #231 -- return `ipdb.common` as deprecated * 0.3.20 * iproute: `vlan_filter()` * iproute: #229 -- FDB management * general: exceptions re-exported via the root module * 0.3.19 * rtmsg: #227 -- MPLS lwtunnel basic support * iproute: `route()` docs updated * general: #228 -- exceptions layout changed * package-rh: rpm subpackages * 0.3.18 * version bump -- include docs in the release tarball * 0.3.17 * tcmsg: qdiscs and filters as plugins * tcmsg: #223 -- tc clsact and bpf direct-action * tcmsg: plug, codel, choke, drr qdiscs * tests: CI in VMs (see civm project) * tests: xunit output * ifinfmsg: tuntap support in i386, i686 * ifinfmsg: #207 -- support vlan filters * examples: #226 -- included in the release tarball * ipdb: partial commits, initial support * 0.3.16 * ipdb: fix the multiple IPs in one commit case * rtnl: support veth peer attributes * netns: support 32bit i686 * netns: fix MIPS support * netns: fix tun/tap creation * netns: fix interface move between namespaces * tcmsg: support hfsc, fq_codel, codel qdiscs * nftables: initial support * netlink: dump/load messages to/from simple types * 0.3.15 * netns: #194 -- fix fd leak * iproute: #184 -- fix routes dump * rtnl: TCA_ACT_BPF support * rtnl: ipvlan support * rtnl: OVS support removed * iproute: rule() improved to support all NLAs * project supported by Ericsson * 0.3.14 * package-rh: spec fixed * package-rh: both licenses added * remote: fixed the setup.py record * 0.3.13 * package-rh: new rpm for Fedora and CentOS * remote: new draft of the 
remote protocol * netns: refactored using the new remote protocol * ipdb: gretap support * 0.3.12 * ipdb: new `Interface.wait_ip()` routine * ipdb: #175 -- fix `master` attribute cleanup * ipdb: #171 -- support multipath routes * ipdb: memory consumption improvements * rtmsg: MPLS support * rtmsg: RTA_VIA support * iwutil: #174 -- fix FREQ_FIXED flag * 0.3.11 * ipdb: #161 -- fix memory allocations * nlsocket: #161 -- remove monitor mode * 0.3.10 * rtnl: added BPF filters * rtnl: LWtunnel support in ifinfmsg * ipdb: support address attributes * ipdb: global transactions, initial version * ipdb: routes refactored to use key index (speed up) * config: eventlet support embedded (thanks to Angus Lees) * iproute: replace tc classes * iproute: flush_addr(), flush_rules() * iproute: rule() refactored * netns: proxy file objects (stdin, stdout, stderr) * 0.3.9 * root imports: #109, #135 -- `issubclass`, `isinstance` * iwutil: multiple improvements * iwutil: initial tests * proxy: correctly forward NetlinkError * iproute: neighbour tables support * iproute: #147, filters on dump calls * config: initial usage of `capabilities` * 0.3.8 * docs: inheritance diagrams * nlsocket: #126, #132 -- resource deallocation * arch: #128, #131 -- MIPS support * setup.py: #133 -- syntax error during install on Python2 * 0.3.7 * ipdb: new routing syntax * ipdb: sync interface movement between namespaces * ipdb: #125 -- fix route metrics * netns: new class NSPopen * netns: #119 -- i386 syscall * netns: #122 -- return correct errno * netlink: #126 -- fix socket reuse * 0.3.6 * dhcp: initial release DHCPv4 * license: dual GPLv2+ and Apache v2.0 * ovs: port add/delete * macvlan, macvtap: basic support * vxlan: basic support * ipset: basic support * 0.3.5 * netns: #90 -- netns setns support * generic: #99 -- support custom basic netlink socket classes * proxy-ng: #106 -- provide more diagnostics * nl80211: initial nl80211 support, iwutil module added * 0.3.4 * ipdb: #92 -- route metrics support * 
ipdb: #85 -- broadcast address specification * ipdb, rtnl: #84 -- veth support * ipdb, rtnl: tuntap support * netns: #84 -- network namespaces support, NetNS class * rtnl: proxy-ng API * pypi: #91 -- embed docs into the tarball * 0.3.3 * ipdb: restart on error * generic: handle non-existing family case * [fix]: #80 -- Python 2.6 unicode vs -O bug workaround * 0.3.2 * simple socket architecture * all the protocols now are based on NetlinkSocket, see examples * rpc: deprecated * iocore: deprecated * iproute: single-threaded socket object * ipdb: restart on errors * rtnl: updated ifinfmsg policies * 0.3.1 * module structure refactored * new protocol: ipq * new protocol: nfnetlink / nf-queue * new protocol: generic * threadless sockets for all the protocols * 0.2.16 * prepare the transition to 0.3.x * 0.2.15 * ipdb: fr #63 -- interface settings freeze * ipdb: fr #50, #51 -- bridge & bond options (initial version) * RHEL7 support * [fix]: #52 -- HTB: correct rtab compilation * [fix]: #53 -- RHEL6.5 bridge races * [fix]: #55 -- IPv6 on bridges * [fix]: #58 -- vlans as bridge ports * [fix]: #59 -- threads sync in iocore * 0.2.14 * [fix]: #44 -- incorrect netlink exceptions proxying * [fix]: #45 -- multiple issues with device targets * [fix]: #46 -- consistent exceptions * ipdb: LinkedSet cascade updates fixed * ipdb: allow to reuse existing interface in `create()` * 0.2.13 * [fix]: #43 -- pipe leak in the main I/O loop * tests: integrate examples, import into tests * iocore: use own TimeoutException instead of Queue.Empty * iproute: default routing table = 254 * iproute: flush_routes() routine * iproute: fwmark parameter for rule() routine * iproute: destination and mask for rules * docs: netlink development guide * 0.2.12 * [fix]: #33 -- release resources only for bound sockets * [fix]: #37 -- fix commit targets * rtnl: HFSC support * rtnl: priomap fixed * 0.2.11 * ipdb: watchdogs to sync on RTNL events * ipdb: fix commit errors * generic: NLA operations, complement and 
intersection * docs: more autodocs in the code * tests: -W error: more strict testing now * tests: cover examples by the integration testing cycle * with -W error many resource leaks were fixed * 0.2.10 * ipdb: command chaining * ipdb: fix for RHEL6.5 Python "optimizations" * rtnl: support TCA_U32_ACT * [fix]: #32 -- NLA comparison * 0.2.9 * ipdb: support bridges and bonding interfaces on RHEL * ipdb: "shadow" interfaces (still in alpha state) * ipdb: minor fixes on routing and compat issues * ipdb: as a separate package (sub-module) * docs: include ipdb autodocs * rpc: include in setup.py * 0.2.8 * netlink: allow multiple NetlinkSocket allocation from one process * netlink: fix defragmentation for netlink-over-tcp * iocore: support forked IOCore and IOBroker as a separate process * ipdb: generic callbacks support * ipdb: routing support * rtnl: #30 -- support IFLA_INFO_DATA for bond interfaces * 0.2.7 * ipdb: use separate namespaces for utility functions and other stuff * ipdb: generic callbacks (see also IPDB.wait_interface()) * iocore: initial multipath support * iocore: use of 16byte uuid4 for packet ids * 0.2.6 * rpc: initial version, REQ/REP, PUSH/PULL * iocore: shared IOLoop * iocore: AddrPool usage * iproute: policing in FW filter * python3 compatibility issues fixed * 0.2.4 * python3 compatibility issues fixed, tests passed * 0.2.3 * [fix]: #28 -- bundle issue * 0.2.2 * iocore: new component * iocore: separate IOCore and IOBroker * iocore: change from peer-to-peer to flat addresses * iocore: REP/REQ, PUSH/PULL * iocore: support for UDP PUSH/PULL * iocore: AddrPool component for addresses and nonces * generic: allow multiple re-encoding * 0.1.12 * ipdb: transaction commit callbacks * iproute: delete root qdisc (@chantra) * iproute: netem qdisc management (@chantra) * 0.1.11 * netlink: get qdiscs for particular interface * netlink: IPRSocket threadless objects * rtnl: u32 policy setup * iproute: filter actions, such as `ok`, `drop` and so on * iproute: 
changed syntax of commands, `action` → `command` * tests: htb, tbf tests added * 0.1.10 * [fix]: #8 -- default route fix, routes filtering * [fix]: #9 -- add/delete route routine improved * [fix]: #10 -- shutdown sequence fixed * [fix]: #11 -- close IPC pipes on release() * [fix]: #12 -- stop service threads on release() * netlink: debug mode added to be used with GUI * ipdb: interface removal * ipdb: fail on transaction sync timeout * tests: R/O mode added, use `export PYROUTE2_TESTS_RO=True` * 0.1.9 * tests: all races fixed * ipdb: half-sync commit(): wait for IPs and ports lists update * netlink: use pipes for in-process communication * Python 2.6 compatibility issue: remove copy.deepcopy() usage * QPython 2.7 for Android: works * 0.1.8 * complete refactoring of class names * Python 2.6 compatibility issues * tests: code coverage, multiple code fixes * plugins: ptrace message source * packaging: RH package * 0.1.7 * ipdb: interface creation: dummy, bond, bridge, vlan * ipdb: if\_slaves interface obsoleted * ipdb: 'direct' mode * iproute: code refactored * examples: create() examples committed * 0.1.6 * netlink: tc ingress, sfq, tbf, htb, u32 partial support * ipdb: completely re-implemented transactional model (see docs) * generic: internal fields declaration API changed for nlmsg * tests: first unit tests committed * 0.1.5 * netlink: dedicated io buffering thread * netlink: messages reassembling * netlink: multi-uplink remote * netlink: masquerade remote requests * ipdb: represent interfaces hierarchy * iproute: decode VLAN info * 0.1.4 * netlink: remote netlink access * netlink: SSL/TLS server/client auth support * netlink: tcp and unix transports * docs: started sphinx docs * 0.1.3 * ipdb: context manager interface * ipdb: [fix] correctly handle ip addr changes in transaction * ipdb: [fix] make up()/down() methods transactional [#1] * iproute: mirror packets to 0 queue * iproute: [fix] handle primary ip address removal response * 0.1.2 * initial ipdb version 
* iproute fixes * 0.1.1 * initial release, iproute module pyroute2-0.7.11/LICENSE000066400000000000000000000000371455030217500144000ustar00rootroot00000000000000GPL-2.0-or-later OR Apache-2.0 pyroute2-0.7.11/LICENSE.Apache-2.0000066400000000000000000000261241455030217500160620ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2016 Peter V. Saveliev Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pyroute2-0.7.11/LICENSE.GPL-2.0-or-later000066400000000000000000000432541455030217500170510ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. 
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. 
GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. 
Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. 
However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
pyroute2-0.7.11/MANIFEST.in000066400000000000000000000003551455030217500151340ustar00rootroot00000000000000include VERSION include README.rst include README.contribute.rst include README.minimal.rst include README.report.rst include README.license.rst include LICENSE* include CHANGELOG.rst graft docs/html graft tests/test_unit graft examples pyroute2-0.7.11/Makefile000066400000000000000000000040241455030217500150330ustar00rootroot00000000000000## # # The pyroute2 project is dual licensed, see README.license.md for details # # python ?= $(shell util/find_python.sh) define nox nox $(1) -- '$(subst ",\",${noxconfig})' endef .PHONY: all all: @echo targets: @echo @echo \* clean -- clean all generated files @echo \* docs -- generate project docs @echo \* dist -- create the package file @echo \* test -- run all the tests @echo \* install -- install lib into the system or the current virtualenv @echo \* uninstall -- uninstall lib @echo .PHONY: git-clean git-clean: git clean -d -f -x git remote prune origin git branch --merged >/tmp/merged-branches && \ vi /tmp/merged-branches && xargs git branch -d /dev/null ||: .PHONY: VERSION VERSION: @${python} util/update_version.py .PHONY: docs docs: $(call nox,-e docs) .PHONY: lab lab: $(call nox,-e lab) .PHONY: format format: $(call nox,-e linter) .PHONY: test test: $(call nox,) .PHONY: test-platform test-platform: $(call nox,-e test_platform) .PHONY: upload upload: dist $(call nox,-e upload) .PHONY: setup setup: $(MAKE) VERSION .PHONY: dist dist: setup $(call nox,-e build) .PHONY: dist-minimal dist-minimal: setup $(call nox,-e build_minimal) .PHONY: install install: setup $(MAKE) uninstall $(MAKE) clean $(call nox,-e build) ${python} -m pip install dist/pyroute2-*whl ${root} .PHONY: install-minimal install-minimal: dist-minimal ${python} -m pip install dist/pyroute2.minimal*whl ${root} .PHONY: uninstall uninstall: ${python} -m pip uninstall -y pyroute2 ${python} -m pip uninstall -y pyroute2-minimal .PHONY: audit-imports 
audit-imports: findimports -n pyroute2 2>/dev/null | awk -f util/imports_dict.awk .PHONY: nox nox: $(call nox,-e ${session}) pyroute2-0.7.11/README.contribute.rst000066400000000000000000000042261455030217500172430ustar00rootroot00000000000000.. devcontribute: Project contribution guide ========================== Step 1: setup the environment ----------------------------- Linux +++++ .. code-block:: sh # make sure you have installed: # bash # git # python # GNU make, sed, awk # # then clone the repo git clone ${pyroute2_git_url} cd pyroute2 # create and activate virtualenv python -m venv venv . venv/bin/activate # update pip and install nox pip install --upgrade pip pip install nox # run the test cycle nox OpenBSD +++++++ .. code-block:: sh # install required tools pkg_add bash git gmake gsed python # clone the repo git clone ${pyroute_git_url} cd pyroute2 # create and activate virtualenv python3.10 -m venv venv . venv/bin/activate # update pip and install nox pip install --upgrade pip pip install nox # run the platform specific environment nox -e openbsd Step 2: make a change --------------------- The project is designed to work on the bare standard library. But some embedded environments strip even the stdlib, removing modules like sqlite3. So to run pyroute2 even in such environments, the project provdes two packages, `pyroute2` and `pyroute2.minimal`, with the latter providing a minimal distribution, but using no sqlite3 or pickle. Modules `pyroute2` and `pyroute2.minimal` are mutually exclusive. Each module provides it's own pypi package. More details: https://github.com/svinota/pyroute2/discussions/786 Step 3: test the change ----------------------- Assume the environment is already set up on the step 1. Thus: .. code-block:: sh # run code checks nox -e linter # run unit tests nox -e unit # run functional test, some require root nox -e linux-3.10 Step 4: submit a PR ------------------- The primary repo for the project is on Github. 
All the PRs are more than welcome there. Requirements to a PR ++++++++++++++++++++ The code must comply some requirements: * the library must work on Python >= 3.6. * the code must pass `nox -e linter` * the code must not break existing unit and functional tests * the `ctypes` usage must not break the library on SELinux pyroute2-0.7.11/README.license.rst000066400000000000000000000005241455030217500165040ustar00rootroot00000000000000Pyroute2 package is dual licensed since 0.3.6, emerging two licenses: * GPL-2.0-or-later * Apache-2.0 It means, that being writing some derived code, or including the library into distribution, you are free to choose the license from the list above. Apache v2 license was included to make the code compatible with the OpenStack project. pyroute2-0.7.11/README.minimal.rst000066400000000000000000000006151455030217500165110ustar00rootroot00000000000000pyroute2.minimal ================ PyRoute2 is a pure Python **netlink** library. This module provides minimal subset of pyroute2 modules. Only netlink parser, basic netns management and some netlink protocols implementations. links ===== * Home: * PyPI: * Usage: pyroute2-0.7.11/README.report.rst000066400000000000000000000013651455030217500164010ustar00rootroot00000000000000Report a bug ============ In the case you have issues, please report them to the project bug tracker: https://github.com/svinota/pyroute2/issues It is important to provide all the required information with your report: * Linux kernel version * Python version * Specific environment, if used -- gevent, eventlet etc. The project provides a script to print the system summary: .. code-block:: sh pyroute2-test-platform Please keep in mind, that this command will try to create and delete different interface types, and this requires root access. It is possible also to run the test in your code: .. 
code-block:: python from pprint import pprint from pyroute2.config.test_platform import TestCapsRtnl pprint(TestCapsRtnl().collect()) pyroute2-0.7.11/README.rst000066400000000000000000000153061455030217500150670ustar00rootroot00000000000000Pyroute2 ======== Pyroute2 is a pure Python **netlink** library. The core requires only Python stdlib, no 3rd party libraries. The library was started as an RTNL protocol implementation, so the name is **pyroute2**, but now it supports many netlink protocols. Some supported netlink families and protocols: * **rtnl**, network settings --- addresses, routes, traffic controls * **nfnetlink** --- netfilter API * **ipq** --- simplest userspace packet filtering, iptables QUEUE target * **devlink** --- manage and monitor devlink-enabled hardware * **generic** --- generic netlink families * **uevent** --- same uevent messages as in udev Netfilter API: * **ipset** --- IP sets * **nftables** --- packet filtering * **nfct** --- connection tracking Generic netlink: * **ethtool** --- low-level network interface setup * **wireguard** --- VPN setup * **nl80211** --- wireless functions API (basic support) * **taskstats** --- extended process statistics * **acpi_events** --- ACPI events monitoring * **thermal_events** --- thermal events monitoring * **VFS_DQUOT** --- disk quota events monitoring On the low level the library provides socket objects with an extended API. The additional functionality aims to: * Help to open/bind netlink sockets * Discover generic netlink protocols and multicast groups * Construct, encode and decode netlink and PF_ROUTE messages Supported systems ----------------- Pyroute2 runs natively on Linux and emulates some limited subset of RTNL netlink API on BSD systems on top of PF_ROUTE notifications and standard system tools. Other platforms are not supported. 
NDB -- high level RTNL API -------------------------- Key features: * Data integrity * Transactions with commit/rollback changes * State synchronization * Multiple sources, including netns and remote systems A "Hello world" example: .. code-block:: python from pyroute2 import NDB with NDB() as ndb: with ndb.interfaces['eth0'] as eth0: # set one parameter eth0.set(state='down') eth0.commit() # make sure that the interface is down # or multiple parameters at once eth0.set(ifname='hello_world!', state='up') eth0.commit() # rename, bring up and wait for success # --> <-- here you can be sure that the interface is up & renamed More examples: .. code-block:: python from pyroute2 import NDB ndb = NDB(log='debug') for record in ndb.interfaces.summary(): print(record.ifname, record.address, record.state) if_dump = ndb.interfaces.dump() if_dump.select_records(state='up') if_dump.select_fields('index', 'ifname', 'kind') for line in if_dump.format('json'): print(line) addr_summary = ndb.addresses.summary() addr_summary.select_records(ifname='eth0') for line in addr_summary.format('csv'): print(line) with ndb.interfaces.create(ifname='br0', kind='bridge') as br0: br0.add_port('eth0') br0.add_port('eth1') br0.add_ip('10.0.0.1/24') br0.add_ip('192.168.0.1/24') br0.set( br_stp_state=1, # set STP on br_group_fwd_mask=0x4000, # set LLDP forwarding state='up', # bring the interface up ) # --> <-- commit() will be run by the context manager # operate on netns: ndb.sources.add(netns='testns') # connect to a namespace with ( ndb.interfaces.create( ifname='veth0', # create veth kind='veth', peer={ 'ifname': 'eth0', # setup peer 'net_ns_fd': 'testns', # in a namespace }, state='up', ) ) as veth0: veth0.add_ip(address='172.16.230.1', prefixlen=24) with ndb.interfaces.wait( target='testns', ifname='eth0' ) as peer: # wait for the peer peer.set(state='up') # bring it up peer.add_ip('172.16.230.2/24') # add address IPRoute -- Low level RTNL API ----------------------------- Low-level 
**IPRoute** utility --- Linux network configuration. The **IPRoute** class is a 1-to-1 RTNL mapping. There are no implicit interface lookups and so on. Get notifications about network settings changes with IPRoute: .. code-block:: python from pyroute2 import IPRoute with IPRoute() as ipr: # With IPRoute objects you have to call bind() manually ipr.bind() for message in ipr.get(): print(message) More examples: .. code-block:: python from socket import AF_INET from pyroute2 import IPRoute # get access to the netlink socket ipr = IPRoute() # no monitoring here -- thus no bind() # print interfaces for link in ipr.get_links(): print(link) # create VETH pair and move v0p1 to netns 'test' ipr.link('add', ifname='v0p0', peer='v0p1', kind='veth') # wait for the devices: peer, veth = ipr.poll( ipr.link, 'dump', timeout=5, ifname=lambda x: x in ('v0p0', 'v0p1') ) ipr.link('set', index=peer['index'], net_ns_fd='test') # bring v0p0 up and add an address ipr.link('set', index=veth['index'], state='up') ipr.addr('add', index=veth['index'], address='10.0.0.1', prefixlen=24) # release Netlink socket ip.close() Network namespace examples -------------------------- Network namespace manipulation: .. code-block:: python from pyroute2 import netns # create netns netns.create('test') # list print(netns.listnetns()) # remove netns netns.remove('test') Create **veth** interfaces pair and move to **netns**: .. code-block:: python from pyroute2 import IPRoute with IPRoute() as ipr: # create interface pair ipr.link('add', ifname='v0p0', kind='veth', peer='v0p1') # wait for the peer (peer,) = ipr.poll(ipr.link, 'dump', timeout=5, ifname='v0p1') # move the peer to the 'test' netns: ipr.link('set', index=peer['index'], net_ns_fd='test') List interfaces in some **netns**: .. code-block:: python from pyroute2 import NetNS from pprint import pprint ns = NetNS('test') pprint(ns.get_links()) ns.close() More details and samples see in the documentation. Installation ------------ Using pypi: .. 
code-block:: bash pip install pyroute2 Using git: .. code-block:: bash pip install git+https://github.com/svinota/pyroute2.git Using source, requires make and nox .. code-block:: bash git clone https://github.com/svinota/pyroute2.git cd pyroute2 make install Requirements ------------ Python >= 3.6 Links ----- * home: https://pyroute2.org/ * source: https://github.com/svinota/pyroute2 * bugs: https://github.com/svinota/pyroute2/issues * pypi: https://pypi.python.org/pypi/pyroute2 * docs: http://docs.pyroute2.org/ pyroute2-0.7.11/VERSION000066400000000000000000000000071455030217500144400ustar00rootroot000000000000000.7.11 pyroute2-0.7.11/docs/000077500000000000000000000000001455030217500143235ustar00rootroot00000000000000pyroute2-0.7.11/docs/Makefile000066400000000000000000000127331455030217500157710ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = . # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/html -rm -rf $(BUILDDIR)/man html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." 
htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pyroute2.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pyroute2.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/pyroute2" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pyroute2" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." 
info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." pyroute2-0.7.11/docs/README.md000066400000000000000000000003601455030217500156010ustar00rootroot00000000000000Documentation ------------- That's the project documentation source. In order to build html docs, one should install sphinx and run `make docs` in the project's root directory. Actual built docs are available at http://docs.pyroute2.org/ pyroute2-0.7.11/docs/_static/000077500000000000000000000000001455030217500157515ustar00rootroot00000000000000pyroute2-0.7.11/docs/_static/.empty000066400000000000000000000000001455030217500170760ustar00rootroot00000000000000pyroute2-0.7.11/docs/_static/classic.css000066400000000000000000000000001455030217500200720ustar00rootroot00000000000000pyroute2-0.7.11/docs/_static/custom.css000066400000000000000000000113561455030217500200030ustar00rootroot00000000000000/* * * Sphinx stylesheet based on the default theme. * * :copyright: Copyright 2012 by Peter V. Saveliev * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: sans-serif; font-size: 100%; color: #000; margin: 0; padding: 0; } div.document { } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { } div.body { background-color: #ffffff; color: #000000; padding: 0 20px 30px 20px; max-width: 45em; min-width: 10em; padding: 2em; } div.footer { color: #ffffff; width: 100%; padding: 9px 0 9px 0; text-align: center; font-size: 75%; } div.footer a { color: #ffffff; text-decoration: underline; } div.related { background-color: #fafafa; line-height: 30px; border-top: 1px solid #c0c0c0; border-bottom: 1px solid #c0c0c0; } div.sphinxsidebar { display: none; } div.sphinxsidebar h3 { color: #355f7c; font-size: 1.2em; font-weight: normal; margin: 0; padding: 0; } div.sphinxsidebar h4 { color: #355f7c; font-size: 1.2em; font-weight: normal; margin: 5px 0 0 0; padding: 0; } div.sphinxsidebar p { color: #ffffff; } div.sphinxsidebar p.topless { margin: 5px 10px 10px 10px; } div.sphinxsidebar ul { margin: 10px; padding: 0; color: #ffffff; } div.sphinxsidebar a { } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } /* -- hyperlink styles ------------------------------------------------------ */ a { color: #355f7c; text-decoration: none; } a:visited { color: #355f7c; text-decoration: none; } a:hover { text-decoration: underline; } /* -- body styles ----------------------------------------------------------- */ div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-weight: normal; color: #20435c; border-bottom: 1px solid #ccc; margin: 20px -20px 10px -20px; padding: 3px 0 3px 10px; } div.body h1 { margin-top: 0; font-size: 200%; } div.body h2 { font-size: 160%; } div.body h3 { font-size: 140%; } div.body h4 { font-size: 120%; } div.body h5 { font-size: 110%; } div.body h6 { font-size: 100%; } a.headerlink { color: #c60f0f; 
font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } a.headerlink:hover { background-color: #c60f0f; color: white; } img.align-right { position: absolute; right: 50px; top: 50px; } div.body p, div.body dd, div.body li { text-align: justify; line-height: 130%; } div.admonition p.admonition-title + p { display: inline; } div.admonition p { margin-bottom: 5px; } div.admonition pre { margin-bottom: 5px; } div.admonition ul, div.admonition ol { margin-bottom: 5px; } p.first { margin: 2px; } dl.class, dl.exception { border-bottom: 1px dashed #ccc; } div.note { background-color: #eee; border: 1px solid #ccc; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.topic { background-color: #eee; } div.warning { background-color: #ffe4e4; border: 1px solid #f66; border-radius: 10px; } p.admonition-title { display: inline; } p.admonition-title:after { content: ":"; } cite { background-color: #ececec; color: #333333; font-family: monospace; font-style: normal; } pre { padding: 5px; color: #333333; line-height: 120%; border: 1px solid #ac9; border-left: none; border-right: none; border-radius: 10px; } div.highlight-none > div.highlight > pre { border: none; margin-top: -1.5em; padding-top: 1.5em; } tt { background-color: #ecf0f3; padding: 0 1px 0 1px; font-size: 0.95em; } th { background-color: #ede; } .warning tt { background: #efc2c2; } .note tt { background: #d6d6d6; } .viewcode-back { font-family: sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; } .highlight { background: #f9f9f9; } dl.method > dt { margin-bottom: 1em; padding: 1em; } dl.class > dt { margin-bottom: 1em; padding: 1em; } div.aafig-caption { width: 100%; } div.aafig-caption p { text-align: center; color: #a0a0a0; } div.hidden { display: none; } div.source-switch { cursor: pointer; border-radius: 10px; background-color: #f9f9f9; } span.source-title { font-family: monospace; font-weight: bold; font-size: 
120%; } span.source-hint { font-weight: normal; font-size: 100%; color: #909090; } pyroute2-0.7.11/docs/_static/fixup.js000066400000000000000000000040211455030217500174370ustar00rootroot00000000000000window.addEventListener("load", function() { Array.from( document.getElementsByTagName("img") ).map( function(img) { img.removeAttribute("width"); img.removeAttribute("height"); } ); if (!document.getElementById("fold-sources")) return; Array.from( document.getElementsByClassName("highlight-python notranslate") ).map( function(node) { let div_id = Math.round(Math.random() * 10000); let parent_node = node.parentElement; let function_node = node.firstChild.firstChild.children[2]; if (function_node.className != 'nf') return; let function_name = function_node.textContent; div_clickable = document.createElement("div"); div_switchable = document.createElement("div"); source_header = document.createElement("div"); source_title = document.createElement("span"); source_hint = document.createElement("span"); source_title.className = "source-title"; source_title.textContent = function_name + "()"; source_hint.className = "source-hint"; source_hint.textContent = ": (click to toggle the source)"; source_header.appendChild(source_title); source_header.appendChild(source_hint); div_clickable.appendChild(source_header); div_clickable.appendChild(div_switchable); div_clickable.className = "source-switch"; div_clickable.setAttribute("onclick", "source_toggle(" + div_id + ")"); parent_node.replaceChild(div_clickable, node); div_switchable.setAttribute("class", "hidden"); div_switchable.setAttribute("id", div_id); div_switchable.appendChild(node); } ); }); function source_toggle(div_id) { node = document.getElementById(div_id); if (node.className == "hidden") { node.className = "source-view"; } else { node.className = "hidden"; }; }; 
pyroute2-0.7.11/docs/_templates/000077500000000000000000000000001455030217500164605ustar00rootroot00000000000000pyroute2-0.7.11/docs/_templates/.empty000066400000000000000000000000001455030217500176050ustar00rootroot00000000000000pyroute2-0.7.11/docs/_templates/layout.html000066400000000000000000000022021455030217500206570ustar00rootroot00000000000000{% extends '!layout.html' %} {%- macro c_relbar() %} {%- endmacro %} {% block relbar1 %} {{ c_relbar() }} {% endblock %} pyroute2-0.7.11/docs/aafigure.map000066400000000000000000000014221455030217500166040ustar00rootroot00000000000000source|../ndb_sources.html SQL database|../ndb_schema.html SQLite|../ndb_schema.html PostgreSQL|../ndb_schema.html NDB object:|../ndb_objects.html interface|../ndb_objects.html address|../ndb_objects.html route|../ndb_objects.html netlink events|../netlink.html inotify events|../netlink.html NDB() instance|../ndb.html View()|../ndb_objects.html .dump()|../ndb_objects.html .summary()|../ndb_objects.html RecordSet()|../ndb_reports.html filter()|../ndb_reports.html select()|../ndb_reports.html transform()|../ndb_reports.html join()|../ndb_reports.html Record()|../ndb_reports.html .create()|../ndb_objects.html .__getitem__()|../ndb_objects.html Interface()|../ndb_interfaces.html Address()|../ndb_addresses.html Route()|../ndb_routes.html Neighbour()|../ndb.html Rule()|../ndb.html pyroute2-0.7.11/docs/arch.rst000066400000000000000000000215701455030217500157770ustar00rootroot00000000000000.. sockets: Module architecture ^^^^^^^^^^^^^^^^^^^ Sockets ======= The idea behind the pyroute2 framework is pretty simple. The library provides socket objects, that have: * shortcuts to establish netlink connections * extra methods to run netlink queries * some magic to handle packet bursts * another magic to transparently mangle netlink messages In other sense any netlink socket is just an ordinary socket with `fileno()`, `recv()`, `sendto()` etc. Of course, one can use it in `poll()`. 
There is an inheritance diagram of Linux netlink sockets, provided by the library: .. inheritance-diagram:: pyroute2.iproute.linux.IPRoute pyroute2.iproute.linux.IPBatch pyroute2.iproute.linux.RawIPRoute pyroute2.iwutil.IW pyroute2.ipset.IPSet pyroute2.netlink.uevent.UeventSocket pyroute2.netlink.taskstats.TaskStats pyroute2.netlink.generic.wireguard.WireGuard pyroute2.netlink.generic.ethtool.NlEthtool pyroute2.netlink.ipq.IPQSocket pyroute2.netlink.nfnetlink.nfctsocket.NFCTSocket pyroute2.netlink.nfnetlink.nftsocket.NFTSocket pyroute2.netlink.event.EventSocket pyroute2.netlink.event.acpi_event.AcpiEventSocket pyroute2.netlink.event.dquot.DQuotSocket pyroute2.netlink.event.thermal.ThermalEventSocket pyroute2.netlink.devlink.DevlinkSocket pyroute2.netlink.diag.DiagSocket pyroute2.remote.RemoteIPRoute pyroute2.remote.transport.RemoteSocket pyroute2.remote.shell.ShellIPR pyroute2.nslink.nslink.NetNS :parts: 1 under the hood -------------- Let's assume we use an `IPRoute` object to get the interface list of the system:: from pyroute2 import IPRoute ipr = IPRoute() ipr.get_links() ipr.close() The `get_links()` method is provided by the `IPRouteMixin` class. It chooses the message to send (`ifinfmsg`), prepares required fields and passes it to the next layer:: result.extend(self.nlm_request(msg, RTM_GETLINK, msg_flags)) The `nlm_request()` is a method of the `NetlinkSocketBase` class. It wraps the pair request/response in one method. The request is done via `put()`, response comes with `get()`. These methods hide under the hood the asynchronous nature of the netlink protocol, where the response can come whenever -- the time and packet order are not guaranteed. But one can use the `sequence_number` field of a netlink message to match responses, and the pair `put()/get()` does it. cache thread ------------ Sometimes it is preferable to get incoming messages asap and parse them only when there is time for that. 
For that case the `NetlinkSocketBase` provides a possibility to start a dedicated cache thread, that will collect and queue incoming messages as they arrive. The thread doesn't affect the socket behaviour: it will behave exactly in the same way, the only difference is that `recv()` will return already cached in the userspace message. To start the thread, one should call `bind()` with `async_cache=True`:: ipr = IPRoute() ipr.bind(async_cache=True) ... # do some stuff ipr.close() message mangling ---------------- An interesting feature of the `IPRSocketBase` is a netlink proxy code, that allows to register callbacks for different message types. The callback API is simple. The callback must accept the message as a binary data, and must return a dictionary with two keys, `verdict` and `data`. The verdict can be: * for `sendto()`: `forward`, `return` or `error` * for `recv()`: `forward` or `error` E.g.:: msg = ifinfmsg(data) msg.decode() ... # mangle msg msg.reset() msg.encode() return {'verdict': 'forward', 'data': msg.buf.getvalue()} The `error` verdict raises an exception from `data`. The `forward` verdict causes the `data` to be passed. The `return` verdict is valid only in `sendto()` callbacks and means that the `data` should not be passed to the kernel, but instead it must be returned to the user. This magic allows the library to transparently support ovs, teamd, tuntap calls via netlink. The corresponding callbacks transparently route the call to an external utility or to `ioctl()` API. How to register callbacks, see `IPRSocketBase` init. The `_sproxy` serves `sendto()` mangling, the `_rproxy` serves the `recv()` mangling. Later this API can become public. Netlink messages ================ To handle the data going through the sockets, the library uses different message classes. 
To create a custom message type, one should inherit: * `nlmsg` to create a netlink message class * `genlmsg` to create generic netlink message class * `nla` to create a NLA class The messages hierarchy: .. inheritance-diagram:: pyroute2.netlink.rtnl.ndmsg.ndmsg pyroute2.netlink.rtnl.ndtmsg.ndtmsg pyroute2.netlink.rtnl.tcmsg.tcmsg pyroute2.netlink.rtnl.rtmsg.nlflags pyroute2.netlink.rtnl.rtmsg.rtmsg_base pyroute2.netlink.rtnl.rtmsg.rtmsg pyroute2.netlink.rtnl.rtmsg.nh pyroute2.netlink.rtnl.fibmsg.fibmsg pyroute2.netlink.rtnl.ifaddrmsg.ifaddrmsg pyroute2.netlink.rtnl.ifstatsmsg.ifstatsmsg pyroute2.netlink.rtnl.ifinfmsg.ifinfmsg pyroute2.netlink.rtnl.ifinfmsg.ifinfveth pyroute2.netlink.rtnl.iw_event.iw_event pyroute2.netlink.rtnl.nsidmsg.nsidmsg pyroute2.netlink.rtnl.nsinfmsg.nsinfmsg pyroute2.netlink.rtnl.rtgenmsg.rtgenmsg pyroute2.netlink.devlink.devlinkcmd pyroute2.netlink.diag.inet_addr_codec pyroute2.netlink.diag.inet_diag_req pyroute2.netlink.diag.inet_diag_msg pyroute2.netlink.diag.unix_diag_req pyroute2.netlink.diag.unix_diag_msg pyroute2.netlink.event.acpi_event.acpimsg pyroute2.netlink.event.dquot.dquotmsg pyroute2.netlink.event.thermal.thermal_msg pyroute2.netlink.taskstats.taskstatsmsg pyroute2.netlink.taskstats.tcmd pyroute2.netlink.generic.ethtool.ethtool_strset_msg pyroute2.netlink.generic.ethtool.ethtool_linkinfo_msg pyroute2.netlink.generic.ethtool.ethtool_linkmode_msg pyroute2.netlink.generic.ethtool.ethtool_linkstate_msg pyroute2.netlink.generic.ethtool.ethtool_wol_msg pyroute2.netlink.generic.wireguard.wgmsg pyroute2.netlink.ctrlmsg pyroute2.netlink.genlmsg pyroute2.netlink.nl80211.nl80211cmd pyroute2.netlink.nfnetlink.ipset.ipset_msg pyroute2.netlink.nfnetlink.nfgen_msg pyroute2.netlink.nfnetlink.nftsocket.nft_gen_msg pyroute2.netlink.nfnetlink.nftsocket.nft_chain_msg pyroute2.netlink.nfnetlink.nftsocket.nft_rule_msg pyroute2.netlink.nfnetlink.nftsocket.nft_set_msg pyroute2.netlink.nfnetlink.nftsocket.nft_table_msg 
pyroute2.netlink.nfnetlink.nfctsocket.nfct_stats pyroute2.netlink.nfnetlink.nfctsocket.nfct_stats_cpu pyroute2.netlink.nfnetlink.nfctsocket.nfct_msg pyroute2.netlink.ipq.ipq_mode_msg pyroute2.netlink.ipq.ipq_packet_msg pyroute2.netlink.ipq.ipq_verdict_msg pyroute2.netlink.uevent.ueventmsg :parts: 1 PF_ROUTE messages ================= PF_ROUTE socket is used to receive notifications from the BSD kernel. The PF_ROUTE messages: .. inheritance-diagram:: pyroute2.bsd.pf_route.freebsd.bsdmsg pyroute2.bsd.pf_route.freebsd.if_msg pyroute2.bsd.pf_route.freebsd.rt_msg_base pyroute2.bsd.pf_route.freebsd.ifa_msg_base pyroute2.bsd.pf_route.freebsd.ifma_msg_base pyroute2.bsd.pf_route.freebsd.if_announcemsg pyroute2.bsd.pf_route.rt_slot pyroute2.bsd.pf_route.rt_msg pyroute2.bsd.pf_route.ifa_msg pyroute2.bsd.pf_route.ifma_msg :parts: 1 IPDB ==== The `IPDB` module implements high-level logic to manage some of the system network settings. It is completely agnostic to the netlink object's nature, the only requirement is that the netlink transport must provide RTNL API. So, using proper mixin classes one can create a custom RTNL-compatible transport. E.g., this way `IPDB` can work over `NetNS` objects, providing the network management within some network namespace — while itself it runs in the main namespace. The `IPDB` architecture is not too complicated, but it implements some useful transaction magic, see `commit()` methods of the `Transactional` objects. .. 
inheritance-diagram:: pyroute2.ipdb.main.IPDB pyroute2.ipdb.interfaces.Interface pyroute2.ipdb.linkedset.LinkedSet pyroute2.ipdb.linkedset.IPaddrSet pyroute2.ipdb.routes.NextHopSet pyroute2.ipdb.routes.Via pyroute2.ipdb.routes.Encap pyroute2.ipdb.routes.Metrics pyroute2.ipdb.routes.BaseRoute pyroute2.ipdb.routes.Route pyroute2.ipdb.routes.MPLSRoute pyroute2.ipdb.routes.RoutingTable pyroute2.ipdb.routes.MPLSTable pyroute2.ipdb.routes.RoutingTableSet pyroute2.ipdb.rules.Rule pyroute2.ipdb.rules.RulesDict :parts: 1 Internet protocols ================== Beside of the netlink protocols, the library implements a limited set of supplementary internet protocol to play with. .. inheritance-diagram:: pyroute2.protocols.udpmsg pyroute2.protocols.ip4msg pyroute2.protocols.udp4_pseudo_header pyroute2.protocols.ethmsg pyroute2.dhcp.dhcp4msg.dhcp4msg :parts: 1 pyroute2-0.7.11/docs/conf.py000066400000000000000000000020011455030217500156130ustar00rootroot00000000000000import pyroute2 extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.inheritance_diagram', 'aafigure.sphinxext', 'code_include.extension', ] aafig_format = {'html': 'svg', 'man': None, '': None} inheritance_graph_attrs = {'rankdir': 'LR', 'ratio': 'auto'} source_suffix = '.rst' master_doc = 'index' project = u'pyroute2' copyright = u'Peter Saveliev and PyRoute2 team' release = pyroute2.__version__ exclude_patterns = ['_build'] pygments_style = 'sphinx' autodoc_member_order = 'bysource' html_theme = 'default' html_static_path = ['_static'] html_js_files = ['fixup.js'] html_css_files = ['custom.css'] htmlhelp_basename = 'pyroute2doc' templates_path = ['_templates'] man_pages = [ ( 'pyroute2-cli', 'pyroute2-cli', 'pyroute2 command line interface', ['Peter Saveliev'], 1, ), ( 'pyroute2-dhcp-client', 'pyroute2-dhcp-client', 'pyroute2 dhcp client', ['Peter Saveliev'], 1, ), ] pyroute2-0.7.11/docs/debug.rst000066400000000000000000000134401455030217500161450ustar00rootroot00000000000000.. 
debug: Netlink debug howto ------------------- Dump data ========= Either run the required command via `strace`, or attach to the running process with `strace -p`. Use `-s {int}` argument to make sure that all the messages are dumped. The `-x` argument instructs `strace` to produce output in the hex format that can be passed to the pyroute2 decoder:: $ strace -e trace=network -x -s 16384 ip ro socket(PF_NETLINK, SOCK_RAW|SOCK_CLOEXEC, NETLINK_ROUTE) = 3 setsockopt(3, SOL_SOCKET, SO_SNDBUF, [32768], 4) = 0 setsockopt(3, SOL_SOCKET, SO_RCVBUF, [1048576], 4) = 0 bind(3, {sa_family=AF_NETLINK, pid=0, groups=00000000}, 12) = 0 getsockname(3, {sa_family=AF_NETLINK, pid=28616, groups=00000000}, [12]) = 0 sendto(3, "\x28\x00\x00\x00\x1a\x00\x01\x03 [skip] ", 40, 0, NULL, 0) = 40 recvmsg(3, {msg_name(12)={sa_family=AF_NETLINK, pid=0, groups=00000000}, msg_iov(1)=[{"\x3c\x00\x00\x00\x18 [skip]", 16384}], msg_controllen=0, msg_flags=0}, 0) = 480 socket(PF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0) = 4 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 recvmsg(3, {msg_name(12)={sa_family=AF_NETLINK, pid=0, groups=00000000}, msg_iov(1)=[{"\x14\x00\x00\x00\x03 [skip]", 16384}], msg_controllen=0, msg_flags=0}, 0) = 20 +++ exited with 0 +++ Now you can copy `sendâ€Ļ()` and `recvâ€Ļ()` buffer strings to a file. Strace compatibility note ========================= Starting with version 4.13, `strace` parses Netlink message headers and displays them in their parsed form instead of displaying the whole buffer in its raw form. The rest of the buffer is still shown, but due to it being incomplete, the method mentioned above doesn't work anymore. For the time being, the easiest workaround is probably to use an older strace version as it only depends on libc6. 
Decode data =========== The decoder is not provided with rpm or pip packages, so you should have a local git repo of the project:: $ git clone $ cd pyroute2 Now run the decoder:: $ export PYTHONPATH=`pwd` $ python tests/decoder/decoder.py E.g. for the route dump in the file `rt.dump` the command line should be:: $ python tests/decoder/decoder.py \ pyroute2.netlink.rtnl.rtmsg.rtmsg \ rt.dump **Why should I specify the message class?** Why there is no marshalling in the decoder script? 'Cause it is intended to be used with different netlink protocols, not only RTNL, but also nl80211, nfnetlink etc. There is no common marshalling for all the netlink protocols. **How to specify the message class?** All the netlink protocols are defined under `pyroute2/netlink/`, e.g. `rtmsg` module is `pyroute2/netlink/rtnl/rtmsg.py`. Thereafter you should specify the class inside the module, since there can be several classes. In the `rtmsg` case the line will be `pyroute.netlink.rtnl.rtmsg.rtmsg` or, more friendly to the bash autocomplete, `pyroute2/netlink/rtnl/rtmsg.rtmsg`. Notice, that the class you have to specify with dot anyways. **What is the data file format?** Rules are as follows: * The data dump should be in a hex format. Two possible variants are: `\\x00\\x01\\x02\\x03` or `00:01:02:03`. * There can be several packets in the same file. They should be of the same type. * Spaces and line ends are ignored, so you can format the dump as you want. * The `#` symbol starts a comment until the end of the line. * The `#!` symbols start a comment until the end of the file. Example:: # ifinfmsg headers # # nlmsg header \x84\x00\x00\x00 # length \x10\x00 # type \x05\x06 # flags \x49\x61\x03\x55 # sequence number \x00\x00\x00\x00 # pid # RTNL header \x00\x00 # ifi_family \x00\x00 # ifi_type \x00\x00\x00\x00 # ifi_index \x00\x00\x00\x00 # ifi_flags \x00\x00\x00\x00 # ifi_change # ... 
Compile data ============ Starting with 0.4.1, the library provides `BatchSocket` class, that only compiles and collects requests instead of sending them to the kernel. E.g., it is used by `IPBatch`, that combines `BatchSocket` with `IPRouteMixin`, providing RTNL compiler:: $ python3 Python 3.4.3 (default, Mar 31 2016, 20:42:37) [GCC 5.3.1 20151207 (Red Hat 5.3.1-2)] on linux Type "help", "copyright", "credits" or "license" for more information. # import all the stuff >>> from pyroute2 import IPBatch >>> from pyroute2.common import hexdump # create the compiler >>> ipb = IPBatch() # compile requests into one buffer >>> ipb.link("add", index=550, kind="dummy", ifname="test") >>> ipb.link("set", index=550, state="up") >>> ipb.addr("add", index=550, address="10.0.0.2", mask=24) # inspect the buffer >>> hexdump(ipb.batch) '3c:00:00:00:10:00:05:06:00:00:00:00:a2:7c:00:00:00:00:00:00: 26:02:00:00:00:00:00:00:00:00:00:00:09:00:03:00:74:65:73:74: 00:00:00:00:10:00:12:00:0a:00:01:00:64:75:6d:6d:79:00:00:00: 20:00:00:00:13:00:05:06:00:00:00:00:a2:7c:00:00:00:00:00:00: 26:02:00:00:01:00:00:00:01:00:00:00:28:00:00:00:14:00:05:06: 00:00:00:00:a2:7c:00:00:02:18:00:00:26:02:00:00:08:00:01:00: 0a:00:00:02:08:00:02:00:0a:00:00:02' # reset the buffer >>> ipb.reset() Pls notice, that in Python2 you should use `hexdump(str(ipb.batch))` instead of `hexdump(ipb.batch)`. The data, compiled by `IPBatch` can be used either to run batch requests, when one `send()` call sends several messages at once, or to produce binary buffers to test your own netlink parsers. Or just to dump some data to be sent later and probably even on another host:: >>> ipr = IPRoute() >>> ipr.sendto(ipb.batch, (0, 0)) The compiler always produces requests with `sequence_number == 0`, so if there will be any responses, they can be handled as broadcasts. pyroute2-0.7.11/docs/event.rst000066400000000000000000000014021455030217500161730ustar00rootroot00000000000000.. 
_event: Generic netlink events protocols ================================ The only available method for the event sockets is `get()` -- it returns an iterator over broadcasted messages, following the generic pyroute2 API. Even though the event protocols provide one message per `recv()`. No manual `bind()` or `discovery()` required -- the event sockets run these methods authomatically. Please keep in mind that you have to consume all the incoming messages in time, otherwise a buffer overflow happens on the socket and the only way to fix that is to `close()` the failed socket and to open a new one. ACPI events ----------- .. automodule:: pyroute2.netlink.event.acpi_event Disk quota events ----------------- .. automodule:: pyroute2.netlink.event.dquot pyroute2-0.7.11/docs/index.rst000066400000000000000000000015011455030217500161610ustar00rootroot00000000000000.. pyroute2 documentation master file Pyroute2 netlink library ======================== General information ------------------- .. toctree:: :maxdepth: 2 general changelog report Usage ----- .. toctree:: :maxdepth: 2 usage iproute ndb wiset ipset netns wireguard event Howtos ------ .. toctree:: :maxdepth: 2 mpls debug Man pages --------- .. toctree:: :maxdepth: 1 pyroute2-cli pyroute2-dhcp-client Development ----------- .. toctree:: :maxdepth: 2 devcontribute arch parser netlink nlsocket Experimental ------------ .. toctree:: :maxdepth: 1 remote Deprecated ---------- .. toctree:: :maxdepth: 1 ipdb_toc Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` pyroute2-0.7.11/docs/ipdb.rst000066400000000000000000000000761455030217500157760ustar00rootroot00000000000000.. _ipdb: .. automodule:: pyroute2.ipdb.main :members: pyroute2-0.7.11/docs/ipdb_toc.rst000066400000000000000000000002731455030217500166420ustar00rootroot00000000000000.. ipdbtoc: IPDB module =========== .. warning:: The IPDB module is deprecated and obsoleted by NDB. Please consider using NDB instead. .. 
toctree:: :maxdepth: 2 ipdb pyroute2-0.7.11/docs/iproute.rst000066400000000000000000000015101455030217500165410ustar00rootroot00000000000000.. _iproute: IPRoute module ============== .. automodule:: pyroute2.iproute :members: BSD systems ----------- .. automodule:: pyroute2.iproute.bsd Windows systems --------------- .. automodule:: pyroute2.iproute.windows .. autoclass:: pyroute2.iproute.windows.IPRoute :members: Linux systems ------------- .. automodule:: pyroute2.iproute.linux :members: Queueing disciplines -------------------- .. automodule:: pyroute2.netlink.rtnl.tcmsg.sched_drr :members: .. automodule:: pyroute2.netlink.rtnl.tcmsg.sched_choke :members: .. automodule:: pyroute2.netlink.rtnl.tcmsg.sched_clsact :members: .. automodule:: pyroute2.netlink.rtnl.tcmsg.sched_hfsc :members: .. automodule:: pyroute2.netlink.rtnl.tcmsg.sched_htb :members: Filters ------- .. automodule:: pyroute2.netlink.rtnl.tcmsg.cls_u32 pyroute2-0.7.11/docs/ipset.rst000066400000000000000000000001251455030217500161770ustar00rootroot00000000000000.. ipset: IPSet module ============== .. automodule:: pyroute2.ipset :members: pyroute2-0.7.11/docs/mpls.rst000066400000000000000000000063571455030217500160430ustar00rootroot00000000000000.. _mpls: MPLS howto ---------- Short introduction into Linux MPLS. 
Requirements: * kernel >= 4.4 * modules: `mpls_router`, `mpls_iptunnel` * `$ sudo sysctl net.mpls.platform_labels=$x`, where `$x` -- number of labels * `pyroute2` >= 0.4.0 MPLS labels =========== Possible label formats:: # int "newdst": 20 # list of ints "newdst": [20] "newdst": [20, 30] # string "newdst": "20/30" # dict "newdst": {"label": 20} # list of dicts "newdst": [{"label": 20, "tc": 0, "bos": 0, "ttl": 16}, {"label": 30, "tc": 0, "bos": 1, "ttl": 16}] IPRoute ======= MPLS routes ~~~~~~~~~~~ Label swap:: from pyroute2 import IPRoute from pyroute2.common import AF_MPLS ipr = IPRoute() # get the `eth0` interface's index: idx = ipr.link_lookup(ifname="eth0")[0] # create the request req = {"family": AF_MPLS, "oif": idx, "dst": 20, "newdst": [30]} # set up the route ipr.route("add", **req) Please notice that "dst" can specify only one label, even being a list. Label push:: req = {"family": AF_MPLS, "oif": idx, "dst": 20, "newdst": [20, 30]} ipr.route("add", **req) One can set up also the `via` field:: from socket import AF_INET req = {"family": AF_MPLS, "oif": idx, "dst": 20, "newdst": [30], "via": {"family": AF_INET, "addr": "1.2.3.4"}} ipr.route("add", **req) MPLS lwtunnel ~~~~~~~~~~~~~ To inject IP packets into MPLS:: req = {"dst": "1.2.3.0/24", "oif": idx, "encap": {"type": "mpls", "labels": [202, 303]}} ipr.route("add", **req) NDB === .. note:: basic MPLS routes management in NDB since version 0.5.11 List MPLS routes:: >>> from pyroute2.common import AF_MPLS >>> ndb.routes.dump().filter(family=AF_MPLS) ('localhost', 0, 28, 20, 0, 0, 254, 4, 0, 1, 0, ... ('localhost', 0, 28, 20, 0, 0, 254, 4, 0, 1, 0, ... >>> ndb.routes.dump().filter(family=AF_MPLS).select('oif', 'dst', 'newdst') (40627, '[{"label": 16, "tc": 0, "bos": 1, "ttl": 0}]', '[{"label": 500, ... (40627, '[{"label": 40, "tc": 0, "bos": 1, "ttl": 0}]', '[{"label": 40, ... 
List lwtunnel routes:: >>> ndb.routes.dump().filter(lambda x: x.encap is not None) ('localhost', 0, 2, 24, 0, 0, 254, 4, 0, 1, 16, '10.255.145.0', ... ('localhost', 0, 2, 24, 0, 0, 254, 4, 0, 1, 0, '192.168.142.0', ... >>> ndb.routes.dump().filter(lambda x: x.encap is not None).select('dst', 'encap') ('10.255.145.0', '[{"label": 20, "tc": 0, "bos": 0, "ttl": 0}, ... ('192.168.142.0', '[{"label": 20, "tc": 0, "bos": 0, "ttl": 0}, ... Create MPLS routes:: >>> from pyroute2.common import AF_MPLS >>> ndb.routes.create(family=AF_MPLS, dst=128, # label oif=1, # output interface newdst=[128, 132]).commit() # label stack Create lwtunnel:: >>> ndb.routes.create(dst='192.168.145.0/24', gateway='192.168.140.5', encap={'type': 'mpls', 'labels': [128, 132]}).commit() pyroute2-0.7.11/docs/ndb.rst000066400000000000000000000002061455030217500156160ustar00rootroot00000000000000.. _ndb: NDB module intro ================ .. automodule:: pyroute2.ndb.main NDB reference ============= .. include:: ndb_toc.rst pyroute2-0.7.11/docs/ndb_addresses.rst000066400000000000000000000001601455030217500176520ustar00rootroot00000000000000.. ndbaddresses: IP addresses management ======================= .. automodule:: pyroute2.ndb.objects.address pyroute2-0.7.11/docs/ndb_auth.rst000066400000000000000000000017171455030217500166470ustar00rootroot00000000000000.. _ndbauth: Authorization plugins ===================== .. automodule:: pyroute2.ndb.auth_manager Usecase: OpenStack Keystone auth -------------------------------- Say we have a public service that provides access to NDB instance via HTTP, and authenticates users via Keystone. Then the auth flow could be: 1. Accept a connection from a client 2. Create custom auth manager object A 3. A.__init__() validates X-Auth-Token against Keystone (Authentication) 4. A.check() checks that X-Auth-Token is not expired (Authorization) 5. 
The auth result is being logged (Accounting) An example AuthManager with OpenStack APIv3 support you may find in the `/examples/ndb/` directory. .. literalinclude:: ../examples/ndb/keystone_auth.py :language: python :caption: keystone_auth.py :name: keystone_auth Usecase: RADIUS auth -------------------- .. literalinclude:: ../examples/ndb/radius_auth.py :language: python :caption: radius_auth.py :name: radius_auth pyroute2-0.7.11/docs/ndb_debug.rst000066400000000000000000000112711455030217500167700ustar00rootroot00000000000000.. _ndbdebug: Debug and logging ================= Logging ------- A simple way to set up stderr logging:: # to start logging on the DB init ndb = NDB(log='on') # ... or to start it in run time ndb.log('on') # ... the same as above, another syntax ndb.log.on # ... turn logging off ndb.log('off') # ... or ndb.log.off It is possible also to set up logging to a file or to a syslog server:: # ndb.log('file_name.log') # ndb.log('syslog://server:port') Fetching DB data ---------------- By default, NDB starts with an in-memory SQLite3 database. In order to perform post mortem analysis it may be more useful to start the DB with a file DB or a PostgreSQL as the backend. See more: :ref:`ndbschema` It is possible to dump all the DB data with `schema.export()`:: with NDB() as ndb: ndb.schema.export('stderr') # dump the DB to stderr ... ndb.schema.export('pr2_debug') # dump the DB to a file RTNL events ----------- All the loaded RTNL events may be stored in the DB. To turn that feature on, one should start NDB with the `debug` option:: ndb = NDB(rtnl_debug=True) The events may be exported with the same `schema.export()`. Unlike ordinary table, limited with the number of network objects in the system, the events log tables are not limited. Do not enable the events logging in production, it may exhaust all the memory. RTNL objects ------------ NDB creates RTNL objects on demand, it doesn't keep them all the time. 
References to created objects are linked to the `ndb..cache` set:: >>> ndb.interfaces.cache.keys() [(('target', u'localhost'), ('index', 2)), (('target', u'localhost'), ('index', 39615))] >>> [x['ifname'] for x in ndb.interfaces.cache.values()] [u'eth0', u't2'] Object states ------------- RTNL objects may be in several states: * invalid: the object does not exist in the system * system: the object exists both in the system and in NDB * setns: the existing object should be moved to another network namespace * remove: the existing object must be deleted from the system The state transitions are logged in the state log:: >>> from pyroute2 import NDB >>> ndb = NDB() >>> c = ndb.interfaces.create(ifname='t0', kind='dummy').commit() >>> c.state.events [ (1557752212.6703758, 'invalid'), (1557752212.6821117, 'system') ] The timestamps allow to correlate the state transitions with the NDB log and the RTNL events log, in the case it was enabled. Object snapshots ---------------- Before running any commit, NDB marks all the related records in the DB with a random value in the `f_tflags` DB field (`tflags` object field), and stores all the marked records in the snapshot tables. Shortly, the `commit()` is a `snapshot() + apply() + revert() if failed`:: >>> nic = ndb.interfaces['t0'] >>> nic['state'] 'down' >>> nic['state'] = 'up' >>> snapshot = nic.snapshot() >>> ndb.schema.snapshots { 'addresses_139736119707256': , 'neighbours_139736119707256': , 'routes_139736119707256': , 'nh_139736119707256': , 'p2p_139736119707256': , 'ifinfo_bridge_139736119707256': , 'ifinfo_bond_139736119707256': , 'ifinfo_vlan_139736119707256': , 'ifinfo_vxlan_139736119707256': , 'ifinfo_gre_139736119707256': , 'ifinfo_vrf_139736119707256': , 'ifinfo_vti_139736119707256': , 'ifinfo_vti6_139736119707256': , 'interfaces_139736119707256': } >>> nic.apply() ... >>> nic['state'] 'up' >>> snapshot.apply(rollback=True) ... 
>>> nic['state'] 'down' Or same:: >>> nic = ndb.interfaces['t0'] >>> nic['state'] 'down' >>> nic['state'] = 'up' >>> nic.commit() >>> nic['state'] 'up' >>> nic.rollback() >>> nic['state'] 'down' These snapshot tables are the objects' state before the changes were applied. pyroute2-0.7.11/docs/ndb_init.rst000066400000000000000000000076411455030217500166530ustar00rootroot00000000000000.. _ndbinit: NDB objects =========== Start ----- In the simplest case to start the DB is as easy as:: ndb = NDB() There are several debug options that may be useful: * `log=` -- controls the logging * `rtnl_debug=` -- create and use log tables to store RTNL events * `libc=` -- NDB doesn't use libc, but may pass it to RTNL sources * `sources={}` -- RTNL sources to use * `db_provider=` -- which DB backend to use * `db_spec=` -- this spec will be passed to the DB provider * `db_cleanup=` -- cleanup the DB upon exit * `auto_netns=` -- [experimental] discover and connect to netns Some options explained: log ~~~ The simplest is `log='on'`, it turns on stdio logging on the default level. To force the debug level, use `log='debug'`. More log alternatives: :ref:`ndbdebug` db_cleanup ~~~~~~~~~~ Default is `True`. Setting this to `False` forces NDB to leave the data in the connected database upon exit. This may have side effects on the next start, use it only for debug purposes. rtnl_debug ~~~~~~~~~~ This option tells NDB if it must create and use the log tables. Normally all the incoming events become aggregated, thus `RTM_NEWLINK` + `RTM_DELLINK` will result in zero records -- an interface was created, destroyed and removed from the database. But in the log tables all the records will be stored, so it is what it looks like -- the events log. The log tables are not used to create objects, they are not rotated. Use this option with caution. To review the event logs use SQL or `ndb.schema.export()` See also: :ref:`ndbdebug` sources ~~~~~~~ .. 
doctest:: :skipif: True >>> sources = [{'netns': 'test01'}, {'netns': 'test02'}, {'target': 'localhost', 'kind': 'local'}] >>> ndb = NDB(log='on', sources=sources) 2020-03-24 18:01:48,241 DEBUG pyroute2.ndb.139900805197264.sources.test01: init 2020-03-24 18:01:48,242 DEBUG pyroute2.ndb.139900805197264.sources.test01: starting the source 2020-03-24 18:01:48,242 DEBUG pyroute2.ndb.139900805197264.sources.test02: init 2020-03-24 18:01:48,243 DEBUG pyroute2.ndb.139900805197264.sources.test01: connecting 2020-03-24 18:01:48,248 DEBUG pyroute2.ndb.139900805197264.sources.test02: starting the source 2020-03-24 18:01:48,249 DEBUG pyroute2.ndb.139900805197264.sources.localhost: init 2020-03-24 18:01:48,250 DEBUG pyroute2.ndb.139900805197264.sources.test02: connecting 2020-03-24 18:01:48,256 DEBUG pyroute2.ndb.139900805197264.sources.localhost: starting the source 2020-03-24 18:01:48,259 DEBUG pyroute2.ndb.139900805197264.sources.localhost: connecting 2020-03-24 18:01:48,262 DEBUG pyroute2.ndb.139900805197264.sources.localhost: loading 2020-03-24 18:01:48,265 DEBUG pyroute2.ndb.139900805197264.sources.test01: loading 2020-03-24 18:01:48,278 DEBUG pyroute2.ndb.139900805197264.sources.test02: loading 2020-03-24 18:01:48,478 DEBUG pyroute2.ndb.139900805197264.sources.localhost: running 2020-03-24 18:01:48,499 DEBUG pyroute2.ndb.139900805197264.sources.test01: running 2020-03-24 18:01:48,537 DEBUG pyroute2.ndb.139900805197264.sources.test02: running The RTNL sources documentation: :ref:`ndbsources` db_provider, db_spec ~~~~~~~~~~~~~~~~~~~~ .. doctest:: :skipif: True >>> ndb_fs = NDB(db_provider='sqlite3', db_spec='test.db') ... $ echo 'select f_ifla_ifname from interfaces' | sqlite3 test.db lo enp0s31f6 wlp58s0 virbr0 virbr0-nic ... The database backend options: :ref:`ndbschema` Stop ---- In order to get all the pending calls finished and synchronized, it is a good idea to explicitly close and stop the DB:: ndb = NDB() ... 
ndb.close() NDB objects also support the context manager protocol:: with NDB() as ndb: ... ... # # ---> <--- here the NDB instance will be synchronized and stopped pyroute2-0.7.11/docs/ndb_interfaces.rst000066400000000000000000000001521455030217500200210ustar00rootroot00000000000000.. _ndbinterfaces: Network interfaces ================== .. automodule:: pyroute2.ndb.objects.interface pyroute2-0.7.11/docs/ndb_objects.rst000066400000000000000000000010431455030217500173270ustar00rootroot00000000000000.. ndbobjects: RTNL objects ============ .. automodule:: pyroute2.ndb.objects .. autoclass:: pyroute2.ndb.objects.RTNL_Object() .. autoproperty:: table .. autoproperty:: etable .. autoproperty:: key .. automethod:: apply .. automethod:: commit .. automethod:: complete_key .. automethod:: create(**spec) .. automethod:: exists .. automethod:: load_sql .. automethod:: load_value .. automethod:: set(key, value) .. automethod:: show(fmt) .. automethod:: snapshot .. automethod:: rollback pyroute2-0.7.11/docs/ndb_reports.rst000066400000000000000000000001541455030217500173760ustar00rootroot00000000000000.. _ndbreports: Record list filters =================== .. automodule:: pyroute2.ndb.report :members: pyroute2-0.7.11/docs/ndb_routes.rst000066400000000000000000000001401455030217500172140ustar00rootroot00000000000000.. _ndbroutes: Routes management ================= .. automodule:: pyroute2.ndb.objects.route pyroute2-0.7.11/docs/ndb_schema.rst000066400000000000000000000001071455030217500171360ustar00rootroot00000000000000.. _ndbschema: Database ======== .. automodule:: pyroute2.ndb.schema pyroute2-0.7.11/docs/ndb_sources.rst000066400000000000000000000001201455030217500173540ustar00rootroot00000000000000.. _ndbsources: RTNL sources ============ .. automodule:: pyroute2.ndb.source pyroute2-0.7.11/docs/ndb_toc.rst000066400000000000000000000003361455030217500164670ustar00rootroot00000000000000.. _ndb_toc: .. 
toctree:: :maxdepth: 2 ndb_init ndb_objects ndb_views ndb_transactions ndb_reports ndb_interfaces ndb_addresses ndb_routes ndb_schema ndb_sources ndb_debug ndb_auth pyroute2-0.7.11/docs/ndb_transactions.rst000066400000000000000000000001471455030217500204120ustar00rootroot00000000000000 .. ndbtransactions: Transactions ============ .. automodule:: pyroute2.ndb.transaction :members: pyroute2-0.7.11/docs/ndb_views.rst000066400000000000000000000001641455030217500170360ustar00rootroot00000000000000.. ndbviews: Views ===== .. automodule:: pyroute2.ndb.view .. autoclass:: pyroute2.ndb.view.View() :members: pyroute2-0.7.11/docs/netlink.rst000066400000000000000000000000561455030217500165220ustar00rootroot00000000000000.. netlink: .. automodule:: pyroute2.netlink pyroute2-0.7.11/docs/netns.rst000066400000000000000000000002731455030217500162060ustar00rootroot00000000000000.. _netns: NetNS management ================ .. automodule:: pyroute2.netns :members: .. automodule:: pyroute2.nslink :members: .. automodule:: pyroute2.NSPopen :members: pyroute2-0.7.11/docs/nlsocket.rst000066400000000000000000000001061455030217500166740ustar00rootroot00000000000000.. nlsocket: .. automodule:: pyroute2.netlink.nlsocket :members: pyroute2-0.7.11/docs/parser.rst000066400000000000000000000273151455030217500163610ustar00rootroot00000000000000.. parser: .. raw:: html Netlink parser data flow ======================== NetlinkSocketBase: receive the data ----------------------------------- When `NetlinkSocketBase` receives the data from a netlink socket, it can do it in two ways: 1. get data directly with `socket.recv()` or `socket.recv_into()` 2. 
run a buffer thread that receives the data asap and leaves in the `buffer_queue` to be consumed later by `recv()` or `recv_into()` `NetlinkSocketBase` implements these two receive methods, that choose the data source -- directly from the socket or from `buffer_queue` -- depending on the `buffer_thread` property: **pyroute2.netlink.nlsocket.NetlinkSocketBase** .. code-include:: :func:`pyroute2.netlink.nlsocket.NetlinkSocketBase.recv` :language: python .. code-include:: :func:`pyroute2.netlink.nlsocket.NetlinkSocketBase.recv_into` :language: python .. code-include:: :func:`pyroute2.netlink.nlsocket.NetlinkSocketBase.buffer_thread_routine` :language: python .. aafig:: :scale: 80 :textual: ` ` data flow struct marshal +--------+ +--------+ +------------+ | |--->| bits | | | | | | 32 |--->| length | 4 bytes, offset 0 | | +--------+ +------------+ | | | 16 |--->| type-> key | 2 bytes, offset 4 | | +--------+ +------------+ | | | 16 | | flags | 2 bytes, offset 6 | | +--------+ +------------+ | | | | | `sequence` | | | | 32 |--->| `number` | 4 bytes, offset 8 | | +--------+ +------------+ | | | | | | | 32 | pid (ignored by marshal) | | +--------+ | | | | | | | | payload (ignored by marshal) | | | | \ / +--------+ +--------+ ---+-------------------- | | | / `marshal.msg_map = {` | | | | key-> parser, | | +--------+ key-> parser, | | | | key-> parser, | | | \ `}` | v Marshal: get and run parsers ---------------------------- Marshal should choose a proper parser depending on the `key`, `flags` and `sequence_number`. By default it uses only `nlmsg->type` as the `key` and `nlmsg->flags`, and there are several ways to customize getting parsers. 1. Use custom `key_format`, `key_offset` and `key_mask`. The latter is used to partially match the key, while `key_format` and `key_offset` are used to `struct.unpack()` the key from the raw netlink data. 2. You can overload `Marshal.get_parser()` and implement your own way to get parsers. 
A parser should be a simple function that gets only `data`, `offset` and `length` as arguments, and returns one dict compatible message. .. aafig:: :scale: 80 :textual: ` ` | | | | | v `if marshal.key_format is not None:` `marshal.key_format`\ | `marshal.key_offset` +-- custom key | `marshal.key_mask` / `parser = marshal.get_parser(key, flags, sequence_number)` `msg = parser(data, offset, length)` | | | | | v **pyroute2.netlink.nlsocket.Marshal** .. code-include:: :func:`pyroute2.netlink.nlsocket.Marshal.parse` :language: python The message parser routine must accept `data, offset, length` as the arguments, and must return a valid `nlmsg` or `dict`, with the mandatory fields, see the spec below. The parser can also return `None` which tells the marshal to skip this message. The parser must parse data for one message. Mandatory message fields, expected by NetlinkSocketBase methods: .. code-block:: python { 'header': { 'type': int, 'flags': int, 'error': None or NetlinkError(), 'sequence_number': int, } } .. aafig:: :scale: 80 :textual: ` ` | | | v parsed msg +-------------------------------------------+ | header | | `{` | | `uint32 length,` | | `uint16 type,` | | `uint16 flags,` | | `uint32 sequence_number,` | | `uint32 pid,` | | `}` | +- - - - - - - - - - - - - - - - - - - - - -+ | data fields (optional) | | `{` | | `int field,` | | `int field,` | | `}` | | or | | `string field` | | | +- - - - - - - - - - - - - - - - - - - - - -+ | nla chain | | | | +-------------------------------+ | | | header | | | | `{` | | | | `uint16 length,` | | | | `uint16 type,` | | | | `}` | | | +- - - - - - - - - - - - - - - -+ | | | data fields (optional) | | | | | | | | ... 
| | | | | | | +- - - - - - - - - - - - - - - -+ | | | nla chain | | | | | | | | recursive | | | | | | | +-------------------------------+ | | | +-------------------------------------------+ Per-request parsers ------------------- Sometimes, it may be reasonable to handle a particular response with a specific parser rather than a generic one. An example is `IPRoute.get_default_routes()`, which could be slow on systems with huge amounts of routes. Instead of parsing every route record as `rtmsg`, this method assigns a specific parser to its request. The custom parser doesn't parse records blindly, but looks up only for default route records in the dump, and then parses only matched records with the standard routine: **pyroute2.iproute.linux.IPRoute** .. code-include:: :func:`pyroute2.iproute.linux.RTNL_API.get_default_routes` :language: python **pyroute2.iproute.parsers** .. code-include:: :func:`pyroute2.iproute.parsers.default_routes` :language: python To assign a custom parser to a request/response communication, you should know first `sequence_number`, be it allocated dynamically with `NetlinkSocketBase.addr_pool.alloc()` or assigned statically. Then you can create a record in `NetlinkSocketBase.seq_map`: .. code-block:: python # def my_parser(data, offset, length): ... return parsed_message msg_seq = nlsocket.addr_pool.alloc() msg = nlmsg() msg['header'] = { 'type': my_type, 'flags': NLM_F_REQUEST | NLM_F_ACK, 'sequence_number': msg_seq, } msg['data'] = my_data msg.encode() nlsocket.seq_map[msg_seq] = my_parser nlsocket.sendto(msg.data, (0, 0)) for reponse_message in nlsocket.get(msg_seq=msg_seq): handle(response_message) NetlinkSocketBase: pick correct messages ---------------------------------------- The netlink protocol is asynchronous, so responses to several requests may come simultaneously. Also the kernel may send broadcast messages that are not responses, and have `sequence_number == 0`. 
As the response *may* contain multiple messages, and *may* or *may not* be terminated by some specific type of message, the task of returning relevant messages from the flow is a bit complicated. Let's look at an example: .. aafig:: :scale: 80 :textual: +-----------+ +-----------+ | program | | kernel | +-----+-----+ +-----+-----+ | | | | | | random broadcast |<---------------| | | | | request seq 1 X | X--------------->X X X X X X X random broadcast X<---------------X X X X X X X `response seq 1` X<---------------X `flags: NLM_F_MULTI` X X X X X X random broadcast X<---------------X X X X X X X `response seq 1` X<---------------X `type: NLMSG_DONE` X | | | v v The message flow on the diagram features `sequence_number == 0` broadcasts and `sequence_number == 1` request and response packets. To complicate it even further you can run a request with `sequence_number == 2` before the final response with `sequence_number == 1` comes. To handle that, `NetlinkSocketBase.get()` buffers all the irrelevant messages, returns ones with only the requested `sequence_number`, and uses locks to wait on the resource. The current implementation is relatively complicated and will be changed in the future. **pyroute2.netlink.nlsocket.NetlinkSocketBase** .. code-include:: :func:`pyroute2.netlink.nlsocket.NetlinkSocketBase.get` :language: python pyroute2-0.7.11/docs/pyroute2-cli.rst000066400000000000000000000044711455030217500174210ustar00rootroot00000000000000pyroute2-cli ============ Synopsis -------- cat script_file | **pyroute2-cli** [options] **pyroute2-cli** [options] script_file Description ----------- **pyroute2-cli** is an interface towards pyroute2 NDB -- network database. It can provide both CLI and HTTP interfaces. Status ------ **pyroute2-cli** is a proof-of-concept for now, it has some auth framework, but no support for SSL. Don't hesitate to file feature requests and bugs on the project page. Options ------- .. program:: pyroute2-cli .. 
option:: -m Mode to use: C (cli, by default) or S (server) .. rubric:: **CLI mode options** .. option:: -c Command line to run .. option:: -r An rc file to load before the session .. option:: -s Load sources spec from a JSON file .. option:: -l Log spec .. rubric:: **Server mode options** .. option:: -a IP address to listen on .. option:: -p TCP port to listen on .. option:: -s Load sources spec from a JSON file .. option:: -l Log spec Examples -------- Running CLI: .. code-block:: bash # bring eth0 up and add an IP address pyroute2-cli -l debug -c "interfaces eth0 set { state up } => add_ip { 10.0.0.2/24 } => commit" # same via stdin + pipe: cat < interfaces eth0 > set { state up } > add_ip { 10.0.0.2/24 } > commit > EOF # run a script from a file script.pr2: interfaces eth0 set { state up } add_ip { 10.0.0.2/24 } commit pyroute2-cli -l debug script.pr2 The server mode: .. code-block:: bash # start the server pyroute2-cli -l debug -m S -a 127.0.0.1 -p 8080 # run a request # text/plain: send a text script to the server curl \ -H "Content-Type: text/plain" \ -d "neighbours summary | format json" \ http://localhost:8080/v1/ # application/json: send a script as a JSON data curl \ -H "Content-Type: application/json" \ -d '{"commands": ["neighbours summary | format json"]}' http://localhost:8080/v1/ curl \ -H "Content-Type: application/json" \ -d '{"commands": ["interfaces eth0", "set state down", "commit"]}' http://localhost:8080/v1/ pyroute2-0.7.11/docs/pyroute2-dhcp-client.rst000066400000000000000000000021531455030217500210370ustar00rootroot00000000000000pyroute2-dhcp-client ==================== Synopsis -------- **pyroute2-dhcp-client** Description ----------- **pyroute2-dhcp-client** is a way too simple DHCP client. The only option is the network interface to run on. The script prints the DHCP server response as JSON. Examples -------- .. 
code-block:: bash # pyroute2-dhcp-client eth0 { "op": 2, "htype": 1, "hlen": 6, "hops": 0, "xid": 17, "secs": 0, "flags": 0, "ciaddr": "0.0.0.0", "yiaddr": "172.16.1.105", "siaddr": "172.16.1.1", "giaddr": "0.0.0.0", "chaddr": "18:56:80:11:ff:a3", "sname": "", "file": "", "cookie": "63:82:53", "options": { "message_type": 5, "server_id": "172.16.1.1", "lease_time": 43200, "renewal_time": 21600, "rebinding_time": 37800, "subnet_mask": "255.255.255.0", "router": [ "172.16.1.1" ], "name_server": [ "172.16.1.1" ] } } pyroute2-0.7.11/docs/remote.rst000066400000000000000000000056261455030217500163610ustar00rootroot00000000000000.. _remote: RemoteIPRoute ------------- Caveats ======= .. warning:: The class implies a serious performance penalty. Please consider other options if you expect high loads of the netlink traffic. .. warning:: The class requires the mitogen library that should be installed separately: https://mitogen.readthedocs.io/en/latest/ .. warning:: The object of this class implicitly spawn child processes. Beware. Here are some reasons why this class is not used as a general class instead of specific IPRoute for local RTNL, and NetNS for local netns management: * The performance of the Python parser for the binary netlink protocol is not so good, but using such proxies makes it even worse. * Local IPRoute and NetNS access is the core functionality and must work with no additional libraries installed. Introduction ============ It is possible to run IPRoute instances remotely using the mitogen library. The remote node must have same python version installed, but no additional libraries are required there: all the code will be imported from the host where you start your script. The simplest case, run IPRoute on a remote Linux host via ssh (assume the keys are deployed):: from pyroute2 import RemoteIPRoute rip = RemoteIPRoute(protocol='ssh', hostname='test01', username='ci') rip.get_links() # ... 
Indirect access =============== Building mitogen proxy chains you can access nodes indirectly:: import mitogen.master from pyroute2 import RemoteIPRoute broker = mitogen.master.Broker() router = mitogen.master.Router(broker) # login to the gateway gw = router.ssh(hostname='test-gateway', username='ci') # login from the gateway to the target node host = router.ssh(via=gw, hostname='test01', username='ci') rip = RemoteIPRoute(router=router, context=host) rip.get_links() # ... Run with privileges =================== It requires the mitogen sudo proxy to run IPRoute with root permissions:: import mitogen.master from pyroute2 import RemoteIPRoute broker = mitogen.master.Broker() router = mitogen.master.Router(broker) host = router.ssh(hostname='test01', username='ci') sudo = router.sudo(via=host, username='root') rip = RemoteIPRoute(router=router, context=sudo) rip.link('add', ifname='br0', kind='bridge') # ... Remote network namespaces ========================= You also can access remote network namespaces with the same RemoteIPRoute object:: import mitogen.master from pyroute2 import RemoteIPRoute broker = mitogen.master.Broker() router = mitogen.master.Router(broker) host = router.ssh(hostname='test01', username='ci') sudo = router.sudo(via=host, username='root') rip = RemoteIPRoute(router=router, context=sudo, netns='test-netns') rip.link('add', ifname='br0', kind='bridge') # ... pyroute2-0.7.11/docs/usage.rst000066400000000000000000000054051455030217500161650ustar00rootroot00000000000000.. usage: Quickstart ========== Hello, world:: $ pip install pyroute2 $ cat example.py from pyroute2 import IPRoute with IPRoute() as ipr: print([x.get_attr('IFLA_IFNAME') for x in ipr.get_links()]) $ python example.py ['lo', 'p6p1', 'wlan0', 'virbr0', 'virbr0-nic'] Sockets ------- In the runtime pyroute2 socket objects behave as normal sockets. 
One can use them in the poll/select, one can call `recv()` and `sendmsg()`:: from pyroute2 import IPRoute # create RTNL socket ipr = IPRoute() # subscribe to broadcast messages ipr.bind() # wait for data (do not parse it) data = ipr.recv(65535) # parse received data messages = ipr.marshal.parse(data) # shortcut: recv() + parse() # # (under the hood is much more, but for # simplicity it's enough to say so) # messages = ipr.get() But pyroute2 objects have a lot of methods, written to handle specific tasks:: from pyroute2 import IPRoute # RTNL interface with IPRoute() as ipr: # get devices list ipr.get_links() # get addresses ipr.get_addr() Resource release ---------------- Do not forget to release resources and close sockets. Also keep in mind, that the real fd will be closed only when the Python GC will collect closed objects. Imports ------- The public API is exported by `pyroute2/__init__.py`. It is done so to provide a stable API that will not be affected by changes in the package layout. There may be significant layout changes between versions, but if a symbol is re-exported via `pyroute2/__init__.py`, it will be available with the same import signature. .. warning:: All other objects are also available for import, but they may change signatures in the next versions. E.g.:: # Import a pyroute2 class directly. In the next versions # the import signature can be changed, e.g., NetNS from # pyroute2.netns.nslink it can be moved somewhere else. # from pyroute2.netns.nslink import NetNS ns = NetNS('test') # Import the same class from root module. This signature # will stay the same, any layout change is reflected in # the root module. 
# from pyroute2 import NetNS ns = NetNS('test') Special cases ============= eventlet -------- The eventlet environment conflicts in some way with socket objects, and pyroute2 provides some workaround for that:: # import symbols # import eventlet from pyroute2 import NetNS from pyroute2.config.eventlet import eventlet_config # setup the environment eventlet.monkey_patch() eventlet_config() # run the code ns = NetNS('nsname') ns.get_routes() ... This may help, but not always. In general, the pyroute2 library is not eventlet-friendly. pyroute2-0.7.11/docs/wireguard.rst000066400000000000000000000001461455030217500170470ustar00rootroot00000000000000.. _wireguard: WireGuard module ================ .. automodule:: pyroute2.netlink.generic.wireguard pyroute2-0.7.11/docs/wiset.rst000066400000000000000000000001231455030217500162040ustar00rootroot00000000000000.. wiset: WiSet module ============ .. automodule:: pyroute2.wiset :members: pyroute2-0.7.11/examples/000077500000000000000000000000001455030217500152115ustar00rootroot00000000000000pyroute2-0.7.11/examples/README.md000066400000000000000000000000221455030217500164620ustar00rootroot00000000000000Examples ======== pyroute2-0.7.11/examples/devlink/000077500000000000000000000000001455030217500166455ustar00rootroot00000000000000pyroute2-0.7.11/examples/devlink/devlink_list.py000066400000000000000000000003401455030217500217030ustar00rootroot00000000000000from pyroute2 import DL dl = DL() for q in dl.get_dump(): print( '%s\t%s' % ( q.get_attr('DEVLINK_ATTR_BUS_NAME'), q.get_attr('DEVLINK_ATTR_DEV_NAME'), ) ) dl.close() pyroute2-0.7.11/examples/devlink/devlink_monitor.py000066400000000000000000000001201455030217500224130ustar00rootroot00000000000000from pyroute2.devlink import DL dl = DL(groups=~0) print(dl.get()) dl.close() pyroute2-0.7.11/examples/devlink/devlink_port_list.py000066400000000000000000000004341455030217500227530ustar00rootroot00000000000000from pyroute2 import DL dl = DL() for q in dl.get_port_dump(): print( 
'%s\t%s\t%u' % ( q.get_attr('DEVLINK_ATTR_BUS_NAME'), q.get_attr('DEVLINK_ATTR_DEV_NAME'), q.get_attr('DEVLINK_ATTR_PORT_INDEX'), ) ) dl.close() pyroute2-0.7.11/examples/ethtool/000077500000000000000000000000001455030217500166675ustar00rootroot00000000000000pyroute2-0.7.11/examples/ethtool/ethtool-ioctl_get_infos.py000066400000000000000000000016521455030217500240700ustar00rootroot00000000000000import sys from pyroute2.ethtool.ioctl import IoctlEthtool from pyroute2.ethtool.ioctl import NotSupportedError if len(sys.argv) != 2: raise Exception("USAGE: {0} IFNAME".format(sys.argv[0])) dev = IoctlEthtool(sys.argv[1]) print("=== Device cmd: ===") try: for name, value in dev.get_cmd().items(): print("\t{}: {}".format(name, value)) except NotSupportedError: print("Not supported by driver.\n") print("") print("=== Device feature: ===") for name, value, not_fixed, _, _ in dev.get_features(): value = "on" if value else "off" if not not_fixed: # I love double negations value += " [fixed]" print("\t{}: {}".format(name, value)) print("\n=== Device coalesce: ===") for name, value in dev.get_coalesce().items(): print("\t{}: {}".format(name, value)) print("\n=== Device statistics: ===") for name, value in dev.get_statistics(): print("\t{}: {}".format(name, value)) pyroute2-0.7.11/examples/ethtool/ethtool-netlink_get_infos.py000066400000000000000000000007601455030217500244210ustar00rootroot00000000000000import pprint import sys from pyroute2.netlink.generic.ethtool import NlEthtool if len(sys.argv) != 2: raise Exception("USAGE: {0} IFNAME".format(sys.argv[0])) IFNAME = sys.argv[1] eth = NlEthtool() print("kernel ok?:", eth.is_nlethtool_in_kernel()) pprint.pprint(eth.get_linkmode(IFNAME)) print("") pprint.pprint(eth.get_linkinfo(IFNAME)) print("") pprint.pprint(eth.get_stringset(IFNAME)) print("") pprint.pprint(eth.get_linkstate(IFNAME)) print("") pprint.pprint(eth.get_wol(IFNAME)) 
pyroute2-0.7.11/examples/ethtool/ethtool_get_infos.py000066400000000000000000000006071455030217500227570ustar00rootroot00000000000000import sys from pyroute2.ethtool import Ethtool if len(sys.argv) != 2: raise Exception("USAGE: {0} IFNAME".format(sys.argv[0])) ethtool = Ethtool() ifname = sys.argv[1] print(ethtool.get_link_mode(ifname)) print(ethtool.get_link_info(ifname)) print(ethtool.get_strings_set(ifname)) print(ethtool.get_wol(ifname)) print(ethtool.get_features(ifname)) print(ethtool.get_coalesce(ifname)) pyroute2-0.7.11/examples/generic/000077500000000000000000000000001455030217500166255ustar00rootroot00000000000000pyroute2-0.7.11/examples/generic/Makefile000066400000000000000000000002331455030217500202630ustar00rootroot00000000000000obj-m += netl.o all: make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules clean: make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean pyroute2-0.7.11/examples/generic/netl.c000066400000000000000000000067511455030217500177440ustar00rootroot00000000000000/* * Generic netlink sample -- kernel module * Use `make` to compile and `insmod` to load the module * * Sergiy Lozovsky * Peter V. Saveliev * * Requires kernel 4.10+ */ #include /* Needed by all modules */ #include /* Needed for KERN_INFO */ #include /* Needed for the macros */ #include /* attributes (variables): the index in this enum is used as a reference for the type, * userspace application has to indicate the corresponding type * the policy is used for security considerations */ enum { EXMPL_NLA_UNSPEC, EXMPL_NLA_DATA, EXMPL_NLA_LEN, __EXMPL_NLA_MAX, }; /* ... 
and the same for commands */ enum { EXMPL_CMD_UNSPEC, EXMPL_CMD_MSG, }; /* attribute policy: defines which attribute has which type (e.g int, char * etc) * possible values defined in net/netlink.h */ static struct nla_policy exmpl_genl_policy[__EXMPL_NLA_MAX] = { [EXMPL_NLA_DATA] = { .type = NLA_NUL_STRING }, [EXMPL_NLA_LEN] = { .type = NLA_U32 }, }; #define VERSION_NR 1 static struct genl_family exmpl_gnl_family; static int get_length(struct sk_buff *request, struct genl_info *info) { struct sk_buff *reply; char *buffer; void *msg_head; if (info == NULL) return -EINVAL; if (!info->attrs[EXMPL_NLA_DATA]) return -EINVAL; /* get the data */ buffer = nla_data(info->attrs[EXMPL_NLA_DATA]); /* send a message back*/ /* allocate some memory, since the size is not yet known use NLMSG_GOODSIZE*/ reply = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (reply == NULL) return -ENOMEM; /* start the message */ msg_head = genlmsg_put_reply(reply, info, &exmpl_gnl_family, 0, info->genlhdr->cmd); if (msg_head == NULL) { return -ENOMEM; } /* add a EXMPL_LEN attribute -- report the data length */ if (0 != nla_put_u32(reply, EXMPL_NLA_LEN, strlen(buffer))) return -EINVAL; /* finalize the message */ genlmsg_end(reply, msg_head); /* send the message back */ if (0 != genlmsg_reply(reply, info)) return -EINVAL; return 0; } /* commands: mapping between commands and actual functions*/ static const struct genl_ops exmpl_gnl_ops_echo[] = { { .cmd = EXMPL_CMD_MSG, .policy = exmpl_genl_policy, .doit = get_length, }, }; /* family definition */ static struct genl_family exmpl_gnl_family __ro_after_init = { .name = "EXMPL_GENL", //the name of this family, used by userspace application .version = VERSION_NR, //version number .maxattr = __EXMPL_NLA_MAX - 1, .module = THIS_MODULE, .ops = exmpl_gnl_ops_echo, .n_ops = ARRAY_SIZE(exmpl_gnl_ops_echo), }; static int __init exmpl_gnl_init(void) { int rc; rc = genl_register_family(&exmpl_gnl_family); if (rc != 0) { printk(KERN_INFO "rkmod: genl_register_family 
failed %d\n", rc); return 1; } printk(KERN_INFO "Generic netlink example loaded, protocol version %d\n", VERSION_NR); return 0; } static void __exit exmpl_gnl_exit(void) { int ret; /*unregister the family*/ ret = genl_unregister_family(&exmpl_gnl_family); if(ret !=0){ printk("unregister family %i\n",ret); } } module_init(exmpl_gnl_init); module_exit(exmpl_gnl_exit); MODULE_LICENSE("GPL"); pyroute2-0.7.11/examples/generic/netl.py000077500000000000000000000022341455030217500201450ustar00rootroot00000000000000#!/usr/bin/env python3 import traceback from pyroute2.netlink import NLM_F_REQUEST from pyroute2.netlink import genlmsg from pyroute2.netlink.generic import GenericNetlinkSocket RLINK_CMD_UNSPEC = 0 RLINK_CMD_REQ = 1 class rcmd(genlmsg): ''' Message class that will be used to communicate with the kernel module ''' nla_map = ( ('RLINK_ATTR_UNSPEC', 'none'), ('RLINK_ATTR_DATA', 'asciiz'), ('RLINK_ATTR_LEN', 'uint32'), ) class Rlink(GenericNetlinkSocket): def send_data(self, data): msg = rcmd() msg['cmd'] = RLINK_CMD_REQ msg['version'] = 1 msg['attrs'] = [('RLINK_ATTR_DATA', data)] ret = self.nlm_request(msg, self.prid, msg_flags=NLM_F_REQUEST)[0] return ret.get_attr('RLINK_ATTR_LEN') if __name__ == '__main__': try: # create protocol instance rlink = Rlink() rlink.bind('EXMPL_GENL', rcmd) # request a method print(rlink.send_data('x' * 65000)) except: # if there was an error, log it to the console traceback.print_exc() finally: # finally -- release the instance rlink.close() pyroute2-0.7.11/examples/ipq.py000066400000000000000000000006071455030217500163570ustar00rootroot00000000000000from pyroute2.common import hexdump from pyroute2 import IPQSocket from pyroute2.netlink.ipq import NF_ACCEPT from dpkt.ip import IP ip = IPQSocket() ip.bind() try: while True: msg = ip.get()[0] print("\n") print(hexdump(msg.raw)) print(repr(IP(msg['payload']))) ip.verdict(msg['packet_id'], NF_ACCEPT) except: pass finally: ip.release() 
pyroute2-0.7.11/examples/iproute/000077500000000000000000000000001455030217500167005ustar00rootroot00000000000000pyroute2-0.7.11/examples/iproute/ip_monitor.py000066400000000000000000000002731455030217500214330ustar00rootroot00000000000000''' Simplest example to monitor Netlink events with a Python script. ''' from pyroute2 import IPRSocket from pprint import pprint ip = IPRSocket() ip.bind() pprint(ip.get()) ip.close() pyroute2-0.7.11/examples/iproute/socketcan.py000066400000000000000000000011461455030217500212260ustar00rootroot00000000000000''' Simplest example to set CAN bitrate. ''' from pyroute2 import IPRoute with IPRoute() as ip_route: # loolkup can0 interface idx = ip_route.link_lookup(ifname='can0')[0] link = ip_route.link('get', index=idx) # bring can0 interface down. CAN settings can be set only # if the interface is down if 'state' in link[0] and link[0]['state'] == 'up': ip_route.link('set', index=idx, state='down') # set CAN birate ip_route.link('set', index=idx, kind='can', can_bittiming={'bitrate': 250000 }) # bring can0 interface up ip_route.link('set', index=idx, state='up') pyroute2-0.7.11/examples/ipset.py000066400000000000000000000027631455030217500167170ustar00rootroot00000000000000import socket from pyroute2.ipset import IPSet, PortRange, PortEntry ipset = IPSet() ipset.create("foo", stype="hash:ip") ipset.add("foo", "198.51.100.1", etype="ip") ipset.add("foo", "198.51.100.2", etype="ip") print(ipset.test("foo", "198.51.100.1")) # True print(ipset.test("foo", "198.51.100.10")) # False msg_list = ipset.list("foo") for msg in msg_list: for attr_data in msg.get_attr('IPSET_ATTR_ADT').get_attrs( 'IPSET_ATTR_DATA' ): for attr_ip_from in attr_data.get_attrs('IPSET_ATTR_IP_FROM'): for ipv4 in attr_ip_from.get_attrs('IPSET_ATTR_IPADDR_IPV4'): print("- " + ipv4) ipset.destroy("foo") ipset.close() ipset = IPSet() ipset.create("bar", stype="bitmap:port", bitmap_ports_range=(1000, 2000)) ipset.add("bar", 1001, etype="port") ipset.add("bar", 
PortRange(1500, 2000), etype="port") print(ipset.test("bar", 1600, etype="port")) # True print(ipset.test("bar", 2600, etype="port")) # False ipset.destroy("bar") ipset.close() ipset = IPSet() protocol_tcp = socket.getprotobyname("tcp") ipset.create("foobar", stype="hash:net,port") port_entry_http = PortEntry(80, protocol=protocol_tcp) ipset.add("foobar", ("198.51.100.0/24", port_entry_http), etype="net,port") print( ipset.test("foobar", ("198.51.100.1", port_entry_http), etype="ip,port") ) # True port_entry_https = PortEntry(443, protocol=protocol_tcp) print( ipset.test("foobar", ("198.51.100.1", port_entry_https), etype="ip,port") ) # False ipset.destroy("foobar") ipset.close() pyroute2-0.7.11/examples/kobject_uevent.py000066400000000000000000000001771455030217500205770ustar00rootroot00000000000000from pprint import pprint from pyroute2 import UeventSocket kus = UeventSocket() kus.bind() while True: pprint(kus.get()) pyroute2-0.7.11/examples/lab/000077500000000000000000000000001455030217500157475ustar00rootroot00000000000000pyroute2-0.7.11/examples/lab/README.rst000066400000000000000000000007161455030217500174420ustar00rootroot00000000000000The pyroute2 lab is a collection if interactive tutorials and code samples. The online version of the lab is here: https://lab.pyroute2.org/ To run the lab examples in the command line, you can use nox: .. code-block:: shell # init the environment from scratch, rebuild the project # and install all the dependencies: nox -e lab # reuse the environment, only build the sphinx project # and run the tests nox -e lab -r -- '{"reuse": true}' pyroute2-0.7.11/examples/lab/iproute_get_addr/000077500000000000000000000000001455030217500212675ustar00rootroot00000000000000pyroute2-0.7.11/examples/lab/iproute_get_addr/README.rst000066400000000000000000000002421455030217500227540ustar00rootroot00000000000000Create an `IPRoute` object and list addresses. 
Conditions: * create exactly on `IPRoute()` object * run `get_addr()` at least once * run `close()` exactly once pyroute2-0.7.11/examples/lab/iproute_get_addr/check.py000066400000000000000000000005031455030217500227140ustar00rootroot00000000000000from setup import lab from task import IPRoute if len(lab.registry) != 1: raise AssertionError('expected exactly one IPRoute instance') if not isinstance(lab.registry[0], IPRoute): raise AssertionError('expected IPRoute instance') ipr = lab.registry[0] ipr.get_addr.assert_called() ipr.close.assert_called_once() pyroute2-0.7.11/examples/lab/iproute_get_addr/setup.py000066400000000000000000000001001455030217500227700ustar00rootroot00000000000000from pyroute2 import lab lab.use_mock = True lab.registry = [] pyroute2-0.7.11/examples/lab/iproute_get_addr/task.py000066400000000000000000000001461455030217500226040ustar00rootroot00000000000000from pyroute2 import IPRoute ipr = IPRoute() for msg in ipr.get_addr(): print(msg) ipr.close() pyroute2-0.7.11/examples/lab/iproute_get_attr/000077500000000000000000000000001455030217500213275ustar00rootroot00000000000000pyroute2-0.7.11/examples/lab/iproute_get_attr/README.rst000066400000000000000000000005251455030217500230200ustar00rootroot00000000000000Access netlink message attributes. 
* `get_links()` returns an iterator over link objects * `msg.get('index')` returns `index` field just like `msg['index']` does * `msg.get('ifname')` returns `IFLA_IFNAME` value as a string * `msg.get('af_spec')` returns `IFLA_AF_SPEC` as a dict * `msg.get(('af_spec', 'af_inet', 'forwarding'))` as an int pyroute2-0.7.11/examples/lab/iproute_get_attr/check.py000066400000000000000000000003741455030217500227620ustar00rootroot00000000000000from setup import lab from task import IPRoute ipr = lab.registry[0] if not isinstance(ipr, IPRoute): raise AssertionError('expected IPRoute instance') if not ipr.close.called: print('\nWARNING: it is recommended to close IPRoute instances') pyroute2-0.7.11/examples/lab/iproute_get_attr/setup.py000066400000000000000000000001001455030217500230300ustar00rootroot00000000000000from pyroute2 import lab lab.use_mock = True lab.registry = [] pyroute2-0.7.11/examples/lab/iproute_get_attr/task.py000066400000000000000000000004641455030217500226470ustar00rootroot00000000000000from pyroute2 import IPRoute ipr = IPRoute() print('iterate network interfaces\n') for msg in ipr.get_links(): index = msg.get('index') ifname = msg.get('ifname') forwarding = msg.get(('af_spec', 'af_inet', 'forwarding')) print(f'{index}: {ifname}: forwarding = {forwarding}') ipr.close() pyroute2-0.7.11/examples/ndb/000077500000000000000000000000001455030217500157545ustar00rootroot00000000000000pyroute2-0.7.11/examples/ndb/create_bond.py000066400000000000000000000013601455030217500205730ustar00rootroot00000000000000from pyroute2 import NDB from pyroute2.common import uifname # create unique interface names p0 = uifname() p1 = uifname() bond = uifname() with NDB() as ndb: # The same scheme works for bridge interfaces too: you # can create a bridge interface and assign ports to it # just as below. 
ndb.interfaces.create(kind='dummy', ifname=p0).commit() ndb.interfaces.create(kind='dummy', ifname=p1).commit() with ndb.interfaces.create(kind='bond', ifname=bond) as i: # assign two interfaces i.add_port(p0) i.add_port(p0) # make an example more scary: add IPs i.add_ip('10.251.0.1/24') i.add_ip('10.251.0.2/24') for i in (p0, p1, bond): ndb.interfaces[i].remove().commit() pyroute2-0.7.11/examples/ndb/create_interface.py000066400000000000000000000012031455030217500216050ustar00rootroot00000000000000from pyroute2 import NDB from pyroute2.common import uifname with NDB() as ndb: # dummy, bridge and bond interfaces are created in the # same way # # uifname() function is used here only to generate a # unique name of the interface for the regression testing, # you can pick up any name # ifname = uifname() ( ndb.interfaces.create(kind='dummy', ifname=ifname, state='up') .set('state', 'up') .set('address', '00:11:22:33:44:55') .commit() ) print(ndb.interfaces[ifname].show('json')) ( ndb.interfaces[ifname] .remove() .commit() ) pyroute2-0.7.11/examples/ndb/create_vlan.py000066400000000000000000000012541455030217500206130ustar00rootroot00000000000000from pyroute2 import NDB from pyroute2.common import uifname # unique interface names vlan_host = uifname() vlan_interface = uifname() with NDB() as ndb: ( ndb.interfaces.create(ifname=vlan_host, kind='dummy') .set('state', 'up') .commit() ) ( ndb.interfaces.create( ifname=vlan_interface, kind='vlan', link=ndb.interfaces[vlan_host], vlan_id=101 ) .set('mtu', 1400) .set('state', 'up') .add_ip('10.251.0.1/24') .add_ip('10.251.0.2/24') .commit() ) for i in (vlan_interface, vlan_host): ndb.interfaces[i].remove().commit() pyroute2-0.7.11/examples/ndb/keystone_auth.py000066400000000000000000000067331455030217500212210ustar00rootroot00000000000000''' :test:argv:14080769fe05e1f8b837fb43ca0f0ba4 A simplest example of a custom AuthManager and its usage with `AuthProxy` objects. 
Here we authenticate the auth token against Keystone and allow any NDB operations until it is expired. One can get such token with a curl request:: $ cat request.json { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": "admin", "domain": { "name": "admin_domain" }, "password": "secret" } } }, "scope": { "project": { "id": "f0af12d451fb4bccbb38217e7f9afe9a" } } } } $ curl -i \ -H "Content-Type: application/json" \ -d "@request.json" \ http://keystone:5000/v3/auth/tokens `X-Subject-Token` header in the response will be the token we need. Say we get `14080769fe05e1f8b837fb43ca0f0ba4` as `X-Subject-Token`. Then you can run:: $ . openstack.rc # <-- your OpenStack APIv3 RC file $ export PYTHONPATH=`pwd` $ python3 examples/ndb/keystone_auth.py 14080769fe05e1f8b837fb43ca0f0ba4 Using this example you can implement services that export NDB via any RPC, e.g. HTTP, and use Keystone integration. Same scheme may be used for any other Auth API, be it RADIUS or like that. An example of a simple HTTP service you can find in /cli/pyroute2-cli. 
''' import os import sys import time from dateutil.parser import parse as isodate from keystoneauth1.identity import v3 from keystoneauth1 import session from keystoneclient.v3 import client as ksclient from keystoneclient.v3.tokens import TokenManager from pyroute2 import NDB class OSAuthManager(object): def __init__(self, token, log): # create a Keystone password object auth = v3.Password( auth_url=os.environ.get('OS_AUTH_URL'), username=os.environ.get('OS_USERNAME'), password=os.environ.get('OS_PASSWORD'), user_domain_name=(os.environ.get('OS_USER_DOMAIN_NAME')), project_id=os.environ.get('OS_PROJECT_ID'), ) # create a session object sess = session.Session(auth=auth) # create a token manager tmanager = TokenManager(ksclient.Client(session=sess)) # validate the token keystone_response = tmanager.validate(token) # init attrs self.log = log self.expire = isodate(keystone_response['expires_at']).timestamp() def check(self, obj, tag): # # totally ignore obj and tag, validate only token expiration # # problems to be solved before you use this code in production: # 1. access levels: read-only, read-write -- match tag # 2. 
how to deal with revoked tokens # if time.time() > self.expire: self.log.error('%s permission denied' % (tag,)) raise PermissionError('keystone token has been expired') self.log.info('%s permission granted' % (tag,)) return True with NDB(log='debug') as ndb: # create a utility log channel log = ndb.log.channel('main') # create an AuthManager-compatible object log.info('request keystone auth') am = OSAuthManager(sys.argv[1], ndb.log.channel('keystone')) log.info('keystone auth complete, expires %s' % am.expire) # create an auth proxy for this particular token ap = ndb.auth_proxy(am) # validate access via that proxy print(ap.interfaces['lo']) pyroute2-0.7.11/examples/ndb/radius_auth.py000066400000000000000000000042151455030217500206400ustar00rootroot00000000000000''' :test:argv:testing :test:argv:secret :test:environ:RADIUS_SERVER=127.0.0.1 :test:environ:RADIUS_SECRET=secret An example of using RADIUS authentication with NDB. In order to run the example you can setup a FreeRADIUS server:: # /etc/raddb/clients client test { ipaddr = 192.168.122.101 # IP addr of your client secret = s3cr3t } # /etc/raddb/users testing Cleartext-Password := "secret" Then setup your client:: # download RADIUS dictionaries $ export GITSERVER=https://raw.githubusercontent.com $ export DICTPATH=pyradius/pyrad/master/example $ wget $GITSERVER/$DICTPATH/dictionary $ wget $GITSERVER/$DICTPATH/dictionary.freeradius # setup the environment $ cat radius.rc export RADIUS_SERVER=192.168.122.1 export RADIUS_SECRET=s3cr3t export PYTHONPATH=`pwd` $ . 
radius.rc $ python3 examples/ndb/radius_auth.py testing secret ''' import os import sys from pyrad.client import Client from pyrad.dictionary import Dictionary import pyrad.packet from pyroute2 import NDB class RadiusAuthManager(object): def __init__(self, user, password, log): client = Client( server=os.environ.get('RADIUS_SERVER'), secret=os.environ.get('RADIUS_SECRET').encode('ascii'), dict=Dictionary('dictionary'), ) req = client.CreateAuthPacket( code=pyrad.packet.AccessRequest, User_Name=user ) req['User-Password'] = req.PwCrypt(password) reply = client.SendPacket(req) self.auth = reply.code self.log = log def check(self, obj, tag): # self.log.info('%s access' % (tag,)) return self.auth == pyrad.packet.AccessAccept with NDB(log='debug') as ndb: # create a utility log channel log = ndb.log.channel('main') # create an AuthManager-compatible object log.info('request radius auth') am = RadiusAuthManager(sys.argv[1], sys.argv[2], ndb.log.channel('radius')) log.info('radius auth complete') # create an auth proxy for these credentials ap = ndb.auth_proxy(am) # validate access via that proxy print(ap.interfaces['lo']) pyroute2-0.7.11/examples/nftables.py000077500000000000000000000006741455030217500173730ustar00rootroot00000000000000#!/usr/bin/env python3 from pyroute2.nftables.main import NFTables # nfgen_family 0 == inet def show_nftables(family: int = 0) -> None: nft = NFTables(nfgen_family=family) tables = nft.get_tables() chains = nft.get_chains() rules = nft.get_rules() print("Tables:") print(tables) print("\nChains:") print(chains) print("\nRules:") for rule in rules: print(rule, type(rule)) show_nftables(0) pyroute2-0.7.11/examples/nftables_sets.py000066400000000000000000000027621455030217500204260ustar00rootroot00000000000000import time from pyroute2.netlink.nfnetlink.nftsocket import NFPROTO_IPV4 from pyroute2.nftables.main import NFTables from pyroute2.nftables.main import NFTSetElem def test_ipv4_addr_set(): with NFTables(nfgen_family=NFPROTO_IPV4) as nft: 
nft.table("add", name="filter") my_set = nft.sets("add", table="filter", name="test0", key_type="ipv4_addr", comment="my test set", timeout=0) # With str nft.set_elems( "add", table="filter", set="test0", elements={"10.2.3.4", "10.4.3.2"}, ) # With NFTSet & NFTSetElem classes nft.set_elems( "add", set=my_set, elements={NFTSetElem(value="9.9.9.9", timeout=1000)}, ) try: assert {e.value for e in nft.set_elems("get", table="filter", set="test0")} == { "10.2.3.4", "10.4.3.2", "9.9.9.9", } assert nft.sets("get", table="filter", name="test0").comment == b"my test set" time.sleep(1.2) # timeout for elem 9.9.9.9 (1000ms) assert {e.value for e in nft.set_elems("get", table="filter", set="test0")} == { "10.2.3.4", "10.4.3.2", } finally: nft.sets("del", table="filter", name="test0") nft.table("del", name="filter") def main(): test_ipv4_addr_set() if __name__ == "__main__": main() pyroute2-0.7.11/examples/policy/000077500000000000000000000000001455030217500165105ustar00rootroot00000000000000pyroute2-0.7.11/examples/policy/policy.py000077500000000000000000000010311455030217500203570ustar00rootroot00000000000000#!/usr/bin/env python3 import traceback from pprint import pprint from pyroute2.netlink.generic import GenericNetlinkSocket if __name__ == '__main__': try: # create protocol instance genl = GenericNetlinkSocket(ext_ack=True) # extract policy msg = genl.policy('nlctrl') # dump policy information pprint(msg) except: # if there was an error, log it to the console traceback.print_exc() finally: # finally -- release the instance genl.close() pyroute2-0.7.11/examples/processes/000077500000000000000000000000001455030217500172175ustar00rootroot00000000000000pyroute2-0.7.11/examples/processes/pmonitor.py000066400000000000000000000006361455030217500214450ustar00rootroot00000000000000''' Monitor process exit ''' from pyroute2 import TaskStats from pyroute2.common import hexdump pmask = '' with open('/proc/cpuinfo', 'r') as f: for line in f.readlines(): if 
line.startswith('processor'): pmask += ',' + line.split()[2] pmask = pmask[1:] ts = TaskStats() ts.register_mask(pmask) msg = ts.get()[0] print(hexdump(msg.raw)) print(msg) ts.deregister_mask(pmask) ts.release() pyroute2-0.7.11/examples/processes/taskstats.py000066400000000000000000000003661455030217500216170ustar00rootroot00000000000000''' Simple taskstats sample. ''' import os from pyroute2 import TaskStats pid = os.getpid() ts = TaskStats() # bind is required in the case of generic netlink ts.bind() ret = ts.get_pid_stat(int(pid))[0] # parsed structure print(ret) ts.close() pyroute2-0.7.11/examples/pyroute2-cli/000077500000000000000000000000001455030217500175475ustar00rootroot00000000000000pyroute2-0.7.11/examples/pyroute2-cli/comments000066400000000000000000000004541455030217500213220ustar00rootroot00000000000000#!/usr/bin/env pyroute2-cli # ! Test mixed comments, both ! and # # interfaces # ... tail comments ! # ... indented comments ! create {ifname test01, kind dummy, address 00:11:22:33:44:55} commit ! test01 # show ! 
remove commit pyroute2-0.7.11/examples/pyroute2-cli/create_bridge000066400000000000000000000035151455030217500222550ustar00rootroot00000000000000#!/usr/bin/env pyroute2-cli # interfaces # # ports create ifname br0p0, kind dummy, state up | commit create ifname br0p1, kind dummy, state up | commit # # bridge create ifname br0, kind bridge, state up br_stp_state 1 br_forward_delay 1500 address 00:11:22:33:44:55 add_port br0p0 add_port br0p1 add_ip 10.251.0.1/24 add_ip 10.251.0.2/24 commit # # commas between properies are not required, if properties # are in pairs like { name0 value0 name1 value1 } routes create dst 10.100.0.0/24 gateway 10.251.0.10 | commit create dst 10.101.0.0/24 gateway 10.251.0.10 | commit # # the pipe operator | connects calls on the same object, # these two statements are equal: # # interfaces create { ifname test, kind dummy } | commit # # interfaces create { ifname test, kind dummy } # commit interfaces br0 remove | commit br0p0 remove | commit br0p1 remove | commit interfaces # # you can use more explicit syntax with properties # specified within braces # # => is a synonym of | , use any variant you like more # create { ifname br0p0, kind dummy, state up } => commit create { ifname br0p1, kind dummy, state up } => commit create { ifname br0, kind bridge } set { state up } set { br_stp_state 1 } set { br_forward_delay 1500 } set { address 00:11:22:33:44:55 } add_port { br0p0 } add_port { br0p1 } add_ip { address 10.251.0.1, prefixlen 24 } add_ip { address 10.251.0.2, prefixlen 24 } commit routes create { dst 10.100.0.0/24, gateway 10.251.0.10 } => commit create { dst 10.101.0.0/24, gateway 10.251.0.10 } => commit # # run cleanup interfaces br0 remove => commit br0p0 remove => commit br0p1 remove => commit pyroute2-0.7.11/examples/pyroute2-cli/create_dummy000066400000000000000000000013231455030217500221470ustar00rootroot00000000000000#!/usr/bin/env pyroute2-cli # # interfaces # create a dummy interface # # there the very minimal spec consists 
of ifname, # other properties may be set later # create {ifname test01} # # set properties kind dummy address 00:11:22:33:44:55 commit # # create addresses ... ipaddr create {address 192.168.15.67, prefixlen 24} commit create {address 192.168.15.68, prefixlen 24} commit # # and remove one of them 192.168.15.68/24 remove commit # # remove the interface remove commit pyroute2-0.7.11/examples/pyroute2-cli/dump_lo000066400000000000000000000001271455030217500211310ustar00rootroot00000000000000#!/usr/bin/env pyroute2-cli ! ! Just dump the loopback interface. ! interfaces lo show pyroute2-0.7.11/examples/wifi/000077500000000000000000000000001455030217500161475ustar00rootroot00000000000000pyroute2-0.7.11/examples/wifi/nl80211_interface_type.py000066400000000000000000000007201455030217500226060ustar00rootroot00000000000000import errno from pyroute2 import IW from pyroute2 import IPRoute from pyroute2.netlink.exceptions import NetlinkError # interface name to check ifname = 'lo' ip = IPRoute() iw = IW() index = ip.link_lookup(ifname=ifname)[0] try: iw.get_interface_by_ifindex(index) print("wireless interface") except NetlinkError as e: if e.code == errno.ENODEV: # 19 'No such device' print("not a wireless interface") finally: iw.close() ip.close() pyroute2-0.7.11/examples/wifi/nl80211_interfaces.py000066400000000000000000000006471455030217500217400ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # from pyroute2.iwutil import IW iw = IW() for q in iw.get_interfaces_dump(): phyname = 'phy%i' % int(q.get_attr('NL80211_ATTR_WIPHY')) print( '%s\t%s\t%s\t%s' % ( q.get_attr('NL80211_ATTR_IFINDEX'), phyname, q.get_attr('NL80211_ATTR_IFNAME'), q.get_attr('NL80211_ATTR_MAC'), ) ) iw.close() pyroute2-0.7.11/examples/wifi/nl80211_monitor.py000066400000000000000000000001611455030217500212730ustar00rootroot00000000000000from pyroute2 import IW # register IW to get all the messages iw = IW(groups=0xFFF) print(iw.get()) iw.close() 
pyroute2-0.7.11/examples/wifi/nl80211_scan_dump.py000066400000000000000000000047101455030217500215610ustar00rootroot00000000000000#!/usr/bin/env python3 import sys import logging from pyroute2 import IPRoute from pyroute2.iwutil import IW from pyroute2.netlink import NLM_F_REQUEST from pyroute2.netlink import NLM_F_DUMP from pyroute2.netlink.nl80211 import nl80211cmd from pyroute2.netlink.nl80211 import NL80211_NAMES logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger("scandump") logger.setLevel(level=logging.INFO) # interface name to dump scan results ifname = sys.argv[1] iw = IW() ip = IPRoute() ifindex = ip.link_lookup(ifname=ifname)[0] ip.close() # CMD_GET_SCAN doesn't require root privileges. # Can use 'nmcli device wifi' or 'nmcli d w' to trigger a scan which will fill # the scan results cache for ~30 seconds. # See also 'iw dev $yourdev scan dump' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_SCAN'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] scan_dump = iw.nlm_request( msg, msg_type=iw.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) for network in scan_dump: for attr in network['attrs']: if attr[0] == 'NL80211_ATTR_BSS': # handy debugging; see everything we captured for bss_attr in attr[1]['attrs']: logger.debug("bss attr=%r", bss_attr) bss = dict(attr[1]['attrs']) # NOTE: the contents of beacon and probe response frames may or # may not contain all these fields. Very likely there could be a # keyerror in the following code. Needs a bit more bulletproofing. 
# print like 'iw dev $dev scan dump" print("BSS {}".format(bss['NL80211_BSS_BSSID'])) print( "\tTSF: {0[VALUE]} ({0[TIME]})".format(bss['NL80211_BSS_TSF']) ) print("\tfreq: {}".format(bss['NL80211_BSS_FREQUENCY'])) print( "\tcapability: {}".format( bss['NL80211_BSS_CAPABILITY']['CAPABILITIES'] ) ) print( "\tsignal: {0[VALUE]} {0[UNITS]}".format( bss['NL80211_BSS_SIGNAL_MBM']['SIGNAL_STRENGTH'] ) ) print( "\tlast seen: {} ms ago".format(bss['NL80211_BSS_SEEN_MS_AGO']) ) ies = bss['NL80211_BSS_INFORMATION_ELEMENTS'] # Be VERY careful with the SSID! Can contain hostile input. print("\tSSID: {}".format(ies['SSID'].decode("utf8"))) # TODO more IE decodes iw.close() pyroute2-0.7.11/examples/wifi/nl80211_set_type.py000066400000000000000000000010571455030217500214450ustar00rootroot00000000000000import errno from pyroute2 import IW from pyroute2 import IPRoute from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.nl80211 import IFTYPE_NAMES # interface name to check ifname = 'wlx2' iftype = 'monitor' iw = IW() ip = IPRoute() index = ip.link_lookup(ifname=ifname)[0] try: print(f"Original type: '{iw.get_interface_type(index)}'") iw.set_interface_type(index, iftype) print(f"New state: '{iw.get_interface_type(index)}'") except NetlinkError as e: print(f"Exception : {e}") finally: iw.close() ip.close() pyroute2-0.7.11/lab/000077500000000000000000000000001455030217500141315ustar00rootroot00000000000000pyroute2-0.7.11/lab/Makefile000066400000000000000000000011721455030217500155720ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". 
help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) pyroute2-0.7.11/lab/_static/000077500000000000000000000000001455030217500155575ustar00rootroot00000000000000pyroute2-0.7.11/lab/_static/classic.css000066400000000000000000000000001455030217500177000ustar00rootroot00000000000000pyroute2-0.7.11/lab/_static/custom.css000066400000000000000000000121521455030217500176040ustar00rootroot00000000000000/* * * Sphinx stylesheet based on the default theme. * * :copyright: Copyright 2012 by Peter V. Saveliev * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: sans-serif; font-size: 100%; color: #000; margin: 0; padding: 0; } div.document { } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { } div.body { background-color: #ffffff; color: #000000; padding: 0 20px 30px 20px; max-width: 45em; min-width: 10em; padding: 2em; } div.footer { color: #ffffff; width: 100%; padding: 9px 0 9px 0; text-align: center; font-size: 75%; } div.footer a { color: #ffffff; text-decoration: underline; } div.related { background-color: #fafafa; line-height: 30px; border-top: 1px solid #c0c0c0; border-bottom: 1px solid #c0c0c0; } div.sphinxsidebar { display: none; } div.sphinxsidebar h3 { color: #355f7c; font-size: 1.2em; font-weight: normal; margin: 0; padding: 0; } div.sphinxsidebar h4 { color: #355f7c; font-size: 1.2em; font-weight: normal; margin: 5px 0 0 0; padding: 0; } div.sphinxsidebar p { color: #ffffff; } div.sphinxsidebar p.topless { margin: 5px 10px 10px 10px; } div.sphinxsidebar ul { margin: 10px; padding: 0; color: 
#ffffff; } div.sphinxsidebar a { } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } /* -- hyperlink styles ------------------------------------------------------ */ a { color: #355f7c; text-decoration: none; } a:visited { color: #355f7c; text-decoration: none; } a:hover { text-decoration: underline; } /* -- body styles ----------------------------------------------------------- */ div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-weight: normal; color: #20435c; border-bottom: 1px solid #ccc; margin: 20px -20px 10px -20px; padding: 3px 0 3px 10px; } div.body h1 { margin-top: 0; font-size: 200%; } div.body h2 { font-size: 160%; } div.body h3 { font-size: 140%; } div.body h4 { font-size: 120%; } div.body h5 { font-size: 110%; } div.body h6 { font-size: 100%; } a.headerlink { color: #c60f0f; font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } a.headerlink:hover { background-color: #c60f0f; color: white; } img.align-right { position: absolute; right: 50px; top: 50px; } div.body p, div.body dd, div.body li { text-align: justify; line-height: 130%; } div.admonition p.admonition-title + p { display: inline; } div.admonition p { margin-bottom: 5px; } div.admonition pre { margin-bottom: 5px; } div.admonition ul, div.admonition ol { margin-bottom: 5px; } p.first { margin: 2px; } dl.class, dl.exception { border-bottom: 1px dashed #ccc; } div.note { background-color: #eee; border: 1px solid #ccc; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.topic { background-color: #eee; } div.warning { background-color: #ffe4e4; border: 1px solid #f66; border-radius: 10px; } p.admonition-title { display: inline; } p.admonition-title:after { content: ":"; } cite { background-color: #ececec; color: #333333; font-family: monospace; font-style: normal; } pre { padding: 5px; color: #333333; line-height: 120%; border: 1px solid #ac9; border-left: none; border-right: none; 
border-radius: 10px; } tt { background-color: #ecf0f3; padding: 0 1px 0 1px; font-size: 0.95em; } th { background-color: #ede; } .warning tt { background: #efc2c2; } .note tt { background: #d6d6d6; } .viewcode-back { font-family: sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; } .highlight { background: #f9f9f9; } dl.method > dt { margin-bottom: 1em; padding: 1em; } dl.class > dt { margin-bottom: 1em; padding: 1em; } div.aafig-caption { width: 100%; } div.aafig-caption p { text-align: center; color: #a0a0a0; } .exercise { width: 100%; } .loading { background: #f0f0f0; color: #909090; width: 99%; } .loaded { background: #ffffff; color: #000000; width: 99%; } .button, .button-right { padding: 1em; width: 10em; border: solid 1px #000000; cursor: pointer; } .button-right { float: right; } section section { display: none; } .hidden { display: none; } #dmesg { border: dashed 1px #c0c0c0; display: block; } .pyroute2-log-record { display: block; color: #909090; margin: 0px; padding: 2px; padding-left: 1em; } .pyroute2-example-description { border: solid 1px #c0c0c0; background-color: #ffffe0; font-family: monospace; } .pyroute2-example-description::before { content: "Example:"; font-weight: bold; } pyroute2-0.7.11/lab/_static/fixup.js000066400000000000000000000003651455030217500172540ustar00rootroot00000000000000window.addEventListener("load", function() { Array.from( document.getElementsByTagName("img") ).map( function(img) { img.removeAttribute("width"); img.removeAttribute("height"); } ) }) pyroute2-0.7.11/lab/_static/lab.js000066400000000000000000000107271455030217500166620ustar00rootroot00000000000000 const pyroute2_lab_context = { log_buffer: [], log_size_max: 16, pyodide: null, python_namespace: null, python_loaded: false, bootstrap: ` import io import micropip import sys import pprint import builtins await micropip.install("${pyroute2_base_url}/${pyroute2_distfile}") from 
pyroute2.netlink import nlmsg def print(*argv, end='\\n'): for data in argv: if isinstance(data, nlmsg): pprint.pprint(data.dump()) elif isinstance(data, (str, int, float)): builtins.print(data, end='') else: pprint.pprint(data) builtins.print(' ', end='') builtins.print(end=end) `, exercise_pre: "sys.stdout = io.StringIO()", exercise_post: "result = sys.stdout.getvalue()", }; function pyroute2_log_record(argv) { let ctime = new Date(); pyroute2_lab_context.log_buffer.push([ctime, argv]); if (pyroute2_lab_context.log_buffer.length > pyroute2_lab_context.log_size_max) { pyroute2_lab_context.log_buffer.shift(); }; dmesg = document.getElementById("dmesg"); if (dmesg) { let log_output = ""; pyroute2_lab_context.log_buffer.map(function (x) { log_output += `${x[1]}`; }); dmesg.innerHTML = log_output; }; }; function pyroute2_escape_untrusted(data) { return data.replace(/[<>&'"]/g, function (x) { switch (x) { case '<': return '<'; case '>': return '>'; case '&': return '&'; case "'": return '''; case '"': return '"'; } }); } async function pyroute2_execute_example(name) { let setup = document.getElementById(name + "-setup").value; let task = document.getElementById(name + "-task").value; let check = document.getElementById(name + "-check").value; let data = ""; let namespace = { globals: pyroute2_lab_context.python_namespace }; let pyodide = pyroute2_lab_context.pyodide; if (!pyroute2_lab_context.python_loaded) { // if python is not loaded yet, wait a second... 
await new Promise(resolve => setTimeout(resolve, 1000)); // and try again await pyroute2_execute_example(name); return; } else { try { pyodide.runPython(pyroute2_lab_context.exercise_pre, namespace); pyodide.runPython(setup, namespace); pyodide.runPython(task, namespace); pyodide.runPython(check, namespace); pyodide.runPython(pyroute2_lab_context.exercise_post, namespace); data = pyroute2_lab_context.python_namespace.get("result"); } catch(exception) { data = `${exception}` }; }; // recode untrusted output data = pyroute2_escape_untrusted(data) document.getElementById(name + "-data").innerHTML = `
${data}
`; } function pyroute2_clear_example_output(name) { document.getElementById(name + "-data").innerHTML = ""; } async function pyroute2_lab_main() { if (!document.getElementById("dmesg")) { return; }; pyroute2_log_record("Booting the system, be patient"); pyroute2_log_record("Starting python"); let pyodide = null; let namespace = null; // try to load python try { pyodide = await loadPyodide(); namespace = pyodide.globals.get("dict")(); await pyodide.loadPackage("micropip"); await pyodide.runPythonAsync(pyroute2_lab_context.bootstrap, { globals: namespace }); } catch(exception) { pyroute2_log_record(`
${exception}
`); pyroute2_log_record("Please report this bug to the project bug tracker, and don't forget to specify your browser."); return; }; // setup global context pyroute2_lab_context.pyodide = pyodide; pyroute2_lab_context.python_namespace = namespace; // reset log pyroute2_lab_context.log_buffer.length = 0; pyroute2_lab_context.python_loaded = true; // make exercises visible Array.from( document.getElementsByTagName("section") ).map(function(x) { x.style['display'] = 'block'; }); // unlock code blocks Array.from( document.getElementsByClassName("loading") ).map(function(x) { x.removeAttribute("readonly"); x.className = "loaded"; }); pyroute2_log_record(`System loaded [ ${pyroute2_distfile} ]`); }; window.addEventListener("load", pyroute2_lab_main); pyroute2-0.7.11/lab/_templates/000077500000000000000000000000001455030217500162665ustar00rootroot00000000000000pyroute2-0.7.11/lab/_templates/conf.js000066400000000000000000000002451455030217500175520ustar00rootroot00000000000000const pyroute2_distfile = "{{ distfile }}"; let pyroute2_base_url = ""; if (window.hostname) { pyroute2_base_url = `${window.protocol}//${window.hostname}`; }; pyroute2-0.7.11/lab/_templates/form_template.html000066400000000000000000000012441455030217500220130ustar00rootroot00000000000000
{{ readme }}
pyroute2-0.7.11/lab/_templates/layout.html000066400000000000000000000024201455030217500204670ustar00rootroot00000000000000{% extends "!layout.html" %} {% block extrahead %} {% endblock %} {%- macro c_relbar() %} {%- endmacro %} {% block relbar1 %} {{ c_relbar() }} {% endblock %} pyroute2-0.7.11/lab/conf.py000066400000000000000000000006521455030217500154330ustar00rootroot00000000000000import pyroute2 source_suffix = '.rst' master_doc = 'index' project = 'lab.pyroute2' copyright = '2022, Peter Saveliev' author = 'Peter Saveliev' release = pyroute2.__version__ extensions = [ ] templates_path = ['_templates'] exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] html_theme = 'default' html_css_files = ['custom.css'] html_js_files = ['conf.js', 'lab.js', 'fixup.js'] html_static_path = ['_static'] pyroute2-0.7.11/lab/index.rst000066400000000000000000000005111455030217500157670ustar00rootroot00000000000000pyroute2 labs ============= Welcome to pyroute2 online labs, a collection of interactive tutorials. The work is in progress, and the collection grows, so stay tuned. .. toctree:: :maxdepth: 2 :caption: Contents: iproute Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` pyroute2-0.7.11/lab/iproute.rst000066400000000000000000000056361455030217500163640ustar00rootroot00000000000000IPRoute -- work with interfaces, addresses and routes ----------------------------------------------------- .. _dmesg: The lab requires JavaScript to be enabled, as it runs Python over JS. It may be also incompatible with your browser, so consider using FireFox, Chrome or like that. Create IPRoute and get network objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `IPRoute` objects provide kernel RTNL API, just as Linux `iproute2` utility does. The differences are that `IPRoute` provides a subset -- so your contributions are more than welcome, -- and `IPRoute` returns parsed netlink packets, only adding a few extra fields. .. 
note:: Netlink protocol and packets structure: https://docs.pyroute2.org/netlink.html More about netlink packets structure and API see below in this lab. Let's start with a simple exercise: create an `IPRoute` object and get IP addresses. Most of pyroute2 classes, that provide some netlink API, create a netlink socket and allocate some other resources. Because of the netlink protocol, there may be not more than 1024 netlink sockets opened at the same time per process ID, and it may be important to close objects if they're not in use. .. note:: `IPRoute` methods always return iterables, as a response from the kernel always is a series of `[1..x]` messages. To continue, run the exercise by pressing the `execute` button. .. raw:: html :file: iproute_get_addr.html Access messages data ~~~~~~~~~~~~~~~~~~~~ All the messages returned by `IPRoute` methods provide standard `nlmsg/nla` API. Every message is a recursive dict-like structure, with fields accessible via `__getitem__()`, and an optional NLA list accessible via `msg['attrs']`, as you can see in the exercise above. If a message or NLA has `value` field defined, this field is being returned by `getvalue()` method, otherwise `getvalue()` returns the message or NLA itself. This makes simple type NLA retrieval a bit more convenient. 
These are methods to get fields and NLA values: * `__getitem__('field')` -- return a field of this name * `.get('field')`, `get('NLA_TYPE')`, `.get(('NLA_TYPE', ..., 'field'))` -- the universal get method, see below * `.getvalue()` -- return the `value` field, if defined, otherwise the object itself * `.get_attr('NLA_TYPE')` -- get one NLA by the type; if there atr several NLA of the same type, get the first one in the list * `.get_attrs('NLA_NAME')` -- get a list of NLA of this type Some notes on `get()` method: * returns a field or NLA value * case insensitive for NLA types * if NLA has `prefix` defined, it allows type notation both with and without the prefix, thus `get('IFLA_IFNAME') == get('IFNAME') == get('ifname')` * the method first looks up for an NLA, and only then for a field of this name; if a message has both, like as `ndmsg` has a field `ifindex` and NLA type `NDA_IFINDEX`, then you can use `__getitem__()` and `getattr()` .. raw:: html :file: iproute_get_attr.html pyroute2-0.7.11/noxfile.py000066400000000000000000000230051455030217500154110ustar00rootroot00000000000000import getpass import json import os import subprocess import sys import nox nox.options.envdir = f'./.nox-{getpass.getuser()}' nox.options.reuse_existing_virtualenvs = False nox.options.sessions = [ 'linter', 'repo', 'unit', 'lab', 'neutron', 'integration', 'linux-3.6', 'linux-3.8', 'linux-3.10', 'linux-3.12', 'minimal', ] linux_kernel_modules = [ 'dummy', 'bonding', '8021q', 'mpls_router', 'mpls_iptunnel', 'l2tp_ip', 'l2tp_eth', 'l2tp_netlink', ] def add_session_config(func): '''Decorator to load the session config. Usage:: @nox.session @load_session_config def my_session_func(session, config): pass Command line usage:: nox -e my_session_name -- '{"option": value}' The session config must be a valid JSON dictionary of options. 
''' def wrapper(session): if session.posargs and len(session.posargs[0]) > 0: config = json.loads(session.posargs[0]) else: config = {} session.debug(f'session config: {config}') return func(session, config) wrapper.__name__ = func.__name__ wrapper.__doc__ = func.__doc__ wrapper.__has_user_config__ = True return wrapper def options(module, config): '''Return pytest options set.''' ret = [ 'python', '-m', 'pytest', '--basetemp', './log', '--exitfirst', '--verbose', '--junitxml=junit.xml', ] if config.get('fail_on_warnings'): ret.insert(1, 'error') ret.insert(1, '-W') if config.get('pdb'): ret.append('--pdb') if config.get('coverage'): ret.append('--cov-report=html') ret.append('--cov=pyroute2') if config.get('tests_prefix'): module = f'{config["tests_prefix"]}/{module}' if config.get('sub'): module = f'{module}/{config["sub"]}' ret.append(module) return ret def setup_linux(session): '''Setup a Linux system. Load all the modules, but ignore any errors: missing kernel API will be handled at the test module level. Same for sysctl. 
''' if sys.platform == 'linux' and getpass.getuser() == 'root': for module in linux_kernel_modules: session.run( 'modprobe', module, external=True, success_codes=[0, 1] ) session.run( 'sysctl', 'net.mpls.platform_labels=2048', external=True, success_codes=[0, 255], ) def setup_venv_minimal(session, config): if not config.get('reuse'): session.install('--upgrade', 'pip') session.install('build') session.install('twine') session.install('-r', 'requirements.dev.txt') session.install('-r', 'requirements.docs.txt') session.run('mv', '-f', 'setup.cfg', '.setup.cfg.orig', external=True) session.run( 'mv', '-f', 'pyroute2/__init__.py', '.init.py.orig', external=True ) session.run('cp', 'setup.minimal.cfg', 'setup.cfg', external=True) session.run( 'cp', 'pyroute2/minimal.py', 'pyroute2/__init__.py', external=True ) session.run('python', '-m', 'build') session.run('python', '-m', 'twine', 'check', 'dist/*') session.install('.') session.run('mv', '-f', '.setup.cfg.orig', 'setup.cfg', external=True) session.run( 'mv', '-f', '.init.py.orig', 'pyroute2/__init__.py', external=True ) session.run('rm', '-rf', 'build', external=True) tmpdir = os.path.abspath(session.create_tmp()) session.run('cp', '-a', 'lab', tmpdir, external=True) session.run('cp', '-a', 'tests', tmpdir, external=True) session.run('cp', '-a', 'examples', tmpdir, external=True) return tmpdir def setup_venv_common(session, flavour='dev'): session.install('--upgrade', 'pip') session.install('-r', f'requirements.{flavour}.txt') session.install('.') return os.path.abspath(session.create_tmp()) def setup_venv_dev(session): tmpdir = setup_venv_common(session) session.run('cp', '-a', 'tests', tmpdir, external=True) session.run('cp', '-a', 'examples', tmpdir, external=True) session.chdir(f'{tmpdir}/tests') return tmpdir def setup_venv_repo(session): tmpdir = setup_venv_common(session, 'repo') for item in ( ('tests', tmpdir), ('noxfile.py', tmpdir), ('VERSION', tmpdir), ('CHANGELOG.rst', tmpdir), ): session.run('cp', '-a', 
*item, external=True) git_ls_files = subprocess.run( ['git', 'ls-files', 'requirements*'], stdout=subprocess.PIPE ) files = [x.decode('utf-8') for x in git_ls_files.stdout.split()] for fname in files: session.run('cp', '-a', fname, tmpdir, external=True) session.chdir(tmpdir) return tmpdir def setup_venv_docs(session): tmpdir = setup_venv_common(session, 'docs') session.run('cp', '-a', 'docs', tmpdir, external=True) session.run('cp', '-a', 'examples', tmpdir, external=True) [ session.run('cp', src, dst, external=True) for (src, dst) in ( ('README.rst', f'{tmpdir}/docs/general.rst'), ('README.report.rst', f'{tmpdir}/docs/report.rst'), ('README.contribute.rst', f'{tmpdir}/docs/devcontribute.rst'), ('CHANGELOG.rst', f'{tmpdir}/docs/changelog.rst'), ) ] return tmpdir @nox.session(name='test-platform') def test_platform(session): '''Test platform capabilities. Requires root to run.''' setup_venv_common(session) session.run('pyroute2-test-platform') @nox.session def docs(session): '''Generate project docs.''' tmpdir = setup_venv_docs(session) cwd = os.path.abspath(os.getcwd()) # man pages session.chdir(f'{tmpdir}/docs/') session.run('make', 'man', 'SPHINXOPTS="-W"', external=True) session.run('cp', '-a', 'man', f'{cwd}/docs/', external=True) # html session.chdir(f'{tmpdir}/docs/') session.run('make', 'html', 'SPHINXOPTS="-W"', external=True) session.run('cp', '-a', 'html', f'{cwd}/docs/', external=True) session.run('make', 'doctest', external=True) session.chdir(cwd) session.run('bash', 'util/aafigure_mapper.sh', external=True) # session.log('8<---------------------------------------------------------') session.log('compiled docs:') session.log(f'html pages -> {cwd}/docs/html') session.log(f'man pages -> {cwd}/docs/man') @nox.session def linter(session): '''Run code checks and linters.''' session.install('pre-commit') session.run('pre-commit', 'run', '-a') @nox.session @add_session_config def unit(session, config): '''Run unit tests.''' setup_venv_dev(session) 
session.run(*options('test_unit', config)) @nox.session @add_session_config def integration(session, config): '''Run integration tests (lnst, kuryr, ...).''' setup_venv_dev(session) session.run(*options('test_integration', config)) @nox.session(python=['3.6', '3.8', '3.10', '3.12']) @add_session_config def linux(session, config): '''Run Linux functional tests. Requires root to run all the tests.''' setup_linux(session) workspace = setup_venv_dev(session) session.run( *options('test_linux', config), env={ 'WORKSPACE': workspace, 'SKIPDB': 'postgres', 'PYTHONPATH': f'{workspace}/tests/mocklib', }, ) @nox.session @add_session_config def minimal(session, config): '''Run tests on pyroute2.minimal package.''' tmpdir = setup_venv_minimal(session, config) session.chdir(f'{tmpdir}/tests') session.run(*options('test_minimal', config)) @nox.session @add_session_config def lab(session, config): '''Test lab code blocks.''' workspace = setup_venv_minimal(session, config) for fname in os.listdir('dist'): if fname.startswith('pyroute2.minimal') and fname.endswith('whl'): break session.run('python', 'util/make_lab_templates.py', fname, external=True) session.run('make', '-C', 'lab', 'html', external=True) session.run('cp', f'dist/{fname}', 'lab/_build/html/', external=True) # make tests session.chdir(f'{workspace}/tests') session.run(*options('test_lab', config), env={'WORKSPACE': workspace}) @nox.session @add_session_config def openbsd(session, config): '''Run OpenBSD tests. 
Requires OpenBSD >= 7.1''' setup_venv_dev(session) session.run(*options('test_openbsd', config)) @nox.session @add_session_config def windows(session, config): '''Rin Windows tests.''' setup_venv_dev(session) session.run(*options('test_windows', config)) @nox.session @add_session_config def neutron(session, config): '''Run Neutron integration tests.''' setup_venv_dev(session) session.run(*options('test_neutron', config)) @nox.session @add_session_config def repo(session, config): '''Run repo tests.''' setup_venv_repo(session) config['tests_prefix'] = 'tests' session.run(*options('test_repo', config)) @nox.session def build(session): '''Run package build.''' session.install('build') session.install('twine') session.run('python', '-m', 'build') session.run('python', '-m', 'twine', 'check', 'dist/*') @nox.session @add_session_config def build_minimal(session, config): '''Build the minimal package''' setup_venv_minimal(session, config) @nox.session @add_session_config def upload(session, config): '''Upload built packages''' session.install('twine') session.run('python', '-m', 'twine', 'upload', 'dist/*') pyroute2-0.7.11/pr2modules/000077500000000000000000000000001455030217500154675ustar00rootroot00000000000000pyroute2-0.7.11/pr2modules/__init__.py000066400000000000000000000010701455030217500175760ustar00rootroot00000000000000''' This namespace is here only to provide compatibility with 0.6.x It will be removed in 0.8.x ''' import sys import warnings # load pyroute2 entry points import pyroute2 # noqa: F401 warnings.warn( 'pr2modules namespace is deprecated, use pyroute2 instead', DeprecationWarning, ) # alias every `pyroute2` entry, in addition to the block above # # Bug-Url: https://github.com/svinota/pyroute2/issues/913 # for key, value in list(sys.modules.items()): if key.startswith("pyroute2."): sys.modules[key.replace("pyroute2", "pr2modules")] = value 
pyroute2-0.7.11/pyproject.toml000066400000000000000000000001321455030217500163030ustar00rootroot00000000000000[build-system] requires = ["setuptools", "wheel"] build-backend = "setuptools.build_meta" pyroute2-0.7.11/pyroute2/000077500000000000000000000000001455030217500151645ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/__init__.py000066400000000000000000000054741455030217500173070ustar00rootroot00000000000000## # # This module contains all the public symbols from the library. # ## # # Version # try: from pyroute2.config.version import __version__ except ImportError: __version__ = 'unknown' from pyroute2 import loader from pyroute2.cli.console import Console from pyroute2.cli.server import Server from pyroute2.conntrack import Conntrack, ConntrackEntry from pyroute2.devlink import DL from pyroute2.ethtool.ethtool import Ethtool from pyroute2.ipdb.exceptions import ( CommitException, CreateException, DeprecationException, PartialCommitException, ) from pyroute2.ipdb.main import IPDB from pyroute2.iproute import ChaoticIPRoute, IPBatch, IPRoute, RawIPRoute from pyroute2.iproute.ipmock import IPRoute as IPMock from pyroute2.ipset import IPSet from pyroute2.iwutil import IW from pyroute2.ndb.main import NDB from pyroute2.ndb.noipdb import NoIPDB from pyroute2.netlink.connector.cn_proc import ProcEventSocket from pyroute2.netlink.devlink import DevlinkSocket from pyroute2.netlink.diag import DiagSocket, ss2 from pyroute2.netlink.event.acpi_event import AcpiEventSocket from pyroute2.netlink.event.dquot import DQuotSocket from pyroute2.netlink.exceptions import ( ChaoticException, NetlinkDecodeError, NetlinkDumpInterrupted, NetlinkError, ) from pyroute2.netlink.generic import GenericNetlinkSocket from pyroute2.netlink.generic.l2tp import L2tp from pyroute2.netlink.generic.mptcp import MPTCP from pyroute2.netlink.generic.wireguard import WireGuard from pyroute2.netlink.ipq import IPQSocket from pyroute2.netlink.nfnetlink.nfctsocket import NFCTSocket from 
pyroute2.netlink.nfnetlink.nftsocket import NFTSocket from pyroute2.netlink.nl80211 import NL80211 from pyroute2.netlink.rtnl.iprsocket import IPRSocket from pyroute2.netlink.taskstats import TaskStats from pyroute2.netlink.uevent import UeventSocket from pyroute2.nslink.nslink import NetNS from pyroute2.nslink.nspopen import NSPopen from pyroute2.remote import RemoteIPRoute from pyroute2.remote.transport import RemoteSocket from pyroute2.wiset import WiSet modules = [ AcpiEventSocket, ChaoticException, ChaoticIPRoute, CommitException, Conntrack, ConntrackEntry, Console, CreateException, DeprecationException, DevlinkSocket, DiagSocket, DL, DQuotSocket, Ethtool, IPBatch, IPDB, IPMock, IPQSocket, IPRoute, IPRSocket, IPSet, IW, GenericNetlinkSocket, L2tp, MPTCP, NDB, NetlinkError, NetlinkDecodeError, NetlinkDumpInterrupted, NetNS, NFCTSocket, NFTSocket, NL80211, NoIPDB, NSPopen, PartialCommitException, ProcEventSocket, RawIPRoute, RemoteIPRoute, RemoteSocket, Server, ss2, TaskStats, UeventSocket, WireGuard, WiSet, ] loader.init() __all__ = [] __all__.extend(modules) pyroute2-0.7.11/pyroute2/arp.py000066400000000000000000000046521455030217500163270ustar00rootroot00000000000000from pyroute2.common import map_namespace # ARP protocol HARDWARE identifiers. 
ARPHRD_NETROM = 0 # from KA9Q: NET/ROM pseudo ARPHRD_ETHER = 1 # Ethernet 10Mbps ARPHRD_EETHER = 2 # Experimental Ethernet ARPHRD_AX25 = 3 # AX.25 Level 2 ARPHRD_PRONET = 4 # PROnet token ring ARPHRD_CHAOS = 5 # Chaosnet ARPHRD_IEEE802 = 6 # IEEE 802.2 Ethernet/TR/TB ARPHRD_ARCNET = 7 # ARCnet ARPHRD_APPLETLK = 8 # APPLEtalk ARPHRD_DLCI = 15 # Frame Relay DLCI ARPHRD_ATM = 19 # ATM ARPHRD_METRICOM = 23 # Metricom STRIP (new IANA id) ARPHRD_IEEE1394 = 24 # IEEE 1394 IPv4 - RFC 2734 ARPHRD_EUI64 = 27 # EUI-64 ARPHRD_INFINIBAND = 32 # InfiniBand # Dummy types for non ARP hardware ARPHRD_SLIP = 256 ARPHRD_CSLIP = 257 ARPHRD_SLIP6 = 258 ARPHRD_CSLIP6 = 259 ARPHRD_RSRVD = 260 # Notional KISS type ARPHRD_ADAPT = 264 ARPHRD_ROSE = 270 ARPHRD_X25 = 271 # CCITT X.25 ARPHRD_HWX25 = 272 # Boards with X.25 in firmware ARPHRD_PPP = 512 ARPHRD_CISCO = 513 # Cisco HDLC ARPHRD_HDLC = ARPHRD_CISCO ARPHRD_LAPB = 516 # LAPB ARPHRD_DDCMP = 517 # Digital's DDCMP protocol ARPHRD_RAWHDLC = 518 # Raw HDLC ARPHRD_TUNNEL = 768 # IPIP tunnel ARPHRD_TUNNEL6 = 769 # IP6IP6 tunnel ARPHRD_FRAD = 770 # Frame Relay Access Device ARPHRD_SKIP = 771 # SKIP vif ARPHRD_LOOPBACK = 772 # Loopback device ARPHRD_LOCALTLK = 773 # Localtalk device ARPHRD_FDDI = 774 # Fiber Distributed Data Interface ARPHRD_BIF = 775 # AP1000 BIF ARPHRD_SIT = 776 # sit0 device - IPv6-in-IPv4 ARPHRD_IPDDP = 777 # IP over DDP tunneller ARPHRD_IPGRE = 778 # GRE over IP ARPHRD_PIMREG = 779 # PIMSM register interface ARPHRD_HIPPI = 780 # High Performance Parallel Interface ARPHRD_ASH = 781 # Nexus 64Mbps Ash ARPHRD_ECONET = 782 # Acorn Econet ARPHRD_IRDA = 783 # Linux-IrDA # ARP works differently on different FC media .. 
so ARPHRD_FCPP = 784 # Point to point fibrechannel ARPHRD_FCAL = 785 # Fibrechannel arbitrated loop ARPHRD_FCPL = 786 # Fibrechannel public loop ARPHRD_FCFABRIC = 787 # Fibrechannel fabric # 787->799 reserved for fibrechannel media types ARPHRD_IEEE802_TR = 800 # Magic type ident for TR ARPHRD_IEEE80211 = 801 # IEEE 802.11 ARPHRD_IEEE80211_PRISM = 802 # IEEE 802.11 + Prism2 header ARPHRD_IEEE80211_RADIOTAP = 803 # IEEE 802.11 + radiotap header ARPHRD_MPLS_TUNNEL = 899 # MPLS Tunnel Interface ARPHRD_VOID = 0xFFFF # Void type, nothing is known ARPHRD_NONE = 0xFFFE # zero header length (ARPHRD_NAMES, ARPHRD_VALUES) = map_namespace("ARPHRD_", globals()) pyroute2-0.7.11/pyroute2/bsd/000077500000000000000000000000001455030217500157345ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/bsd/README000066400000000000000000000006271455030217500166210ustar00rootroot00000000000000BSD platform support ==================== BSD systems have PF_ROUTE -- a protocol similar to Netlink, but with very limited functionality. Still it is possible to use it in almost the same way one uses Netlink. Almost. This module is in the very early development stage. Ye warned. 
Example:: from pyroute2.bsd.rtmsocket import RTMSocket rs = RTMSocket() while True: print(rs.get()) pyroute2-0.7.11/pyroute2/bsd/__init__.py000066400000000000000000000000001455030217500200330ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/bsd/pf_route/000077500000000000000000000000001455030217500175575ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/bsd/pf_route/__init__.py000066400000000000000000000101321455030217500216650ustar00rootroot00000000000000import socket import struct from pyroute2 import config from pyroute2.common import hexdump from pyroute2.netlink import nlmsg_base if config.uname[0] == 'OpenBSD': from pyroute2.bsd.pf_route.openbsd import ( IFF_NAMES, IFF_VALUES, bsdmsg, if_announcemsg, if_msg, ifa_msg_base, ifma_msg_base, rt_msg_base, ) else: from pyroute2.bsd.pf_route.freebsd import ( IFF_NAMES, IFF_VALUES, bsdmsg, if_announcemsg, if_msg, ifa_msg_base, ifma_msg_base, rt_msg_base, ) RTAX_MAX = 8 class rt_slot(nlmsg_base): __slots__ = () header = (('length', 'B'), ('family', 'B')) class rt_msg(rt_msg_base): __slots__ = () force_mask = False class hex(rt_slot): def decode(self): rt_slot.decode(self) length = self['header']['length'] self['value'] = hexdump( self.data[self.offset + 2 : self.offset + length] ) class rt_slot_ifp(rt_slot): def decode(self): rt_slot.decode(self) # # Structure # 0 1 2 3 4 5 6 7 # |-------+-------+-------+-------|-------+-------+-------+-------| # | len | fam | ifindex | ? | nlen | padding? | # |-------+-------+-------+-------|-------+-------+-------+-------| # | ... # | ... # # len -- sockaddr len # fam -- sockaddr family # ifindex -- interface index # ? -- no idea, probably again some sockaddr related info? # nlen -- device name length # padding? 
-- probably structure alignment # (self['index'], _, name_length) = struct.unpack( 'HBB', self.data[self.offset + 2 : self.offset + 6] ) self['ifname'] = self.data[ self.offset + 8 : self.offset + 8 + name_length ] class rt_slot_addr(rt_slot): def decode(self): alen = {socket.AF_INET: 4, socket.AF_INET6: 16} rt_slot.decode(self) # # Yksinkertainen: only the sockaddr family (one byte) and the # network address. # # But for netmask it's completely screwed up. E.g.: # # ifconfig disc2 10.0.0.1 255.255.255.0 up # --> # ... NETMASK: 38:12:00:00:ff:00:00:00:00:00:00:... # # Why?! # family = self['header']['family'] length = self['header']['length'] if family in (socket.AF_INET, socket.AF_INET6): addrlen = alen.get(family, 0) data = self.data[self.offset + 4 : self.offset + 4 + addrlen] self['address'] = socket.inet_ntop(family, data) else: # FreeBSD and OpenBSD use different approaches # FreeBSD: family == 0x12 # OpenBSD: family == 0x0 if self.parent.force_mask and family in (0x0, 0x12): data = self.data[self.offset + 4 : self.offset + 8] data = data + b'\0' * (4 - len(data)) self['address'] = socket.inet_ntop(socket.AF_INET, data) else: self['raw'] = self.data[self.offset : self.offset + length] def decode(self): bsdmsg.decode(self) offset = self.sockaddr_offset for i in range(RTAX_MAX): if self['rtm_addrs'] & (1 << i): handler = getattr(self, self.ifa_slots[i][1]) slot = handler(self.data[offset:], parent=self) slot.decode() offset += slot['header']['length'] self[self.ifa_slots[i][0]] = slot class ifa_msg(ifa_msg_base, rt_msg): force_mask = True class ifma_msg(ifma_msg_base, rt_msg): pass __all__ = ( bsdmsg, if_msg, rt_msg, ifa_msg, ifma_msg, if_announcemsg, IFF_NAMES, IFF_VALUES, ) pyroute2-0.7.11/pyroute2/bsd/pf_route/freebsd.py000066400000000000000000000063061455030217500215500ustar00rootroot00000000000000from pyroute2.common import map_namespace from pyroute2.netlink import nlmsg_base IFNAMSIZ = 16 IFF_UP = 0x1 IFF_BROADCAST = 0x2 IFF_DEBUG = 0x4 IFF_LOOPBACK = 
0x8 IFF_POINTOPOINT = 0x10 IFF_DRV_RUNNING = 0x40 IFF_NOARP = 0x80 IFF_PROMISC = 0x100 IFF_ALLMULTI = 0x200 IFF_DRV_OACTIVE = 0x400 IFF_SIMPLEX = 0x800 IFF_LINK0 = 0x1000 IFF_LINK1 = 0x2000 IFF_LINK2 = 0x4000 IFF_MULTICAST = 0x8000 IFF_CANTCONFIG = 0x10000 IFF_PPROMISC = 0x20000 IFF_MONITOR = 0x40000 IFF_STATICARP = 0x80000 IFF_DYING = 0x200000 IFF_RENAMING = 0x400000 IFF_NOGROUP = 0x800000 (IFF_NAMES, IFF_VALUES) = map_namespace('IFF', globals()) class bsdmsg(nlmsg_base): __slots__ = () header = (('length', 'H'), ('version', 'B'), ('type', 'B')) class if_msg(bsdmsg): __slots__ = () fields = ( ('ifm_addrs', 'i'), ('ifm_flags', 'i'), ('ifm_index', 'H'), ('ifi_type', 'B'), ('ifi_physical', 'B'), ('ifi_addrlen', 'B'), ('ifi_hdrlen', 'B'), ('ifi_link_state', 'B'), ('ifi_vhid', 'B'), ('ifi_datalen', 'H'), ('ifi_mtu', 'I'), ('ifi_metric', 'I'), ('ifi_baudrate', 'Q'), ('ifi_ipackets', 'Q'), ('ifi_ierrors', 'Q'), ('ifi_opackets', 'Q'), ('ifi_oerrors', 'Q'), ('ifi_collisions', 'Q'), ('ifi_ibytes', 'Q'), ('ifi_obytes', 'Q'), ('ifi_imcasts', 'Q'), ('ifi_omcasts', 'Q'), ('ifi_iqdrops', 'Q'), ('ifi_oqdrops', 'Q'), ('ifi_noproto', 'Q'), ('ifi_hwassist', 'Q'), ('ifu_tt', 'Q'), ('ifu_tv1', 'Q'), ('ifu_tv2', 'Q'), ) class rt_msg_base(bsdmsg): __slots__ = () fields = ( ('rtm_index', 'I'), ('rtm_flags', 'i'), ('rtm_addrs', 'i'), ('rtm_pid', 'I'), ('rtm_seq', 'i'), ('rtm_errno', 'i'), ('rtm_fmask', 'i'), ('rtm_inits', 'I'), ('rmx_locks', 'I'), ('rmx_mtu', 'I'), ('rmx_hopcount', 'I'), ('rmx_expire', 'I'), ('rmx_recvpipe', 'I'), ('rmx_sendpipe', 'I'), ('rmx_ssthresh', 'I'), ('rmx_rtt', 'I'), ('rmx_rttvar', 'I'), ('rmx_pksent', 'I'), ('rmx_weight', 'I'), ('rmx_filler', '3I'), ) sockaddr_offset = 92 ifa_slots = { 0: ('DST', 'rt_slot_addr'), 1: ('GATEWAY', 'rt_slot_addr'), 2: ('NETMASK', 'rt_slot_addr'), 3: ('GENMASK', 'hex'), 4: ('IFP', 'rt_slot_ifp'), 5: ('IFA', 'rt_slot_addr'), 6: ('AUTHOR', 'hex'), 7: ('BRD', 'rt_slot_addr'), } class ifa_msg_base(bsdmsg): __slots__ = () fields = ( 
('rtm_addrs', 'i'), ('ifam_flags', 'i'), ('ifam_index', 'H'), ('ifam_metric', 'i'), ) sockaddr_offset = 20 class ifma_msg_base(bsdmsg): __slots__ = () fields = (('rtm_addrs', 'i'), ('ifmam_flags', 'i'), ('ifmam_index', 'H')) sockaddr_offset = 16 class if_announcemsg(bsdmsg): __slots__ = () fields = ( ('ifan_index', 'H'), ('ifan_name', '%is' % IFNAMSIZ), ('ifan_what', 'H'), ) def decode(self): bsdmsg.decode(self) self['ifan_name'] = self['ifan_name'].strip(b'\0').decode('ascii') pyroute2-0.7.11/pyroute2/bsd/pf_route/openbsd.py000066400000000000000000000070271455030217500215710ustar00rootroot00000000000000from pyroute2.common import map_namespace from pyroute2.netlink import nlmsg_base IFNAMSIZ = 16 IFF_UP = 0x1 IFF_BROADCAST = 0x2 IFF_DEBUG = 0x4 IFF_LOOPBACK = 0x8 IFF_POINTOPOINT = 0x10 IFF_STATICARP = 0x20 IFF_RUNNING = 0x40 IFF_NOARP = 0x80 IFF_PROMISC = 0x100 IFF_ALLMULTI = 0x200 IFF_OACTIVE = 0x400 IFF_SIMPLEX = 0x800 IFF_LINK0 = 0x1000 IFF_LINK1 = 0x2000 IFF_LINK2 = 0x4000 IFF_MULTICAST = 0x8000 (IFF_NAMES, IFF_VALUES) = map_namespace('IFF', globals()) class bsdmsg(nlmsg_base): __slots__ = () header = ( ('length', 'H'), ('version', 'B'), ('type', 'B'), ('hdrlen', 'H'), ) class if_msg(bsdmsg): __slots__ = () fields = ( ('ifm_index', 'H'), ('ifm_tableid', 'H'), ('ifm_pad1', 'B'), ('ifm_pad2', 'B'), ('ifm_addrs', 'i'), ('ifm_flags', 'i'), ('ifm_xflags', 'i'), ('ifi_type', 'B'), ('ifi_addrlen', 'B'), ('ifi_hdrlen', 'B'), ('ifi_link_state', 'B'), ('ifi_mtu', 'I'), ('ifi_metric', 'I'), ('ifi_rdomain', 'I'), ('ifi_baudrate', 'Q'), ('ifi_ipackets', 'Q'), ('ifi_ierrors', 'Q'), ('ifi_opackets', 'Q'), ('ifi_oerrors', 'Q'), ('ifi_collisions', 'Q'), ('ifi_ibytes', 'Q'), ('ifi_obytes', 'Q'), ('ifi_imcasts', 'Q'), ('ifi_omcasts', 'Q'), ('ifi_iqdrops', 'Q'), ('ifi_oqdrops', 'Q'), ('ifi_noproto', 'Q'), ('ifi_capabilities', 'I'), ('ifu_sec', 'Q'), ('ifu_usec', 'I'), ) class rt_msg_base(bsdmsg): __slots__ = () fields = ( ('rtm_index', 'H'), ('rtm_tableid', 'H'), ('rtm_priority', 
'B'), ('rtm_mpls', 'B'), ('rtm_addrs', 'i'), ('rtm_flags', 'i'), ('rtm_fmask', 'i'), ('rtm_pid', 'I'), ('rtm_seq', 'i'), ('rtm_errno', 'i'), ('rtm_inits', 'I'), ('rmx_pksent', 'Q'), ('rmx_expire', 'q'), ('rmx_locks', 'I'), ('rmx_mtu', 'I'), ('rmx_refcnt', 'I'), ('rmx_hopcount', 'I'), ('rmx_recvpipe', 'I'), ('rmx_sendpipe', 'I'), ('rmx_ssthresh', 'I'), ('rmx_rtt', 'I'), ('rmx_rttvar', 'I'), ('rmx_pad', 'I'), ) sockaddr_offset = 96 ifa_slots = { 0: ('DST', 'rt_slot_addr'), 1: ('GATEWAY', 'rt_slot_addr'), 2: ('NETMASK', 'rt_slot_addr'), 3: ('GENMASK', 'hex'), 4: ('IFP', 'rt_slot_ifp'), 5: ('IFA', 'rt_slot_addr'), 6: ('AUTHOR', 'hex'), 7: ('BRD', 'rt_slot_addr'), 8: ('SRC', 'rt_slot_addr'), 9: ('SRCMASK', 'rt_slot_addr'), 10: ('LABEL', 'hex'), 11: ('BFD', 'hex'), 12: ('DNS', 'hex'), 13: ('STATIC', 'hex'), 14: ('SEARCH', 'hex'), } class ifa_msg_base(bsdmsg): __slots__ = () fields = ( ('ifam_index', 'H'), ('ifam_tableid', 'H'), ('ifam_pad1', 'B'), ('ifam_pad2', 'B'), ('rtm_addrs', 'i'), ('ifam_flags', 'i'), ('ifam_metric', 'i'), ) sockaddr_offset = 24 class ifma_msg_base(bsdmsg): __slots__ = () fields = (('rtm_addrs', 'i'), ('ifmam_flags', 'i'), ('ifmam_index', 'H')) sockaddr_offset = 16 class if_announcemsg(bsdmsg): __slots__ = () fields = ( ('ifan_index', 'H'), ('ifan_what', 'H'), ('ifan_name', '%is' % IFNAMSIZ), ) def decode(self): bsdmsg.decode(self) self['ifan_name'] = self['ifan_name'].strip(b'\0').decode('ascii') pyroute2-0.7.11/pyroute2/bsd/rtmsocket/000077500000000000000000000000001455030217500177475ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/bsd/rtmsocket/__init__.py000066400000000000000000000102771455030217500220670ustar00rootroot00000000000000import struct from socket import AF_INET, AF_INET6, AF_ROUTE, SOCK_RAW from pyroute2 import config from pyroute2.bsd.pf_route import ( bsdmsg, if_announcemsg, if_msg, ifa_msg, ifma_msg, rt_msg, ) from pyroute2.common import dqn2int from pyroute2.netlink.rtnl import RTM_DELADDR as RTNL_DELADDR from 
pyroute2.netlink.rtnl import RTM_DELLINK as RTNL_DELLINK from pyroute2.netlink.rtnl import RTM_DELROUTE as RTNL_DELROUTE from pyroute2.netlink.rtnl import RTM_NEWADDR as RTNL_NEWADDR from pyroute2.netlink.rtnl import RTM_NEWLINK as RTNL_NEWLINK from pyroute2.netlink.rtnl import RTM_NEWROUTE as RTNL_NEWROUTE from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.netlink.rtnl.rtmsg import rtmsg if config.uname[0] == 'OpenBSD': from pyroute2.bsd.rtmsocket.openbsd import ( RTM_ADD, RTM_NEWADDR, RTMSocketBase, ) else: from pyroute2.bsd.rtmsocket.freebsd import ( RTM_ADD, RTM_NEWADDR, RTMSocketBase, ) def convert_rt_msg(msg): ret = rtmsg() ret['header']['type'] = ( RTNL_NEWROUTE if msg['header']['type'] == RTM_ADD else RTNL_DELROUTE ) ret['family'] = msg['DST']['header']['family'] ret['attrs'] = [] if 'address' in msg['DST']: ret['attrs'].append(['RTA_DST', msg['DST']['address']]) if ( 'NETMASK' in msg and msg['NETMASK']['header']['family'] == ret['family'] ): ret['dst_len'] = dqn2int(msg['NETMASK']['address'], ret['family']) if 'GATEWAY' in msg: if msg['GATEWAY']['header']['family'] not in (AF_INET, AF_INET6): # interface routes, table 255 # discard for now return None ret['attrs'].append(['RTA_GATEWAY', msg['GATEWAY']['address']]) if 'IFA' in msg: ret['attrs'].append(['RTA_SRC', msg['IFA']['address']]) if 'IFP' in msg: ret['attrs'].append(['RTA_OIF', msg['IFP']['index']]) elif msg['rtm_index'] != 0: ret['attrs'].append(['RTA_OIF', msg['rtm_index']]) del ret['value'] return ret def convert_if_msg(msg): # discard this type for now return None def convert_ifa_msg(msg): ret = ifaddrmsg() ret['header']['type'] = ( RTNL_NEWADDR if msg['header']['type'] == RTM_NEWADDR else RTNL_DELADDR ) ret['index'] = msg['IFP']['index'] ret['family'] = msg['IFA']['header']['family'] ret['prefixlen'] = dqn2int(msg['NETMASK']['address'], ret['family']) ret['attrs'] = [ ['IFA_ADDRESS', msg['IFA']['address']], ['IFA_BROADCAST', 
msg['BRD']['address']], ['IFA_LABEL', msg['IFP']['ifname']], ] del ret['value'] return ret def convert_ifma_msg(msg): # ignore for now return None def convert_if_announcemsg(msg): ret = ifinfmsg() ret['header']['type'] = RTNL_DELLINK if msg['ifan_what'] else RTNL_NEWLINK ret['index'] = msg['ifan_index'] ret['attrs'] = [['IFLA_IFNAME', msg['ifan_name']]] del ret['value'] return ret def convert_bsdmsg(msg): # ignore unknown messages return None convert = { rt_msg: convert_rt_msg, ifa_msg: convert_ifa_msg, if_msg: convert_if_msg, ifma_msg: convert_ifma_msg, if_announcemsg: convert_if_announcemsg, bsdmsg: convert_bsdmsg, } class RTMSocket(RTMSocketBase): def __init__(self, output='pf_route', target='localhost'): self.target = target self._sock = config.SocketBase(AF_ROUTE, SOCK_RAW) self._output = output def fileno(self): return self._sock.fileno() def get(self): msg = self._sock.recv(2048) _, _, msg_type = struct.unpack('HBB', msg[:4]) msg_class = self.msg_map.get(msg_type, None) if msg_class is not None: msg = msg_class(msg) msg.decode() if self._output == 'netlink': # convert messages to the Netlink format msg = convert[type(msg)](msg) msg['header']['target'] = self.target return msg def close(self): self._sock.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() __all__ = [RTMSocket] pyroute2-0.7.11/pyroute2/bsd/rtmsocket/freebsd.py000066400000000000000000000025461455030217500217420ustar00rootroot00000000000000from pyroute2.bsd.pf_route import ( if_announcemsg, if_msg, ifa_msg, ifma_msg, rt_msg, ) RTM_ADD = 0x1 # Add Route RTM_DELETE = 0x2 # Delete Route RTM_CHANGE = 0x3 # Change Metrics or flags RTM_GET = 0x4 # Report Metrics RTM_LOSING = 0x5 # Kernel Suspects Partitioning RTM_REDIRECT = 0x6 # Told to use different route RTM_MISS = 0x7 # Lookup failed on this address RTM_LOCK = 0x8 # Fix specified metrics RTM_RESOLVE = 0xB # Req to resolve dst to LL addr RTM_NEWADDR = 0xC # Address being added to iface 
RTM_DELADDR = 0xD # Address being removed from iface RTM_IFINFO = 0xE # Iface going up/down etc RTM_NEWMADDR = 0xF # Mcast group membership being added to if RTM_DELMADDR = 0x10 # Mcast group membership being deleted RTM_IFANNOUNCE = 0x11 # Iface arrival/departure RTM_IEEE80211 = 0x12 # IEEE80211 wireless event class RTMSocketBase(object): msg_map = { RTM_ADD: rt_msg, RTM_DELETE: rt_msg, RTM_CHANGE: rt_msg, RTM_GET: rt_msg, RTM_LOSING: rt_msg, RTM_REDIRECT: rt_msg, RTM_MISS: rt_msg, RTM_LOCK: rt_msg, RTM_RESOLVE: rt_msg, RTM_NEWADDR: ifa_msg, RTM_DELADDR: ifa_msg, RTM_IFINFO: if_msg, RTM_NEWMADDR: ifma_msg, RTM_DELMADDR: ifma_msg, RTM_IFANNOUNCE: if_announcemsg, RTM_IEEE80211: if_announcemsg, } pyroute2-0.7.11/pyroute2/bsd/rtmsocket/openbsd.py000066400000000000000000000026171455030217500217610ustar00rootroot00000000000000from pyroute2.bsd.pf_route import ( bsdmsg, if_announcemsg, if_msg, ifa_msg, rt_msg, ) RTM_ADD = 0x1 # Add Route RTM_DELETE = 0x2 # Delete Route RTM_CHANGE = 0x3 # Change Metrics or flags RTM_GET = 0x4 # Report Metrics RTM_LOSING = 0x5 # Kernel Suspects Partitioning RTM_REDIRECT = 0x6 # Told to use different route RTM_MISS = 0x7 # Lookup failed on this address RTM_LOCK = 0x8 # Fix specified metrics RTM_RESOLVE = 0xB # Req to resolve dst to LL addr RTM_NEWADDR = 0xC # Address being added to iface RTM_DELADDR = 0xD # Address being removed from iface RTM_IFINFO = 0xE # Iface going up/down etc RTM_IFANNOUNCE = 0xF # Iface arrival/departure RTM_DESYNC = 0x10 # route socket buffer overflow RTM_INVALIDATE = 0x10 # Invalidate cache of L2 route RTM_BFD = 0x12 # bidirectional forwarding detection RTM_PROPOSAL = 0x13 # proposal for netconfigd class RTMSocketBase(object): msg_map = { RTM_ADD: rt_msg, RTM_DELETE: rt_msg, RTM_CHANGE: rt_msg, RTM_GET: rt_msg, RTM_LOSING: rt_msg, RTM_REDIRECT: rt_msg, RTM_MISS: rt_msg, RTM_LOCK: rt_msg, RTM_RESOLVE: rt_msg, RTM_NEWADDR: ifa_msg, RTM_DELADDR: ifa_msg, RTM_IFINFO: if_msg, RTM_IFANNOUNCE: if_announcemsg, RTM_DESYNC: 
bsdmsg, RTM_INVALIDATE: bsdmsg, RTM_BFD: bsdmsg, RTM_PROPOSAL: bsdmsg, } pyroute2-0.7.11/pyroute2/bsd/util.py000066400000000000000000000177101455030217500172710ustar00rootroot00000000000000''' Utility to parse ifconfig, netstat etc. PF_ROUTE may be effectively used only to get notifications. To fetch info from the system we have to use ioctl or external utilities. Maybe some day it will be ioctl. For now it's ifconfig and netstat. ''' import re import socket import subprocess class CMD(object): cmd = ['uname', '-s'] def __init__(self, cmd=None): if cmd is not None: self.cmd = cmd def run(self): ''' Run the command and get stdout ''' stdout = stderr = '' try: process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE) (stdout, stderr) = process.communicate() except Exception: process.kill() finally: process.wait() return stdout class Route(CMD): cmd = ['netstat', '-rn'] def parse(self, data): ret = [] family = 0 if isinstance(data, bytes): data = data.decode('utf-8') for line in data.split('\n'): if line == 'Internet:': family = socket.AF_INET elif line == 'Internet6:': # do NOT support IPv6 routes yet break sl = line.split() if len(sl) < 4: continue if sl[0] == 'Destination': # create the field map fmap = dict([(x[1], x[0]) for x in enumerate(sl)]) if 'Netif' not in fmap: fmap['Netif'] = fmap['Iface'] continue route = {'family': family, 'attrs': []} # # RTA_DST dst = sl[fmap['Destination']] if dst != 'default': dst = dst.split('/') if len(dst) == 2: dst, dst_len = dst else: dst = dst[0] if family == socket.AF_INET: dst_len = 32 else: dst_len = 128 dst = dst.split('%') if len(dst) == 2: dst, _ = dst else: dst = dst[0] dst = '%s%s' % (dst, '.0' * (3 - dst.count('.'))) route['dst_len'] = int(dst_len) route['attrs'].append(['RTA_DST', dst]) # # RTA_GATEWAY gw = sl[fmap['Gateway']] if not gw.startswith('link') and not gw.find(':') >= 0: route['attrs'].append(['RTA_GATEWAY', sl[fmap['Gateway']]]) # # RTA_OIF -- do not resolve it here! 
just save route['ifname'] = sl[fmap['Netif']] ret.append(route) return ret class ARP(CMD): cmd = ['arp', '-an'] def parse(self, data): ret = [] f_dst = 1 f_addr = 3 f_ifname = 5 if isinstance(data, bytes): data = data.decode('utf-8') for line in data.split('\n'): sl = line.split() if not sl: continue if sl[0] == 'Host': f_dst = 0 f_addr = 1 f_ifname = 2 continue dst = sl[f_dst].strip('(').strip(')') addr = sl[f_addr].strip('(').strip(')') if addr == 'incomplete': continue ifname = sl[f_ifname] neighbour = { 'ifindex': 0, 'ifname': ifname, 'family': 2, 'attrs': [['NDA_DST', dst], ['NDA_LLADDR', addr]], } ret.append(neighbour) return ret class Ifconfig(CMD): match = {'NR': re.compile(r'^\b').match} cmd = ['ifconfig', '-a'] def parse_line(self, line): ''' Dumb line parser: "key1 value1 key2 value2 something" -> {"key1": "value1", "key2": "value2"} ''' ret = {} cursor = 0 while cursor < (len(line) - 1): ret[line[cursor]] = line[cursor + 1] cursor += 2 return ret def parse(self, data): ''' Parse ifconfig output into netlink-compatible dicts:: from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.bsd.util import Ifconfig def links() ifc = Ifconfig() data = ifc.run() for name, spec in ifc.parse(data)["links"].items(): yield ifinfmsg().load(spec) ''' ifname = None kind = None ret = {'links': {}, 'addrs': {}} idx = 0 info_data = {'attrs': None} if isinstance(data, bytes): data = data.decode('utf-8') for line in data.split('\n'): sl = line.split() pl = self.parse_line(sl) # type-specific if kind == 'gre' and 'inet' in sl and not info_data['attrs']: # first "inet" -- low-level addresses arrow = None try: arrow = sl.index('->') except ValueError: try: arrow = sl.index('-->') except ValueError: continue if arrow is not None: info_data['attrs'] = [ ('IFLA_GRE_LOCAL', sl[arrow - 1]), ('IFLA_GRE_REMOTE', sl[arrow + 1]), ] continue # first line -- ifname, flags, mtu if self.match['NR'](line): ifname = sl[0][:-1] kind = None idx += 1 ret['links'][ifname] = link = 
{'index': idx, 'attrs': []} ret['addrs'][ifname] = addrs = [] link['attrs'].append(['IFLA_IFNAME', ifname]) # if ifname[:3] == 'gre': kind = 'gre' info_data = {'attrs': []} linkinfo = { 'attrs': [ ('IFLA_INFO_KIND', kind), ('IFLA_INFO_DATA', info_data), ] } link['attrs'].append(['IFLA_LINKINFO', linkinfo]) # extract flags try: link['flags'] = int(sl[1].split('=')[1].split('<')[0]) except Exception: pass # extract MTU if 'mtu' in pl: link['attrs'].append(['IFLA_MTU', int(pl['mtu'])]) elif 'ether' in pl: link['attrs'].append(['IFLA_ADDRESS', pl['ether']]) elif 'lladdr' in pl: link['attrs'].append(['IFLA_ADDRESS', pl['lladdr']]) elif 'index' in pl: idx = int(pl['index']) link['index'] = int(pl['index']) elif 'inet' in pl: if ('netmask' not in pl) or ('inet' not in pl): print(pl) continue addr = { 'index': idx, 'family': socket.AF_INET, 'prefixlen': bin(int(pl['netmask'], 16)).count('1'), 'attrs': [['IFA_ADDRESS', pl['inet']]], } if 'broadcast' in pl: addr['attrs'].append(['IFA_BROADCAST', pl['broadcast']]) addrs.append(addr) elif 'inet6' in pl: if ('prefixlen' not in pl) or ('inet6' not in pl): print(pl) continue addr = { 'index': idx, 'family': socket.AF_INET6, 'prefixlen': int(pl['prefixlen']), 'attrs': [['IFA_ADDRESS', pl['inet6'].split('%')[0]]], } if 'scopeid' in pl: addr['scope'] = int(pl['scopeid'], 16) addrs.append(addr) return ret pyroute2-0.7.11/pyroute2/cli/000077500000000000000000000000001455030217500157335ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/cli/__init__.py000066400000000000000000000056571455030217500200610ustar00rootroot00000000000000''' CLI provides a simple syntax to manipulate NDB. The syntax is the same for the console and the http versions. 
The first level of the hierarchy represents NDB views: * interfaces -- network interfaces * addresses -- IP addresses * routes -- IP and MPLS routes, one record per NH * neighbours -- ARP cache * rules -- RPDB rules * netns -- network namespaces * vlans -- bridge VLAN filters CLI supports indentation, though it is optional. A level up in the indentation means a level up in the object hierarchy. Same effect with `..` command. An example script to create a bridge and a port, no indentation:: ; comments start with ; interfaces create ifname br0, kind bridge address 00:11:22:33:44:55 state up add_ip 192.168.123.21/24 add_ip 192.168.123.22/24 commit .. ; level up to the interfaces view create ifname br0p0, kind dummy state up master br0 commit Same script with indentation, no `..` needed:: interfaces create ifname br0, kind bridge address 00:11:22:33:44:55 state up add_ip 192.168.123.21/24 add_ip 192.168.123.22/24 commit create ifname br0p0, kind dummy state up master br0 commit Select objects:: ; by name interfaces br0 ; ... ; by spec interfaces {target netns01, ifname eth0} ; ... {address 00:11:22:33:44:55} ; ... Manage interfaces ----------------- Create:: interfaces create ifname br0.100, kind vlan, vlan_id 100, link br0 commit create ifname v0, kind veth, peer v0p commit Change mac address:: interfaces br0 address 00:11:22:33:44:55 commit Change netns and rename:: sources add netns test01 interfaces v0p net_ns_fd test01 commit {target test01, ifname v0p} ifname eth0 commit summary | filter kind veth | select target, ifname | format json Manage addresses ---------------- ... Manage routes ------------- ... Generate reports ---------------- It is possible to modify the output of the dump or summary commands:: interfaces ; print index, ifname and MAC address for UP interfaces dump | filter state up | select index, ifname, address routes ; output in JSON format summary | format json The `format` command can only be the last in the sentence. 
Available output filters: filter { } -- filter out records select { } -- output only selected record fields format { } -- change the output format, possible values: csv, json Wait for events --------------- ... ''' t_stmt = 1 t_dict = 2 t_comma = 3 t_pipe = 4 t_end_of_dict = 7 t_end_of_sentence = 8 t_end_of_stream = 9 def change_pointer(f): f.__cli_cptr__ = True return f def show_result(f): f.__cli_publish__ = True return f pyroute2-0.7.11/pyroute2/cli/auth/000077500000000000000000000000001455030217500166745ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/cli/auth/__init__.py000066400000000000000000000000001455030217500207730ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/cli/auth/auth_keystone.py000066400000000000000000000022521455030217500221310ustar00rootroot00000000000000import os import time from dateutil.parser import parse as isodate from keystoneauth1 import session from keystoneauth1.identity import v3 from keystoneclient.v3 import client as ksclient from keystoneclient.v3.tokens import TokenManager class OSAuthManager(object): def __init__(self, headers): # create a Keystone password object auth = v3.Password( auth_url=os.environ.get('OS_AUTH_URL'), username=os.environ.get('OS_USERNAME'), password=os.environ.get('OS_PASSWORD'), user_domain_name=(os.environ.get('OS_USER_DOMAIN_NAME')), project_id=os.environ.get('OS_PROJECT_ID'), ) # create a session object sess = session.Session(auth=auth) # create a token manager tmanager = TokenManager(ksclient.Client(session=sess)) # validate the token keystone_response = tmanager.validate(headers['X-Auth-Token']) # init attrs self.expire = isodate(keystone_response['expires_at']).timestamp() def check(self, obj, tag): if time.time() > self.expire: raise PermissionError('keystone token has been expired') return True pyroute2-0.7.11/pyroute2/cli/auth/auth_radius.py000066400000000000000000000014061455030217500215570ustar00rootroot00000000000000import os import pyrad.packet from pyrad.client import Client from 
pyrad.dictionary import Dictionary class RadiusAuthManager(object): def __init__(self, headers): user = headers['X-Auth-User'] password = headers['X-Auth-Password'] client = Client( server=os.environ.get('RADIUS_SERVER'), secret=os.environ.get('RADIUS_SECRET').encode('ascii'), dict=Dictionary('dictionary'), ) req = client.CreateAuthPacket( code=pyrad.packet.AccessRequest, User_Name=user ) req['User-Password'] = req.PwCrypt(password) reply = client.SendPacket(req) self.auth = reply.code def check(self, obj, tag): return self.auth == pyrad.packet.AccessAccept pyroute2-0.7.11/pyroute2/cli/console.py000066400000000000000000000064261455030217500177570ustar00rootroot00000000000000import code import getpass import socket import sys from pyroute2.cli.session import Session from pyroute2.ndb.main import NDB try: import readline HAS_READLINE = True except ImportError: HAS_READLINE = False class Console(code.InteractiveConsole): def __init__(self, stdout=None, log=None, sources=None): global HAS_READLINE self.db = NDB(log=log, sources=sources) self.db.config.update( {'show_format': 'json', 'recordset_pipe': 'true'} ) self.stdout = stdout or sys.stdout self.session = Session(self.db, self.stdout, self.set_prompt) self.matches = [] self.isatty = sys.stdin.isatty() self.prompt = '' self.set_prompt() code.InteractiveConsole.__init__(self) if HAS_READLINE: readline.parse_and_bind('tab: complete') readline.set_completer(self.completer) readline.set_completion_display_matches_hook(self.display) def close(self): self.db.close() def help(self): self.session.lprint( "Built-in commands: \n" "exit\t-- exit cli\n" "ls\t-- list current namespace\n" ".\t-- print the current object\n" ".. 
or Ctrl-D\t-- one level up\n" ) def set_prompt(self, prompt=None): if self.isatty: if prompt is not None: self.prompt = '%s > ' % (prompt) else: self.prompt = '%s > ' % (self.session.ptr.__class__.__name__) self.prompt = '%s@%s : %s' % ( getpass.getuser(), (socket.gethostname().split('.')[0]), self.prompt, ) def loadrc(self, fname): with open(fname, 'r') as f: self.session.handle(f.read()) def interact(self, readfunc=None): if self.isatty and readfunc is None: self.session.lprint("pyroute2 cli prototype") if readfunc is None: readfunc = self.raw_input indent = 0 while True: try: text = readfunc(self.prompt) except EOFError: if self.session.stack: self.session.stack_pop() continue else: self.close() break except Exception: self.close() break try: indent = self.session.handle(text, indent) except SystemExit: self.close() return except: self.showtraceback() continue def completer(self, text, state): if state == 0: d = [x for x in dir(self.session.ptr) if x.startswith(text)] if isinstance(self.session.ptr, dict): keys = [str(y) for y in self.session.ptr.keys()] d.extend([x for x in keys if x.startswith(text)]) self.matches = d try: return self.matches[state] except: pass def display(self, line, matches, length): self.session.lprint() self.session.lprint(matches) self.session.lprint('%s%s' % (self.prompt, line), end='') if __name__ == '__main__': Console().interact() pyroute2-0.7.11/pyroute2/cli/parser.py000066400000000000000000000127221455030217500176050ustar00rootroot00000000000000import re import shlex from pyroute2.cli import ( t_comma, t_dict, t_end_of_dict, t_end_of_sentence, t_end_of_stream, t_pipe, t_stmt, ) from pyroute2.common import basestring class Token(object): def __init__(self, lex, expect=(), prohibit=(), leaf=False): self.lex = lex self.leaf = leaf self.kind = 0 self.name = None self.argv = [] self.kwarg = {} self.parse() if expect and self.kind not in expect: raise SyntaxError('expected %s, got %s' % (expect, self.kind)) if prohibit and self.kind in 
prohibit: raise SyntaxError('unexpected %s' % (self.name,)) def convert(self, arg): if re.match('^[0-9]+$', arg): return int(arg) else: return arg def parse(self): # triage first = self.lex.get_token() self.name = first ## # no token # if first == '': self.kind = t_end_of_stream ## # dict, e.g. # # resource spec, function arguments:: # {arg1, arg2} # {key1 value1, key2 value2} # {key {skey1 value}} # elif first == '{': arg_name = None while True: nt = Token( self.lex, expect=(t_stmt, t_dict, t_comma, t_end_of_dict) ) if arg_name is None: if nt.kind == t_dict: self.argv.append(nt.kwarg) elif nt.kind == t_comma: continue elif nt.kind == t_stmt: arg_name = nt.name else: if nt.kind in (t_end_of_dict, t_comma): self.argv.append(arg_name) elif nt.kind == t_stmt: self.kwarg[arg_name] = nt.name elif nt.kind == t_dict: self.kwarg[arg_name] = nt.kwarg arg_name = None if nt.kind == t_end_of_dict: self.kind = t_dict self.name = '%s %s' % (self.argv, self.kwarg) return ## # end of dict # elif first == '}': self.kind = t_end_of_dict ## # end of sentence # elif first == ';': self.kind = t_end_of_sentence ## # end of dict entry # elif first == ',': self.kind = t_comma ## # pipe # elif first == '|': self.kind = t_pipe elif first == '=': lookahead = self.lex.get_token() if lookahead == '>': self.name = '=>' self.kind = t_pipe else: self.lex.push_token(lookahead) self.kind = t_stmt ## # simple statement # # object name:: # name # # function call:: # func # func {arg1, arg2} # func {key1 value1, key2 value2} # else: self.name = self.convert(first) self.kind = t_stmt class Sentence(object): def __init__(self, text, indent=0, master=None): self.offset = 0 self.statements = [] self.text = text self.lex = shlex.shlex(text) self.lex.wordchars += '.:/' self.lex.commenters = '#!' 
self.lex.debug = False self.indent = indent if master: self.chain = master.chain else: self.chain = [] self.parse() def __iter__(self): for stmt in self.statements: yield stmt def parse(self): sentence = self while True: nt = Token(self.lex) if nt.kind == t_end_of_sentence: sentence = Sentence(None, self.indent, master=self) elif nt.kind == t_end_of_stream: return else: sentence.statements.append(nt) if sentence not in self.chain: self.chain.append(sentence) def __repr__(self): ret = '----\n' for s in self.statements: ret += '%i [%s] %s\n' % (self.indent, s.kind, s.name) ret += '\targv: %s\n' % (s.argv) ret += '\tkwarg: %s\n' % (s.kwarg) return ret class Parser(object): def __init__(self, stream): self.stream = stream self.indent = None self.sentences = [] self.parse() def parse(self): if hasattr(self.stream, 'readlines'): for text in self.stream.readlines(): self.parse_string(text) elif isinstance(self.stream, basestring): self.parse_string(self.stream) else: raise ValueError('unsupported stream') self.parsed = True def parse_string(self, text): # 1. get indentation indent = re.match(r'^([ \t]*)', text).groups(0)[0] spaces = [] # 2. 
sort it if indent: spaces = list(set(indent)) if len(spaces) > 1: raise SyntaxError('mixed indentation') if self.indent is None: self.indent = spaces[0] if self.indent != spaces[0]: raise SyntaxError('mixed indentation') sentence = Sentence(text, len(indent)) self.sentences.extend(sentence.chain) pyroute2-0.7.11/pyroute2/cli/server.py000066400000000000000000000062521455030217500176200ustar00rootroot00000000000000import json from pyroute2.cli.session import Session from pyroute2.ndb.main import NDB try: from BaseHTTPServer import BaseHTTPRequestHandler from BaseHTTPServer import HTTPServer as HTTPServer except ImportError: from http.server import BaseHTTPRequestHandler from http.server import HTTPServer as HTTPServer class ProxyEncoder(object): def __init__(self, wfile): self.wfile = wfile def write(self, data): self.wfile.write(data.encode('utf-8')) def flush(self): self.wfile.flush() class Handler(BaseHTTPRequestHandler): def do_error(self, code, reason): self.send_error(code, reason) self.end_headers() def do_POST(self): # # sanity checks: # # * path if self.path != '/v1/': return self.do_error(404, 'url not found') # * content length if 'Content-Length' not in self.headers: return self.do_error(411, 'Content-Length') # * content type if 'Content-Type' not in self.headers: return self.do_error(400, 'Content-Type') # content_length = int(self.headers['Content-Length']) content_type = self.headers['Content-Type'] data = self.rfile.read(content_length) if content_type == 'application/json': try: request = json.loads(data) except ValueError: return self.do_error(400, 'Incorrect JSON input') elif content_type == 'text/plain': request = {'commands': data.decode('utf-8').split(';')} else: self.do_error(400, 'Incorrect content type') # auth plugins if 'X-Auth-Mech' in self.headers: auth_plugin = self.server.auth_plugins.get( self.headers['X-Auth-Mech'] ) if auth_plugin is None: return self.do_error(501, 'Authentication mechanism not found') try: am = 
auth_plugin(self.headers) except Exception: return self.do_error(401, 'Authentication failed') ndb = self.server.ndb.auth_proxy(am) elif self.server.auth_strict: return self.do_error(401, 'Authentication required') else: ndb = self.server.ndb session = Session( ndb=ndb, stdout=ProxyEncoder(self.wfile), builtins=('ls', '.', '..', 'version'), ) self.send_response(200) self.end_headers() for cmd in request['commands']: session.handle(cmd) class Server(HTTPServer): def __init__( self, address='localhost', port=8080, sources=None, ndb=None, log=None, auth_strict=False, auth_plugins=None, ): self.sessions = {} self.auth_strict = auth_strict self.auth_plugins = auth_plugins or {} if ndb is not None: self.ndb = ndb else: self.ndb = NDB(sources=sources, log=log) self.ndb.config.update( {'show_format': 'json', 'recordset_pipe': 'true'} ) HTTPServer.__init__(self, (address, port), Handler) pyroute2-0.7.11/pyroute2/cli/session.py000066400000000000000000000217041455030217500177740ustar00rootroot00000000000000from __future__ import print_function import sys import traceback from collections import namedtuple from pyroute2 import config from pyroute2.cli import t_comma, t_dict, t_pipe, t_stmt from pyroute2.cli.parser import Parser from pyroute2.common import basestring class Session(object): def __init__(self, ndb, stdout=None, ptrname_callback=None, builtins=None): self.db = ndb self.ptr = self.db self._ptrname = None self._ptrname_callback = ptrname_callback self.stack = [] self.errors = 0 self.indent_stack = set() self.prompt = '' self.stdout = stdout or sys.stdout self.builtins = builtins or ( 'ls', '.', '..', 'version', 'exit', ':stack', ) @property def ptrname(self): return self._ptrname @ptrname.setter def ptrname(self, name): self._ptrname = name if self._ptrname_callback is not None: self._ptrname_callback(name) def stack_pop(self): self.ptr, self.ptrname = self.stack.pop() return (self.ptr, self.ptrname) def lprint(self, text='', end='\n'): if not isinstance(text, 
basestring): text = str(text) self.stdout.write(text) if end: self.stdout.write(end) self.stdout.flush() def handle_statement(self, stmt, token): obj = None if stmt.kind == t_dict: obj = self.ptr[stmt.kwarg] elif stmt.kind == t_stmt: obj = getattr(self.ptr, stmt.name, None) if obj is None and isinstance(self.ptr, dict): try: obj = self.ptr.get(stmt.name, None) except KeyError: pass if hasattr(obj, '__call__'): try: nt = next(token) except StopIteration: nt = namedtuple('Token', ('kind', 'argv', 'kwarg'))( t_dict, [], {} ) if nt.kind == t_dict: args = nt try: pipe = next(token) if pipe.kind != t_pipe: raise TypeError('pipe expected') except StopIteration: pipe = None elif nt.kind == t_stmt: argv = [] kwarg = {} arg_name = nt.name pipe = None for nt in token: if arg_name is None: if nt.kind == t_stmt: arg_name = nt.name elif nt.kind == t_comma: continue elif nt.kind == t_pipe: pipe = nt break else: raise TypeError('stmt expected') else: if nt.kind == t_comma: argv.append(arg_name) elif nt.kind == t_stmt: kwarg[arg_name] = nt.name elif nt.kind == t_pipe: pipe = nt break else: raise TypeError('stmt or comma expected') arg_name = None if arg_name is not None: argv.append(arg_name) args = namedtuple('Token', ('kind', 'argv', 'kwarg'))( t_dict, argv, kwarg ) elif nt.kind == t_pipe: args = namedtuple('Token', ('kind', 'argv', 'kwarg'))( t_dict, [], {} ) pipe = nt else: raise TypeError('dict, stmt or comma expected') # at this step we have # args -- arguments # pipe -- pipe or None try: ret = obj(*args.argv, **args.kwarg) # if pipe is not None: ptr = self.ptr self.ptr = ret try: stmt = next(token) except StopIteration: raise TypeError('statement expected') if stmt.kind != t_stmt: raise TypeError('statement expected') try: self.handle_statement(stmt, token) except Exception: pass self.ptr = ptr return if hasattr(obj, '__cli_cptr__'): obj = ret elif hasattr(obj, '__cli_publish__'): if hasattr(ret, 'generator') or hasattr(ret, 'next'): for line in ret: if isinstance(line, 
basestring): self.lprint(line) else: self.lprint(repr(line)) else: self.lprint(ret) return elif isinstance(ret, (bool, basestring, int, float)): self.lprint(ret) return else: return except Exception: self.errors += 1 traceback.print_exc() return else: if isinstance(self.ptr, dict) and not isinstance(obj, dict): try: nt = next(token) if nt.kind == t_stmt: self.ptr[stmt.name] = nt.name elif nt.kind == t_dict and nt.argv: self.ptr[stmt.name] = nt.argv elif nt.kind == t_dict and nt.kwarg: self.ptr[stmt.name] = nt.kwarg else: raise TypeError('failed setting a key/value pair') return except NotImplementedError: raise KeyError() except StopIteration: pass if obj is None: raise KeyError() elif isinstance(obj, (basestring, int, float)): self.lprint(obj) else: return obj def handle_sentence(self, sentence, indent): if sentence.indent < indent: while max(self.indent_stack) > sentence.indent: self.indent_stack.remove(max(self.indent_stack)) if self.stack: self.ptr, self.ptrname = self.stack.pop() else: self.indent_stack.add(sentence.indent) indent = sentence.indent iterator = iter(sentence) obj = None save_ptr = self.ptr save_ptrname = self.ptrname try: for stmt in iterator: if stmt.name in self.builtins: if stmt.name == 'exit': raise SystemExit() elif stmt.name == 'ls': self.lprint(dir(self.ptr)) elif stmt.name == ':stack': self.lprint('stack:') for item in self.stack: self.lprint(item) self.lprint('end') elif stmt.name == '.': self.lprint(repr(self.ptr)) elif stmt.name == '..': if self.stack: save_ptr, save_ptrname = self.stack.pop() elif stmt.name == 'version': try: self.lprint(config.version.__version__) except: self.lprint('unknown') break else: try: obj = self.handle_statement(stmt, iterator) if obj is not None: self.ptr = obj if hasattr(obj, 'key_repr'): self.ptrname = obj.key_repr() else: self.ptrname = stmt.name except KeyError: self.lprint('object not found') self.errors += 1 return indent except: self.errors += 1 traceback.print_exc() except SystemExit: raise 
finally: if obj is not None: self.stack.append((save_ptr, save_ptrname)) else: self.ptr, self.ptrname = save_ptr, save_ptrname return indent def handle(self, text, indent=0): parser = Parser(text) for sentence in parser.sentences: indent = self.handle_sentence(sentence, indent) return indent pyroute2-0.7.11/pyroute2/common.py000066400000000000000000000426511455030217500170360ustar00rootroot00000000000000# -*- coding: utf-8 -*- ''' Common utilities ''' import errno import io import logging import os import re import socket import struct import sys import threading import time import types log = logging.getLogger(__name__) try: # # Python2 section # basestring = basestring reduce = reduce file = file except NameError: # # Python3 section # basestring = (str, bytes) from functools import reduce reduce = reduce file = io.BytesIO AF_MPLS = 28 AF_PIPE = 255 # Right now AF_MAX == 40 DEFAULT_RCVBUF = 65536 _uuid32 = 0 # (singleton) the last uuid32 value saved to avoid collisions _uuid32_lock = threading.Lock() size_suffixes = { 'b': 1, 'k': 1024, 'kb': 1024, 'm': 1024 * 1024, 'mb': 1024 * 1024, 'g': 1024 * 1024 * 1024, 'gb': 1024 * 1024 * 1024, 'kbit': 1024 / 8, 'mbit': 1024 * 1024 / 8, 'gbit': 1024 * 1024 * 1024 / 8, } time_suffixes = { 's': 1, 'sec': 1, 'secs': 1, 'ms': 1000, 'msec': 1000, 'msecs': 1000, 'us': 1000000, 'usec': 1000000, 'usecs': 1000000, } rate_suffixes = { 'bit': 1, 'Kibit': 1024, 'kbit': 1000, 'mibit': 1024 * 1024, 'mbit': 1000000, 'gibit': 1024 * 1024 * 1024, 'gbit': 1000000000, 'tibit': 1024 * 1024 * 1024 * 1024, 'tbit': 1000000000000, 'Bps': 8, 'KiBps': 8 * 1024, 'KBps': 8000, 'MiBps': 8 * 1024 * 1024, 'MBps': 8000000, 'GiBps': 8 * 1024 * 1024 * 1024, 'GBps': 8000000000, 'TiBps': 8 * 1024 * 1024 * 1024 * 1024, 'TBps': 8000000000000, } ## # General purpose # class View(object): ''' A read-only view of a dictionary object. 
''' def __init__(self, src=None, path=None, constraint=lambda k, v: True): self.src = src if src is not None else {} if path is not None: path = path.split('/') for step in path: self.src = getattr(self.src, step) self.constraint = constraint def __getitem__(self, key): if key in self.keys(): return self.src[key] raise KeyError() def __setitem__(self, key, value): raise NotImplementedError() def __delitem__(self, key): raise NotImplementedError() def get(self, key, default=None): try: return self[key] except KeyError: return default def _filter(self): ret = [] for key, value in tuple(self.src.items()): try: if self.constraint(key, value): ret.append((key, value)) except Exception as e: log.error("view filter error: %s", e) return ret def keys(self): return [x[0] for x in self._filter()] def values(self): return [x[1] for x in self._filter()] def items(self): return self._filter() def __iter__(self): for key in self.keys(): yield key def __repr__(self): return repr(dict(self._filter())) class Namespace(object): def __init__(self, parent, override=None): self.parent = parent self.override = override or {} def __getattr__(self, key): if key in ('parent', 'override'): return object.__getattr__(self, key) elif key in self.override: return self.override[key] else: ret = getattr(self.parent, key) # ACHTUNG # # if the attribute we got with `getattr` # is a method, rebind it to the Namespace # object, so all subsequent getattrs will # go through the Namespace also. # if isinstance(ret, types.MethodType): ret = type(ret)(ret.__func__, self) return ret def __setattr__(self, key, value): if key in ('parent', 'override'): object.__setattr__(self, key, value) elif key in self.override: self.override[key] = value else: setattr(self.parent, key, value) class Dotkeys(dict): ''' This is a sick-minded hack of dict, intended to be an eye-candy. 
It allows to get dict's items by dot reference: ipdb["lo"] == ipdb.lo ipdb["eth0"] == ipdb.eth0 Obviously, it will not work for some cases, like unicode names of interfaces and so on. Beside of that, it introduces some complexity. But it simplifies live for old-school admins, who works with good old "lo", "eth0", and like that naming schemes. ''' __var_name = re.compile('^[a-zA-Z_]+[a-zA-Z_0-9]*$') def __dir__(self): return [i for i in self if type(i) == str and self.__var_name.match(i)] def __getattribute__(self, key, *argv): try: return dict.__getattribute__(self, key) except AttributeError as e: if key == '__deepcopy__': raise e elif key[:4] == 'set_': def set_value(value): self[key[4:]] = value return self return set_value elif key in self: return self[key] else: raise e def __setattr__(self, key, value): if key in self: self[key] = value else: dict.__setattr__(self, key, value) def __delattr__(self, key): if key in self: del self[key] else: dict.__delattr__(self, key) def map_namespace(prefix, ns, normalize=None): ''' Take the namespace prefix, list all constants and build two dictionaries -- straight and reverse mappings. E.g.: ## neighbor attributes NDA_UNSPEC = 0 NDA_DST = 1 NDA_LLADDR = 2 NDA_CACHEINFO = 3 NDA_PROBES = 4 (NDA_NAMES, NDA_VALUES) = map_namespace('NDA', globals()) Will lead to:: NDA_NAMES = {'NDA_UNSPEC': 0, ... 'NDA_PROBES': 4} NDA_VALUES = {0: 'NDA_UNSPEC', ... 
4: 'NDA_PROBES'} The `normalize` parameter can be: - None — no name transformation will be done - True — cut the prefix and `lower()` the rest - lambda x: â€Ļ — apply the function to every name ''' nmap = {None: lambda x: x, True: lambda x: x[len(prefix) :].lower()} if not isinstance(normalize, types.FunctionType): normalize = nmap[normalize] by_name = dict( [(normalize(i), ns[i]) for i in ns.keys() if i.startswith(prefix)] ) by_value = dict( [(ns[i], normalize(i)) for i in ns.keys() if i.startswith(prefix)] ) return (by_name, by_value) def getbroadcast(addr, mask, family=socket.AF_INET): # 1. convert addr to int i = socket.inet_pton(family, addr) if family == socket.AF_INET: i = struct.unpack('>I', i)[0] a = 0xFFFFFFFF length = 32 elif family == socket.AF_INET6: i = struct.unpack('>QQ', i) i = i[0] << 64 | i[1] a = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF length = 128 else: raise NotImplementedError('family not supported') # 2. calculate mask m = (a << length - mask) & a # 3. calculate default broadcast n = (i & m) | a >> mask # 4. convert it back to the normal address form if family == socket.AF_INET: n = struct.pack('>I', n) else: n = struct.pack('>QQ', n >> 64, n & (a >> 64)) return socket.inet_ntop(family, n) def dqn2int(mask, family=socket.AF_INET): ''' IPv4 dotted quad notation to int mask conversion ''' ret = 0 binary = socket.inet_pton(family, mask) for offset in range(len(binary) // 4): ret += bin( struct.unpack('I', binary[offset * 4 : offset * 4 + 4])[0] ).count('1') return ret def get_address_family(address): if address.find(':') > -1: return socket.AF_INET6 else: return socket.AF_INET def hexdump(payload, length=0): ''' Represent byte string as hex -- for debug purposes ''' return ':'.join('{0:02x}'.format(c) for c in payload[:length] or payload) def hexload(data): return bytes(bytearray((int(x, 16) for x in data.split(':')))) def load_dump(f, meta=None): ''' Load a packet dump from an open file-like object or a string. 
Supported dump formats: * strace hex dump (\\x00\\x00...) * pyroute2 hex dump (00:00:...) Simple markup is also supported. Any data from # or ; till the end of the string is a comment and ignored. Any data after . till EOF is ignored as well. With #! starts an optional code block. All the data in the code block will be read and returned via metadata dictionary. ''' data = '' code = None meta_data = None meta_label = None if isinstance(f, str): io_obj = io.StringIO() io_obj.write(f) io_obj.seek(0) else: io_obj = f for a in io_obj.readlines(): if code is not None: code += a continue if meta_data is not None: meta_data += a continue offset = 0 length = len(a) while offset < length: if a[offset] in (' ', '\t', '\n'): offset += 1 elif a[offset] == '#': if a[offset : offset + 2] == '#!': # read the code block until EOF code = '' elif a[offset : offset + 2] == '#:': # read data block until EOF meta_label = a.split(':')[1].strip() meta_data = '' break elif a[offset] == '.': return data elif a[offset] == '\\': # strace hex format data += chr(int(a[offset + 2 : offset + 4], 16)) offset += 4 else: # pyroute2 hex format data += chr(int(a[offset : offset + 2], 16)) offset += 3 if isinstance(meta, dict): if code is not None: meta['code'] = code if meta_data is not None: meta[meta_label] = meta_data if sys.version[0] == '3': return bytes(data, 'iso8859-1') else: return data class AddrPool(object): ''' Address pool ''' cell = 0xFFFFFFFFFFFFFFFF def __init__( self, minaddr=0xF, maxaddr=0xFFFFFF, reverse=False, release=False ): self.cell_size = 0 # in bits mx = self.cell self.reverse = reverse self.release = release self.allocated = 0 if self.release and not isinstance(self.release, int): raise TypeError() self.ban = [] while mx: mx >>= 8 self.cell_size += 1 self.cell_size *= 8 # calculate, how many ints we need to bitmap all addresses self.cells = int((maxaddr - minaddr) / self.cell_size + 1) # initial array self.addr_map = [self.cell] self.minaddr = minaddr self.maxaddr = maxaddr 
self.lock = threading.RLock() def alloc(self): with self.lock: # gc self.ban: for item in tuple(self.ban): if item['counter'] == 0: self.free(item['addr']) self.ban.remove(item) else: item['counter'] -= 1 # iterate through addr_map base = 0 for cell in self.addr_map: if cell: # not allocated addr bit = 0 while True: if (1 << bit) & self.addr_map[base]: self.addr_map[base] ^= 1 << bit break bit += 1 ret = base * self.cell_size + bit if self.reverse: ret = self.maxaddr - ret else: ret = ret + self.minaddr if self.minaddr <= ret <= self.maxaddr: if self.release: self.free(ret, ban=self.release) self.allocated += 1 return ret else: self.free(ret) raise KeyError('no free address available') base += 1 # no free address available if len(self.addr_map) < self.cells: # create new cell to allocate address from self.addr_map.append(self.cell) return self.alloc() else: raise KeyError('no free address available') def alloc_multi(self, count): with self.lock: addresses = [] raised = False try: for _ in range(count): addr = self.alloc() try: addresses.append(addr) except: # In case of a MemoryError during appending, # the finally block would not free the address. 
self.free(addr) return addresses except: raised = True raise finally: if raised: for addr in addresses: self.free(addr) def locate(self, addr): if self.reverse: addr = self.maxaddr - addr else: addr -= self.minaddr base = addr // self.cell_size bit = addr % self.cell_size try: is_allocated = not self.addr_map[base] & (1 << bit) except IndexError: is_allocated = False return (base, bit, is_allocated) def setaddr(self, addr, value): if value not in ('free', 'allocated'): raise TypeError() with self.lock: base, bit, is_allocated = self.locate(addr) if value == 'free' and is_allocated: self.allocated -= 1 self.addr_map[base] |= 1 << bit elif value == 'allocated' and not is_allocated: self.allocated += 1 self.addr_map[base] &= ~(1 << bit) def free(self, addr, ban=0): with self.lock: if ban != 0: self.ban.append({'addr': addr, 'counter': ban}) else: base, bit, is_allocated = self.locate(addr) if len(self.addr_map) <= base: raise KeyError('address is not allocated') if self.addr_map[base] & (1 << bit): raise KeyError('address is not allocated') self.allocated -= 1 self.addr_map[base] ^= 1 << bit def _fnv1_python2(data): ''' FNV1 -- 32bit hash, python2 version @param data: input @type data: bytes @return: 32bit int hash @rtype: int See: http://www.isthe.com/chongo/tech/comp/fnv/index.html ''' hval = 0x811C9DC5 for i in range(len(data)): hval *= 0x01000193 hval ^= struct.unpack('B', data[i])[0] return hval & 0xFFFFFFFF def _fnv1_python3(data): ''' FNV1 -- 32bit hash, python3 version @param data: input @type data: bytes @return: 32bit int hash @rtype: int See: http://www.isthe.com/chongo/tech/comp/fnv/index.html ''' hval = 0x811C9DC5 for i in range(len(data)): hval *= 0x01000193 hval ^= data[i] return hval & 0xFFFFFFFF if sys.version[0] == '3': fnv1 = _fnv1_python3 else: fnv1 = _fnv1_python2 def uuid32(): ''' Return 32bit UUID, based on the current time and pid. @return: 32bit int uuid @rtype: int The uuid is guaranteed to be unique within one process. 
''' global _uuid32 global _uuid32_lock with _uuid32_lock: candidate = _uuid32 while candidate == _uuid32: candidate = fnv1( struct.pack('QQ', int(time.time() * 1000000), os.getpid()) ) _uuid32 = candidate return candidate def uifname(): ''' Return a unique interface name based on a prime function @return: interface name @rtype: str ''' return 'pr%x' % uuid32() def map_exception(match, subst): ''' Decorator to map exception types ''' def wrapper(f): def decorated(*argv, **kwarg): try: f(*argv, **kwarg) except Exception as e: if match(e): raise subst(e) raise return decorated return wrapper def map_enoent(f): ''' Shortcut to map OSError(2) -> OSError(95) ''' return map_exception( lambda x: (isinstance(x, OSError) and x.errno == errno.ENOENT), lambda x: OSError(errno.EOPNOTSUPP, 'Operation not supported'), )(f) def metaclass(mc): def wrapped(cls): nvars = {} skip = ['__dict__', '__weakref__'] slots = cls.__dict__.get('__slots__') if not isinstance(slots, (list, tuple)): slots = [slots] for k in slots: skip.append(k) for k, v in cls.__dict__.items(): if k not in skip: nvars[k] = v return mc(cls.__name__, cls.__bases__, nvars) return wrapped def failed_class(message): class FailedClass(object): def __init__(self, *argv, **kwarg): ret = RuntimeError(message) ret.feature_supported = False raise ret return FailedClass pyroute2-0.7.11/pyroute2/config/000077500000000000000000000000001455030217500164315ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/config/__init__.py000066400000000000000000000025251455030217500205460ustar00rootroot00000000000000import multiprocessing import platform import re import signal import socket kernel_version_re = re.compile('^[0-9.]+') def parse_kernel_version(kernel_name): match_obj = kernel_version_re.match(kernel_name) if match_obj is None: return [] return [int(x) for x in kernel_name[0 : match_obj.end()].split('.') if x] SocketBase = socket.socket MpPipe = multiprocessing.Pipe MpQueue = multiprocessing.Queue MpProcess = 
multiprocessing.Process ipdb_nl_async = True nlm_generator = False nla_via_getattr = False async_qsize = 4096 commit_barrier = 0 gc_timeout = 60 db_transaction_limit = 1 cache_expire = 60 signal_stop_remote = None if hasattr(signal, 'SIGUSR1'): signal_stop_remote = signal.SIGUSR1 mock_iproute = False nlsocket_thread_safe = True # save uname() on startup time: it is not so # highly possible that the kernel will be # changed in runtime, while calling uname() # every time is a bit expensive uname = tuple(platform.uname()) machine = platform.machine() arch = platform.architecture()[0] kernel = parse_kernel_version(uname[2]) AF_BRIDGE = getattr(socket, 'AF_BRIDGE', 7) AF_NETLINK = getattr(socket, 'AF_NETLINK', 16) data_plugins_pkgs = [] data_plugins_path = [] netns_path = ['/var/run/netns', '/var/run/docker/netns'] entry_points_aliases = { 'pyroute2.netlink.exceptions': 'pyroute2.netlink.exceptions' } pyroute2-0.7.11/pyroute2/config/asyncio.py000066400000000000000000000044441455030217500204560ustar00rootroot00000000000000# # Author: Angus Lees # # Backported from a Neutron privsep proposal with the # permission of the author. 
# from __future__ import absolute_import import functools import socket import types try: import cPickle as pickle except ImportError: import pickle from pyroute2 import config as config _socketmethods = ( 'bind', 'close', 'connect', 'connect_ex', 'listen', 'getpeername', 'getsockname', 'getsockopt', 'makefile', 'recv', 'recvfrom', 'recv_into', 'recvfrom_into', 'send', 'sendto', 'sendall', 'setsockopt', 'setblocking', 'settimeout', 'gettimeout', 'shutdown', ) def _forward(name, self, *args, **kwargs): return getattr(self._sock, name)(*args, **kwargs) class _SocketWrapper(object): """eventlet-monkeypatch friendly socket class""" def __init__(self, *args, **kwargs): _sock = kwargs.get('_sock', None) or socket.socket(*args, **kwargs) self._sock = _sock for name in _socketmethods: f = functools.partial(_forward, name) f.__name__ = name setattr(_SocketWrapper, name, types.MethodType(f, self)) def fileno(self): return self._sock.fileno() def dup(self): return self.__class__(_sock=self._sock.dup()) class _MpConnection(object): """Highly limited multiprocessing.Connection alternative""" def __init__(self, sock): sock.setblocking(True) self.sock = sock def fileno(self): return self.sock.fileno() def send(self, obj): pickle.dump(obj, self, protocol=-1) def write(self, s): self.sock.sendall(s) def recv(self): return pickle.load(self) def read(self, n): return self.sock.recv(n) def readline(self): buf = b'' c = None while c != b'\n': c = self.sock.recv(1) buf += c return buf def close(self): self.sock.close() def _MultiprocessingPipe(): """multiprocess.Pipe reimplementation that uses MpConnection wrapper""" s1, s2 = socket.socketpair() return (_MpConnection(s1), _MpConnection(s2)) def asyncio_config(): config.SocketBase = _SocketWrapper config.MpPipe = _MultiprocessingPipe config.ipdb_nl_async = False pyroute2-0.7.11/pyroute2/config/eventlet.py000066400000000000000000000004021455030217500206250ustar00rootroot00000000000000import logging from pyroute2.config.asyncio import 
asyncio_config log = logging.getLogger(__name__) log.warning("Please use pyroute2.config.asyncio.asyncio_config") log.warning("The eventlet module will be dropped soon ") eventlet_config = asyncio_config pyroute2-0.7.11/pyroute2/config/log.py000066400000000000000000000010351455030217500175630ustar00rootroot00000000000000import logging ## # Create the main logger # # Do NOT touch the root logger -- not to break basicConfig() etc # log = logging.getLogger('pyroute2') log.setLevel(0) log.addHandler(logging.NullHandler()) def debug(*argv, **kwarg): return log.debug(*argv, **kwarg) def info(*argv, **kwarg): return log.info(*argv, **kwarg) def warning(*argv, **kwarg): return log.warning(*argv, **kwarg) def error(*argv, **kwarg): return log.error(*argv, **kwarg) def critical(*argv, **kwarg): return log.critical(*argv, **kwarg) pyroute2-0.7.11/pyroute2/config/test_platform.py000066400000000000000000000204611455030217500216710ustar00rootroot00000000000000#!/usr/bin/env python ''' Platform tests to discover the system capabilities. ''' import json import os import select import struct import sys import threading from pyroute2 import config from pyroute2.common import uifname from pyroute2.iproute.linux import RawIPRoute from pyroute2.netlink.rtnl import RTMGRP_LINK class SkipTest(Exception): pass class TestCapsRtnl(object): ''' A minimal test set to collect the RTNL implementation capabilities. It uses raw RTNL sockets and doesn't run any proxy code, so no transparent helpers are executed -- e.g., it will not create bridge via `brctl`, if RTNL doesn't support it. A short developer's guide:: def test_whatever_else(self): code This test will create a capability record `whatever_else`. If the `code` fails, the `whatever_else` will be set to `False`. If it throws the `SkipTest` exception, the `whatever_else` will be set to `None`. Otherwise it will be set to whatever the test returns. 
To collect the capabilities:: tce = TestCapsExt() tce.collect() print(tce.capabilities) Collected capabilities are in the `TestCapsExt.capabilities` dictionary, you can use them directly or by setting the `config.capabilities` singletone:: from pyroute2 import config # ... tce.collect() config.capabilities = tce.capabilities ''' def __init__(self): self.capabilities = {} self.ifnames = [] self.rtm_newlink = {} self.rtm_dellink = {} self.rtm_events = {} self.cmd, self.cmdw = os.pipe() self.ip = None self.event = threading.Event() def __getitem__(self, key): return self.capabilities[key] def set_capability(self, key, value): ''' Set a capability. ''' self.capabilities[key] = value def ifname(self): ''' Register and return a new unique interface name to be used in a test. ''' ifname = uifname() self.ifnames.append(ifname) self.rtm_events[ifname] = threading.Event() self.rtm_newlink[ifname] = [] self.rtm_dellink[ifname] = [] return ifname def monitor(self): # The monitoring code to collect RTNL messages # asynchronously. # Do **NOT** run manually. # use a separate socket for monitoring ip = RawIPRoute() ip.bind(RTMGRP_LINK) poll = select.poll() poll.register(ip, select.POLLIN | select.POLLPRI) poll.register(self.cmd, select.POLLIN | select.POLLPRI) self.event.set() while True: events = poll.poll() for fd, evt in events: if fd == ip.fileno(): msgs = ip.get() for msg in msgs: name = msg.get_attr('IFLA_IFNAME') event = msg.get('event') if name not in self.rtm_events: continue if event == 'RTM_NEWLINK': self.rtm_events[name].set() self.rtm_newlink[name].append(msg) elif event == 'RTM_DELLINK': self.rtm_dellink[name].append(msg) else: ip.close() return def setup(self): # The setup procedure for a test. # Do **NOT** run manually. # create the raw socket self.ip = RawIPRoute() def teardown(self): # The teardown procedure for a test. # Do **NOT** run manually. 
# clear the collected interfaces for ifname in self.ifnames: self.rtm_events[ifname].wait() self.rtm_events[ifname].clear() if self.rtm_newlink.get(ifname): self.ip.link('del', index=self.rtm_newlink[ifname][0]['index']) self.ifnames = [] # close the socket self.ip.close() def collect(self): ''' Run the tests and collect the capabilities. They will be saved in the `TestCapsRtnl.capabilities` attribute. ''' symbols = sorted(dir(self)) # start the monitoring thread mthread = threading.Thread(target=self.monitor) mthread.start() self.event.wait() # wait for the thread setup for name in symbols: if name.startswith('test_'): self.setup() try: ret = getattr(self, name)() if ret is None: ret = True self.set_capability(name[5:], ret) except SkipTest: self.set_capability(name[5:], None) except Exception: for ifname in self.ifnames: # cancel events queued for that test self.rtm_events[ifname].set() self.set_capability(name[5:], False) self.teardown() # stop the monitor os.write(self.cmdw, b'q') mthread.join() return self.capabilities def test_uname(self): ''' Return collected uname ''' return config.uname def test_machine(self): ''' Return machine, arch and byte order ''' return (config.machine, config.arch, sys.byteorder) def test_parsed_kernel_version(self): ''' Returned parsed kernel ''' return config.kernel def test_uid_gid(self): ''' Return current user/group id ''' return (os.getuid(), os.getgid()) def test_python_version(self): ''' Return Python version ''' return sys.version def test_unpack_from(self): ''' Does unpack_from() support bytearray as the buffer ''' # probe unpack from try: struct.unpack_from('I', bytearray((1, 0, 0, 0)), 0) except: return False # works... but may it be monkey patched? 
if hasattr(struct, '_u_f_orig'): return False def test_create_dummy(self): ''' An obvious test: an ability to create dummy interfaces ''' self.ghost = self.ifname() self.ip.link('add', ifname=self.ghost, kind='dummy') def test_create_bridge(self): ''' Can the kernel create bridges via netlink? ''' self.ip.link('add', ifname=self.ifname(), kind='bridge') def test_create_bond(self): ''' Can the kernel create bonds via netlink? ''' self.ip.link('add', ifname=self.ifname(), kind='bond') def test_ghost_newlink_count(self): ''' A normal flow (req == request, brd == broadcast message):: (req) -> RTM_NEWLINK (brd) <- RTM_NEWLINK (req) -> RTM_DELLINK (brd) <- RTM_DELLINK But on old kernels you can encounter the following:: (req) -> RTM_NEWLINK (brd) <- RTM_NEWLINK (req) -> RTM_DELLINK (brd) <- RTM_DELLINK (brd) <- RTM_NEWLINK (!) false positive And that obviously can break the code that relies on broadcast updates, since it will see as a new interface is created immediately after it was destroyed. One can ignore RTM_NEWLINK for the same name that follows a normal RTM_DELLINK. To do that, one should be sure the message will come. Another question is how many messages to ignore. This is not a test s.str., but it should follow after the `test_create_dummy`. It counts, how many RTM_NEWLINK messages arrived during the `test_create_dummy`. The ghost newlink messages count will be the same for other interface types as well. 
''' with open('/proc/version', 'r') as f: if int(f.read().split()[2][0]) > 2: # the issue is reported only for kernels 2.x return 0 # there is no guarantee it will come; it *may* come self.rtm_events[self.ghost].wait(0.5) return max(len(self.rtm_newlink.get(self.ghost, [])) - 1, 0) def run(): print(json.dumps(TestCapsRtnl().collect(), indent=4)) if __name__ == '__main__': run() pyroute2-0.7.11/pyroute2/conntrack.py000066400000000000000000000152751455030217500175320ustar00rootroot00000000000000import socket from pyroute2.netlink.nfnetlink.nfctsocket import ( IP_CT_TCP_FLAG_TO_NAME, IPSBIT_TO_NAME, TCP_CONNTRACK_TO_NAME, NFCTAttrTuple, NFCTSocket, ) class NFCTATcpProtoInfo(object): __slots__ = ( 'state', 'wscale_orig', 'wscale_reply', 'flags_orig', 'flags_reply', ) def __init__( self, state, wscale_orig=None, wscale_reply=None, flags_orig=None, flags_reply=None, ): self.state = state self.wscale_orig = wscale_orig self.wscale_reply = wscale_reply self.flags_orig = flags_orig self.flags_reply = flags_reply def state_name(self): return TCP_CONNTRACK_TO_NAME.get(self.state, "UNKNOWN") def flags_name(self, flags): if flags is None: return '' s = '' for bit, name in IP_CT_TCP_FLAG_TO_NAME.items(): if flags & bit: s += '{},'.format(name) return s[:-1] @classmethod def from_netlink(cls, ndmsg): cta_tcp = ndmsg.get_attr('CTA_PROTOINFO_TCP') state = cta_tcp.get_attr('CTA_PROTOINFO_TCP_STATE') # second argument is the mask returned by kernel but useless for us flags_orig, _ = cta_tcp.get_attr('CTA_PROTOINFO_TCP_FLAGS_ORIGINAL') flags_reply, _ = cta_tcp.get_attr('CTA_PROTOINFO_TCP_FLAGS_REPLY') return cls(state=state, flags_orig=flags_orig, flags_reply=flags_reply) def __repr__(self): return 'TcpInfo(state={}, orig_flags={}, reply_flags={})'.format( self.state_name(), self.flags_name(self.flags_orig), self.flags_name(self.flags_reply), ) class ConntrackEntry(object): __slots__ = ( 'tuple_orig', 'tuple_reply', 'status', 'timeout', 'protoinfo', 'mark', 'id', 'use', ) def 
__init__( self, family, tuple_orig, tuple_reply, cta_status, cta_timeout, cta_protoinfo, cta_mark, cta_id, cta_use, ): self.tuple_orig = NFCTAttrTuple.from_netlink(family, tuple_orig) self.tuple_reply = NFCTAttrTuple.from_netlink(family, tuple_reply) self.status = cta_status self.timeout = cta_timeout if self.tuple_orig.proto == socket.IPPROTO_TCP: self.protoinfo = NFCTATcpProtoInfo.from_netlink(cta_protoinfo) else: self.protoinfo = None self.mark = cta_mark self.id = cta_id self.use = cta_use def status_name(self): s = '' for bit, name in IPSBIT_TO_NAME.items(): if self.status & bit: s += '{},'.format(name) return s[:-1] def __repr__(self): s = 'Entry(orig={}, reply={}, status={}'.format( self.tuple_orig, self.tuple_reply, self.status_name() ) if self.protoinfo is not None: s += ', protoinfo={}'.format(self.protoinfo) s += ')' return s class Conntrack(NFCTSocket): """ High level conntrack functions """ def __init__(self, nlm_generator=True, **kwargs): super(Conntrack, self).__init__(nlm_generator=nlm_generator, **kwargs) def stat(self): """Return current statistics per CPU Same result than conntrack -S command but a list of dictionaries """ stats = [] for msg in super(Conntrack, self).stat(): stats.append({'cpu': msg['res_id']}) stats[-1].update( (k[10:].lower(), v) for k, v in msg['attrs'] if k.startswith('CTA_STATS_') ) return stats def count(self): """Return current number of conntrack entries Same result than /proc/sys/net/netfilter/nf_conntrack_count file or conntrack -C command """ for ndmsg in super(Conntrack, self).count(): return ndmsg.get_attr('CTA_STATS_GLOBAL_ENTRIES') def conntrack_max_size(self): """ Return the max size of connection tracking table /proc/sys/net/netfilter/nf_conntrack_max """ for ndmsg in super(Conntrack, self).conntrack_max_size(): return ndmsg.get_attr('CTA_STATS_GLOBAL_MAX_ENTRIES') def delete(self, entry): if isinstance(entry, ConntrackEntry): tuple_orig = entry.tuple_orig elif isinstance(entry, NFCTAttrTuple): tuple_orig = entry 
else: raise NotImplementedError() for ndmsg in self.entry('del', tuple_orig=tuple_orig): return ndmsg def entry(self, cmd, **kwargs): for res in super(Conntrack, self).entry(cmd, **kwargs): return res def dump_entries( self, mark=None, mark_mask=0xFFFFFFFF, tuple_orig=None, tuple_reply=None, ): """ Dump all entries from conntrack table with filters Filters can be only part of a conntrack tuple :param NFCTAttrTuple tuple_orig: filter on original tuple :param NFCTAttrTuple tuple_reply: filter on reply tuple Examples:: # Filter only on tcp connections for entry in ct.dump_entries(tuple_orig=NFCTAttrTuple( proto=socket.IPPROTO_TCP)): print("This entry is tcp: {}".format(entry)) # Filter only on icmp message to 8.8.8.8 for entry in ct.dump_entries(tuple_orig=NFCTAttrTuple( proto=socket.IPPROTO_ICMP, daddr='8.8.8.8')): print("This entry is icmp to 8.8.8.8: {}".format(entry)) """ for ndmsg in self.dump( mark=mark, mark_mask=mark_mask, tuple_orig=tuple_orig, tuple_reply=tuple_reply, ): if tuple_orig is not None and not tuple_orig.nla_eq( ndmsg['nfgen_family'], ndmsg.get_attr('CTA_TUPLE_ORIG') ): continue if tuple_reply is not None and not tuple_reply.nla_eq( ndmsg['nfgen_family'], ndmsg.get_attr('CTA_TUPLE_REPLY') ): continue yield ConntrackEntry( ndmsg['nfgen_family'], ndmsg.get_attr('CTA_TUPLE_ORIG'), ndmsg.get_attr('CTA_TUPLE_REPLY'), ndmsg.get_attr('CTA_STATUS'), ndmsg.get_attr('CTA_TIMEOUT'), ndmsg.get_attr('CTA_PROTOINFO'), ndmsg.get_attr('CTA_MARK'), ndmsg.get_attr('CTA_ID'), ndmsg.get_attr('CTA_USE'), ) pyroute2-0.7.11/pyroute2/devlink.py000066400000000000000000000040271455030217500171750ustar00rootroot00000000000000import logging from pyroute2.netlink import NLM_F_DUMP, NLM_F_REQUEST from pyroute2.netlink.devlink import DEVLINK_NAMES, DevlinkSocket, devlinkcmd log = logging.getLogger(__name__) class DL(DevlinkSocket): def __init__(self, *argv, **kwarg): # get specific groups kwarg if 'groups' in kwarg: groups = kwarg['groups'] del kwarg['groups'] else: groups = 
None # get specific async kwarg if 'async' in kwarg: # FIXME # raise deprecation error after 0.5.3 # log.warning( 'use "async_cache" instead of "async", ' '"async" is a keyword from Python 3.7' ) kwarg['async_cache'] = kwarg.pop('async') if 'async_cache' in kwarg: async_cache = kwarg.pop('async_cache') else: async_cache = False # align groups with async_cache if groups is None: groups = ~0 if async_cache else 0 # continue with init super(DL, self).__init__(*argv, **kwarg) # do automatic bind # FIXME: unfortunately we can not omit it here try: self.bind(groups, async_cache=async_cache) except: # thanks to jtluka at redhat.com and the LNST # team for the fixed fd leak super(DL, self).close() raise def list(self): return self.get_dump() def get_dump(self): msg = devlinkcmd() msg['cmd'] = DEVLINK_NAMES['DEVLINK_CMD_GET'] return tuple( self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) ) def port_list(self): return self.get_port_dump() def get_port_dump(self): msg = devlinkcmd() msg['cmd'] = DEVLINK_NAMES['DEVLINK_CMD_PORT_GET'] return tuple( self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) ) pyroute2-0.7.11/pyroute2/dhcp/000077500000000000000000000000001455030217500161025ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/dhcp/__init__.py000066400000000000000000000235001455030217500202130ustar00rootroot00000000000000''' DHCP protocol ============= The DHCP implementation here is far from complete, but already provides some basic functionality. Later it will be extended with IPv6 support and more DHCP options will be added. Right now it can be interesting mostly to developers, but not users and/or system administrators. So, the development hints first. The packet structure description is intentionally implemented as for netlink packets. Later these two parsers, netlink and generic, can be merged, so the syntax is more or less compatible. 
Packet fields ------------- There are two big groups of items within any DHCP packet. First, there are BOOTP/DHCP packet fields, they're defined with the `fields` attribute:: class dhcp4msg(msg): fields = ((name, format, policy), (name, format, policy), ... (name, format, policy)) The `name` can be any literal. Format should be specified as for the struct module, like `B` for `uint8`, or `i` for `int32`, or `>Q` for big-endian uint64. There are also aliases defined, so one can write `uint8` or `be16`, or like that. Possible aliases can be seen in the `pyroute2.protocols` module. The `policy` is a bit complicated. It can be a number or literal, and it will mean that it is a default value, that should be encoded if no other value is given. But when the `policy` is a dictionary, it can contain keys as follows:: 'l2addr': {'format': '6B', 'decode': ..., 'encode': ...} Keys `encode` and `decode` should contain filters to be used in decoding and encoding procedures. The encoding filter should accept the value from user's definition and should return a value that can be packed using `format`. The decoding filter should accept a value, decoded according to `format`, and should return value that can be used by a user. The `struct` module can not decode IP addresses etc, so they should be decoded as `4s`, e.g. Further transformation from 4 bytes string to a string like '10.0.0.1' performs the filter. DHCP options ------------ DHCP options are described in a similar way:: options = ((code, name, format), (code, name, format), ... (code, name, format)) Code is a `uint8` value, name can be any string literal. Format is a string, that must have a corresponding class, inherited from `pyroute2.dhcp.option`. One can find these classes in `pyroute2.dhcp` (more generic) or in `pyroute2.dhcp.dhcp4msg` (IPv4-specific). The option class must reside within dhcp message class. Every option class can be decoded in two ways. 
If it has fixed width fields, it can be decoded with ordinary `msg` routines, and in this case it can look like that:: class client_id(option): fields = (('type', 'uint8'), ('key', 'l2addr')) If it must be decoded by some custom rules, one can define the policy just like for the fields above:: class array8(option): policy = {'format': 'string', 'encode': lambda x: array('B', x).tobytes(), 'decode': lambda x: array('B', x).tolist()} In the corresponding modules, like in `pyroute2.dhcp.dhcp4msg`, one can define as many custom DHCP options, as one need. Just be sure, that they are compatible with the DHCP server and all fit into 1..254 (`uint8`) -- the 0 code is used for padding and the code 255 is the end of options code. ''' import struct import sys from array import array from pyroute2.common import basestring from pyroute2.protocols import msg BOOTREQUEST = 1 BOOTREPLY = 2 DHCPDISCOVER = 1 DHCPOFFER = 2 DHCPREQUEST = 3 DHCPDECLINE = 4 DHCPACK = 5 DHCPNAK = 6 DHCPRELEASE = 7 DHCPINFORM = 8 if not hasattr(array, 'tobytes'): # Python2 and Python3 versions of array differ, # but we need here a consistent API w/o warnings class array(array): tobytes = array.tostring class option(msg): code = 0 data_length = 0 policy = None value = None def __init__(self, content=None, buf=b'', offset=0, value=None, code=0): msg.__init__( self, content=content, buf=buf, offset=offset, value=value ) self.code = code @property def length(self): if self.data_length is None: return None if self.data_length == 0: return 1 else: return self.data_length + 2 def encode(self): # pack code self.buf += struct.pack('B', self.code) if self.code in (0, 255): return self # save buf save = self.buf self.buf = b'' # pack data into the new buf if self.policy is not None: value = self.policy.get('encode', lambda x: x)(self.value) if self.policy['format'] == 'string': fmt = '%is' % len(value) else: fmt = self.policy['format'] if sys.version_info[0] == 3 and isinstance(value, str): value = 
value.encode('utf-8') self.buf = struct.pack(fmt, value) else: msg.encode(self) # get the length data = self.buf self.buf = save self.buf += struct.pack('B', len(data)) # attach the packed data self.buf += data return self def decode(self): self.data_length = struct.unpack( 'B', self.buf[self.offset + 1 : self.offset + 2] )[0] if self.policy is not None: if self.policy['format'] == 'string': fmt = '%is' % self.data_length else: fmt = self.policy['format'] value = struct.unpack( fmt, self.buf[self.offset + 2 : self.offset + 2 + self.data_length], ) if len(value) == 1: value = value[0] value = self.policy.get('decode', lambda x: x)(value) if ( isinstance(value, basestring) and self.policy['format'] == 'string' ): value = value[: value.find(b'\x00')] self.value = value else: # remember current offset as msg.decode() will advance it offset = self.offset # move past the code and option length bytes so that msg.decode() # starts parsing at the right location self.offset += 2 msg.decode(self) # restore offset so that dhcpmsg.decode() advances it correctly self.offset = offset return self class dhcpmsg(msg): options = () l2addr = None _encode_map = {} _decode_map = {} def _register_options(self): for option in self.options: code, name, fmt = option[:3] self._decode_map[code] = self._encode_map[name] = { 'name': name, 'code': code, 'format': fmt, } def decode(self): msg.decode(self) self._register_options() self['options'] = {} while self.offset < len(self.buf): code = struct.unpack('B', self.buf[self.offset : self.offset + 1])[ 0 ] if code == 0: self.offset += 1 continue if code == 255: return self # code is unknown -- bypass it if code not in self._decode_map: length = struct.unpack( 'B', self.buf[self.offset + 1 : self.offset + 2] )[0] self.offset += length + 2 continue # code is known, work on it option_class = getattr(self, self._decode_map[code]['format']) option = option_class(buf=self.buf, offset=self.offset) option.decode() self.offset += option.length if 
option.value is not None: value = option.value else: value = option self['options'][self._decode_map[code]['name']] = value return self def encode(self): msg.encode(self) self._register_options() # put message type options = self.get('options') or { 'message_type': DHCPDISCOVER, 'parameter_list': [1, 3, 6, 12, 15, 28], } self.buf += ( self.uint8(code=53, value=options['message_type']).encode().buf ) self.buf += ( self.client_id({'type': 1, 'key': self['chaddr']}, code=61) .encode() .buf ) self.buf += self.string(code=60, value='pyroute2').encode().buf for name, value in options.items(): if name in ('message_type', 'client_id', 'vendor_id'): continue fmt = self._encode_map.get(name, {'format': None})['format'] if fmt is None: continue # name is known, ok option_class = getattr(self, fmt) if isinstance(value, dict): option = option_class( value, code=self._encode_map[name]['code'] ) else: option = option_class( code=self._encode_map[name]['code'], value=value ) self.buf += option.encode().buf self.buf += self.none(code=255).encode().buf return self class none(option): pass class be16(option): policy = {'format': '>H'} class be32(option): policy = {'format': '>I'} class uint8(option): policy = {'format': 'B'} class string(option): policy = {'format': 'string'} class array8(option): policy = { 'format': 'string', 'encode': lambda x: array('B', x).tobytes(), 'decode': lambda x: array('B', x).tolist(), } class client_id(option): fields = (('type', 'uint8'), ('key', 'l2addr')) pyroute2-0.7.11/pyroute2/dhcp/client.py000066400000000000000000000035501455030217500177350ustar00rootroot00000000000000import json import select import sys from pyroute2.dhcp import ( BOOTREQUEST, DHCPACK, DHCPDISCOVER, DHCPOFFER, DHCPREQUEST, ) from pyroute2.dhcp.dhcp4msg import dhcp4msg from pyroute2.dhcp.dhcp4socket import DHCP4Socket def req(s, poll, msg, expect): do_req = True xid = None while True: # get transaction id if do_req: xid = s.put(msg)['xid'] # wait for response events = 
poll.poll(2) for fd, event in events: response = s.get() if response['xid'] != xid: do_req = False continue if response['options']['message_type'] != expect: raise Exception("DHCP protocol error") return response do_req = True def action(ifname): s = DHCP4Socket(ifname) poll = select.poll() poll.register(s, select.POLLIN | select.POLLPRI) # DISCOVER discover = dhcp4msg( { 'op': BOOTREQUEST, 'chaddr': s.l2addr, 'options': { 'message_type': DHCPDISCOVER, 'parameter_list': [1, 3, 6, 12, 15, 28], }, } ) reply = req(s, poll, discover, expect=DHCPOFFER) # REQUEST request = dhcp4msg( { 'op': BOOTREQUEST, 'chaddr': s.l2addr, 'options': { 'message_type': DHCPREQUEST, 'requested_ip': reply['yiaddr'], 'server_id': reply['options']['server_id'], 'parameter_list': [1, 3, 6, 12, 15, 28], }, } ) reply = req(s, poll, request, expect=DHCPACK) s.close() return reply def run(): if len(sys.argv) > 1: ifname = sys.argv[1] else: ifname = 'eth0' print(json.dumps(action(ifname), indent=4)) if __name__ == '__main__': run() pyroute2-0.7.11/pyroute2/dhcp/dhcp4msg.py000066400000000000000000000040221455030217500201630ustar00rootroot00000000000000from socket import AF_INET, inet_ntop, inet_pton from pyroute2.dhcp import dhcpmsg, option class dhcp4msg(dhcpmsg): # # https://www.ietf.org/rfc/rfc2131.txt # fields = ( ('op', 'uint8', 1), # request ('htype', 'uint8', 1), # ethernet ('hlen', 'uint8', 6), # ethernet addr len ('hops', 'uint8'), ('xid', 'uint32'), ('secs', 'uint16'), ('flags', 'uint16'), ('ciaddr', 'ip4addr'), ('yiaddr', 'ip4addr'), ('siaddr', 'ip4addr'), ('giaddr', 'ip4addr'), ('chaddr', 'l2paddr'), ('sname', '64s'), ('file', '128s'), ('cookie', '4s', b'c\x82Sc'), ) # # https://www.ietf.org/rfc/rfc2132.txt # options = ( (0, 'pad', 'none'), (1, 'subnet_mask', 'ip4addr'), (2, 'time_offset', 'be32'), (3, 'router', 'ip4list'), (4, 'time_server', 'ip4list'), (5, 'ien_name_server', 'ip4list'), (6, 'name_server', 'ip4list'), (7, 'log_server', 'ip4list'), (8, 'cookie_server', 'ip4list'), (9, 
'lpr_server', 'ip4list'), (50, 'requested_ip', 'ip4addr'), (51, 'lease_time', 'be32'), (53, 'message_type', 'uint8'), (54, 'server_id', 'ip4addr'), (55, 'parameter_list', 'array8'), (57, 'messagi_size', 'be16'), (58, 'renewal_time', 'be32'), (59, 'rebinding_time', 'be32'), (60, 'vendor_id', 'string'), (61, 'client_id', 'client_id'), (255, 'end', 'none'), ) class ip4addr(option): policy = { 'format': '4s', 'encode': lambda x: inet_pton(AF_INET, x), 'decode': lambda x: inet_ntop(AF_INET, x), } class ip4list(option): policy = { 'format': 'string', 'encode': lambda x: ''.join([inet_pton(AF_INET, i) for i in x]), 'decode': lambda x: [ inet_ntop(AF_INET, x[i * 4 : i * 4 + 4]) for i in range(len(x) // 4) ], } pyroute2-0.7.11/pyroute2/dhcp/dhcp4socket.py000066400000000000000000000100301455030217500206610ustar00rootroot00000000000000''' IPv4 DHCP socket ================ ''' from pyroute2.common import AddrPool from pyroute2.dhcp.dhcp4msg import dhcp4msg from pyroute2.ext.rawsocket import RawSocket from pyroute2.protocols import ethmsg, ip4msg, udp4_pseudo_header, udpmsg def listen_udp_port(port=68): # pre-scripted BPF code that matches UDP port bpf_code = [ [40, 0, 0, 12], [21, 0, 8, 2048], [48, 0, 0, 23], [21, 0, 6, 17], [40, 0, 0, 20], [69, 4, 0, 8191], [177, 0, 0, 14], [72, 0, 0, 16], [21, 0, 1, port], [6, 0, 0, 65535], [6, 0, 0, 0], ] return bpf_code class DHCP4Socket(RawSocket): ''' Parameters: * ifname -- interface name to work on This raw socket binds to an interface and installs BPF filter to get only its UDP port. It can be used in poll/select and provides also the context manager protocol, so can be used in `with` statements. It does not provide any DHCP state machine, and does not inspect DHCP packets, it is totally up to you. No default values are provided here, except `xid` -- DHCP transaction ID. If `xid` is not provided, DHCP4Socket generates it for outgoing messages. 
''' def __init__(self, ifname, port=68): RawSocket.__init__(self, ifname, listen_udp_port(port)) self.port = port # Create xid pool # # Every allocated xid will be released automatically after 1024 # alloc() calls, there is no need to call free(). Minimal xid == 16 self.xid_pool = AddrPool(minaddr=16, release=1024) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def put(self, msg=None, dport=67): ''' Put DHCP message. Parameters: * msg -- dhcp4msg instance * dport -- DHCP server port If `msg` is not provided, it is constructed as default BOOTREQUEST + DHCPDISCOVER. Examples:: sock.put(dhcp4msg({'op': BOOTREQUEST, 'chaddr': 'ff:11:22:33:44:55', 'options': {'message_type': DHCPREQUEST, 'parameter_list': [1, 3, 6, 12, 15], 'requested_ip': '172.16.101.2', 'server_id': '172.16.101.1'}})) The method returns dhcp4msg that was sent, so one can get from there `xid` (transaction id) and other details. ''' # DHCP layer dhcp = msg or dhcp4msg({'chaddr': self.l2addr}) # dhcp transaction id if dhcp['xid'] is None: dhcp['xid'] = self.xid_pool.alloc() data = dhcp.encode().buf # UDP layer udp = udpmsg( {'sport': self.port, 'dport': dport, 'len': 8 + len(data)} ) udph = udp4_pseudo_header( {'dst': '255.255.255.255', 'len': 8 + len(data)} ) udp['csum'] = self.csum(udph.encode().buf + udp.encode().buf + data) udp.reset() # IPv4 layer ip4 = ip4msg( {'len': 20 + 8 + len(data), 'proto': 17, 'dst': '255.255.255.255'} ) ip4['csum'] = self.csum(ip4.encode().buf) ip4.reset() # MAC layer eth = ethmsg( {'dst': 'ff:ff:ff:ff:ff:ff', 'src': self.l2addr, 'type': 0x800} ) data = eth.encode().buf + ip4.encode().buf + udp.encode().buf + data self.send(data) dhcp.reset() return dhcp def get(self): ''' Get the next incoming packet from the socket and try to decode it as IPv4 DHCP. No analysis is done here, only MAC/IPv4/UDP headers are stripped out, and the rest is interpreted as DHCP. 
''' (data, addr) = self.recvfrom(4096) eth = ethmsg(buf=data).decode() ip4 = ip4msg(buf=data, offset=eth.offset).decode() udp = udpmsg(buf=data, offset=ip4.offset).decode() return dhcp4msg(buf=data, offset=udp.offset).decode() pyroute2-0.7.11/pyroute2/ethtool/000077500000000000000000000000001455030217500166425ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ethtool/__init__.py000066400000000000000000000000621455030217500207510ustar00rootroot00000000000000from .ethtool import Ethtool __all__ = [Ethtool] pyroute2-0.7.11/pyroute2/ethtool/common.py000066400000000000000000000150541455030217500205110ustar00rootroot00000000000000from collections import namedtuple # from ethtool/ethtool-copy.h of ethtool repo DUPLEX_HALF = 0x0 DUPLEX_FULL = 0x1 DUPLEX_UNKNOWN = 0xFF LINK_DUPLEX_NAMES = { DUPLEX_HALF: "Half", DUPLEX_FULL: "Full", DUPLEX_UNKNOWN: "Unknown", } # Which connector port. PORT_TP = 0x00 PORT_AUI = 0x01 PORT_MII = 0x02 PORT_FIBRE = 0x03 PORT_BNC = 0x04 PORT_DA = 0x05 PORT_NONE = 0xEF PORT_OTHER = 0xFF LINK_PORT_NAMES = { PORT_TP: "Twisted Pair", PORT_AUI: "AUI", PORT_MII: "MII", PORT_FIBRE: "FIBRE", PORT_BNC: "BNC", PORT_DA: "Direct Attach Copper", PORT_NONE: "NONE", PORT_OTHER: "Other", } # Which transceiver to use. XCVR_INTERNAL = 0x00 # PHY and MAC are in the same package XCVR_EXTERNAL = 0x01 # PHY and MAC are in different packages LINK_TRANSCEIVER_NAMES = {XCVR_INTERNAL: "Internal", XCVR_EXTERNAL: "External"} # Enable or disable autonegotiation. 
AUTONEG_DISABLE = 0x00 AUTONEG_ENABLE = 0x01 LINK_AUTONEG_NAMES = {AUTONEG_DISABLE: "off", AUTONEG_ENABLE: "on"} # MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then # the driver is required to renegotiate link ETH_TP_MDI_INVALID = 0x00 # status: unknown; control: unsupported ETH_TP_MDI = 0x01 # status: MDI; control: force MDI ETH_TP_MDI_X = 0x02 # status: MDI-X; control: force MDI-X ETH_TP_MDI_AUTO = 0x03 # control: auto-select LINK_TP_MDI_NAMES = { ETH_TP_MDI: "off", ETH_TP_MDI_X: "on", ETH_TP_MDI_AUTO: "auto", } LMBTypePort = 0 LMBTypeMode = 1 LMBTypeOther = -1 LinkModeBit = namedtuple('LinkModeBit', ('bit_index', 'name', 'type')) LinkModeBits = ( LinkModeBit(bit_index=0, name='10baseT/Half', type=LMBTypeMode), LinkModeBit(bit_index=1, name='10baseT/Full', type=LMBTypeMode), LinkModeBit(bit_index=2, name='100baseT/Half', type=LMBTypeMode), LinkModeBit(bit_index=3, name='100baseT/Full', type=LMBTypeMode), LinkModeBit(bit_index=4, name='1000baseT/Half', type=LMBTypeMode), LinkModeBit(bit_index=5, name='1000baseT/Full', type=LMBTypeMode), LinkModeBit(bit_index=6, name='Autoneg', type=LMBTypeOther), LinkModeBit(bit_index=7, name='TP', type=LMBTypePort), LinkModeBit(bit_index=8, name='AUI', type=LMBTypePort), LinkModeBit(bit_index=9, name='MII', type=LMBTypePort), LinkModeBit(bit_index=10, name='FIBRE', type=LMBTypePort), LinkModeBit(bit_index=11, name='BNC', type=LMBTypePort), LinkModeBit(bit_index=12, name='10000baseT/Full', type=LMBTypeMode), LinkModeBit(bit_index=13, name='Pause', type=LMBTypeOther), LinkModeBit(bit_index=14, name='Asym_Pause', type=LMBTypeOther), LinkModeBit(bit_index=15, name='2500baseX/Full', type=LMBTypeMode), LinkModeBit(bit_index=16, name='Backplane', type=LMBTypeOther), LinkModeBit(bit_index=17, name='1000baseKX/Full', type=LMBTypeMode), LinkModeBit(bit_index=18, name='10000baseKX4/Full', type=LMBTypeMode), LinkModeBit(bit_index=19, name='10000baseKR/Full', type=LMBTypeMode), LinkModeBit(bit_index=20, name='10000baseR_FEC', 
type=LMBTypeMode), LinkModeBit(bit_index=21, name='20000baseMLD2/Full', type=LMBTypeMode), LinkModeBit(bit_index=22, name='20000baseKR2/Full', type=LMBTypeMode), LinkModeBit(bit_index=23, name='40000baseKR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=24, name='40000baseCR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=25, name='40000baseSR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=26, name='40000baseLR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=27, name='56000baseKR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=28, name='56000baseCR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=29, name='56000baseSR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=30, name='56000baseLR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=31, name='25000baseCR/Full', type=LMBTypeMode), LinkModeBit(bit_index=32, name='25000baseKR/Full', type=LMBTypeMode), LinkModeBit(bit_index=33, name='25000baseSR/Full', type=LMBTypeMode), LinkModeBit(bit_index=34, name='50000baseCR2/Full', type=LMBTypeMode), LinkModeBit(bit_index=35, name='50000baseKR2/Full', type=LMBTypeMode), LinkModeBit(bit_index=36, name='100000baseKR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=37, name='100000baseSR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=38, name='100000baseCR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=39, name='100000baseLR4_ER4/Full', type=LMBTypeMode), LinkModeBit(bit_index=40, name='50000baseSR2/Full', type=LMBTypeMode), LinkModeBit(bit_index=41, name='1000baseX/Full', type=LMBTypeMode), LinkModeBit(bit_index=42, name='10000baseCR/Full', type=LMBTypeMode), LinkModeBit(bit_index=43, name='10000baseSR/Full', type=LMBTypeMode), LinkModeBit(bit_index=44, name='10000baseLR/Full', type=LMBTypeMode), LinkModeBit(bit_index=45, name='10000baseLRM/Full', type=LMBTypeMode), LinkModeBit(bit_index=46, name='10000baseER/Full', type=LMBTypeMode), LinkModeBit(bit_index=47, name='2500baseT/Full', type=LMBTypeMode), LinkModeBit(bit_index=48, name='5000baseT/Full', type=LMBTypeMode), 
LinkModeBit(bit_index=49, name='FEC_NONE', type=LMBTypeOther), LinkModeBit(bit_index=50, name='FEC_RS', type=LMBTypeOther), LinkModeBit(bit_index=51, name='FEC_BASER', type=LMBTypeOther), LinkModeBit(bit_index=52, name='50000baseKR/Full', type=LMBTypeMode), LinkModeBit(bit_index=53, name='50000baseSR/Full', type=LMBTypeMode), LinkModeBit(bit_index=54, name='50000baseCR/Full', type=LMBTypeMode), LinkModeBit(bit_index=55, name='50000baseLR_ER_FR/Full', type=LMBTypeMode), LinkModeBit(bit_index=56, name='50000baseDR/Full', type=LMBTypeMode), LinkModeBit(bit_index=57, name='100000baseKR2/Full', type=LMBTypeMode), LinkModeBit(bit_index=58, name='100000baseSR2/Full', type=LMBTypeMode), LinkModeBit(bit_index=59, name='100000baseCR2/Full', type=LMBTypeMode), LinkModeBit( bit_index=60, name='100000baseLR2_ER2_FR2/Full', type=LMBTypeMode ), LinkModeBit(bit_index=61, name='100000baseDR2/Full', type=LMBTypeMode), LinkModeBit(bit_index=62, name='200000baseKR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=63, name='200000baseSR4/Full', type=LMBTypeMode), LinkModeBit( bit_index=64, name='200000baseLR4_ER4_FR4/Full', type=LMBTypeMode ), LinkModeBit(bit_index=65, name='200000baseDR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=66, name='200000baseCR4/Full', type=LMBTypeMode), LinkModeBit(bit_index=67, name='100baseT1/Full', type=LMBTypeMode), LinkModeBit(bit_index=68, name='1000baseT1/Full', type=LMBTypeMode), ) LinkModeBits_by_index = {bit.bit_index: bit for bit in LinkModeBits} pyroute2-0.7.11/pyroute2/ethtool/ethtool.py000066400000000000000000000367001455030217500207000ustar00rootroot00000000000000import logging from collections import namedtuple from ctypes import c_uint16, c_uint32 from pyroute2.ethtool.common import ( LINK_DUPLEX_NAMES, LINK_PORT_NAMES, LINK_TP_MDI_NAMES, LINK_TRANSCEIVER_NAMES, LinkModeBits_by_index, LMBTypeMode, LMBTypePort, ) from pyroute2.ethtool.ioctl import WAKE_NAMES, IoctlEthtool from pyroute2.netlink.exceptions import NetlinkError from 
pyroute2.netlink.generic.ethtool import NlEthtool, ethtool_rings_msg INT32MINUS_UINT32 = c_uint32(-1).value INT16MINUS_UINT16 = c_uint16(-1).value log = logging.getLogger(__name__) EthtoolBitsetBit = namedtuple( 'EthtoolBitsetBit', ('index', 'name', 'enable', 'set') ) class UseIoctl(Exception): pass class EthtoolCoalesce(object): @staticmethod def from_ioctl(ioctl_coalesce): return {name: int(value) for name, value in ioctl_coalesce.items()} @staticmethod def to_ioctl(ioctl_coalesce, coalesce): for name, value in coalesce.items(): if ioctl_coalesce[name] != value: ioctl_coalesce[name] = value class EthtoolFeature(object): __slots__ = ('set', 'index', 'name', 'enable', 'available') def __init__(self, set, index, name, enable, available): self.set = set self.index = index self.name = name self.enable = enable self.available = available class EthtoolFeatures(namedtuple('EthtoolFeatures', ('features',))): @classmethod def from_ioctl(cls, features): return cls( { name: EthtoolFeature(set, index, name, enable, available) for name, enable, available, set, index in features } ) @staticmethod def to_ioctl(ioctl_features, eth_features): for feature in eth_features.features.values(): enable = ioctl_features[feature.name] if feature.enable == enable: continue ioctl_features[feature.name] = feature.enable class EthtoolWakeOnLan(namedtuple('EthtoolWolMode', ('modes', 'sopass'))): @classmethod def from_netlink(cls, nl_wol): nl_wol = nl_wol[0].get_attr('ETHTOOL_A_WOL_MODES') wol_modes = {} for mode in nl_wol.get_attr('ETHTOOL_A_BITSET_BITS')['attrs']: mode = mode[1] index = mode.get_attr('ETHTOOL_A_BITSET_BIT_INDEX') name = mode.get_attr('ETHTOOL_A_BITSET_BIT_NAME') enable = mode.get_attr('ETHTOOL_A_BITSET_BIT_VALUE') wol_modes[name] = EthtoolBitsetBit( index, name, True if enable is True else False, set=None ) return EthtoolWakeOnLan(modes=wol_modes, sopass=None) @classmethod def from_ioctl(cls, wol_mode): dict_wol_modes = {} for bit_index, name in WAKE_NAMES.items(): if 
wol_mode.supported & bit_index: dict_wol_modes[name] = EthtoolBitsetBit( bit_index, name, wol_mode.wolopts & bit_index != 0, set=None, ) return EthtoolWakeOnLan(modes=dict_wol_modes, sopass=None) class EthtoolStringBit( namedtuple('EthtoolStringBit', ('set', 'index', 'name')) ): @classmethod def from_netlink(cls, nl_string_sets): nl_string_sets = nl_string_sets[0] ethtool_strings_set = set() for i in nl_string_sets.get_attr('ETHTOOL_A_STRSET_STRINGSETS')[ 'attrs' ]: i = i[1] set_id = i.get_attr('ETHTOOL_A_STRINGSET_ID') i = i.get_attr('ETHTOOL_A_STRINGSET_STRINGS') for i in i['attrs']: i = i[1] ethtool_strings_set.add( cls( set=set_id, index=i.get_attr('ETHTOOL_A_STRING_INDEX'), name=i.get_attr('ETHTOOL_A_STRING_VALUE'), ) ) return ethtool_strings_set @classmethod def from_ioctl(cls, string_sets): return { cls(i // 32, i % 32, string) for i, string in enumerate(string_sets) } class EthtoolLinkInfo( namedtuple( 'EthtoolLinkInfo', ('port', 'phyaddr', 'tp_mdix', 'tp_mdix_ctrl', 'transceiver'), ) ): def __new__(cls, port, phyaddr, tp_mdix, tp_mdix_ctrl, transceiver): port = LINK_PORT_NAMES.get(port, None) transceiver = LINK_TRANSCEIVER_NAMES.get(transceiver, None) tp_mdix = LINK_TP_MDI_NAMES.get(tp_mdix, None) tp_mdix_ctrl = LINK_TP_MDI_NAMES.get(tp_mdix_ctrl, None) return super(EthtoolLinkInfo, cls).__new__( cls, port, phyaddr, tp_mdix, tp_mdix_ctrl, transceiver ) @classmethod def from_ioctl(cls, link_settings): return cls( port=link_settings.port, phyaddr=link_settings.phy_address, tp_mdix=link_settings.eth_tp_mdix, tp_mdix_ctrl=link_settings.eth_tp_mdix_ctrl, transceiver=link_settings.transceiver, ) @classmethod def from_netlink(cls, nl_link_mode): nl_link_mode = nl_link_mode[0] return cls( port=nl_link_mode.get_attr('ETHTOOL_A_LINKINFO_PORT'), phyaddr=nl_link_mode.get_attr('ETHTOOL_A_LINKINFO_PHYADDR'), tp_mdix=nl_link_mode.get_attr('ETHTOOL_A_LINKINFO_TP_MDIX'), tp_mdix_ctrl=( nl_link_mode.get_attr('ETHTOOL_A_LINKINFO_TP_MDIX_CTR') ), transceiver=( 
nl_link_mode.get_attr('ETHTOOL_A_LINKINFO_TRANSCEIVER') ), ) class EthtoolLinkMode( namedtuple( 'EthtoolLinkMode', ('speed', 'duplex', 'autoneg', 'supported_ports', 'supported_modes'), ) ): def __new__(cls, speed, duplex, autoneg, supported_ports, supported_modes): if ( speed == 0 or speed == INT32MINUS_UINT32 or speed == INT16MINUS_UINT16 ): speed = None duplex = LINK_DUPLEX_NAMES.get(duplex, None) return super(EthtoolLinkMode, cls).__new__( cls, speed, duplex, bool(autoneg), supported_ports, supported_modes ) @classmethod def from_ioctl(cls, link_settings): ( map_supported, map_advertising, map_lp_advertising, ) = IoctlEthtool.get_link_mode_masks(link_settings) bits_supported = IoctlEthtool.get_link_mode_bits(map_supported) supported_ports = [] supported_modes = [] for bit in bits_supported: if bit.type == LMBTypePort: supported_ports.append(bit.name) elif bit.type == LMBTypeMode: supported_modes.append(bit.name) return cls( speed=link_settings.speed, duplex=link_settings.duplex, autoneg=link_settings.autoneg, supported_ports=supported_ports, supported_modes=supported_modes, ) @classmethod def from_netlink(cls, nl_link_mode): nl_link_mode = nl_link_mode[0] supported_ports = [] supported_modes = [] for bitset_bit in nl_link_mode.get_attr( 'ETHTOOL_A_LINKMODES_OURS' ).get_attr('ETHTOOL_A_BITSET_BITS')['attrs']: bitset_bit = bitset_bit[1] bit_index = bitset_bit.get_attr('ETHTOOL_A_BITSET_BIT_INDEX') bit_name = bitset_bit.get_attr('ETHTOOL_A_BITSET_BIT_NAME') bit_value = bitset_bit.get_attr('ETHTOOL_A_BITSET_BIT_VALUE') if bit_value is not True: continue bit = LinkModeBits_by_index[bit_index] if bit.name != bit_name: log.error( "Bit name is not the same as the target: %s <> %s", bit.name, bit_name, ) continue if bit.type == LMBTypePort: supported_ports.append(bit.name) elif bit.type == LMBTypeMode: supported_modes.append(bit.name) return cls( speed=nl_link_mode.get_attr("ETHTOOL_A_LINKMODES_SPEED"), duplex=nl_link_mode.get_attr("ETHTOOL_A_LINKMODES_DUPLEX"), 
autoneg=nl_link_mode.get_attr("ETHTOOL_A_LINKMODES_AUTONEG"), supported_ports=supported_ports, supported_modes=supported_modes, ) class EthtoolRings( namedtuple( 'EthtoolRings', ( "rx_max", "rx_mini_max", "rx_jumbo_max", "tx_max", "rx", "rx_mini", "rx_jumbo", "tx", "rx_buf_len", "tcp_data_split", "cqe_size", "tx_push", "rx_push", "tx_push_buf_len", "tx_push_buf_len_max", ), ) ): nl_attributs_dict = { "rx_max": 'ETHTOOL_A_RINGS_RX_MAX', "rx_mini_max": 'ETHTOOL_A_RINGS_RX_MINI_MAX', "rx_jumbo_max": 'ETHTOOL_A_RINGS_RX_JUMBO_MAX', "tx_max": 'ETHTOOL_A_RINGS_TX_MAX', "rx": 'ETHTOOL_A_RINGS_RX', "rx_mini": 'ETHTOOL_A_RINGS_RX_MINI', "rx_jumbo": 'ETHTOOL_A_RINGS_RX_JUMBO', "tx": 'ETHTOOL_A_RINGS_TX', "rx_buf_len": 'ETHTOOL_A_RINGS_RX_BUF_LEN', "tcp_data_split": 'ETHTOOL_A_RINGS_TCP_DATA_SPLIT', "cqe_size": 'ETHTOOL_A_RINGS_CQE_SIZE', "tx_push": 'ETHTOOL_A_RINGS_TX_PUSH', "rx_push": 'ETHTOOL_A_RINGS_RX_PUSH', "tx_push_buf_len": 'ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN', "tx_push_buf_len_max": 'ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX', } def __new__( cls, rx_max=None, rx_mini_max=None, rx_jumbo_max=None, tx_max=None, rx=None, rx_mini=None, rx_jumbo=None, tx=None, rx_buf_len=None, tcp_data_split=None, cqe_size=None, tx_push=None, rx_push=None, tx_push_buf_len=None, tx_push_buf_len_max=None, ): return super(EthtoolRings, cls).__new__( cls, rx_max, rx_mini_max, rx_jumbo_max, tx_max, rx, rx_mini, rx_jumbo, tx, rx_buf_len, tcp_data_split, cqe_size, tx_push, rx_push, tx_push_buf_len, tx_push_buf_len_max, ) @classmethod def from_netlink(cls, nl_rings): nl_rings = nl_rings[0] return cls( **{ cls_attr: nl_rings.get_attr(netlink_attr) for cls_attr, netlink_attr in cls.nl_attributs_dict.items() } ) def to_netlink(self): nl_rings_attrs = ethtool_rings_msg() for cls_attr, netlink_attr in self.nl_attributs_dict.items(): attr = getattr(self, cls_attr) if attr is not None: nl_rings_attrs["attrs"].append((netlink_attr, attr)) return nl_rings_attrs @classmethod def from_ioctl(cls, ioctl_rings): 
ioctl_rings = dict(ioctl_rings) ioctl_rings.pop("cmd") return cls(**ioctl_rings) class Ethtool: def __init__(self): self._with_ioctl = IoctlEthtool() self._with_nl = NlEthtool() self._with_nl.module_err_level = 'debug' self._is_nl_working = self._with_nl.is_nlethtool_in_kernel() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def _nl_exec(self, f, with_netlink, *args, **kwargs): if with_netlink is None: with_netlink = self._is_nl_working if with_netlink is False: raise UseIoctl() try: return f(*args, **kwargs) except NetlinkError: raise UseIoctl() def get_link_mode(self, ifname, with_netlink=None): try: link_mode = self._nl_exec( self._with_nl.get_linkmode, with_netlink, ifname ) link_mode = EthtoolLinkMode.from_netlink(link_mode) except UseIoctl: self._with_ioctl.change_ifname(ifname) link_settings = self._with_ioctl.get_link_settings() link_mode = EthtoolLinkMode.from_ioctl(link_settings) return link_mode def get_link_info(self, ifname, with_netlink=None): try: link_info = self._nl_exec( self._with_nl.get_linkinfo, with_netlink, ifname ) link_info = EthtoolLinkInfo.from_netlink(link_info) except UseIoctl: self._with_ioctl.change_ifname(ifname) link_settings = self._with_ioctl.get_link_settings() link_info = EthtoolLinkInfo.from_ioctl(link_settings) return link_info def get_strings_set(self, ifname, with_netlink=None): try: stringsets = self._nl_exec( self._with_nl.get_stringset, with_netlink, ifname ) return EthtoolStringBit.from_netlink(stringsets) except UseIoctl: self._with_ioctl.change_ifname(ifname) stringsets = self._with_ioctl.get_stringset() return EthtoolStringBit.from_ioctl(stringsets) def get_wol(self, ifname): nl_working = self._is_nl_working if nl_working is True: try: wol = self._with_nl.get_wol(ifname) return EthtoolWakeOnLan.from_netlink(wol) except NetlinkError: nl_working = False if nl_working is False: self._with_ioctl.change_ifname(ifname) wol_mode = self._with_ioctl.get_wol() return 
EthtoolWakeOnLan.from_ioctl(wol_mode) def get_rings(self, ifname, with_netlink=None): try: rings = self._nl_exec( self._with_nl.get_rings, with_netlink, ifname ) rings = EthtoolRings.from_netlink(rings) except UseIoctl: self._with_ioctl.change_ifname(ifname) rings_info = self._with_ioctl.get_rings() rings = EthtoolRings.from_ioctl(rings_info) return rings def set_rings(self, ifname, with_netlink=None, **kwargs): try: rings = EthtoolRings(**kwargs).to_netlink() self._nl_exec(self._with_nl.set_rings, with_netlink, rings, ifname) except UseIoctl: self._with_ioctl.change_ifname(ifname) ioctl_rings = self._with_ioctl.get_rings() for name, value in kwargs.items(): if name in ioctl_rings.keys() and ioctl_rings[name] != value: ioctl_rings[name] = value self._with_ioctl.set_rings(ioctl_rings) def get_features(self, ifname): self._with_ioctl.change_ifname(ifname) return EthtoolFeatures.from_ioctl(self._with_ioctl.get_features()) def set_features(self, ifname, features): self._with_ioctl.change_ifname(ifname) ioctl_features = self._with_ioctl.get_features() EthtoolFeatures.to_ioctl(ioctl_features, features) self._with_ioctl.set_features(ioctl_features) def get_coalesce(self, ifname): self._with_ioctl.change_ifname(ifname) return EthtoolCoalesce.from_ioctl(self._with_ioctl.get_coalesce()) def set_coalesce(self, ifname, coalesce): self._with_ioctl.change_ifname(ifname) ioctl_coalesce = self._with_ioctl.get_coalesce() EthtoolCoalesce.to_ioctl(ioctl_coalesce, coalesce) self._with_ioctl.set_coalesce(ioctl_coalesce) def close(self): self._with_ioctl.close() self._with_nl.close() pyroute2-0.7.11/pyroute2/ethtool/ioctl.py000066400000000000000000000504111455030217500203270ustar00rootroot00000000000000import ctypes import errno import fcntl import socket from pyroute2.ethtool.common import LinkModeBits # ethtool/ethtool-copy.h IFNAMSIZ = 16 SIOCETHTOOL = 0x8946 ETHTOOL_GSET = 0x1 ETHTOOL_GCOALESCE = 0xE ETHTOOL_SCOALESCE = 0xF ETHTOOL_GSSET_INFO = 0x37 ETHTOOL_GWOL = 0x00000005 
ETHTOOL_GFLAGS = 0x00000025 ETHTOOL_GFEATURES = 0x0000003A ETHTOOL_SFEATURES = 0x0000003B ETHTOOL_GLINKSETTINGS = 0x0000004C ETHTOOL_GSTRINGS = 0x0000001B ETHTOOL_GSTATS = 0x0000001D ETH_GSTRING_LEN = 32 ETHTOOL_GRINGPARAM = 0x00000010 ETHTOOL_SRINGPARAM = 0x00000011 ETHTOOL_GRXCSUM = 0x00000014 ETHTOOL_SRXCSUM = 0x00000015 ETHTOOL_GTXCSUM = 0x00000016 ETHTOOL_STXCSUM = 0x00000017 ETHTOOL_GSG = 0x00000018 ETHTOOL_SSG = 0x00000019 ETHTOOL_GTSO = 0x0000001E ETHTOOL_STSO = 0x0000001F ETHTOOL_GUFO = 0x00000021 ETHTOOL_SUFO = 0x00000022 ETHTOOL_GGSO = 0x00000023 ETHTOOL_SGSO = 0x00000024 ETHTOOL_GGRO = 0x0000002B ETHTOOL_SGRO = 0x0000002C SOPASS_MAX = 6 ETH_SS_STATS = 1 ETH_SS_FEATURES = 4 ETH_FLAG_RXCSUM = 1 << 0 ETH_FLAG_TXCSUM = 1 << 1 ETH_FLAG_SG = 1 << 2 ETH_FLAG_TSO = 1 << 3 ETH_FLAG_UFO = 1 << 4 ETH_FLAG_GSO = 1 << 5 ETH_FLAG_GRO = 1 << 6 ETH_FLAG_TXVLAN = 1 << 7 ETH_FLAG_RXVLAN = 1 << 8 ETH_FLAG_LRO = 1 << 15 ETH_FLAG_NTUPLE = 1 << 27 ETH_FLAG_RXHASH = 1 << 28 ETH_FLAG_EXT_MASK = ( ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH ) SCHAR_MAX = 127 ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 = SCHAR_MAX # Wake-On-Lan options. 
WAKE_PHY = 1 << 0 WAKE_UCAST = 1 << 1 WAKE_MCAST = 1 << 2 WAKE_BCAST = 1 << 3 WAKE_ARP = 1 << 4 WAKE_MAGIC = 1 << 5 WAKE_MAGICSECURE = 1 << 6 # only meaningful if WAKE_MAGIC WAKE_FILTER = 1 << 7 WAKE_NAMES = { WAKE_PHY: "phy", WAKE_UCAST: "ucast", WAKE_MCAST: "mcast", WAKE_BCAST: "bcast", WAKE_ARP: "arp", WAKE_MAGIC: "magic", WAKE_MAGICSECURE: "magic_secure", WAKE_FILTER: "filter", } class EthtoolError(Exception): pass class NotSupportedError(EthtoolError): pass class NoSuchDevice(EthtoolError): pass class DictStruct(ctypes.Structure): def __init__(self, *args, **kwargs): super(DictStruct, self).__init__(*args, **kwargs) self._fields_as_dict = { name: [ lambda k: getattr(self, k), lambda k, v: setattr(self, k, v), ] for name, ct in self._fields_ } def __getitem__(self, key): return self._fields_as_dict[key][0](key) def __setitem__(self, key, value): return self._fields_as_dict[key][1](key, value) def __iter__(self): return iter(self._fields_as_dict) def items(self): for k, f in self._fields_as_dict.items(): getter, _ = f yield k, getter(k) def keys(self): return self._fields_as_dict.keys() def __contains__(self, key): return key in self._fields_as_dict class EthtoolWolInfo(DictStruct): _fields_ = [ ("cmd", ctypes.c_uint32), ("supported", ctypes.c_uint32), ("wolopts", ctypes.c_uint32), ("sopass", ctypes.c_uint8 * SOPASS_MAX), ] class EthtoolCmd(DictStruct): _pack_ = 1 _fields_ = [ ("cmd", ctypes.c_uint32), ("supported", ctypes.c_uint32), ("advertising", ctypes.c_uint32), ("speed", ctypes.c_uint16), ("duplex", ctypes.c_uint8), ("port", ctypes.c_uint8), ("phy_address", ctypes.c_uint8), ("transceiver", ctypes.c_uint8), ("autoneg", ctypes.c_uint8), ("mdio_support", ctypes.c_uint8), ("maxtxpkt", ctypes.c_uint32), ("maxrxpkt", ctypes.c_uint32), ("speed_hi", ctypes.c_uint16), ("eth_tp_mdix", ctypes.c_uint8), ("reserved2", ctypes.c_uint8), ("lp_advertising", ctypes.c_uint32), ("reserved", ctypes.c_uint32 * 2), ] class IoctlEthtoolLinkSettings(DictStruct): _pack_ = 1 
_fields_ = [ ("cmd", ctypes.c_uint32), ("speed", ctypes.c_uint32), ("duplex", ctypes.c_uint8), ("port", ctypes.c_uint8), ("phy_address", ctypes.c_uint8), ("autoneg", ctypes.c_uint8), ("mdio_support", ctypes.c_uint8), ("eth_tp_mdix", ctypes.c_uint8), ("eth_tp_mdix_ctrl", ctypes.c_uint8), ("link_mode_masks_nwords", ctypes.c_int8), ("transceiver", ctypes.c_uint8), ("reserved1", ctypes.c_uint8 * 3), ("reserved", ctypes.c_uint32 * 7), ( "link_mode_data", ctypes.c_uint32 * (3 * ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32), ), ] class EthtoolCoalesce(DictStruct): _pack_ = 1 _fields_ = [ # ETHTOOL_{G,S}COALESCE ("cmd", ctypes.c_uint32), # How many usecs to delay an RX interrupt after # a packet arrives. If 0, only rx_max_coalesced_frames # is used. ("rx_coalesce_usecs", ctypes.c_uint32), # How many packets to delay an RX interrupt after # a packet arrives. If 0, only rx_coalesce_usecs is # used. It is illegal to set both usecs and max frames # to zero as this would cause RX interrupts to never be # generated. ("rx_max_coalesced_frames", ctypes.c_uint32), # Same as above two parameters, except that these values # apply while an IRQ is being serviced by the host. Not # all cards support this feature and the values are ignored # in that case. ("rx_coalesce_usecs_irq", ctypes.c_uint32), ("rx_max_coalesced_frames_irq", ctypes.c_uint32), # How many usecs to delay a TX interrupt after # a packet is sent. If 0, only tx_max_coalesced_frames # is used. ("tx_coalesce_usecs", ctypes.c_uint32), # How many packets to delay a TX interrupt after # a packet is sent. If 0, only tx_coalesce_usecs is # used. It is illegal to set both usecs and max frames # to zero as this would cause TX interrupts to never be # generated. ("tx_max_coalesced_frames", ctypes.c_uint32), # Same as above two parameters, except that these values # apply while an IRQ is being serviced by the host. Not # all cards support this feature and the values are ignored # in that case. 
("tx_coalesce_usecs_irq", ctypes.c_uint32), ("tx_max_coalesced_frames_irq", ctypes.c_uint32), # How many usecs to delay in-memory statistics # block updates. Some drivers do not have an in-memory # statistic block, and in such cases this value is ignored. # This value must not be zero. ("stats_block_coalesce_usecs", ctypes.c_uint32), # Adaptive RX/TX coalescing is an algorithm implemented by # some drivers to improve latency under low packet rates and # improve throughput under high packet rates. Some drivers # only implement one of RX or TX adaptive coalescing. Anything # not implemented by the driver causes these values to be # silently ignored. ("use_adaptive_rx_coalesce", ctypes.c_uint32), ("use_adaptive_tx_coalesce", ctypes.c_uint32), # When the packet rate (measured in packets per second) # is below pkt_rate_low, the {rx,tx}_*_low parameters are # used. ("pkt_rate_low", ctypes.c_uint32), ("rx_coalesce_usecs_low", ctypes.c_uint32), ("rx_max_coalesced_frames_low", ctypes.c_uint32), ("tx_coalesce_usecs_low", ctypes.c_uint32), ("tx_max_coalesced_frames_low", ctypes.c_uint32), # When the packet rate is below pkt_rate_high but above # pkt_rate_low (both measured in packets per second) the # normal {rx,tx}_* coalescing parameters are used. # When the packet rate is (measured in packets per second) # is above pkt_rate_high, the {rx,tx}_*_high parameters are # used. ("pkt_rate_high", ctypes.c_uint32), ("rx_coalesce_usecs_high", ctypes.c_uint32), ("rx_max_coalesced_frames_high", ctypes.c_uint32), ("tx_coalesce_usecs_high", ctypes.c_uint32), ("tx_max_coalesced_frames_high", ctypes.c_uint32), # How often to do adaptive coalescing packet rate sampling, # measured in seconds. Must not be zero. 
("rate_sample_interval", ctypes.c_uint32), ] class EthtoolValue(ctypes.Structure): _fields_ = [("cmd", ctypes.c_uint32), ("data", ctypes.c_uint32)] class EthtoolSsetInfo(ctypes.Structure): _pack_ = 1 _fields_ = [ ("cmd", ctypes.c_uint32), ("reserved", ctypes.c_uint32), ("sset_mask", ctypes.c_uint64), ("data", ctypes.c_uint32), ] def generate_EthtoolGstrings(gstrings_length): class EthtoolGstrings(ctypes.Structure): _fields_ = [ ("cmd", ctypes.c_uint32), ("string_set", ctypes.c_uint32), ("len", ctypes.c_uint32), ("strings", ctypes.c_ubyte * ETH_GSTRING_LEN * gstrings_length), ] return EthtoolGstrings class EthtoolGetFeaturesBlock(ctypes.Structure): _fields_ = [ ("available", ctypes.c_uint32), ("requested", ctypes.c_uint32), ("active", ctypes.c_uint32), ("never_changed", ctypes.c_uint32), ] class EthtoolSetFeaturesBlock(ctypes.Structure): _fields_ = [("changed", ctypes.c_uint32), ("active", ctypes.c_uint32)] def generate_EthtoolGStats(stats_length): class EthtoolGStats(ctypes.Structure): _fields_ = [ ("cmd", ctypes.c_uint32), ("size", ctypes.c_uint32), ("data", ctypes.c_uint64 * stats_length), ] return EthtoolGStats def div_round_up(n, d): return int(((n) + (d) - 1) / (d)) def feature_bits_to_blocks(n_bits): return div_round_up(n_bits, 32) class EthtoolGfeatures(ctypes.Structure): _fields_ = [ ("cmd", ctypes.c_uint32), ("size", ctypes.c_uint32), ("features", EthtoolGetFeaturesBlock * feature_bits_to_blocks(256)), ] class EthtoolSfeatures(ctypes.Structure): _fields_ = [ ("cmd", ctypes.c_uint32), ("size", ctypes.c_uint32), ("features", EthtoolSetFeaturesBlock * feature_bits_to_blocks(256)), ] class FeatureState(ctypes.Structure): _fields_ = [("off_flags", ctypes.c_uint32), ("features", EthtoolGfeatures)] class EthtoolRingParam(DictStruct): _pack_ = 1 _fields_ = [ ("cmd", ctypes.c_uint32), ("rx_max", ctypes.c_uint32), ("rx_mini_max", ctypes.c_uint32), ("rx_jumbo_max", ctypes.c_uint32), ("tx_max", ctypes.c_uint32), ("rx", ctypes.c_uint32), ("rx_mini", ctypes.c_uint32), 
("rx_jumbo", ctypes.c_uint32), ("tx", ctypes.c_uint32), ] class IfReqData(ctypes.Union): dummy = generate_EthtoolGstrings(0) _fields_ = [ ("ifr_data", ctypes.POINTER(EthtoolCmd)), ("coalesce", ctypes.POINTER(EthtoolCoalesce)), ("value", ctypes.POINTER(EthtoolValue)), ("sset_info", ctypes.POINTER(EthtoolSsetInfo)), ("gstrings", ctypes.POINTER(None)), ("gstats", ctypes.POINTER(None)), ("gfeatures", ctypes.POINTER(EthtoolGfeatures)), ("sfeatures", ctypes.POINTER(EthtoolSfeatures)), ("glinksettings", ctypes.POINTER(IoctlEthtoolLinkSettings)), ("wolinfo", ctypes.POINTER(EthtoolWolInfo)), ("rings", ctypes.POINTER(EthtoolRingParam)), ] class IfReq(ctypes.Structure): _pack_ = 1 _anonymous_ = ("u",) _fields_ = [("ifr_name", ctypes.c_uint8 * IFNAMSIZ), ("u", IfReqData)] class IfReqSsetInfo(ctypes.Structure): _pack_ = 1 _fields_ = [ ("ifr_name", ctypes.c_uint8 * IFNAMSIZ), ("info", ctypes.POINTER(EthtoolSsetInfo)), ] class EthtoolFeaturesList: def __init__(self, cmd, stringsset): self._offsets = {} self._cmd = cmd self._cmd_set = EthtoolSfeatures(cmd=ETHTOOL_SFEATURES, size=cmd.size) self._gfeatures = cmd.features self._sfeatures = self._cmd_set.features feature_i = 0 for i, name in enumerate(stringsset): feature_i = i // 32 flag_bit = 1 << (i % 32) self._offsets[name] = (feature_i, flag_bit) while feature_i: feature_i -= 1 self._sfeatures[feature_i].active = self._gfeatures[ feature_i ].active self._sfeatures[feature_i].changed = 0 def is_available(self, name): feature_i, flag_bit = self._offsets[name] return self._gfeatures[feature_i].available & flag_bit != 0 def is_active(self, name): feature_i, flag_bit = self._offsets[name] return self._gfeatures[feature_i].active & flag_bit != 0 def is_requested(self, name): feature_i, flag_bit = self._offsets[name] return self._gfeatures[feature_i].requested & flag_bit != 0 def is_never_changed(self, name): feature_i, flag_bit = self._offsets[name] return self._gfeatures[feature_i].never_changed & flag_bit != 0 def __iter__(self): for 
name in self._offsets: feature_i, flag_bit = self._offsets[name] yield ( name, self.get_value(name), self.is_available(name), feature_i, flag_bit, ) def keys(self): return self._offsets.keys() def __contains__(self, name): return name in self._offsets def __getitem__(self, key): return self.get_value(key) def __setitem__(self, key, value): return self.set_value(key, value) def get_value(self, name): return self.is_active(name) def set_value(self, name, value): if value not in (1, 0, True, False): raise ValueError("Need a boolean value") feature_i, flag_bit = self._offsets[name] if value: self._gfeatures[feature_i].active |= flag_bit self._sfeatures[feature_i].active |= flag_bit else: # active is ctypes.c_uint32 self._gfeatures[feature_i].active &= flag_bit ^ 0xFFFFFFFF self._sfeatures[feature_i].active &= flag_bit ^ 0xFFFFFFFF self._sfeatures[feature_i].changed |= flag_bit class IoctlEthtool: def __init__(self, ifname=None): self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.ifname = None self.ifreq = None self.stat_names = None if ifname is not None: self.change_ifname(ifname) def close(self): self.sock.close() def change_ifname(self, ifname): self.ifname = bytearray(ifname, 'utf-8') self.ifname.extend(b"\0" * (IFNAMSIZ - len(self.ifname))) self.ifreq = IfReq() self.ifreq.ifr_name = (ctypes.c_uint8 * IFNAMSIZ)(*self.ifname) self.stat_names = None def ioctl(self): try: if fcntl.ioctl(self.sock, SIOCETHTOOL, self.ifreq): raise NotSupportedError() except OSError as e: if e.errno == errno.ENOTSUP: raise NotSupportedError(self.ifname.decode("utf-8")) elif e.errno == errno.ENODEV: raise NoSuchDevice(self.ifname.decode("utf-8")) raise def get_statistics(self): """Statistics in raw format, without names""" if not self.stat_names: self.stat_names = self.get_stringset(set_id=ETH_SS_STATS) gstats = generate_EthtoolGStats(len(self.stat_names))( cmd=ETHTOOL_GSTATS ) self.ifreq.gstats = ctypes.cast( ctypes.pointer(gstats), ctypes.POINTER(None) ) self.ioctl() 
assert len(self.stat_names) == len(gstats.data) return list(zip(self.stat_names, gstats.data)) def get_stringset_length(self, set_id): sset_info = EthtoolSsetInfo( cmd=ETHTOOL_GSSET_INFO, reserved=0, sset_mask=1 << set_id ) ifreq_sset = IfReqSsetInfo() ifreq_sset.ifr_name = (ctypes.c_uint8 * IFNAMSIZ)(*self.ifname) ifreq_sset.info = ctypes.pointer(sset_info) fcntl.ioctl(self.sock, SIOCETHTOOL, ifreq_sset) assert sset_info.sset_mask return sset_info.data def get_stringset( self, set_id=ETH_SS_FEATURES, drvinfo_offset=0, null_terminate=1 ): # different sets have potentially different lengthts, # obtain size dynamically gstrings_length = self.get_stringset_length(set_id) EthtoolGstringsType = generate_EthtoolGstrings(gstrings_length) gstrings = EthtoolGstringsType( cmd=ETHTOOL_GSTRINGS, string_set=set_id, len=gstrings_length ) self.ifreq.gstrings = ctypes.cast( ctypes.pointer(gstrings), ctypes.POINTER(None) ) self.ioctl() strings_found = [] for i in range(gstrings_length): buf = '' for j in range(ETH_GSTRING_LEN): code = gstrings.strings[i][j] if code == 0: break buf += chr(code) strings_found.append(buf) return strings_found def get_features(self): stringsset = self.get_stringset(set_id=ETH_SS_FEATURES) cmd = EthtoolGfeatures() cmd.cmd = ETHTOOL_GFEATURES cmd.size = feature_bits_to_blocks(len(stringsset)) self.ifreq.gfeatures = ctypes.pointer(cmd) self.ioctl() return EthtoolFeaturesList(cmd, stringsset) def set_features(self, features): self.ifreq.sfeatures = ctypes.pointer(features._cmd_set) return self.ioctl() def get_cmd(self): cmd = EthtoolCmd(cmd=ETHTOOL_GSET) self.ifreq.ifr_data = ctypes.pointer(cmd) self.ioctl() return cmd @staticmethod def get_link_mode_bits(map_bits): for bit in LinkModeBits: map_i = bit.bit_index // 32 map_bit = bit.bit_index % 32 if map_i >= len(map_bits): continue if map_bits[map_i] & (1 << map_bit): yield bit @staticmethod def get_link_mode_masks(ecmd): map_supported = [] map_advertising = [] map_lp_advertising = [] i = 0 while i != 
ecmd.link_mode_masks_nwords: map_supported.append(ecmd.link_mode_data[i]) i += 1 while i != ecmd.link_mode_masks_nwords * 2: map_advertising.append(ecmd.link_mode_data[i]) i += 1 while i != ecmd.link_mode_masks_nwords * 3: map_lp_advertising.append(ecmd.link_mode_data[i]) i += 1 return (map_supported, map_advertising, map_lp_advertising) def get_link_settings(self): ecmd = IoctlEthtoolLinkSettings() ecmd.cmd = ETHTOOL_GLINKSETTINGS self.ifreq.glinksettings = ctypes.pointer(ecmd) # Handshake with kernel to determine number of words for link # mode bitmaps. When requested number of bitmap words is not # the one expected by kernel, the latter returns the integer # opposite of what it is expecting. We request length 0 below # (aka. invalid bitmap length) to get this info. self.ioctl() # see above: we expect a strictly negative value from kernel. if ( ecmd.link_mode_masks_nwords >= 0 or ecmd.cmd != ETHTOOL_GLINKSETTINGS ): raise NotSupportedError() # got the real ecmd.req.link_mode_masks_nwords, # now send the real request ecmd.link_mode_masks_nwords = -ecmd.link_mode_masks_nwords self.ioctl() if ( ecmd.link_mode_masks_nwords <= 0 or ecmd.cmd != ETHTOOL_GLINKSETTINGS ): raise NotSupportedError() return ecmd def get_coalesce(self): cmd = EthtoolCoalesce(cmd=ETHTOOL_GCOALESCE) self.ifreq.coalesce = ctypes.pointer(cmd) self.ioctl() return cmd def set_coalesce(self, coalesce): coalesce.cmd = ETHTOOL_SCOALESCE self.ifreq.coalesce = ctypes.pointer(coalesce) self.ioctl() return def get_wol(self): cmd = EthtoolWolInfo(cmd=ETHTOOL_GWOL) self.ifreq.wolinfo = ctypes.pointer(cmd) self.ioctl() return cmd def get_rings(self): cmd = EthtoolRingParam(cmd=ETHTOOL_GRINGPARAM) self.ifreq.rings = ctypes.pointer(cmd) self.ioctl() return cmd def set_rings(self, rings): rings.cmd = ETHTOOL_SRINGPARAM self.ifreq.rings = ctypes.pointer(rings) self.ioctl() 
pyroute2-0.7.11/pyroute2/ext/000077500000000000000000000000001455030217500157645ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ext/__init__.py000066400000000000000000000000001455030217500200630ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ext/icmp.py000066400000000000000000000004761455030217500172750ustar00rootroot00000000000000from pyroute2.protocols import msg class icmpmsg(msg): fields = [('type', 'uint8'), ('code', 'uint8'), ('csum', 'be32')] class icmp_router_adv(icmpmsg): fields = icmpmsg.fields + [ ('addrs_num', 'uint8'), ('alen', 'uint8'), ('lifetime', 'be32'), ('addrs', 'routers'), ] pyroute2-0.7.11/pyroute2/ext/rawsocket.py000066400000000000000000000074151455030217500203470ustar00rootroot00000000000000import struct from ctypes import ( Structure, addressof, c_ubyte, c_uint, c_ushort, c_void_p, sizeof, string_at, ) from socket import AF_PACKET, SOCK_RAW, SOL_SOCKET, errno, error, htons, socket from pyroute2.iproute.linux import IPRoute ETH_P_ALL = 3 SO_ATTACH_FILTER = 26 SO_DETACH_FILTER = 27 total_filter = [[0x06, 0, 0, 0]] class sock_filter(Structure): _fields_ = [ ('code', c_ushort), # u16 ('jt', c_ubyte), # u8 ('jf', c_ubyte), # u8 ('k', c_uint), ] # u32 class sock_fprog(Structure): _fields_ = [('len', c_ushort), ('filter', c_void_p)] def compile_bpf(code): ProgramType = sock_filter * len(code) program = ProgramType(*[sock_filter(*line) for line in code]) sfp = sock_fprog(len(code), addressof(program[0])) return string_at(addressof(sfp), sizeof(sfp)), program class RawSocket(socket): ''' This raw socket binds to an interface and optionally installs a BPF filter. When created, the socket's buffer is cleared to remove packets that arrived before bind() or the BPF filter is installed. Doing so requires calling recvfrom() which may raise an exception if the interface is down. In order to allow creating the socket when the interface is down, the ENETDOWN exception is caught and discarded. 
''' fprog = None def __init__(self, ifname, bpf=None): self.ifname = ifname # lookup the interface details with IPRoute() as ip: for link in ip.get_links(): if link.get_attr('IFLA_IFNAME') == ifname: break else: raise IOError(2, 'Link not found') self.l2addr = link.get_attr('IFLA_ADDRESS') self.ifindex = link['index'] # bring up the socket socket.__init__(self, AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)) socket.bind(self, (self.ifname, ETH_P_ALL)) if bpf: self.clear_buffer() fstring, self.fprog = compile_bpf(bpf) socket.setsockopt(self, SOL_SOCKET, SO_ATTACH_FILTER, fstring) else: self.clear_buffer(remove_total_filter=True) def clear_buffer(self, remove_total_filter=False): # there is a window of time after the socket has been created and # before bind/attaching a filter where packets can be queued onto the # socket buffer # see comments in function set_kernel_filter() in libpcap's # pcap-linux.c. libpcap sets a total filter which does not match any # packet. It then clears what is already in the socket # before setting the desired filter total_fstring, prog = compile_bpf(total_filter) socket.setsockopt(self, SOL_SOCKET, SO_ATTACH_FILTER, total_fstring) self.setblocking(0) while True: try: self.recvfrom(0) except error as e: if e.args[0] == errno.ENETDOWN: # we only get this exception once per down event # there may be more packets left to clean pass elif e.args[0] in [errno.EAGAIN, errno.EWOULDBLOCK]: break else: raise self.setblocking(1) if remove_total_filter: # total_fstring ignored socket.setsockopt( self, SOL_SOCKET, SO_DETACH_FILTER, total_fstring ) def csum(self, data): if len(data) % 2: data += b'\x00' csum = sum( [ struct.unpack('>H', data[x * 2 : x * 2 + 2])[0] for x in range(len(data) // 2) ] ) csum = (csum >> 16) + (csum & 0xFFFF) csum += csum >> 16 return ~csum & 0xFFFF 
pyroute2-0.7.11/pyroute2/inotify/000077500000000000000000000000001455030217500166455ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/inotify/__init__.py000066400000000000000000000000001455030217500207440ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/inotify/inotify_fd.py000066400000000000000000000045431455030217500213570ustar00rootroot00000000000000import ctypes import ctypes.util import os import select import socket import threading from pyroute2.inotify.inotify_msg import inotify_msg class Inotify(object): def __init__(self, libc=None, path=None): self.fd = None self.wd = {} self.ctlr, self.ctlw = os.pipe() self.path = set(path) self._poll = select.poll() self._poll.register(self.ctlr) self.lock = threading.RLock() self.libc = libc or ctypes.CDLL( ctypes.util.find_library('c'), use_errno=True ) def bind(self, *argv, **kwarg): with self.lock: if self.fd is not None: raise socket.error(22, 'Invalid argument') self.fd = self.libc.inotify_init() self._poll.register(self.fd) for path in self.path: self.register_path(path) def register_path(self, path, mask=0x100 | 0x200): os.stat(path) with self.lock: if path in self.wd: return if self.fd is not None: s_path = ctypes.create_string_buffer(path.encode('utf-8')) wd = self.libc.inotify_add_watch( self.fd, ctypes.byref(s_path), mask ) self.wd[wd] = path self.path.add(path) def unregister_path(self): pass def get(self): # events = self._poll.poll() for fd, event in events: if fd == self.fd: data = os.read(self.fd, 4096) for msg in self.parse(data): yield msg else: yield def close(self): with self.lock: if self.fd is not None: os.write(self.ctlw, b'\0') for fd in (self.fd, self.ctlw, self.ctlr): if fd is not None: try: os.close(fd) self._poll.unregister(fd) except Exception: pass def parse(self, data): offset = 0 while offset <= len(data) - 16: # pick one header msg = inotify_msg(data, offset=offset) msg.decode() if msg['wd'] == 0: break msg['path'] = self.wd[msg['wd']] offset += msg.length yield msg 
pyroute2-0.7.11/pyroute2/inotify/inotify_msg.py000066400000000000000000000010331455030217500215430ustar00rootroot00000000000000import struct from pyroute2.netlink import nlmsg_base, nlmsg_decoder_generic class inotify_msg(nlmsg_base, nlmsg_decoder_generic): fields = ( ('wd', 'i'), ('mask', 'I'), ('cookie', 'I'), ('name_length', 'I'), ) def decode(self): super(inotify_msg, self).decode() (name,) = struct.unpack_from( '%is' % self['name_length'], self.data, self.offset + 16 ) self['name'] = name.decode('utf-8').strip('\0') self.length = self['name_length'] + 16 pyroute2-0.7.11/pyroute2/ipdb/000077500000000000000000000000001455030217500161025ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ipdb/__init__.py000066400000000000000000000000001455030217500202010ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ipdb/exceptions.py000066400000000000000000000003551455030217500206400ustar00rootroot00000000000000class DeprecationException(Exception): pass class CommitException(Exception): pass class CreateException(Exception): pass class PartialCommitException(Exception): pass class ShutdownException(Exception): pass pyroute2-0.7.11/pyroute2/ipdb/interfaces.py000066400000000000000000001506631455030217500206120ustar00rootroot00000000000000import errno import time import traceback from socket import AF_INET, AF_INET6 from socket import error as socket_error from socket import inet_ntop, inet_pton from pyroute2 import config from pyroute2.common import Dotkeys, View, basestring, dqn2int from pyroute2.config import AF_BRIDGE from pyroute2.ipdb.exceptions import ( CommitException, CreateException, PartialCommitException, ) from pyroute2.ipdb.linkedset import LinkedSet from pyroute2.ipdb.transactional import ( SYNC_TIMEOUT, Transactional, with_transaction, ) from pyroute2.netlink import rtnl from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.rtnl.ifinfmsg import IFF_MASK, ifinfmsg supported_kinds = ( 'bridge', 'bond', 'tuntap', 'vxlan', 'gre', 'gretap', 
'ip6gre', 'ip6gretap', 'macvlan', 'macvtap', 'ipvlan', 'vrf', 'vti', ) groups = ( rtnl.RTMGRP_LINK | rtnl.RTMGRP_NEIGH | rtnl.RTMGRP_IPV4_IFADDR | rtnl.RTMGRP_IPV6_IFADDR ) def _get_data_fields(): global supported_kinds ret = [] for data in supported_kinds: msg = ifinfmsg.ifinfo.data_map.get(data) if msg is not None: if getattr(msg, 'prefix', None) is not None: ret += [msg.nla2name(i[0]) for i in msg.nla_map] else: ret += [ifinfmsg.nla2name(i[0]) for i in msg.nla_map] return ret def _br_time_check(x, y): return abs(x - y) < 5 class Interface(Transactional): ''' Objects of this class represent network interface and all related objects: * addresses * (todo) neighbours * (todo) routes Interfaces provide transactional model and can act as context managers. Any attribute change implicitly starts a transaction. The transaction can be managed with three methods: * review() -- review changes * rollback() -- drop all the changes * commit() -- try to apply changes If anything will go wrong during transaction commit, it will be rolled back authomatically and an exception will be raised. Failed transaction review will be attached to the exception. 
''' _fields_cmp = { 'flags': lambda x, y: x & y & IFF_MASK == y & IFF_MASK, 'br_hello_time': _br_time_check, 'br_max_age': _br_time_check, 'br_ageing_time': _br_time_check, 'br_forward_delay': _br_time_check, 'br_mcast_membership_intvl': _br_time_check, 'br_mcast_querier_intvl': _br_time_check, 'br_mcast_query_intvl': _br_time_check, 'br_mcast_query_response_intvl': _br_time_check, 'br_mcast_startup_query_intvl': _br_time_check, } _virtual_fields = [ 'ipdb_scope', 'ipdb_priority', 'vlans', 'ipaddr', 'ports', 'vlan_flags', 'net_ns_fd', 'net_ns_pid', ] _fields = [ifinfmsg.nla2name(i[0]) for i in ifinfmsg.nla_map] for name in ('bridge_slave_data',): data = getattr(ifinfmsg.ifinfo, name) _fields.extend([ifinfmsg.nla2name(i[0]) for i in data.nla_map]) _fields.append('index') _fields.append('flags') _fields.append('mask') _fields.append('change') _fields.append('kind') _fields.append('peer') _fields.append('vlan_id') _fields.append('vlan_protocol') _fields.append('bond_mode') _fields.extend(_get_data_fields()) _fields.extend(_virtual_fields) def __init__(self, ipdb, mode=None, parent=None, uid=None): ''' Parameters: * ipdb -- ipdb() reference * mode -- transaction mode ''' Transactional.__init__(self, ipdb, mode) self.cleanup = ( 'header', 'linkinfo', 'protinfo', 'af_spec', 'attrs', 'event', 'map', 'stats', 'stats64', 'change', '__align', ) self.ingress = None self.egress = None self.nlmsg = None self.errors = [] self.partial = False self._exception = None self._deferred_link = None self._tb = None self._linked_sets.add('ipaddr') self._linked_sets.add('ports') self._linked_sets.add('vlans') self._freeze = None self._delay_add_port = set() self._delay_del_port = set() # 8<----------------------------------- # local setup: direct state is required with self._direct_state: for i in ('change', 'mask'): del self[i] self['ipaddr'] = self.ipdb._ipaddr_set() self['ports'] = LinkedSet() self['vlans'] = LinkedSet() self['ipdb_priority'] = 0 # 8<----------------------------------- 
    def __hash__(self):
        # interfaces are hashed by their kernel index
        return self['index']

    @property
    def if_master(self):
        '''
        [property] Link to the parent interface -- if it exists
        '''
        return self.get('master', None)

    def detach(self):
        '''
        Detach the interface from the IPDB interfaces view.
        '''
        self.ipdb.interfaces._detach(self['ifname'], self['index'], self.nlmsg)
        return self

    def freeze(self):
        '''
        Take a snapshot of the current interface state and register a
        callback that rolls the interface back to that snapshot on any
        subsequent netlink update.  Raises RuntimeError if the interface
        is already frozen.
        '''
        if self._freeze is not None:
            raise RuntimeError("the interface is frozen already")

        dump = self.pick()

        def cb(ipdb, msg, action):
            if msg.get('index', -1) == dump['index']:
                try:
                    # important: that's a rollback, so do not
                    # try to revert changes in the case of failure
                    self.commit(
                        transaction=dump, commit_phase=2, commit_mask=2
                    )
                except Exception:
                    pass

        self._freeze = self.ipdb.register_callback(cb)
        return self

    def unfreeze(self):
        '''
        Deregister the freeze() callback.
        '''
        self.ipdb.unregister_callback(self._freeze)
        self._freeze = None
        return self

    def load(self, data):
        '''
        Load the data from a dictionary to an existing
        transaction. Requires `commit()` call, or must be
        called from within a `with` statement.

        Sample::

            data = json.loads(...)
            with ipdb.interfaces['dummy1'] as i:
                i.load(data)

        Sample, mode `explicit`::

            data = json.loads(...)
            i = ipdb.interfaces['dummy1']
            i.begin()
            i.load(data)
            i.commit()
        '''
        for key in data:
            if data[key] is None:
                # None values are skipped, not applied
                continue
            if key == 'ipaddr':
                # full replace: drop current addresses, add new ones
                for addr in self['ipaddr']:
                    self.del_ip(*addr)
                for addr in data[key]:
                    if isinstance(addr, basestring):
                        addr = (addr,)
                    self.add_ip(*addr)
            elif key == 'ports':
                # full replace of bridge/bond ports
                for port in self['ports']:
                    self.del_port(port)
                for port in data[key]:
                    self.add_port(port)
            elif key == 'vlans':
                # full replace of the vlan filter; vid 1 is the default
                # pvid and is never re-added here
                for vlan in self['vlans']:
                    self.del_vlan(vlan)
                for vlan in data[key]:
                    if vlan != 1:
                        self.add_vlan(vlan)
            elif key in ('neighbours', 'family'):
                # ignore on load
                pass
            else:
                self[key] = data[key]
        return self

    def load_dict(self, data):
        '''
        Update the interface info from a dictionary.

        This call always bypasses open transactions, loading
        changes directly into the interface data.
        '''
        with self._direct_state:
            self.load(data)

    def load_netlink(self, dev):
        '''
        Update the interface info from RTM_NEWLINK message.

        This call always bypasses open transactions, loading
        changes directly into the interface data.
        '''
        global supported_kinds

        with self._direct_state:
            if self['ipdb_scope'] == 'locked':
                # do not touch locked interfaces
                return

            if self['ipdb_scope'] in ('shadow', 'create'):
                # ignore non-broadcast messages
                if dev['header']['sequence_number'] != 0:
                    return
                # ignore ghost RTM_NEWLINK messages
                if (config.kernel[0] < 3) and (
                    not dev.get_attr('IFLA_AF_SPEC')
                ):
                    return

            # copy message header fields, then NLAs
            for name, value in dev.items():
                self[name] = value
            for cell in dev['attrs']:
                #
                # Parse on demand
                #
                # At that moment, being not referenced, the
                # NLA is not decoded (yet). Calling
                # `__getitem__()` on nla_slot triggers the
                # NLA decoding, if the nla is referenced:
                #
                norm = ifinfmsg.nla2name(cell[0])
                if norm not in self.cleanup:
                    self[norm] = cell[1]

            # load interface kind
            linkinfo = dev.get_attr('IFLA_LINKINFO')
            if linkinfo is not None:
                kind = linkinfo.get_attr('IFLA_INFO_KIND')
                if kind is not None:
                    self['kind'] = kind
                    if kind == 'vlan':
                        # vlan id / protocol / flags live in INFO_DATA
                        data = linkinfo.get_attr('IFLA_INFO_DATA')
                        self['vlan_id'] = data.get_attr('IFLA_VLAN_ID')
                        self['vlan_protocol'] = data.get_attr(
                            'IFLA_VLAN_PROTOCOL'
                        )
                        self['vlan_flags'] = data.get_attr(
                            'IFLA_VLAN_FLAGS', {}
                        ).get('flags', 0)
                    if kind in supported_kinds:
                        # flatten kind-specific NLAs into plain fields
                        data = linkinfo.get_attr('IFLA_INFO_DATA') or {}
                        for nla in data.get('attrs', []):
                            norm = ifinfmsg.nla2name(nla[0])
                            self[norm] = nla[1]
            # load vlans
            if dev['family'] == AF_BRIDGE:
                spec = dev.get_attr('IFLA_AF_SPEC')
                if spec is not None:
                    vlans = spec.get_attrs('IFLA_BRIDGE_VLAN_INFO')
                    vmap = {}
                    for vlan in vlans:
                        vmap[vlan['vid']] = vlan
                    vids = set(vmap.keys())
                    # remove vids we do not have anymore
                    for vid in self['vlans'] - vids:
                        self.del_vlan(vid)
                    for vid in vids - self['vlans']:
                        self.add_vlan(vmap[vid])
                protinfo = dev.get_attr('IFLA_PROTINFO')
                if protinfo is not None:
                    for attr, value in protinfo['attrs']:
                        # strip the 'IFLA_' prefix and lowercase
                        attr = attr[5:].lower()
                        self[attr] = value
            # the rest is possible only when interface
            # is used in IPDB, not standalone
            if self.ipdb is not None:
                self['ipaddr'] = self.ipdb.ipaddr[self['index']]
                self['neighbours'] = self.ipdb.neighbours[self['index']]
            # finally, cleanup all not needed
            for item in self.cleanup:
                if item in self:
                    del self[item]
            # AF_BRIDGE messages for bridges contain
            # IFLA_MASTER == self.index, we should fix it
            if self.get('master', None) == self['index']:
                self['master'] = None

            self['ipdb_scope'] = 'system'

    def wait_ip(self, *argv, **kwarg):
        # delegate to IPaddrSet.wait_ip() -- block until an address
        # matching the given net/mask appears on the interface
        return self['ipaddr'].wait_ip(*argv, **kwarg)

    @with_transaction
    def add_ip(self, ip, mask=None, broadcast=None, anycast=None, scope=None):
        '''
        Add IP address to an interface

        Address formats:

            with ipdb.interfaces.eth0 as i:
                i.add_ip('192.168.0.1', 24)
                i.add_ip('192.168.0.2/24')
                i.add_ip('192.168.0.3/255.255.255.0')
                i.add_ip('192.168.0.4/24',
                         broadcast='192.168.0.255',
                         scope=254)
        '''
        family = 0
        # split mask
        if mask is None:
            ip, mask = ip.split('/')
        if ip.find(':') > -1:
            family = AF_INET6
            # normalize IPv6 format
            ip = inet_ntop(AF_INET6, inet_pton(AF_INET6, ip))
        else:
            family = AF_INET
        if isinstance(mask, basestring):
            try:
                # numeric prefix length, e.g. '24' or '0x18'
                mask = int(mask, 0)
            except:
                # fall back: dotted-quad netmask, e.g. '255.255.255.0'
                mask = dqn2int(mask, family)

        # if it is a transaction or an interface update, apply the change
        self['ipaddr'].unlink((ip, mask))
        request = {}
        if broadcast is not None:
            request['broadcast'] = broadcast
        if anycast is not None:
            request['anycast'] = anycast
        if scope is not None:
            request['scope'] = scope
        self['ipaddr'].add((ip, mask), raw=request)

    @with_transaction
    def del_ip(self, ip, mask=None):
        '''
        Delete IP address from an interface
        '''
        if mask is None:
            ip, mask = ip.split('/')
            if mask.find('.') > -1:
                mask = dqn2int(mask)
            else:
                mask = int(mask, 0)
        # normalize the address
        if ip.find(':') > -1:
            ip = inet_ntop(AF_INET6, inet_pton(AF_INET6, ip))
        if (ip, mask) in self['ipaddr']:
            self['ipaddr'].unlink((ip, mask))
            self['ipaddr'].remove((ip, mask))

    @with_transaction
    def add_vlan(self, vlan, flags=None):
        '''
        Add a vlan to the interface vlan filter; `vlan` is either a
        vid (int) or a bridge vlan info dict with a 'vid' key.
        '''
        if isinstance(vlan, dict):
            vid = vlan['vid']
        else:
            vid = vlan
            vlan = {'vid': vlan, 'flags': 0}
self['vlans'].unlink(vid) self['vlans'].add(vid, raw=(vlan, flags)) @with_transaction def del_vlan(self, vlan): if vlan in self['vlans']: self['vlans'].unlink(vlan) self['vlans'].remove(vlan) @with_transaction def add_port(self, port): ''' Add port to a bridge or bonding ''' ifindex = self._resolve_port(port) if not ifindex: self._delay_add_port.add(port) else: self['ports'].unlink(ifindex) self['ports'].add(ifindex) @with_transaction def del_port(self, port): ''' Remove port from a bridge or bonding ''' ifindex = self._resolve_port(port) if not ifindex: self._delay_del_port.add(port) else: self['ports'].unlink(ifindex) self['ports'].remove(ifindex) def reload(self): ''' Reload interface information ''' countdown = 3 while countdown: links = self.nl.get_links(self['index']) if links: self.load_netlink(links[0]) break else: countdown -= 1 time.sleep(1) return self def review(self): ret = super(Interface, self).review() last = self.current_tx if self['ipdb_scope'] == 'create': ret['+ipaddr'] = last['ipaddr'] ret['+ports'] = last['ports'] ret['+vlans'] = last['vlans'] del ret['ports'] del ret['ipaddr'] del ret['vlans'] if last._delay_add_port: ports = set(['*%s' % x for x in last._delay_add_port]) if '+ports' in ret: ret['+ports'] |= ports else: ret['+ports'] = ports if last._delay_del_port: ports = set(['*%s' % x for x in last._delay_del_port]) if '-ports' in ret: ret['-ports'] |= ports else: ret['-ports'] = ports return ret def _run(self, cmd, *argv, **kwarg): try: return cmd(*argv, **kwarg) except Exception as error: if self.partial: self.errors.append(error) return [] raise error def _resolve_port(self, port): # for now just a stupid resolver, will be # improved later with search by mac, etc. if isinstance(port, Interface): return port['index'] else: return self.ipdb.interfaces.get(port, {}).get('index', None) def commit( self, tid=None, transaction=None, commit_phase=1, commit_mask=0xFF, newif=False, ): ''' Commit transaction. 
In the case of exception all changes applied during commit will be reverted. ''' if not commit_phase & commit_mask: return self error = None added = None removed = None drop = self.ipdb.txdrop notx = True init = None debug = {'traceback': None, 'transaction': None, 'next_stage': None} if tid or transaction: notx = False if tid: transaction = self.global_tx[tid] else: transaction = transaction or self.current_tx if transaction.partial: transaction.errors = [] with self._write_lock: # if the interface does not exist, create it first ;) if self['ipdb_scope'] != 'system': # a special case: transition "create" -> "remove" if ( transaction['ipdb_scope'] == 'remove' and self['ipdb_scope'] == 'create' ): self.invalidate() return self newif = True self.set_target('ipdb_scope', 'system') try: # 8<--------------------------------------------------- # link resolve if self._deferred_link: link_key, link_obj = self._deferred_link transaction[link_key] = self._resolve_port(link_obj) self._deferred_link = None # 8<---------------------------------------------------- # ACHTUNG: hack for old platforms if self['address'] == '00:00:00:00:00:00': with self._direct_state: self['address'] = None self['broadcast'] = None # 8<---------------------------------------------------- init = self.pick() try: request = { key: transaction[key] for key in filter( lambda x: x[:5] != 'bond_' and x[:7] != 'brport_' and x[:3] != 'br_', transaction, ) if transaction[key] is not None } for key in ('net_ns_fd', 'net_ns_pid'): if key in request: with self._direct_state: self[key] = None del request[key] self.nl.link('add', **request) except NetlinkError as x: # File exists if x.code == errno.EEXIST: # A bit special case, could be one of two cases: # # 1. A race condition between two different IPDB # processes # 2. An attempt to create dummy0, gre0, bond0 when # the corrseponding module is not loaded. 
Being # loaded, the module creates a default interface # by itself, causing the request to fail # # The exception in that case can cause the DB # inconsistence, since there can be queued not only # the interface creation, but also IP address # changes etc. # # So we ignore this particular exception and try to # continue, as it is created by us. # # 3. An attempt to create VLAN or VXLAN interface # with the same ID but under different name # # In that case we should forward error properly if self['kind'] in ('vlan', 'vxlan'): newif = x else: raise except Exception as e: if transaction.partial: transaction.errors.append(e) raise PartialCommitException() else: # If link('add', ...) raises an exception, no netlink # broadcast will be sent, and the object is unmodified. # After the exception forwarding, the object is ready # to repeat the commit() call. if drop and notx: self.drop(transaction.uid) raise if transaction['ipdb_scope'] == 'create' and commit_phase > 1: if self['index']: wd = self.ipdb.watchdog('RTM_DELLINK', ifname=self['ifname']) with self._direct_state: self['ipdb_scope'] = 'locked' self.nl.link('delete', index=self['index']) wd.wait() self.load_dict(transaction) return self elif newif: # Here we come only if a new interface is created # if commit_phase == 1 and not self.wait_target('ipdb_scope'): if drop and notx: self.drop(transaction.uid) self.invalidate() if isinstance(newif, Exception): raise newif else: raise CreateException() # Re-populate transaction.ipaddr to have a proper IP target # # The reason behind the code is that a new interface in the # "up" state will have automatic IPv6 addresses, that aren't # reflected in the transaction. This may cause a false IP # target mismatch and a commit failure. # # To avoid that, collect automatic addresses to the # transaction manually, since it is not yet properly linked. 
# for addr in self.ipdb.ipaddr[self['index']]: transaction['ipaddr'].add(addr) # Reload the interface data try: self.load_netlink(self.nl.link('get', **request)[0]) except Exception: pass # now we have our index and IP set and all other stuff snapshot = self.pick() # make snapshots of all dependent routes if commit_phase == 1 and hasattr(self.ipdb, 'routes'): self.routes = [] for record in self.ipdb.routes.filter({'oif': self['index']}): # For MPLS routes the key is an integer # They should match anyways if getattr(record['key'], 'table', None) != 255: self.routes.append( (record['route'], record['route'].pick()) ) # resolve all delayed ports def resolve_ports(transaction, ports, callback, self, drop): def error(x): return KeyError('can not resolve port %s' % x) for port in tuple(ports): ifindex = self._resolve_port(port) if not ifindex: if transaction.partial: transaction.errors.append(error(port)) else: if drop: self.drop(transaction.uid) raise error(port) else: ports.remove(port) with transaction._direct_state: # ???? 
callback(ifindex) resolve_ports( transaction, transaction._delay_add_port, transaction.add_port, self, drop and notx, ) resolve_ports( transaction, transaction._delay_del_port, transaction.del_port, self, drop and notx, ) try: removed, added = snapshot // transaction run = transaction._run nl = transaction.nl # 8<--------------------------------------------- # Port vlans if removed['vlans'] or added['vlans']: self['vlans'].set_target(transaction['vlans']) for i in removed['vlans']: # remove vlan from the port run( nl.vlan_filter, 'del', index=self['index'], vlan_info=self['vlans'][i][0], ) for i in added['vlans']: # add vlan to the port vinfo = transaction['vlans'][i][0] flags = transaction['vlans'][i][1] req = {'index': self['index'], 'vlan_info': vinfo} if flags == 'self': req['vlan_flags'] = flags # this request will NOT give echo, # so bypass the check with self._direct_state: self.add_vlan(vinfo['vid']) run(nl.vlan_filter, 'add', **req) self['vlans'].target.wait(SYNC_TIMEOUT) if not self['vlans'].target.is_set(): raise CommitException('vlans target is not set') # 8<--------------------------------------------- # Ports if removed['ports'] or added['ports']: self['ports'].set_target(transaction['ports']) for i in removed['ports']: # detach port if i in self.ipdb.interfaces: ( self.ipdb.interfaces[i] .set_target('master', None) .mirror_target('master', 'link') ) run(nl.link, 'update', index=i, master=0) else: transaction.errors.append(KeyError(i)) for i in added['ports']: # attach port if i in self.ipdb.interfaces: ( self.ipdb.interfaces[i] .set_target('master', self['index']) .mirror_target('master', 'link') ) run(nl.link, 'update', index=i, master=self['index']) else: transaction.errors.append(KeyError(i)) self['ports'].target.wait(SYNC_TIMEOUT) if self['ports'].target.is_set(): for msg in self.nl.get_vlans(index=self['index']): self.load_netlink(msg) else: raise CommitException('ports target is not set') # 1. wait for proper targets on ports # 2. 
wait for mtu sync # # the bridge mtu is set from the port, if the latter is smaller # the bond mtu sets the port mtu, if the latter is smaller # # FIXME: team interfaces? for i in list(added['ports']) + list(removed['ports']): port = self.ipdb.interfaces[i] # port update target = port._local_targets['master'] target.wait(SYNC_TIMEOUT) with port._write_lock: del port._local_targets['master'] del port._local_targets['link'] if not target.is_set(): raise CommitException('master target failed') if i in added['ports']: if port.if_master != self['index']: raise CommitException('master set failed') else: if port.if_master == self['index']: raise CommitException('master unset failed') # master update if self['kind'] == 'bridge' and self['mtu'] > port['mtu']: self.set_target('mtu', port['mtu']) self.wait_target('mtu') # 8<--------------------------------------------- # Interface changes request = {} brequest = {} prequest = {} # preseed requests with the interface kind request['kind'] = self['kind'] brequest['kind'] = self['kind'] wait_all = False for key, value in added.items(): if ( value is not None and (key not in self._virtual_fields) and (key != 'kind') ): if key[:3] == 'br_': brequest[key] = added[key] elif key[:7] == 'brport_': prequest[key[7:]] = added[key] else: if key == 'address' and added[key] is not None: self[key] = added[key].lower() request[key] = added[key] # FIXME: flush the interface type so the next two conditions # will work correctly request['kind'] = None brequest['kind'] = None # apply changes only if there is something to apply if (self['kind'] == 'bridge') and any( [brequest[item] is not None for item in brequest] ): brequest['index'] = self['index'] brequest['kind'] = self['kind'] brequest['family'] = AF_BRIDGE wait_all = True run(nl.link, 'set', **brequest) if any([request[item] is not None for item in request]): request['index'] = self['index'] request['kind'] = self['kind'] if request.get('address', None) == '00:00:00:00:00:00': 
request.pop('address') request.pop('broadcast', None) wait_all = True run(nl.link, 'update', **request) # Yet another trick: setting ifalias doesn't cause # netlink updates if 'ifalias' in request: self.reload() if any([prequest[item] is not None for item in prequest]): prequest['index'] = self['index'] run(nl.brport, 'set', **prequest) if (wait_all) and (not transaction.partial): transaction.wait_all_targets() # 8<--------------------------------------------- # VLAN flags -- a dirty hack, pls do something with it if added.get('vlan_flags') is not None: run( nl.link, 'set', **{ 'kind': 'vlan', 'index': self['index'], 'vlan_flags': added['vlan_flags'], } ) # 8<--------------------------------------------- # IP address changes for _ in range(3): ip2add = transaction['ipaddr'] - self['ipaddr'] ip2remove = self['ipaddr'] - transaction['ipaddr'] if not ip2add and not ip2remove: break self['ipaddr'].set_target(transaction['ipaddr']) ### # Remove # # The promote_secondaries sysctl causes the kernel # to add secondary addresses back after the primary # address is removed. # # The library can not tell this from the result of # an external program. # # One simple way to work that around is to remove # secondaries first. rip = sorted( ip2remove, key=lambda x: self['ipaddr'][x]['flags'], reverse=True, ) # 8<-------------------------------------- for i in rip: # When you remove a primary IP addr, all the # subnetwork can be removed. 
In this case you # will fail, but it is OK, no need to roll back try: run( nl.addr, 'delete', index=self['index'], address=i[0], prefixlen=i[1], ) except NetlinkError as x: # bypass only errno 99, # 'Cannot assign address' if x.code != errno.EADDRNOTAVAIL: raise except socket_error as x: # bypass illegal IP requests if isinstance(x.args[0], basestring) and x.args[ 0 ].startswith('illegal IP'): continue raise ### # Add addresses # 8<-------------------------------------- for i in ip2add: # Try to fetch additional address attributes try: kwarg = dict( [ k for k in transaction['ipaddr'][i].items() if k[0] in ('broadcast', 'anycast', 'scope') ] ) except KeyError: kwarg = None try: # feed the address to the OS kwarg = kwarg or {} kwarg['index'] = self['index'] kwarg['address'] = i[0] kwarg['prefixlen'] = i[1] run(nl.addr, 'add', **kwarg) except NetlinkError as x: if x.code != errno.EEXIST: raise # 8<-------------------------------------- # some interfaces do not send IPv6 address # updates, when are down # # beside of that, bridge interfaces are # down by default, so they never send # address updates from beginning # # FIXME: # # that all is a dirtiest hack ever, pls do # something with it # if (not self['flags'] & 1) or hasattr(self.ipdb.nl, 'netns'): # 1. flush old IPv6 addresses for addr in list(self['ipaddr'].ipv6): self['ipaddr'].remove(addr) # 2. 
reload addresses for addr in self.nl.get_addr( index=self['index'], family=AF_INET6 ): self.ipdb.ipaddr._new(addr) # if there are tons of IPv6 addresses, it may take a # really long time, and that's bad, but it's broken in # the kernel :| # 8<-------------------------------------- self['ipaddr'].target.wait(SYNC_TIMEOUT) if self['ipaddr'].target.is_set(): break else: raise CommitException('ipaddr target is not set') # 8<--------------------------------------------- # Iterate callback chain for ch in self._commit_hooks: # An exception will rollback the transaction ch(self.dump(), snapshot.dump(), transaction.dump()) # 8<--------------------------------------------- # Move the interface to a netns if ('net_ns_fd' in added) or ('net_ns_pid' in added): request = {} for key in ('net_ns_fd', 'net_ns_pid'): if key in added: request[key] = added[key] request['index'] = self['index'] run(nl.link, 'update', **request) countdown = 10 while countdown: # wait until the interface will disappear # from the current network namespace -- # up to 1 second (make it configurable?) 
try: self.nl.get_links(self['index']) except NetlinkError as e: if e.code == errno.ENODEV: break raise except Exception: raise countdown -= 1 time.sleep(0.1) # 8<--------------------------------------------- # Interface removal if added.get('ipdb_scope') in ('shadow', 'remove'): wd = self.ipdb.watchdog('RTM_DELLINK', ifname=self['ifname']) with self._direct_state: self['ipdb_scope'] = 'locked' self.nl.link('delete', index=self['index']) wd.wait() with self._direct_state: self['ipdb_scope'] = 'shadow' # system-wide checks if commit_phase == 1: self.ipdb.ensure('run') if added.get('ipdb_scope') == 'remove': self.ipdb.interfaces._detach(None, self['index'], None) if notx: self.drop(transaction.uid) return self # 8<--------------------------------------------- # system-wide checks if commit_phase == 1: self.ipdb.ensure('run') # so far all's ok drop = True except Exception as e: error = e # log the error environment debug['traceback'] = traceback.format_exc() debug['transaction'] = transaction debug['next_stage'] = None # something went wrong: roll the transaction back if commit_phase == 1: if newif: drop = False try: self.commit( transaction=init if newif else snapshot, commit_phase=2, commit_mask=commit_mask, newif=newif, ) except Exception as i_e: debug['next_stage'] = i_e error = RuntimeError() else: # reload all the database -- it can take a long time, # but it is required since we have no idea, what is # the result of the failure links = self.nl.get_links() for link in links: self.ipdb.interfaces._new(link) links = self.nl.get_vlans() for link in links: self.ipdb.interfaces._new(link) for addr in self.nl.get_addr(): self.ipdb.ipaddr._new(addr) for key in ('ipaddr', 'ports', 'vlans'): self[key].clear_target() # raise partial commit exceptions if transaction.partial and transaction.errors: error = PartialCommitException('partial commit error') # drop only if required if drop and notx: # drop last transaction in any case self.drop(transaction.uid) # raise exception 
for failed transaction if error is not None: error.debug = debug raise error # restore dependent routes for successful rollback if commit_phase == 2: for route in self.routes: with route[0]._direct_state: route[0]['ipdb_scope'] = 'restore' try: route[0].commit( transaction=route[1], commit_phase=2, commit_mask=2 ) except RuntimeError as x: # RuntimeError is raised due to phase 2, so # an additional check is required if ( isinstance(x.cause, NetlinkError) and x.cause.code == errno.EEXIST ): pass time.sleep(config.commit_barrier) # drop all collected errors, if any self.errors = [] return self def up(self): ''' Shortcut: change the interface state to 'up'. ''' self['state'] = 'up' return self def down(self): ''' Shortcut: change the interface state to 'down'. ''' self['state'] = 'down' return self def remove(self): ''' Mark the interface for removal ''' self['ipdb_scope'] = 'remove' return self def shadow(self): ''' Remove the interface from the OS, but leave it in the database. When one will try to re-create interface with the same name, all the old saved attributes will apply to the new interface, incl. MAC-address and even the interface index. Please be aware, that the interface index can be reused by OS while the interface is "in the shadow state", in this case re-creation will fail. 
''' self['ipdb_scope'] = 'shadow' return self class InterfacesDict(Dotkeys): def __init__(self, ipdb): self.ipdb = ipdb self._event_map = {'RTM_NEWLINK': self._new, 'RTM_DELLINK': self._del} def _register(self): links = self.ipdb.nl.get_links() # iterate twice to map port/master relations for link in links: self._new(link, skip_master=True) for link in links: self._new(link) # load bridge vlan information links = self.ipdb.nl.get_vlans() for link in links: self._new(link) def add(self, kind, ifname, reuse=False, **kwarg): ''' Create new network interface ''' with self.ipdb.exclusive: # check for existing interface if ifname in self: if (self[ifname]['ipdb_scope'] == 'shadow') or reuse: device = self[ifname] kwarg['kind'] = kind device.load_dict(kwarg) if self[ifname]['ipdb_scope'] == 'shadow': with device._direct_state: device['ipdb_scope'] = 'create' device.begin() else: raise CreateException("interface %s exists" % ifname) else: device = self[ifname] = Interface( ipdb=self.ipdb, mode='snapshot' ) # delay link resolve? 
for key in kwarg: # any /.+link$/ attr if key[-4:] == 'link': if isinstance(kwarg[key], Interface): kwarg[key] = kwarg[key].get('index') or kwarg[ key ].get('ifname') if not isinstance(kwarg[key], int): device._deferred_link = (key, kwarg[key]) device._mode = self.ipdb.mode with device._direct_state: device['kind'] = kind device['index'] = kwarg.get('index', 0) device['ifname'] = ifname device['ipdb_scope'] = 'create' # set some specific attrs for attr in ( 'peer', 'uid', 'gid', 'ifr', 'mode', 'bond_mode', 'address', ): if attr in kwarg: device[attr] = kwarg.pop(attr) device.begin() device.load(kwarg) return device def _del(self, msg): target = self.get(msg['index']) if target is None: return if msg['family'] == AF_BRIDGE: with target._direct_state: for vlan in tuple(target['vlans']): target.del_vlan(vlan) # check for freezed devices if getattr(target, '_freeze', None): with target._direct_state: target['ipdb_scope'] = 'shadow' return # check for locked devices if target.get('ipdb_scope') in ('locked', 'shadow'): return self._detach(None, msg['index'], msg) def _new(self, msg, skip_master=False): # check, if a record exists index = msg.get('index', None) ifname = msg.get_attr('IFLA_IFNAME', None) device = None cleanup = None # scenario #1: no matches for both: new interface # # scenario #2: ifname exists, index doesn't: # index changed # scenario #3: index exists, ifname doesn't: # name changed # scenario #4: both exist: assume simple update and # an optional name change if (index not in self) and (ifname not in self): # scenario #1, new interface device = self[index] = self[ifname] = Interface(ipdb=self.ipdb) elif (index not in self) and (ifname in self): # scenario #2, index change old_index = self[ifname]['index'] device = self[index] = self[ifname] if old_index in self: cleanup = old_index if old_index in self.ipdb.ipaddr: self.ipdb.ipaddr[index] = self.ipdb.ipaddr[old_index] del self.ipdb.ipaddr[old_index] if old_index in self.ipdb.neighbours: 
self.ipdb.neighbours[index] = self.ipdb.neighbours[old_index] del self.ipdb.neighbours[old_index] else: # scenario #3, interface rename # scenario #4, assume rename old_name = self[index]['ifname'] if old_name != ifname: # unlink old name cleanup = old_name device = self[ifname] = self[index] if index not in self.ipdb.ipaddr: self.ipdb.ipaddr[index] = self.ipdb._ipaddr_set() if index not in self.ipdb.neighbours: self.ipdb.neighbours[index] = LinkedSet() # update port references old_master = device.get('master', None) new_master = msg.get_attr('IFLA_MASTER') if old_master != new_master: if old_master in self: with self[old_master]._direct_state: if index in self[old_master]['ports']: self[old_master].del_port(index) if new_master in self and new_master != index: with self[new_master]._direct_state: self[new_master].add_port(index) if cleanup is not None: del self[cleanup] if skip_master: msg.strip('IFLA_MASTER') device.load_netlink(msg) if new_master is None: with device._direct_state: device['master'] = None def _detach(self, name, idx, msg=None): with self.ipdb.exclusive: if msg is not None: if ( msg['event'] == 'RTM_DELLINK' and msg['change'] != 0xFFFFFFFF ): return if idx is None or idx < 1: target = self[name] idx = target['index'] else: target = self[idx] name = target['ifname'] # clean up port, if exists master = target.get('master', None) if master in self and target['index'] in self[master]['ports']: with self[master]._direct_state: self[master].del_port(target) self.pop(name, None) self.pop(idx, None) self.ipdb.ipaddr.pop(idx, None) self.ipdb.neighbours.pop(idx, None) with target._direct_state: target['ipdb_scope'] = 'detached' class AddressesDict(dict): def __init__(self, ipdb): self.ipdb = ipdb self._event_map = {'RTM_NEWADDR': self._new, 'RTM_DELADDR': self._del} def _register(self): for msg in self.ipdb.nl.get_addr(): self._new(msg) def reload(self): # Reload addresses from the kernel. 
# (This is a workaround to reorder primary and secondary addresses.) for k in self.keys(): self[k] = self.ipdb._ipaddr_set() for msg in self.ipdb.nl.get_addr(): self._new(msg) for idx in self.keys(): iff = self.ipdb.interfaces[idx] with iff._direct_state: iff['ipaddr'] = self[idx] def _new(self, msg): if msg['family'] == AF_INET: addr = msg.get_attr('IFA_LOCAL') elif msg['family'] == AF_INET6: addr = msg.get_attr('IFA_LOCAL') if not addr: addr = msg.get_attr('IFA_ADDRESS') else: return raw = { 'local': msg.get_attr('IFA_LOCAL'), 'broadcast': msg.get_attr('IFA_BROADCAST'), 'address': msg.get_attr('IFA_ADDRESS'), 'flags': msg.get_attr('IFA_FLAGS') or msg.get('flags'), 'prefixlen': msg['prefixlen'], 'family': msg['family'], 'cacheinfo': msg.get_attr('IFA_CACHEINFO'), } try: self[msg['index']].add(key=(addr, raw['prefixlen']), raw=raw) except: pass def _del(self, msg): if msg['family'] == AF_INET: addr = msg.get_attr('IFA_LOCAL') elif msg['family'] == AF_INET6: addr = msg.get_attr('IFA_ADDRESS') else: return try: self[msg['index']].remove((addr, msg['prefixlen'])) except: pass class NeighboursDict(dict): def __init__(self, ipdb): self.ipdb = ipdb self._event_map = { 'RTM_NEWNEIGH': self._new, 'RTM_DELNEIGH': self._del, } def _register(self): for msg in self.ipdb.nl.get_neighbours(): self._new(msg) def _new(self, msg): if msg['family'] == AF_BRIDGE: return try: ( self[msg['ifindex']].add( key=msg.get_attr('NDA_DST'), raw={'lladdr': msg.get_attr('NDA_LLADDR')}, ) ) except: pass def _del(self, msg): if msg['family'] == AF_BRIDGE: return try: (self[msg['ifindex']].remove(msg.get_attr('NDA_DST'))) except: pass spec = [ {'name': 'interfaces', 'class': InterfacesDict, 'kwarg': {}}, { 'name': 'by_name', 'class': View, 'kwarg': { 'path': 'interfaces', 'constraint': lambda k, v: isinstance(k, basestring), }, }, { 'name': 'by_index', 'class': View, 'kwarg': { 'path': 'interfaces', 'constraint': lambda k, v: isinstance(k, int), }, }, {'name': 'ipaddr', 'class': AddressesDict, 
'kwarg': {}}, {'name': 'neighbours', 'class': NeighboursDict, 'kwarg': {}}, ] pyroute2-0.7.11/pyroute2/ipdb/linkedset.py000066400000000000000000000223621455030217500204430ustar00rootroot00000000000000''' ''' import struct import threading from collections import OrderedDict from socket import AF_INET, AF_INET6, inet_pton from pyroute2.common import basestring class LinkedSet(set): ''' Utility class, used by `Interface` to track ip addresses and ports. Called "linked" as it automatically updates all instances, linked with it. Target filter is a function, that returns `True` if a set member should be counted in target checks (target methods see below), or `False` if it should be ignored. ''' def target_filter(self, x): return True def __init__(self, *argv, **kwarg): set.__init__(self, *argv, **kwarg) def _check_default_target(self): if self._ct is not None: if set(filter(self.target_filter, self)) == set( filter(self.target_filter, self._ct) ): self._ct = None return True return False self.lock = threading.RLock() self.target = threading.Event() self.targets = {self.target: _check_default_target} self._ct = None self.raw = OrderedDict() self.links = [] self.exclusive = set() def __getitem__(self, key): return self.raw[key] def clear_target(self, target=None): with self.lock: if target is None: self._ct = None self.target.clear() else: target.clear() del self.targets[target] def set_target(self, value, ignore_state=False): ''' Set target state for the object and clear the target event. 
Once the target is reached, the event will be set, see also: `check_target()` Args: - value (set): the target state to compare with ''' with self.lock: if isinstance(value, (set, tuple, list)): self._ct = value self.target.clear() # immediately check, if the target already # reached -- otherwise you will miss the # target forever if not ignore_state: self.check_target() elif hasattr(value, '__call__'): new_target = threading.Event() self.targets[new_target] = value if not ignore_state: self.check_target() return new_target else: raise TypeError("target type not supported") def check_target(self): ''' Check the target state and set the target event in the case the state is reached. Called from mutators, `add()` and `remove()` ''' with self.lock: for evt in self.targets: if self.targets[evt](self): evt.set() def add(self, key, raw=None, cascade=False): ''' Add an item to the set and all connected instances, check the target state. Args: - key: any hashable object - raw (optional): raw representation of the object Raw representation is not required. It can be used, e.g., to store RTM_NEWADDR RTNL messages along with human-readable ip addr representation. ''' with self.lock: if cascade and (key in self.exclusive): return if key not in self: self.raw[key] = raw super(LinkedSet, self).add(key) for link in self.links: link.add(key, raw, cascade=True) self.check_target() def remove(self, key, raw=None, cascade=False): ''' Remove an item from the set and all connected instances, check the target state. ''' with self.lock: if cascade and (key in self.exclusive): return super(LinkedSet, self).remove(key) self.raw.pop(key, None) for link in self.links: if key in link: link.remove(key, cascade=True) self.check_target() def unlink(self, key): ''' Exclude key from cascade updates. ''' self.exclusive.add(key) def relink(self, key): ''' Do not ignore key on cascade updates. ''' self.exclusive.remove(key) def connect(self, link): ''' Connect a LinkedSet instance to this one. 
Connected sets will be updated together with this instance. ''' if not isinstance(link, LinkedSet): raise TypeError() self.links.append(link) def disconnect(self, link): self.links.remove(link) def __repr__(self): return repr(tuple(self)) class IPaddrSet(LinkedSet): ''' LinkedSet child class with different target filter. The filter ignores link local IPv6 addresses when sets and checks the target. The `wait_ip()` routine by default does not ignore link local IPv6 addresses, but it may be changed with the `ignore_link_local` argument. ''' @property def ipv4(self): ret = IPaddrSet() for x in self: if self[x]['family'] == AF_INET: ret.add(x, self[x]) return ret @property def ipv6(self): ret = IPaddrSet() for x in self: if self[x]['family'] == AF_INET6: ret.add(x, self[x]) return ret def wait_ip(self, net, mask=None, timeout=None, ignore_link_local=False): family = AF_INET6 if net.find(':') >= 0 else AF_INET alen = 32 if family == AF_INET else 128 net = inet_pton(family, net) if mask is None: mask = alen if family == AF_INET: net = struct.unpack('>I', net)[0] else: na, nb = struct.unpack('>QQ', net) net = (na << 64) | nb match = net & (((1 << mask) - 1) << (alen - mask)) def match_ip(ipset): for rnet, rmask in ipset: rfamily = AF_INET6 if rnet.find(':') >= 0 else AF_INET if family != rfamily: continue if ( family == AF_INET6 and ignore_link_local and rnet[:4] == 'fe80' and rmask == 64 ): continue rnet = inet_pton(family, rnet) if family == AF_INET: rnet = struct.unpack('>I', rnet)[0] else: rna, rnb = struct.unpack('>QQ', rnet) rnet = (rna << 64) | rnb if (rnet & (((1 << mask) - 1) << (alen - mask))) == match: return True return False target = self.set_target(match_ip) target.wait(timeout) ret = target.is_set() self.clear_target(target) return ret def __getitem__(self, key): if isinstance(key, (tuple, list)): return self.raw[key] elif isinstance(key, int): return self.raw[tuple(self.raw.keys())[key]] elif isinstance(key, basestring): key = key.split('/') key = (key[0], 
int(key[1])) return self.raw[key] else: TypeError('wrong key type') class SortedIPaddrSet(IPaddrSet): def __init__(self, *argv, **kwarg): super(SortedIPaddrSet, self).__init__(*argv, **kwarg) if argv and isinstance(argv[0], SortedIPaddrSet): # Re-initialize self.raw from argv[0].raw to preserve order: self.raw = OrderedDict(argv[0].raw) def __and__(self, other): nset = SortedIPaddrSet(self) return nset.__iand__(other) def __iand__(self, other): for key in self.raw: if key not in other: self.remove(key) return self def __rand__(self, other): return self.__and__(other) def __xor__(self, other): nset = SortedIPaddrSet(self) return nset.__ixor__(other) def __ixor__(self, other): if not isinstance(other, SortedIPaddrSet): return RuntimeError('SortedIPaddrSet instance required') xor_keys = set(self.raw.keys()) ^ set(other.raw.keys()) for key in xor_keys: if key in self: self.remove(key) else: self.add(key, raw=other.raw[key], cascade=False) return self def __rxor__(self, other): return self.__xor__(other) def __or__(self, other): nset = SortedIPaddrSet(self) return nset.__ior__(other) def __ior__(self, other): if not isinstance(other, SortedIPaddrSet): return RuntimeError('SortedIPaddrSet instance required') for key, value in other.raw.items(): if key not in self: self.add(key, raw=value, cascade=False) return self def __ror__(self, other): return self.__or__(other) def __sub__(self, other): nset = SortedIPaddrSet(self) return nset.__isub__(other) def __isub__(self, other): for key in other: if key in self: self.remove(key) return self def __iter__(self): return iter(self.raw) pyroute2-0.7.11/pyroute2/ipdb/main.py000066400000000000000000001376261455030217500174170ustar00rootroot00000000000000# -*- coding: utf-8 -*- ''' IPDB guide ========== .. warning:: The IPDB module has design issues that may not be fixed. It is recommended to switch to NDB wherever it's possible. Basically, IPDB is a transactional database, containing records, that represent network stack objects. 
Any change in the database is not reflected immediately in OS, but waits until `commit()` is called. One failed operation during `commit()` rolls back all the changes, has been made so far. Moreover, IPDB has commit hooks API, that allows you to roll back changes depending on your own function calls, e.g. when a host or a network becomes unreachable. Limitations ----------- One of the major issues with IPDB is its memory footprint. It proved not to be suitable for environments with thousands of routes or neighbours. Being a design issue, it could not be fixed, so a new module was started, NDB, that aims to replace IPDB. IPDB is still more feature rich, but NDB is already more fast and stable. IPDB, NDB, IPRoute ------------------ These modules use different approaches. * IPRoute just forwards requests to the kernel, and doesn't wait for the system state. So it's up to developer to check, whether the requested object is really set up or not. * IPDB is an asynchronously updated database, that starts several additional threads by default. If your project's policy doesn't allow implicit threads, keep it in mind. But unlike IPRoute, the IPDB ensures the changes to be reflected in the system. * NDB is like IPDB, and will obsolete it in the future. The difference is that IPDB creates Python object for every RTNL object, while NDB stores everything in an SQL DB, and creates objects on demand. Being asynchronously updated, IPDB does sync on commit:: with IPDB() as ipdb: with ipdb.interfaces['eth0'] as i: i.up() i.add_ip('192.168.0.2/24') i.add_ip('192.168.0.3/24') # ---> <--- here you can expect `eth0` is up # and has these two addresses, so # the following code can rely on that NB: *In the example above `commit()` is implied with the `__exit__()` of the `with` statement.* IPDB and other software ----------------------- IPDB is designed to be a non-exclusive network settings database. 
There may be several IPDB instances on the same OS, as well as other network management software, such as NetworkManager etc. The IPDB transactions should not interfere with other software settings, unless they touch the same objects. E.g., if IPDB brings an interface up, while NM shuts it down, there will be a race condition. An example:: # IPDB code # NetworkManager at the same time: ipdb.interfaces['eth0'].up() # ipdb.interfaces['eth0'].commit() # $ sudo nmcli con down eth0 # ---> <--- # The eth0 state here is undefined. Some of the commands # above will fail But as long as the software doesn't touch the same objects, there will be no conflicts. Another example:: # IPDB code # At the same time, NetworkManager with ipdb.interfaces['eth0'] as i: # adds addresses: i.add_ip('172.16.254.2/24') # * 10.0.0.2/24 i.add_ip('172.16.254.3/24') # * 10.0.0.3/24 # ---> <--- # At this point the eth0 interface will have all four addresses. # If the IPDB transaction fails by some reason, only IPDB addresses # will be rolled back. There may be a need to prevent other software from changing the network settings. There is no locking at the kernel level, but IPDB can revert all the changes as soon as they appear on the interface:: # IPDB code ipdb.interfaces['eth0'].freeze() # Here some other software tries to # add an address, or to remove the old # one # ---> <--- # At this point the eth0 interface will have all the same settings as # at the `freeze()` call moment. Newly added addresses will be removed, # all the deleted addresses will be restored. # # Please notice, that an address removal may cause also a routes removal, # and that is the thing that IPDB can not neither prevent, nor revert. 
ipdb.interfaces['eth0'].unfreeze() Quickstart ---------- Simple tutorial:: from pyroute2 import IPDB # several IPDB instances are supported within on process ipdb = IPDB() # commit is called automatically upon the exit from `with` # statement with ipdb.interfaces.eth0 as i: i.address = '00:11:22:33:44:55' i.ifname = 'bala' i.txqlen = 2000 # basic routing support ipdb.routes.add({'dst': 'default', 'gateway': '10.0.0.1'}).commit() # do not forget to shutdown IPDB ipdb.release() Please, notice `ip.release()` call in the end. Though it is not forced in an interactive python session for the better user experience, it is required in the scripts to sync the IPDB state before exit. IPDB supports functional-like syntax also:: from pyroute2 import IPDB with IPDB() as ipdb: intf = (ipdb.interfaces['eth0'] .add_ip('10.0.0.2/24') .add_ip('10.0.0.3/24') .set_address('00:11:22:33:44:55') .set_mtu(1460) .set_name('external') .commit()) # ---> <--- here you have the interface reference with # all the changes applied: renamed, added ipaddr, # changed macaddr and mtu. ... # some code # pls notice, that the interface reference will not work # outside of `with IPDB() ...` Transaction modes ----------------- IPDB has several operating modes: - 'implicit' (default) -- the first change starts an implicit transaction, that have to be committed - 'explicit' -- you have to begin() a transaction prior to make any change The default is to use implicit transaction. This behaviour can be changed in the future, so use 'mode' argument when creating IPDB instances. 
The sample session with explicit transactions:: In [1]: from pyroute2 import IPDB In [2]: ip = IPDB(mode='explicit') In [3]: ifdb = ip.interfaces In [4]: ifdb.tap0.begin() Out[3]: UUID('7a637a44-8935-4395-b5e7-0ce40d31d937') In [5]: ifdb.tap0.up() In [6]: ifdb.tap0.address = '00:11:22:33:44:55' In [7]: ifdb.tap0.add_ip('10.0.0.1', 24) In [8]: ifdb.tap0.add_ip('10.0.0.2', 24) In [9]: ifdb.tap0.review() Out[8]: {'+ipaddr': set([('10.0.0.2', 24), ('10.0.0.1', 24)]), '-ipaddr': set([]), 'address': '00:11:22:33:44:55', 'flags': 4099} In [10]: ifdb.tap0.commit() Note, that you can `review()` the `current_tx` transaction, and `commit()` or `drop()` it. Also, multiple transactions are supported, use uuid returned by `begin()` to identify them. Actually, the form like 'ip.tap0.address' is an eye-candy. The IPDB objects are dictionaries, so you can write the code above as that:: ipdb.interfaces['tap0'].down() ipdb.interfaces['tap0']['address'] = '00:11:22:33:44:55' ... Context managers ---------------- Transactional objects (interfaces, routes) can act as context managers in the same way as IPDB does itself:: with ipdb.interfaces.tap0 as i: i.address = '00:11:22:33:44:55' i.ifname = 'vpn' i.add_ip('10.0.0.1', 24) i.add_ip('10.0.0.1', 24) On exit, the context manager will automatically `commit()` the transaction. Read-only interface views ------------------------- Using an interface as a context manager **will** start a transaction. Sometimes it is not what one needs. To avoid unnecessary transactions, and to avoid the risk to occasionally change interface attributes, one can use read-only views:: with ipdb.interfaces[1].ro as iface: print(iface.ifname) print(iface.address) The `.ro` view neither starts transactions, nor allows to change anything, raising the `RuntimeError` exception. The same read-only views are available for routes and rules. 
Create interfaces ----------------- IPDB can also create virtual interfaces:: with ipdb.create(kind='bridge', ifname='control') as i: i.add_port(ip.interfaces.eth1) i.add_port(ip.interfaces.eth2) i.add_ip('10.0.0.1/24') The `IPDB.create()` call has the same syntax as `IPRoute.link('add', ...)`, except you shouldn't specify the `'add'` command. Refer to `IPRoute` docs for details. Please notice, that the interface object stays in the database even if there was an error during the interface creation. It is done so to make it possible to fix the interface object and try to run `commit()` again. Or you can drop the interface object with the `.remove().commit()` call. IP address management --------------------- IP addresses on interfaces may be managed using `add_ip()` and `del_ip()`:: with ipdb.interfaces['eth0'] as eth: eth.add_ip('10.0.0.1/24') eth.add_ip('10.0.0.2/24') eth.add_ip('2001:4c8:1023:108::39/64') eth.del_ip('172.16.12.5/24') The address format may be either a string with `'address/mask'` notation, or a pair of `'address', mask`:: with ipdb.interfaces['eth0'] as eth: eth.add_ip('10.0.0.1', 24) eth.del_ip('172.16.12.5', 24) The `ipaddr` attribute contains all the IP addresses of the interface, which are accessible in different ways. Getting an iterator from `ipaddr` gives you a sequence of tuples `('address', mask)`: .. doctest:: :skipif: True >>> for addr in ipdb.interfaces['eth0'].ipaddr: ... print(ipaddr) ... ('10.0.0.2', 24) ('10.0.0.1', 24) Getting one IP from `ipaddr` returns a dict object with full spec: .. doctest:: :skipif: True >>> ipdb.interfaces['eth0'].ipaddr[0] {'family': 2, 'broadcast': None, 'flags': 128, 'address': '10.0.0.2', 'prefixlen': 24, 'local': '10.0.0.2'} >>> ipdb.intefaces['eth0'].ipaddr['10.0.0.2/24'] {'family': 2, 'broadcast': None, 'flags': 128, 'address': '10.0.0.2', 'prefixlen': 24, 'local': '10.0.0.2'} The API is a bit weird, but it's because of historical reasons. In the future it may be changed. 
Another feature of the `ipaddr` attribute is views: .. doctest:: :skipif: True >>> ipdb.interfaces['eth0'].ipaddr.ipv4: (('10.0.0.2', 24), ('10.0.0.1', 24)) >>> ipdb.interfaces['eth0'].ipaddr.ipv6: (('2001:4c8:1023:108::39', 64),) The views, as well as the `ipaddr` attribute itself are not supposed to be changed by user, but only by the internal API. Bridge interfaces ----------------- Modern kernels provide possibility to manage bridge interface properties such as STP, forward delay, ageing time etc. Names of these properties start with `br_`, like `br_ageing_time`, `br_forward_delay` e.g.:: [x for x in dir(ipdb.interfaces.virbr0) if x.startswith('br_')] Bridge ports ------------ IPDB supports specific bridge port parameters, such as proxyarp, unicast/multicast flood, cost etc.:: with ipdb.interfaces['br-port0'] as p: p.brport_cost = 200 p.brport_unicast_flood = 0 p.brport_proxyarp = 0 Ports management ---------------- IPDB provides a uniform API to manage bridge, bond and vrf ports:: with ipdb.interfaces['br-int'] as br: br.add_port('veth0') br.add_port(ipdb.interfaces.veth1) br.add_port(700) br.del_port('veth2') Both `add_port()` and `del_port()` accept three types of arguments: * `'veth0'` -- interface name as a string * `ipdb.interfaces.veth1` -- IPDB interface object * `700` -- interface index, an integer Routes management ----------------- IPDB has a simple yet useful routing management interface. Create a route ~~~~~~~~~~~~~~ To add a route, there is an easy to use syntax:: # spec as a dictionary spec = {'dst': '172.16.1.0/24', 'oif': 4, 'gateway': '192.168.122.60', 'metrics': {'mtu': 1400, 'advmss': 500}} # pass spec as is ipdb.routes.add(spec).commit() # pass spec as kwargs ipdb.routes.add(**spec).commit() # use keyword arguments explicitly ipdb.routes.add(dst='172.16.1.0/24', oif=4, ...).commit() Please notice, that the device can be specified with `oif` (output interface) or `iif` (input interface), the `device` keyword is not supported anymore. 
More examples:: # specify table and priority (ipdb.routes .add(dst='172.16.1.0/24', gateway='192.168.0.1', table=100, priority=10) .commit()) The `priority` field is what the `iproute2` utility calls `metric` -- see also below. Get a route ~~~~~~~~~~~ To access and change the routes, one can use notations as follows:: # default table (254) # # change the route gateway and mtu # with ipdb.routes['172.16.1.0/24'] as route: route.gateway = '192.168.122.60' route.metrics.mtu = 1500 # access the default route print(ipdb.routes['default']) # change the default gateway with ipdb.routes['default'] as route: route.gateway = '10.0.0.1' By default, the path `ipdb.routes` reflects only the main routing table (254). But Linux supports much more routing tables, so does IPDB:: In [1]: ipdb.routes.tables.keys() Out[1]: [0, 254, 255] In [2]: len(ipdb.routes.tables[255]) Out[2]: 11 # => 11 automatic routes in the table local It is important to understand, that routing tables keys in IPDB are not only the destination prefix. The key consists of 'prefix/mask' string and the route priority (if any):: In [1]: ipdb.routes.tables[254].idx.keys() Out[1]: [RouteKey(dst='default', table=254, family=2, ...), RouteKey(dst='172.17.0.0/16', table=254, ...), RouteKey(dst='172.16.254.0/24', table=254, ...), RouteKey(dst='192.168.122.0/24', table=254, ...), RouteKey(dst='fe80::/64', table=254, family=10, ...)] But a routing table in IPDB allows several variants of the route spec. The simplest case is to retrieve a route by prefix, if there is only one match:: # get route by prefix ipdb.routes['172.16.1.0/24'] # get route by a special name ipdb.routes['default'] If there are more than one route that matches the spec, only the first one will be retrieved. 
One should iterate all the records and filter by a key to retrieve all matches:: # only one route will be retrieved ipdb.routes['fe80::/64'] # get all routes by this prefix [ x for x in ipdb.routes if x['dst'] == 'fe80::/64' ] It is also possible to use dicts as specs:: # get IPv4 default route ipdb.routes[{'dst': 'default', 'family': AF_INET}] # get IPv6 default route ipdb.routes[{'dst': 'default', 'family': AF_INET6}] # get route by priority ipdb.routes.table[100][{'dst': '10.0.0.0/24', 'priority': 10}] While this notation returns one route, there is a method to get all the routes matching the spec:: # get all the routes from all the tables via some interface ipdb.routes.filter({'oif': idx}) # get all IPv6 routes from some table ipdb.routes.table[tnum].filter({'family': AF_INET6}) Route metrics ~~~~~~~~~~~~~ A special object is dedicated to route metrics, one can access it via `route.metrics` or `route['metrics']`:: # these two statements are equal: with ipdb.routes['172.16.1.0/24'] as route: route['metrics']['mtu'] = 1400 with ipdb.routes['172.16.1.0/24'] as route: route.metrics.mtu = 1400 Possible metrics are defined in `rtmsg.py:rtmsg.metrics`, e.g. `RTAX_HOPLIMIT` means `hoplimit` metric etc. Multipath routing ~~~~~~~~~~~~~~~~~ Multipath nexthops are managed via `route.add_nh()` and `route.del_nh()` methods. They are available to review via `route.multipath`, but one should not directly add/remove/modify nexthops in `route.multipath`, as the changes will not be committed correctly. 
To create a multipath route:: ipdb.routes.add({'dst': '172.16.232.0/24', 'multipath': [{'gateway': '172.16.231.2', 'hops': 2}, {'gateway': '172.16.231.3', 'hops': 1}, {'gateway': '172.16.231.4'}]}).commit() To change a multipath route:: with ipdb.routes['172.16.232.0/24'] as r: r.add_nh({'gateway': '172.16.231.5'}) r.del_nh({'gateway': '172.16.231.4'}) Another possible way is to create a normal route and turn it into multipath by `add_nh()`:: # create a non-MP route with one gateway: (ipdb .routes .add({'dst': '172.16.232.0/24', 'gateway': '172.16.231.2'}) .commit()) # turn it to become a MP route: (ipdb .routes['172.16.232.0/24'] .add_nh({'gateway': '172.16.231.3'}) .commit()) # here the route will contain two NH records, with # gateways 172.16.231.2 and 172.16.231.3 # remove one NH and turn the route to be a normal one (ipdb .routes['172.16.232.0/24'] .del_nh({'gateway': '172.16.231.2'}) .commit()) # thereafter the traffic to 172.16.232.0/24 will go only # via 172.16.231.3 Differences from the iproute2 syntax ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By historical reasons, `iproute2` uses names that differs from what the kernel uses. E.g., `iproute2` uses `weight` for multipath route hops instead of `hops`, where `weight == (hops + 1)`. Thus, a route created with `hops == 2` will be listed by `iproute2` as `weight 3`. Another significant difference is `metrics`. The `pyroute2` library uses the kernel naming scheme, where `metrics` means mtu, rtt, window etc. The `iproute2` utility uses `metric` (not `metrics`) as a name for the `priority` field. 
In examples:: # ------------------------------------------------------- # iproute2 command: $ ip route add default \\ nexthop via 172.16.0.1 weight 2 \\ nexthop via 172.16.0.2 weight 9 # pyroute2 code: (ipdb .routes .add({'dst': 'default', 'multipath': [{'gateway': '172.16.0.1', 'hops': 1}, {'gateway': '172.16.0.2', 'hops': 8}]) .commit()) # ------------------------------------------------------- # iproute2 command: $ ip route add default via 172.16.0.2 metric 200 # pyroute2 code: (ipdb .routes .add({'dst': 'default', 'gateway': '172.16.0.2', 'priority': 200}) .commit()) # ------------------------------------------------------- # iproute2 command: $ ip route add default via 172.16.0.2 mtu 1460 # pyroute2 code: (ipdb .routes .add({'dst': 'default', 'gateway': '172.16.0.2', 'metrics': {'mtu': 1460}}) .commit()) Multipath default routes ~~~~~~~~~~~~~~~~~~~~~~~~ .. warning:: As of the merge of kill_rtcache into the kernel, and it's release in ~3.6, weighted default routes no longer work in Linux. Please refer to https://github.com/svinota/pyroute2/issues/171#issuecomment-149297244 for details. Rules management ---------------- IPDB provides a basic IP rules management system. Create a rule ~~~~~~~~~~~~~ Syntax is almost the same as for routes:: # rule spec spec = {'src': '172.16.1.0/24', 'table': 200, 'priority': 15000} ipdb.rules.add(spec).commit() Get a rule ~~~~~~~~~~ The way IPDB handles IP rules is almost the same as routes, but rule keys are more complicated -- the Linux kernel doesn't use keys for rules, but instead iterates all the records until the first one w/o any attribute mismatch. The fields that the kernel uses to compare rules, IPDB uses as the key fields (see `pyroute2/ipdb/rule.py:RuleKey`) There are also more ways to find a record, as with routes:: # 1. iterate all the records for record in ipdb.rules: match(record) # 2. an integer as the key matches the first # rule with that priority ipdb.rules[32565] # 3. 
a dict as the key returns the first match # for all the specified attrs ipdb.rules[{'dst': '10.0.0.0/24', 'table': 200}] Priorities ~~~~~~~~~~ Thus, the rule priority is **not** a key, neither in the kernel, nor in IPDB. One should **not** rely on priorities as on keys, there may be several rules with the same priority, and it often happens, e.g. on Android systems. Persistence ~~~~~~~~~~~ There is no *change* operation for the rule records in the kernel, so only *add/del* work. When IPDB changes a record, it effectively deletes the old one and creates the new with new parameters, but the object, referring the record, stays the same. Also that means, that IPDB can not recognize the situation, when someone else does the same. So if there is another program changing records by *del/add* operations, even another IPDB instance, referring objects in the IPDB will be recreated. Performance issues ------------------ In the case of bursts of Netlink broadcast messages, all the activity of the pyroute2-based code in the async mode becomes suppressed to leave more CPU resources to the packet reader thread. So please be ready to cope with delays in the case of Netlink broadcast storms. It means also, that IPDB state will be synchronized with OS also after some delay. 
The class API ------------- ''' import atexit import logging import sys import threading import traceback import warnings import weakref try: import queue except ImportError: import Queue as queue # The module is called 'Queue' in Python2 # prepare to deprecate the module # import warnings from functools import partial from pprint import pprint from pyroute2 import config from pyroute2.common import basestring, uuid32 from pyroute2.ipdb import interfaces, routes, rules from pyroute2.ipdb.exceptions import ShutdownException from pyroute2.ipdb.linkedset import IPaddrSet, SortedIPaddrSet from pyroute2.ipdb.routes import BaseRoute from pyroute2.ipdb.transactional import SYNC_TIMEOUT from pyroute2.ipdb.utils import test_reachable_icmp from pyroute2.iproute import IPRoute from pyroute2.netlink.rtnl import RTM_GETLINK, RTMGRP_DEFAULTS from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg log = logging.getLogger(__name__) class Watchdog(object): def __init__(self, ipdb, action, kwarg): self.event = threading.Event() self.is_set = False self.ipdb = ipdb def cb(ipdb, msg, _action): if _action != action: return for key in kwarg: if (msg.get(key, None) != kwarg[key]) and ( msg.get_attr(msg.name2nla(key)) != kwarg[key] ): return self.is_set = True self.event.set() self.cb = cb # register callback prior to other things self.uuid = self.ipdb.register_callback(self.cb) def wait(self, timeout=SYNC_TIMEOUT): ret = self.event.wait(timeout=timeout) self.cancel() return ret def cancel(self): self.ipdb.unregister_callback(self.uuid) class _evq_context(object): ''' Context manager class for the event queue used by the event loop ''' def __init__(self, ipdb, qsize, block, timeout): self._ipdb = ipdb self._qsize = qsize self._block = block self._timeout = timeout def __enter__(self): # Context manager protocol self._ipdb._evq_lock.acquire() self._ipdb._evq = queue.Queue(maxsize=self._qsize) self._ipdb._evq_drop = 0 return self def __exit__(self, exc_type, exc_value, traceback): # Context 
manager protocol self._ipdb._evq = None self._ipdb._evq_drop = 0 self._ipdb._evq_lock.release() def __iter__(self): # Iterator protocol if not self._ipdb._evq: raise RuntimeError( 'eventqueue must be used ' 'as a context manager' ) return self def next(self): # Iterator protocol -- Python 2.x compatibility return self.__next__() def __next__(self): # Iterator protocol -- Python 3.x msg = self._ipdb._evq.get(self._block, self._timeout) self._ipdb._evq.task_done() if isinstance(msg, Exception): raise msg return msg class IPDB(object): ''' The class that maintains information about network setup of the host. Monitoring netlink events allows it to react immediately. It uses no polling. ''' def __init__( self, nl=None, mode='implicit', restart_on_error=None, nl_async=None, sndbuf=1048576, rcvbuf=1048576, nl_bind_groups=RTMGRP_DEFAULTS, ignore_rtables=None, callbacks=None, sort_addresses=False, plugins=None, deprecation_warning=True, ): msg = 'https://docs.pyroute2.org/ipdb_toc.html' log.warning('Deprecation warning ' + msg) if deprecation_warning: log.warning( 'To remove this DeprecationWarning exception, ' 'start IPDB(deprecation_warning=False, ...)' ) warnings.warn( 'IPDB module is deprecated and will be removed in 0.7.1', DeprecationWarning, ) plugins = plugins or ['interfaces', 'routes', 'rules'] pmap = {'interfaces': interfaces, 'routes': routes, 'rules': rules} self.mode = mode self.txdrop = False self._stdout = sys.stdout self._ipaddr_set = SortedIPaddrSet if sort_addresses else IPaddrSet self._event_map = {} self._deferred = {} self._ensure = [] self._loaded = set() self._mthread = None self._nl_own = nl is None self._nl_async = config.ipdb_nl_async if nl_async is None else True self.mnl = None self.nl = nl self._sndbuf = sndbuf self._rcvbuf = rcvbuf self.nl_bind_groups = nl_bind_groups self._plugins = [pmap[x] for x in plugins if x in pmap] if isinstance(ignore_rtables, int): self._ignore_rtables = [ignore_rtables] elif isinstance(ignore_rtables, (list, tuple, 
set)): self._ignore_rtables = ignore_rtables else: self._ignore_rtables = [] self._stop = False # see also 'register_callback' self._post_callbacks = {} self._pre_callbacks = {} # local event queues # - callbacks event queue self._cbq = queue.Queue(maxsize=8192) self._cbq_drop = 0 # - users event queue self._evq = None self._evq_lock = threading.Lock() self._evq_drop = 0 # locks and events self.exclusive = threading.RLock() self._shutdown_lock = threading.Lock() # register callbacks # # examples:: # def cb1(ipdb, msg, event): # print(event, msg) # def cb2(...): # ... # # # default mode: post # IPDB(callbacks=[cb1, cb2]) # # specify the mode explicitly # IPDB(callbacks=[(cb1, 'pre'), (cb2, 'post')]) # for cba in callbacks or []: if not isinstance(cba, (tuple, list, set)): cba = (cba,) self.register_callback(*cba) # load information self.restart_on_error = ( restart_on_error if restart_on_error is not None else nl is None ) # init the database self.initdb() # init the dir() cache self.__dir_cache__ = [ i for i in self.__class__.__dict__.keys() if i[0] != '_' ] self.__dir_cache__.extend(list(self._deferred.keys())) def cleanup(ref): ipdb_obj = ref() if (ipdb_obj is not None) and (not ipdb_obj._stop): ipdb_obj.release() atexit.register(cleanup, weakref.ref(self)) def __dir__(self): return self.__dir_cache__ def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.release() def _flush_db(self): def flush(idx): for key in tuple(idx.keys()): try: del idx[key] except KeyError: pass idx_list = [] if 'interfaces' in self._loaded: for key, dev in self.by_name.items(): try: # FIXME self.interfaces._detach(key, dev['index'], dev.nlmsg) except KeyError: pass idx_list.append(self.ipaddr) idx_list.append(self.neighbours) if 'routes' in self._loaded: idx_list.extend( [self.routes.tables[x] for x in self.routes.tables.keys()] ) if 'rules' in self._loaded: idx_list.append(self.rules) for idx in idx_list: flush(idx) def initdb(self): # flush all the DB 
objects with self.exclusive: # explicitly cleanup object references for event in tuple(self._event_map): del self._event_map[event] self._flush_db() # if the command socket is not provided, create it if self._nl_own: if self.nl is not None: self.nl.close() self.nl = IPRoute( sndbuf=self._sndbuf, rcvbuf=self._rcvbuf, async_qsize=0 ) # OBS: legacy design # setup monitoring socket if self.mnl is not None: self._flush_mnl() self.mnl.close() self.mnl = self.nl.clone() try: self.mnl.bind( groups=self.nl_bind_groups, async_cache=self._nl_async ) except: self.mnl.close() if self._nl_own is None: self.nl.close() raise # explicitly cleanup references for key in tuple(self._deferred): del self._deferred[key] for module in self._plugins: if (module.groups & self.nl_bind_groups) != module.groups: continue for plugin in module.spec: self._deferred[plugin['name']] = module.spec if plugin['name'] in self._loaded: delattr(self, plugin['name']) self._loaded.remove(plugin['name']) # start service threads for tspec in ( ('_mthread', '_serve_main', 'IPDB main event loop'), ('_cthread', '_serve_cb', 'IPDB cb event loop'), ): tg = getattr(self, tspec[0], None) if not getattr(tg, 'is_alive', lambda: False)(): tx = threading.Thread( name=tspec[2], target=getattr(self, tspec[1]) ) setattr(self, tspec[0], tx) tx.daemon = True tx.start() def __getattribute__(self, name): deferred = super(IPDB, self).__getattribute__('_deferred') if name in deferred: register = [] spec = deferred[name] for plugin in spec: obj = plugin['class'](self, **plugin['kwarg']) setattr(self, plugin['name'], obj) register.append(obj) self._loaded.add(plugin['name']) del deferred[plugin['name']] for obj in register: if hasattr(obj, '_register'): obj._register() if hasattr(obj, '_event_map'): for event in obj._event_map: if event not in self._event_map: self._event_map[event] = [] self._event_map[event].append(obj._event_map[event]) return super(IPDB, self).__getattribute__(name) def register_callback(self, callback, 
mode='post'): ''' IPDB callbacks are routines executed on a RT netlink message arrival. There are two types of callbacks: "post" and "pre" callbacks. ... "Post" callbacks are executed after the message is processed by IPDB and all corresponding objects are created or deleted. Using ipdb reference in "post" callbacks you will access the most up-to-date state of the IP database. "Post" callbacks are executed asynchronously in separate threads. These threads can work as long as you want them to. Callback threads are joined occasionally, so for a short time there can exist stopped threads. ... "Pre" callbacks are synchronous routines, executed before the message gets processed by IPDB. It gives you the way to patch arriving messages, but also places a restriction: until the callback exits, the main event IPDB loop is blocked. Normally, only "post" callbacks are required. But in some specific cases "pre" also can be useful. ... The routine, `register_callback()`, takes two arguments: - callback function - mode (optional, default="post") The callback should be a routine, that accepts three arguments:: cb(ipdb, msg, action) Arguments are: - **ipdb** is a reference to IPDB instance, that invokes the callback. 
- **msg** is a message arrived - **action** is just a msg['event'] field E.g., to work on a new interface, you should catch action == 'RTM_NEWLINK' and with the interface index (arrived in msg['index']) get it from IPDB:: index = msg['index'] interface = ipdb.interfaces[index] ''' lock = threading.Lock() def safe(*argv, **kwarg): with lock: callback(*argv, **kwarg) safe.hook = callback safe.lock = lock safe.uuid = uuid32() if mode == 'post': self._post_callbacks[safe.uuid] = safe elif mode == 'pre': self._pre_callbacks[safe.uuid] = safe else: raise KeyError('Unknown callback mode') return safe.uuid def unregister_callback(self, cuid, mode='post'): if mode == 'post': cbchain = self._post_callbacks elif mode == 'pre': cbchain = self._pre_callbacks else: raise KeyError('Unknown callback mode') safe = cbchain[cuid] with safe.lock: ret = cbchain.pop(cuid) return ret def eventqueue(self, qsize=8192, block=True, timeout=None): ''' Initializes event queue and returns event queue context manager. Once the context manager is initialized, events start to be collected, so it is possible to read initial state from the system without losing last moment changes, and once that is done, start processing events. Example:: ipdb = IPDB() with ipdb.eventqueue() as evq: my_state = ipdb.... for msg in evq: update_state_by_msg(my_state, msg) ''' return _evq_context(self, qsize, block, timeout) def eventloop(self, qsize=8192, block=True, timeout=None): """ Event generator for simple cases when there is no need for initial state setup. Initialize event queue and yield events as they happen. """ with self.eventqueue(qsize=qsize, block=block, timeout=timeout) as evq: for msg in evq: yield msg def release(self): ''' Shutdown IPDB instance and sync the state. Since IPDB is asyncronous, some operations continue in the background, e.g. callbacks. So, prior to exit the script, it is required to properly shutdown IPDB. 
The shutdown sequence is not forced in an interactive python session, since it is easier for users and there is enough time to sync the state. But for the scripts the `release()` call is required. ''' with self._shutdown_lock: if self._stop: log.warning("shutdown in progress") return self._stop = True self._cbq.put(ShutdownException("shutdown")) if self._mthread is not None: self._flush_mnl() self._mthread.join() if self.mnl is not None: self.mnl.close() self.mnl = None if self._nl_own: self.nl.close() self.nl = None self._flush_db() def _flush_mnl(self): if self.mnl is not None: # terminate the main loop for t in range(3): try: msg = ifinfmsg() msg['index'] = 1 msg.reset() self.mnl.put(msg, RTM_GETLINK) except Exception as e: log.error("shutdown error: %s", e) # Just give up. # We can not handle this case def create(self, kind, ifname, reuse=False, **kwarg): return self.interfaces.add(kind, ifname, reuse, **kwarg) def ensure(self, cmd='add', reachable=None, condition=None): if cmd == 'reset': self._ensure = [] elif cmd == 'run': for f in self._ensure: f() elif cmd == 'add': if isinstance(reachable, basestring): reachable = reachable.split(':') if len(reachable) == 1: f = partial(test_reachable_icmp, reachable[0]) else: raise NotImplementedError() self._ensure.append(f) else: if sys.stdin.isatty(): pprint(self._ensure, stream=self._stdout) elif cmd == 'print': pprint(self._ensure, stream=self._stdout) elif cmd == 'get': return self._ensure else: raise NotImplementedError() def items(self): # TODO: add support for filters? 
# iterate interfaces for ifname in getattr(self, 'by_name', {}): yield (('interfaces', ifname), self.interfaces[ifname]) # iterate routes for table in getattr(getattr(self, 'routes', None), 'tables', {}): for key, route in self.routes.tables[table].items(): yield (('routes', table, key), route) def dump(self): ret = {} for key, obj in self.items(): ptr = ret for step in key[:-1]: if step not in ptr: ptr[step] = {} ptr = ptr[step] ptr[key[-1]] = obj return ret def load(self, config, ptr=None): if ptr is None: ptr = self for key in config: obj = getattr(ptr, key, None) if obj is not None: if hasattr(obj, 'load'): obj.load(config[key]) else: self.load(config[key], ptr=obj) elif hasattr(ptr, 'add'): ptr.add(**config[key]) return self def review(self): ret = {} for key, obj in self.items(): ptr = ret try: rev = obj.review() except TypeError: continue for step in key[:-1]: if step not in ptr: ptr[step] = {} ptr = ptr[step] ptr[key[-1]] = rev if not ret: raise TypeError('no transaction started') return ret def drop(self): ok = False for key, obj in self.items(): try: obj.drop() except TypeError: continue ok = True if not ok: raise TypeError('no transaction started') def commit(self, transactions=None, phase=1): # what to commit: either from transactions argument, or from # started transactions on existing objects if transactions is None: # collect interface transactions txlist = [ (x, x.current_tx) for x in getattr(self, 'by_name', {}).values() if x.local_tx.values() ] # collect route transactions for table in getattr( getattr(self, 'routes', None), 'tables', {} ).keys(): txlist.extend( [ (x, x.current_tx) for x in self.routes.tables[table] if x.local_tx.values() ] ) transactions = txlist snapshots = [] removed = [] tx_ipdb_prio = [] tx_main = [] tx_prio1 = [] tx_prio2 = [] tx_prio3 = [] for target, tx in transactions: # 8<------------------------------ # first -- explicit priorities if tx['ipdb_priority']: tx_ipdb_prio.append((target, tx)) continue # 
8<------------------------------ # routes if isinstance(target, BaseRoute): tx_prio3.append((target, tx)) continue # 8<------------------------------ # intefaces kind = target.get('kind', None) if kind in ( 'vlan', 'vxlan', 'gre', 'tuntap', 'vti', 'vti6', 'vrf', 'xfrm', ): tx_prio1.append((target, tx)) elif kind in ('bridge', 'bond'): tx_prio2.append((target, tx)) else: tx_main.append((target, tx)) # 8<------------------------------ # explicitly sorted transactions tx_ipdb_prio = sorted( tx_ipdb_prio, key=lambda x: x[1]['ipdb_priority'], reverse=True ) # FIXME: this should be documented # # The final transactions order: # 1. any txs with ipdb_priority (sorted by that field) # # Then come default priorities (no ipdb_priority specified): # 2. all the rest # 3. vlan, vxlan, gre, tuntap, vti, vrf # 4. bridge, bond # 5. routes transactions = tx_ipdb_prio + tx_main + tx_prio1 + tx_prio2 + tx_prio3 try: for target, tx in transactions: if target['ipdb_scope'] == 'detached': continue if tx['ipdb_scope'] == 'remove': tx['ipdb_scope'] = 'shadow' removed.append((target, tx)) if phase == 1: s = (target, target.pick(detached=True)) snapshots.append(s) # apply the changes, but NO rollback -- only phase 1 target.commit( transaction=tx, commit_phase=phase, commit_mask=phase ) # if the commit above fails, the next code # branch will run rollbacks except Exception: if phase == 1: # run rollbacks for ALL the collected transactions, # even successful ones self.fallen = transactions txs = filter( lambda x: not ( 'create' == x[0]['ipdb_scope'] == x[1]['ipdb_scope'] ), snapshots, ) self.commit(transactions=txs, phase=2) raise else: if phase == 1: for target, tx in removed: target['ipdb_scope'] = 'detached' target.detach() finally: if phase == 1: for target, tx in transactions: target.drop(tx.uid) return self def watchdog(self, wdops='RTM_NEWLINK', **kwarg): return Watchdog(self, wdops, kwarg) def _serve_cb(self): ### # Callbacks thread working on a dedicated event queue. 
### while not self._stop: msg = self._cbq.get() self._cbq.task_done() if isinstance(msg, ShutdownException): return elif isinstance(msg, Exception): raise msg for cb in tuple(self._post_callbacks.values()): try: cb(self, msg, msg['event']) except: pass def _serve_main(self): ### # Main monitoring cycle. It gets messages from the # default iproute queue and updates objects in the # database. ### while not self._stop: try: messages = self.mnl.get() ## # Check it again # # NOTE: one should not run callbacks or # anything like that after setting the # _stop flag, since IPDB is not valid # anymore if self._stop: break except Exception as e: with self.exclusive: if self._evq: self._evq.put(e) return if self.restart_on_error: log.error( 'Restarting IPDB instance after ' 'error:\n%s', traceback.format_exc(), ) try: self.initdb() except: log.error( 'Error restarting DB:\n%s', traceback.format_exc() ) return continue else: log.error('Emergency shutdown, cleanup manually') raise RuntimeError('Emergency shutdown') for msg in messages: # Run pre-callbacks # NOTE: pre-callbacks are synchronous for cuid, cb in tuple(self._pre_callbacks.items()): try: cb(self, msg, msg['event']) except: pass with self.exclusive: event = msg.get('event', None) if event in self._event_map: for func in self._event_map[event]: func(msg) # Post-callbacks try: self._cbq.put_nowait(msg) if self._cbq_drop: log.warning('dropped %d events', self._cbq_drop) self._cbq_drop = 0 except queue.Full: self._cbq_drop += 1 except Exception: log.error('Emergency shutdown, cleanup manually') raise RuntimeError('Emergency shutdown') # # Why not to put these two pieces of the code # it in a routine? 
# # TODO: run performance tests with routines # Users event queue if self._evq: try: self._evq.put_nowait(msg) if self._evq_drop: log.warning( "dropped %d events", self._evq_drop ) self._evq_drop = 0 except queue.Full: self._evq_drop += 1 except Exception: log.error('Emergency shutdown, cleanup manually') raise RuntimeError('Emergency shutdown') pyroute2-0.7.11/pyroute2/ipdb/routes.py000066400000000000000000001320531455030217500200010ustar00rootroot00000000000000import logging import struct import threading import time import traceback import types from collections import namedtuple from socket import AF_INET, AF_INET6, AF_UNSPEC, inet_ntop, inet_pton from pyroute2.common import AF_MPLS, basestring from pyroute2.ipdb.exceptions import CommitException from pyroute2.ipdb.linkedset import LinkedSet from pyroute2.ipdb.transactional import ( SYNC_TIMEOUT, Transactional, with_transaction, ) from pyroute2.netlink import NLM_F_CREATE, NLM_F_MULTI, nlmsg, nlmsg_base, rtnl from pyroute2.netlink.rtnl import encap_type, rt_proto, rt_type from pyroute2.netlink.rtnl.ifaddrmsg import IFA_F_SECONDARY from pyroute2.netlink.rtnl.rtmsg import rtmsg from pyroute2.requests.main import RequestProcessor from pyroute2.requests.route import RouteFieldFilter log = logging.getLogger(__name__) groups = ( rtnl.RTMGRP_IPV4_ROUTE | rtnl.RTMGRP_IPV6_ROUTE | rtnl.RTMGRP_MPLS_ROUTE ) IP6_RT_PRIO_USER = 1024 class Metrics(Transactional): _fields = [rtmsg.metrics.nla2name(i[0]) for i in rtmsg.metrics.nla_map] class Encap(Transactional): _fields = ['type', 'labels'] class Via(Transactional): _fields = ['family', 'addr'] class NextHopSet(LinkedSet): def __init__(self, prime=None): super(NextHopSet, self).__init__() prime = prime or [] for v in prime: self.add(v) def __sub__(self, vs): ret = type(self)() sub = set(self.raw.keys()) - set(vs.raw.keys()) for v in sub: ret.add(self[v], raw=self.raw[v]) return ret def __make_nh(self, prime): if isinstance(prime, BaseRoute): return prime.make_nh_key(prime) 
elif isinstance(prime, dict): if prime.get('family', None) == AF_MPLS: return MPLSRoute.make_nh_key(prime) else: return Route.make_nh_key(prime) elif isinstance(prime, tuple): return prime else: raise TypeError("unknown prime type %s" % type(prime)) def __getitem__(self, key): return self.raw[key] def __iter__(self): def NHIterator(): for x in tuple(self.raw.values()): yield x return NHIterator() def add(self, prime, raw=None, cascade=False): key = self.__make_nh(prime) req = key._required fields = key._fields skey = key[:req] + (None,) * (len(fields) - req) if skey in self.raw: del self.raw[skey] return super(NextHopSet, self).add(key, raw=prime) def remove(self, prime, raw=None, cascade=False): key = self.__make_nh(prime) try: super(NextHopSet, self).remove(key) except KeyError as e: req = key._required fields = key._fields skey = key[:req] + (None,) * (len(fields) - req) for rkey in tuple(self.raw.keys()): if skey == rkey[:req] + (None,) * (len(fields) - req): break else: raise e super(NextHopSet, self).remove(rkey) class WatchdogMPLSKey(dict): def __init__(self, route): dict.__init__(self) self['oif'] = route['oif'] self['dst'] = [{'ttl': 0, 'bos': 1, 'tc': 0, 'label': route['dst']}] class WatchdogKey(dict): ''' Construct from a route a dictionary that could be used as a match for IPDB watchdogs. ''' def __init__(self, route): dict.__init__( self, [ x for x in RequestProcessor( RouteFieldFilter(), context=route, prime=route ).items() if x[0] in ( 'dst', 'dst_len', 'src', 'src_len', 'tos', 'priority', 'gateway', 'table', ) and x[1] ], ) # Universal route key # Holds the fields that the kernel uses to uniquely identify routes. # IPv4 allows redundant routes with different 'tos' but IPv6 does not, # so 'tos' is used for IPv4 but not IPv6. 
# For reference, see fib_table_insert() in # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/ipv4/fib_trie.c#n1147 # and fib6_add_rt2node() in # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/net/ipv6/ip6_fib.c#n765 RouteKey = namedtuple( 'RouteKey', ('dst', 'table', 'family', 'priority', 'tos') ) # IP multipath NH key IPNHKey = namedtuple('IPNHKey', ('gateway', 'encap', 'oif')) IPNHKey._required = 2 # MPLS multipath NH key MPLSNHKey = namedtuple('MPLSNHKey', ('newdst', 'via', 'oif')) MPLSNHKey._required = 2 def _normalize_ipaddr(x, y): if isinstance(y, basestring) and y.find(':') > -1: y = inet_ntop(AF_INET6, inet_pton(AF_INET6, y)) return x == y def _normalize_ipnet(x, y): # # x -- incoming value # y -- transaction value # if isinstance(y, basestring) and y.find(':') > -1: s = y.split('/') ip = inet_ntop(AF_INET6, inet_pton(AF_INET6, s[0])) if len(s) > 1: y = '%s/%s' % (ip, s[1]) else: y = ip return x == y class BaseRoute(Transactional): ''' Persistent transactional route object ''' _fields = [rtmsg.nla2name(i[0]) for i in rtmsg.nla_map] for key, _ in rtmsg.fields: _fields.append(key) _fields.append('removal') _virtual_fields = ['ipdb_scope', 'ipdb_priority'] _fields.extend(_virtual_fields) _linked_sets = ['multipath'] _nested = [] _gctime = None cleanup = ('attrs', 'header', 'event', 'cacheinfo') _fields_cmp = { 'src': _normalize_ipnet, 'dst': _normalize_ipnet, 'gateway': _normalize_ipaddr, 'prefsrc': _normalize_ipaddr, } def __init__(self, ipdb, mode=None, parent=None, uid=None): Transactional.__init__(self, ipdb, mode, parent, uid) with self._direct_state: self['ipdb_priority'] = 0 @with_transaction def add_nh(self, prime): with self._write_lock: # if the multipath chain is empty, copy the current # nexthop as the first in the multipath if not self['multipath']: first = {} for key in ('oif', 'gateway', 'newdst'): if self[key]: first[key] = self[key] if first: if self['family']: first['family'] = 
self['family'] for key in ('encap', 'via', 'metrics'): if self[key] and any(self[key].values()): first[key] = self[key] self[key] = None self['multipath'].add(first) # cleanup key fields for key in ('oif', 'gateway', 'newdst'): self[key] = None # add the prime as NH if self['family'] == AF_MPLS: prime['family'] = AF_MPLS self['multipath'].add(prime) @with_transaction def del_nh(self, prime): with self._write_lock: if not self['multipath']: raise KeyError( 'attempt to delete nexthop from ' 'non-multipath route' ) nh = dict(prime) if self['family'] == AF_MPLS: nh['family'] = AF_MPLS self['multipath'].remove(nh) def load_netlink(self, msg): with self._direct_state: if self['ipdb_scope'] == 'locked': # do not touch locked interfaces return self['ipdb_scope'] = 'system' # IPv6 multipath via several devices (not networks) is a very # special case, since we get only the first hop notification. Ask # the kernel guys why. I've got no idea. # # So load all the rest flags = msg.get('header', {}).get('flags', 0) family = msg.get('family', 0) clean_mp = True table = msg.get_attr('RTA_TABLE') or msg.get('table') dst = msg.get_attr('RTA_DST') # # It MAY be a multipath hop # if family == AF_INET6 and not msg.get_attr('RTA_MULTIPATH'): # # It is a notification about the route created # if flags == NLM_F_CREATE: # # This routine can significantly slow down the IPDB # instance, but I see no way around. Some are born # to endless night. 
# clean_mp = False msgs = self.nl.route( 'show', table=table, dst=dst, family=family ) for nhmsg in msgs: nh = type(self)(ipdb=self.ipdb, parent=self) nh.load_netlink(nhmsg) with nh._direct_state: del nh['dst'] del nh['ipdb_scope'] del nh['ipdb_priority'] del nh['multipath'] del nh['metrics'] self.add_nh(nh) # # it IS a multipath hop loaded during IPDB init # elif flags == NLM_F_MULTI and self.get('dst'): nh = type(self)(ipdb=self.ipdb, parent=self) nh.load_netlink(msg) with nh._direct_state: del nh['dst'] del nh['ipdb_scope'] del nh['ipdb_priority'] del nh['multipath'] del nh['metrics'] self.add_nh(nh) return for key, value in msg.items(): self[key] = value # cleanup multipath NH if clean_mp: for nh in self['multipath']: self.del_nh(nh) for cell in msg['attrs']: # # Parse on demand # norm = rtmsg.nla2name(cell[0]) if norm in self.cleanup: continue value = cell[1] # normalize RTAX if norm == 'metrics': with self['metrics']._direct_state: for metric in tuple(self['metrics'].keys()): del self['metrics'][metric] for rtax, rtax_value in value['attrs']: rtax_norm = rtmsg.metrics.nla2name(rtax) self['metrics'][rtax_norm] = rtax_value elif norm == 'multipath': for record in value: nh = type(self)(ipdb=self.ipdb, parent=self) nh.load_netlink(record) with nh._direct_state: del nh['dst'] del nh['ipdb_scope'] del nh['ipdb_priority'] del nh['multipath'] del nh['metrics'] self['multipath'].add(nh) elif norm == 'encap': with self['encap']._direct_state: # WIP: should support encap_types other than MPLS if value.get_attr('MPLS_IPTUNNEL_DST'): ret = [] for dst in value.get_attr('MPLS_IPTUNNEL_DST'): ret.append(str(dst['label'])) if ret: self['encap']['labels'] = '/'.join(ret) elif norm == 'via': with self['via']._direct_state: self['via'] = value elif norm == 'newdst': self['newdst'] = [x['label'] for x in value] else: self[norm] = value if msg.get('family', 0) == AF_MPLS: dst = msg.get_attr('RTA_DST') if dst: dst = dst[0]['label'] else: if msg.get_attr('RTA_DST'): dst = '%s/%s' % 
(msg.get_attr('RTA_DST'), msg['dst_len']) else: dst = 'default' self['dst'] = dst # fix RTA_ENCAP_TYPE if needed if msg.get_attr('RTA_ENCAP'): if self['encap_type'] is not None: with self['encap']._direct_state: self['encap']['type'] = self['encap_type'] self['encap_type'] = None # or drop encap, if there is no RTA_ENCAP in msg elif self['encap'] is not None: self['encap_type'] = None with self['encap']._direct_state: self['encap'] = {} # drop metrics, if there is no RTA_METRICS in msg if not msg.get_attr('RTA_METRICS') and self['metrics'] is not None: with self['metrics']._direct_state: self['metrics'] = {} # same for via if not msg.get_attr('RTA_VIA') and self['via'] is not None: with self['via']._direct_state: self['via'] = {} # one hop -> multihop transition if not msg.get_attr('RTA_GATEWAY') and self['gateway'] is not None: self['gateway'] = None if ( 'oif' not in msg and not msg.get_attr('RTA_OIF') and self['oif'] is not None ): self['oif'] = None # finally, cleanup all not needed for item in self.cleanup: if item in self: del self[item] def commit( self, tid=None, transaction=None, commit_phase=1, commit_mask=0xFF ): if not commit_phase & commit_mask: return self error = None drop = self.ipdb.txdrop devop = 'set' cleanup = [] # FIXME -- make a debug object debug = {'traceback': None, 'next_stage': None} notx = True if tid or transaction: notx = False if tid: transaction = self.global_tx[tid] else: transaction = transaction or self.current_tx # ignore global rollbacks on invalid routes if self['ipdb_scope'] == 'create' and commit_phase > 1: return # create a new route if self['ipdb_scope'] != 'system': devop = 'add' # work on an existing route snapshot = self.pick() added, removed = transaction // snapshot added.pop('ipdb_scope', None) removed.pop('ipdb_scope', None) try: # route set if self['family'] != AF_MPLS: cleanup = [ any(snapshot['metrics'].values()) and not any(added.get('metrics', {}).values()), any(snapshot['encap'].values()) and not 
any(added.get('encap', {}).values()), ] if ( any(added.values()) or any(cleanup) or removed.get('multipath', None) or devop == 'add' ): # prepare multipath target sync wlist = [] if transaction['multipath']: mplen = len(transaction['multipath']) if mplen == 1: # set up local targets for nh in transaction['multipath']: for key in ('oif', 'gateway', 'newdst'): if nh.get(key, None): self.set_target(key, nh[key]) wlist.append(key) mpt = None else: def mpcheck(mpset): return len(mpset) == mplen mpt = self['multipath'].set_target(mpcheck, True) else: mpt = None # prepare the anchor key to catch *possible* route update old_key = self.make_key(self) new_key = self.make_key(transaction) if old_key != new_key: # assume we can not move routes between tables (yet ;) if self['family'] == AF_MPLS: route_index = self.ipdb.routes.tables['mpls'].idx else: route_index = self.ipdb.routes.tables[ self['table'] or 254 ].idx # re-link the route record if new_key in route_index: raise CommitException('route idx conflict') else: route_index[new_key] = {'key': new_key, 'route': self} # wipe the old key, if needed if old_key in route_index: del route_index[old_key] self.nl.route(devop, **transaction) # delete old record, if required if (old_key != new_key) and (devop == 'set'): req = dict(old_key._asdict()) # update the request with the scope. 
# # though the scope isn't a part of the # key, it is required for the correct # removal -- only if it is set req['scope'] = self.get('scope', 0) self.nl.route('del', **req) transaction.wait_all_targets() for key in ('metrics', 'via'): if transaction[key] and transaction[key]._targets: transaction[key].wait_all_targets() if mpt is not None: mpt.wait(SYNC_TIMEOUT) if not mpt.is_set(): raise CommitException('multipath target is not set') self['multipath'].clear_target(mpt) for key in wlist: self.wait_target(key) # route removal if (transaction['ipdb_scope'] in ('shadow', 'remove')) or ( (transaction['ipdb_scope'] == 'create') and commit_phase == 2 ): if transaction['ipdb_scope'] == 'shadow': with self._direct_state: self['ipdb_scope'] = 'locked' # create watchdog wd = self.ipdb.watchdog( 'RTM_DELROUTE', **self.wd_key(snapshot) ) for route in self.nl.route('delete', **snapshot): self.ipdb.routes.load_netlink(route) wd.wait() if transaction['ipdb_scope'] == 'shadow': with self._direct_state: self['ipdb_scope'] = 'shadow' # success, so it's safe to drop the transaction drop = True except Exception as e: error = e # prepare postmortem debug['traceback'] = traceback.format_exc() debug['error_stack'] = [] debug['next_stage'] = None if commit_phase == 1: try: self.commit( transaction=snapshot, commit_phase=2, commit_mask=commit_mask, ) except Exception as i_e: debug['next_stage'] = i_e error = RuntimeError() if drop and notx: self.drop(transaction.uid) if error is not None: error.debug = debug raise error self.ipdb.routes.gc() return self def remove(self): self['ipdb_scope'] = 'remove' return self def shadow(self): self['ipdb_scope'] = 'shadow' return self def detach(self): if self.get('family') == AF_MPLS: table = 'mpls' else: table = self.get('table', 254) del self.ipdb.routes.tables[table][self.make_key(self)] class Route(BaseRoute): _nested = ['encap', 'metrics'] wd_key = WatchdogKey @classmethod def make_encap(cls, encap): ''' Normalize encap object ''' labels = 
encap.get('labels', None) if isinstance(labels, (list, tuple, set)): labels = '/'.join( map( lambda x: str(x['label']) if isinstance(x, dict) else str(x), labels, ) ) if not isinstance(labels, basestring): raise TypeError('labels struct not supported') return {'type': encap.get('type', 'mpls'), 'labels': labels} @classmethod def make_nh_key(cls, msg): ''' Construct from a netlink message a multipath nexthop key ''' values = [] if isinstance(msg, nlmsg_base): for field in IPNHKey._fields: v = msg.get_attr(msg.name2nla(field)) if field == 'encap': # 1. encap type if msg.get_attr('RTA_ENCAP_TYPE') != 1: # FIXME values.append(None) continue # 2. encap_type == 'mpls' v = '/'.join( [ str(x['label']) for x in v.get_attr('MPLS_IPTUNNEL_DST') ] ) elif v is None: v = msg.get(field, None) values.append(v) elif isinstance(msg, dict): for field in IPNHKey._fields: v = msg.get(field, None) if field == 'encap' and v and v['labels']: v = v['labels'] elif (field == 'encap') and ( len(msg.get('multipath', []) or []) == 1 ): v = ( tuple(msg['multipath'].raw.values())[0] .get('encap', {}) .get('labels', None) ) elif field == 'encap': v = None elif ( (field == 'gateway') and (len(msg.get('multipath', []) or []) == 1) and not v ): v = tuple(msg['multipath'].raw.values())[0].get( 'gateway', None ) if field == 'encap' and isinstance(v, (list, tuple, set)): v = '/'.join( map( lambda x: str(x['label']) if isinstance(x, dict) else str(x), v, ) ) values.append(v) else: raise TypeError('prime not supported: %s' % type(msg)) return IPNHKey(*values) @classmethod def make_key(cls, msg): ''' Construct from a netlink message a key that can be used to locate the route in the table ''' values = [] if isinstance(msg, nlmsg_base): for field in RouteKey._fields: v = msg.get_attr(msg.name2nla(field)) if field == 'dst': if v is not None: v = '%s/%s' % (v, msg['dst_len']) else: v = 'default' elif field == 'tos' and msg.get('family') != AF_INET: # ignore tos field for non-IPv6 routes, # as it used as a key 
only there v = None elif v is None: v = msg.get(field, None) values.append(v) elif isinstance(msg, dict): for field in RouteKey._fields: v = msg.get(field, None) if ( field == 'dst' and isinstance(v, basestring) and v.find(':') > -1 ): v = v.split('/') ip = inet_ntop(AF_INET6, inet_pton(AF_INET6, v[0])) if len(v) > 1: v = '%s/%s' % (ip, v[1]) else: v = ip elif field == 'tos' and msg.get('family') != AF_INET: # ignore tos field for non-IPv6 routes, # as it used as a key only there v = None values.append(v) else: raise TypeError('prime not supported: %s' % type(msg)) return RouteKey(*values) def __setitem__(self, key, value): ret = value if (key in ('encap', 'metrics')) and isinstance(value, dict): # transactionals attach as is if type(value) in (Encap, Metrics): with self._direct_state: return Transactional.__setitem__(self, key, value) # check, if it exists already ret = Transactional.__getitem__(self, key) # it doesn't # (plain dict can be safely discarded) if (type(ret) == dict) or not ret: # bake transactionals in place if key == 'encap': ret = Encap(parent=self) elif key == 'metrics': ret = Metrics(parent=self) # attach transactional to the route with self._direct_state: Transactional.__setitem__(self, key, ret) # begin() works only if the transactional is attached if any(value.values()): if self._mode in ('implicit', 'explicit'): ret._begin(tid=self.current_tx.uid) [ ret.__setitem__(k, v) for k, v in value.items() if v is not None ] # corresponding transactional exists else: # set fields for k in ret: ret[k] = value.get(k, None) return elif key == 'multipath': cur = Transactional.__getitem__(self, key) if isinstance(cur, NextHopSet): # load entries vs = NextHopSet(value) for key in vs - cur: cur.add(key) for key in cur - vs: cur.remove(key) else: # drop any result of `update()` Transactional.__setitem__(self, key, NextHopSet(value)) return elif key == 'encap_type' and not isinstance(value, int): ret = encap_type.get(value, value) elif key == 'type' and not 
isinstance(value, int): ret = rt_type.get(value, value) elif key == 'proto' and not isinstance(value, int): ret = rt_proto.get(value, value) elif ( key == 'dst' and isinstance(value, basestring) and value in ('0.0.0.0/0', '::/0') ): ret = 'default' Transactional.__setitem__(self, key, ret) def __getitem__(self, key): ret = Transactional.__getitem__(self, key) if (key in ('encap', 'metrics', 'multipath')) and (ret is None): with self._direct_state: self[key] = [] if key == 'multipath' else {} ret = self[key] return ret class MPLSRoute(BaseRoute): wd_key = WatchdogMPLSKey _nested = ['via'] @classmethod def make_nh_key(cls, msg): ''' Construct from a netlink message a multipath nexthop key ''' return MPLSNHKey( newdst=tuple(msg['newdst']), via=msg.get('via', {}).get('addr', None), oif=msg.get('oif', None), ) @classmethod def make_key(cls, msg): ''' Construct from a netlink message a key that can be used to locate the route in the table ''' ret = None if isinstance(msg, nlmsg): ret = msg.get_attr('RTA_DST') elif isinstance(msg, dict): ret = msg.get('dst', None) else: raise TypeError('prime not supported') if isinstance(ret, list): ret = ret[0]['label'] return ret def __setitem__(self, key, value): if key == 'via' and isinstance(value, dict): # replace with a new transactional if type(value) == Via: with self._direct_state: return BaseRoute.__setitem__(self, key, value) # or load the dict ret = BaseRoute.__getitem__(self, key) if not isinstance(ret, Via): ret = Via(parent=self) # attach new transactional -- replace any # non-Via object (may be a result of update()) with self._direct_state: BaseRoute.__setitem__(self, key, ret) # load value into the new object if any(value.values()): if self._mode in ('implicit', 'explicit'): ret._begin(tid=self.current_tx.uid) [ ret.__setitem__(k, v) for k, v in value.items() if v is not None ] else: # load value into existing object for k in ret: ret[k] = value.get(k, None) return elif key == 'multipath': cur = 
BaseRoute.__getitem__(self, key) if isinstance(cur, NextHopSet): # load entries vs = NextHopSet(value) for key in vs - cur: cur.add(key) for key in cur - vs: cur.remove(key) else: BaseRoute.__setitem__(self, key, NextHopSet(value)) else: BaseRoute.__setitem__(self, key, value) def __getitem__(self, key): with self._direct_state: ret = BaseRoute.__getitem__(self, key) if key == 'multipath' and ret is None: self[key] = [] ret = self[key] elif key == 'via' and ret is None: self[key] = {} ret = self[key] return ret class RoutingTable(object): route_class = Route def __init__(self, ipdb, prime=None): self.ipdb = ipdb self.lock = threading.Lock() self.idx = {} self.kdx = {} def __nogc__(self): return self.filter(lambda x: x['route']['ipdb_scope'] != 'gc') def __repr__(self): return repr([x['route'] for x in self.__nogc__()]) def __len__(self): return len(self.keys()) def __iter__(self): for record in self.__nogc__(): yield record['route'] def gc(self): now = time.time() for route in self.filter({'ipdb_scope': 'gc'}): if now - route['route']._gctime < 2: continue try: if not self.ipdb.nl.route('dump', **route['route']): raise with route['route']._direct_state: route['route']['ipdb_scope'] = 'system' except: del self.idx[route['key']] def keys(self, key='dst'): with self.lock: return [x['route'][key] for x in self.__nogc__()] def items(self): for key in self.keys(): yield (key, self[key]) def filter(self, target, oneshot=False): # if isinstance(target, types.FunctionType): return filter(target, [x for x in tuple(self.idx.values())]) if isinstance(target, basestring): target = {'dst': target} if not isinstance(target, dict): raise TypeError('target type not supported: %s' % type(target)) ret = [] for record in tuple(self.idx.values()): for key, value in tuple(target.items()): if (key not in record['route']) or ( value != record['route'][key] ): break else: ret.append(record) if oneshot: return ret return ret def describe(self, target, forward=False): # match the route by 
index -- a bit meaningless, # but for compatibility if isinstance(target, int): keys = [x['key'] for x in self.__nogc__()] return self.idx[keys[target]] # match the route by key if isinstance(target, (tuple, list)): # full match return self.idx[RouteKey(*target)] if isinstance(target, nlmsg): return self.idx[Route.make_key(target)] # match the route by filter ret = self.filter(target, oneshot=True) if ret: return ret[0] if not forward: raise KeyError('record not found') # match the route by dict spec if not isinstance(target, dict): raise TypeError('lookups can be done only with dict targets') # split masks if target.get('dst', '').find('/') >= 0: dst = target['dst'].split('/') target['dst'] = dst[0] target['dst_len'] = int(dst[1]) if target.get('src', '').find('/') >= 0: src = target['src'].split('/') target['src'] = src[0] target['src_len'] = int(src[1]) # load and return the route, if exists route = Route(self.ipdb) ret = self.ipdb.nl.get_routes(**target) if not ret: raise KeyError('record not found') route.load_netlink(ret[0]) return {'route': route, 'key': None} def __delitem__(self, key): with self.lock: item = self.describe(key, forward=False) del self.idx[self.route_class.make_key(item['route'])] def load(self, msg): key = self.route_class.make_key(msg) self[key] = msg return key def __setitem__(self, key, value): with self.lock: try: record = self.describe(key, forward=False) except KeyError: record = {'route': self.route_class(self.ipdb), 'key': None} if isinstance(value, nlmsg): record['route'].load_netlink(value) elif isinstance(value, self.route_class): record['route'] = value elif isinstance(value, dict): with record['route']._direct_state: record['route'].update(value) key = self.route_class.make_key(record['route']) if record['key'] is None: self.idx[key] = {'route': record['route'], 'key': key} else: self.idx[key] = record if record['key'] != key: del self.idx[record['key']] record['key'] = key def __getitem__(self, key): with self.lock: return 
self.describe(key, forward=False)['route'] def __contains__(self, key): try: with self.lock: self.describe(key, forward=False) return True except KeyError: return False class MPLSTable(RoutingTable): route_class = MPLSRoute def keys(self): return self.idx.keys() def describe(self, target, forward=False): # match by key if isinstance(target, int): return self.idx[target] # match by rtmsg if isinstance(target, rtmsg): return self.idx[self.route_class.make_key(target)] raise KeyError('record not found') class RoutingTableSet(object): def __init__(self, ipdb): self.ipdb = ipdb self._gctime = time.time() self.ignore_rtables = ipdb._ignore_rtables or [] self.tables = {254: RoutingTable(self.ipdb)} self._event_map = { 'RTM_NEWROUTE': self.load_netlink, 'RTM_DELROUTE': self.load_netlink, 'RTM_NEWLINK': self.gc_mark_link, 'RTM_DELLINK': self.gc_mark_link, 'RTM_DELADDR': self.gc_mark_addr, } def _register(self): for msg in self.ipdb.nl.get_routes( family=AF_INET, match={'family': AF_INET} ): self.load_netlink(msg) for msg in self.ipdb.nl.get_routes( family=AF_INET6, match={'family': AF_INET6} ): self.load_netlink(msg) for msg in self.ipdb.nl.get_routes( family=AF_MPLS, match={'family': AF_MPLS} ): self.load_netlink(msg) def add(self, spec=None, **kwarg): ''' Create a route from a dictionary ''' spec = dict(spec or kwarg) gateway = spec.get('gateway') or '' dst = spec.get('dst') or '' if 'tos' not in spec: spec['tos'] = 0 if 'scope' not in spec: spec['scope'] = 0 if 'table' not in spec: spec['table'] = 254 if 'family' not in spec: if (dst.find(':') > -1) or (gateway.find(':') > -1): spec['family'] = AF_INET6 else: spec['family'] = AF_INET if not dst: raise ValueError('dst not specified') if ( isinstance(dst, basestring) and (dst not in ('', 'default')) and ('/' not in dst) ): if spec['family'] == AF_INET: spec['dst'] = dst + '/32' elif spec['family'] == AF_INET6: spec['dst'] = dst + '/128' if 'priority' not in spec: if spec['family'] == AF_INET6: spec['priority'] = 
IP6_RT_PRIO_USER else: spec['priority'] = None multipath = spec.pop('multipath', []) if spec.get('family', 0) == AF_MPLS: table = 'mpls' if table not in self.tables: self.tables[table] = MPLSTable(self.ipdb) route = MPLSRoute(self.ipdb) else: table = spec.get('table', 254) if table not in self.tables: self.tables[table] = RoutingTable(self.ipdb) route = Route(self.ipdb) route.update(spec) with route._direct_state: route['ipdb_scope'] = 'create' for nh in multipath: if 'encap' in nh: nh['encap'] = route.make_encap(nh['encap']) if table == 'mpls': nh['family'] = AF_MPLS route.add_nh(nh) route.begin() for key, value in spec.items(): if key == 'encap': route[key] = route.make_encap(value) else: route[key] = value self.tables[table][route.make_key(route)] = route return route def load_netlink(self, msg): ''' Loads an existing route from a rtmsg ''' if not isinstance(msg, rtmsg): return if msg['family'] == AF_MPLS: table = 'mpls' else: table = msg.get_attr('RTA_TABLE', msg['table']) if table in self.ignore_rtables: return now = time.time() if now - self._gctime > 5: self._gctime = now self.gc() # RTM_DELROUTE if msg['event'] == 'RTM_DELROUTE': try: # locate the record record = self.tables[table][msg] # delete the record if record['ipdb_scope'] not in ('locked', 'shadow'): del self.tables[table][msg] with record._direct_state: record['ipdb_scope'] = 'detached' except Exception as e: # just ignore this failure for now log.debug("delroute failed for %s", e) return # RTM_NEWROUTE if table not in self.tables: if table == 'mpls': self.tables[table] = MPLSTable(self.ipdb) else: self.tables[table] = RoutingTable(self.ipdb) self.tables[table].load(msg) def gc_mark_addr(self, msg): ## # Find invalid IPv4 route records after addr delete # # Example:: # $ sudo ip link add test0 type dummy # $ sudo ip link set dev test0 up # $ sudo ip addr add 172.18.0.5/24 dev test0 # $ sudo ip route add 10.1.2.0/24 via 172.18.0.1 # ... 
# $ sudo ip addr flush dev test0 # # The route {'dst': '10.1.2.0/24', 'gateway': '172.18.0.1'} # will stay in the routing table being removed from the system. # That's because the kernel doesn't send IPv4 route updates in # that case, so we have to calculate the update here -- or load # all the routes from scratch. The latter may be far too # expensive. # # See http://www.spinics.net/lists/netdev/msg254186.html for # background on this kernel behavior. # Simply ignore secondary addresses, as they don't matter if msg['flags'] & IFA_F_SECONDARY: return # When the primary address is removed, corresponding routes # may be silently discarded. But if promote_secondaries is set # to 1, the next secondary becomes a new primary, and routes # stay. There is no way to know here, whether promote_secondaries # was set at the moment of the address removal, so we have to # act as if it wasn't. # Get the removed address: family = msg['family'] if family == AF_INET: addr = msg.get_attr('IFA_LOCAL') net = struct.unpack('>I', inet_pton(family, addr))[0] & ( 0xFFFFFFFF << (32 - msg['prefixlen']) ) # now iterate all registered routes and mark those with # gateway from that network for record in self.filter({'family': family}): gw = record['route'].get('gateway') if gw: gwnet = struct.unpack('>I', inet_pton(family, gw))[0] & net if gwnet == net: with record['route']._direct_state: record['route']['ipdb_scope'] = 'gc' record['route']._gctime = time.time() elif family == AF_INET6: # Unlike IPv4, IPv6 route updates are sent after addr # delete, so no need to delete them here. 
pass else: # ignore not (IPv4 or IPv6) return def gc_mark_link(self, msg): ### # mark route records for GC after link delete # if msg['family'] != 0 or msg['state'] != 'down': return for record in self.filter({'oif': msg['index']}): with record['route']._direct_state: record['route']['ipdb_scope'] = 'gc' record['route']._gctime = time.time() for record in self.filter({'iif': msg['index']}): with record['route']._direct_state: record['route']['ipdb_scope'] = 'gc' record['route']._gctime = time.time() def gc(self): for table in self.tables.keys(): self.tables[table].gc() def remove(self, route, table=None): if isinstance(route, Route): table = route.get('table', 254) or 254 route = route.get('dst', 'default') else: table = table or 254 self.tables[table][route].remove() def filter(self, target): # FIXME: turn into generator! ret = [] for table in tuple(self.tables.values()): if table is not None: ret.extend(table.filter(target)) return ret def describe(self, spec, table=254): return self.tables[table].describe(spec) def get(self, dst, table=None): table = table or 254 return self.tables[table][dst] def keys(self, table=254, family=AF_UNSPEC): return [ x['dst'] for x in self.tables[table] if (x.get('family') == family) or (family == AF_UNSPEC) ] def has_key(self, key, table=254): return key in self.tables[table] def __contains__(self, key): return key in self.tables[254] def __getitem__(self, key): return self.get(key) def __setitem__(self, key, value): if key != value['dst']: raise ValueError("dst doesn't match key") return self.add(value) def __delitem__(self, key): return self.remove(key) def __repr__(self): return repr(self.tables[254]) spec = [{'name': 'routes', 'class': RoutingTableSet, 'kwarg': {}}] pyroute2-0.7.11/pyroute2/ipdb/rules.py000066400000000000000000000226211455030217500176110ustar00rootroot00000000000000import logging import threading import traceback from collections import namedtuple from socket import AF_INET, AF_INET6 from 
pyroute2.ipdb.exceptions import CommitException from pyroute2.ipdb.transactional import Transactional from pyroute2.netlink import rtnl from pyroute2.netlink.rtnl.fibmsg import FR_ACT_NAMES, fibmsg log = logging.getLogger(__name__) groups = rtnl.RTMGRP_IPV4_RULE | rtnl.RTMGRP_IPV6_RULE RuleKey = namedtuple( 'RuleKey', ( 'action', 'table', 'priority', 'iifname', 'oifname', 'fwmark', 'fwmask', 'family', 'goto', 'tun_id', ), ) class Rule(Transactional): ''' Persistent transactional rule object ''' _fields = [fibmsg.nla2name(i[1]) for i in fibmsg.nla_map] for key, _ in fibmsg.fields: _fields.append(key) _fields.append('removal') _virtual_fields = ['ipdb_scope', 'ipdb_priority'] _fields.extend(_virtual_fields) cleanup = ( 'attrs', 'header', 'event', 'src_len', 'dst_len', 'res1', 'res2', ) @classmethod def make_key(cls, msg): values = [] if isinstance(msg, fibmsg): for field in RuleKey._fields: v = msg.get_attr(msg.name2nla(field)) if v is None: v = msg.get(field, 0) values.append(v) elif isinstance(msg, dict): for field in RuleKey._fields: values.append(msg.get(field, 0)) else: raise TypeError('prime not supported: %s' % type(msg)) return RuleKey(*values) def __init__(self, ipdb, mode=None, parent=None, uid=None): Transactional.__init__(self, ipdb, mode, parent, uid) with self._direct_state: self['ipdb_priority'] = 0 def load_netlink(self, msg): with self._direct_state: if self['ipdb_scope'] == 'locked': # do not touch locked interfaces return self['ipdb_scope'] = 'system' for key, value in msg.items(): self[key] = value # merge NLA for cell in msg['attrs']: # # Parse on demand # norm = fibmsg.nla2name(cell[0]) if norm in self.cleanup: continue self[norm] = cell[1] if msg.get_attr('FRA_DST'): dst = '%s/%s' % (msg.get_attr('FRA_DST'), msg['dst_len']) self['dst'] = dst if msg.get_attr('FRA_SRC'): src = '%s/%s' % (msg.get_attr('FRA_SRC'), msg['src_len']) self['src'] = src # finally, cleanup all not needed for item in self.cleanup: if item in self: del self[item] return 
self def commit( self, tid=None, transaction=None, commit_phase=1, commit_mask=0xFF ): if not commit_phase & commit_mask: return self error = None drop = self.ipdb.txdrop devop = 'set' debug = {'traceback': None, 'next_stage': None} notx = True if tid or transaction: notx = False if tid: transaction = self.global_tx[tid] else: transaction = transaction or self.current_tx # create a new route if self['ipdb_scope'] != 'system': devop = 'add' # work on an existing route snapshot = self.pick() added, removed = transaction // snapshot added.pop('ipdb_scope', None) removed.pop('ipdb_scope', None) try: # rule add/set if any(added.values()) or devop == 'add': old_key = self.make_key(self) new_key = self.make_key(transaction) if new_key != old_key: # check for the key conflict if new_key in self.ipdb.rules: raise CommitException('rule priority conflict') else: self.ipdb.rules[new_key] = self self.nl.rule('del', **old_key._asdict()) self.nl.rule('add', **transaction) else: if devop != 'add': with self._direct_state: self['ipdb_scope'] = 'locked' wd = self.ipdb.watchdog( 'RTM_DELRULE', **old_key._asdict() ) self.nl.rule('del', **old_key._asdict()) wd.wait() with self._direct_state: self['ipdb_scope'] = 'reload' self.nl.rule('add', **transaction) transaction.wait_all_targets() # rule removal if (transaction['ipdb_scope'] in ('shadow', 'remove')) or ( (transaction['ipdb_scope'] == 'create') and commit_phase == 2 ): if transaction['ipdb_scope'] == 'shadow': with self._direct_state: self['ipdb_scope'] = 'locked' # create watchdog key = self.make_key(snapshot) wd = self.ipdb.watchdog('RTM_DELRULE', **key._asdict()) self.nl.rule('del', **key._asdict()) wd.wait() if transaction['ipdb_scope'] == 'shadow': with self._direct_state: self['ipdb_scope'] = 'shadow' # everything ok drop = True except Exception as e: error = e # prepare postmortem debug['traceback'] = traceback.format_exc() debug['error_stack'] = [] debug['next_stage'] = None if commit_phase == 1: try: self.commit( 
transaction=snapshot, commit_phase=2, commit_mask=commit_mask, ) except Exception as i_e: debug['next_stage'] = i_e error = RuntimeError() if drop and notx: self.drop(transaction.uid) if error is not None: error.debug = debug raise error return self def remove(self): self['ipdb_scope'] = 'remove' return self def shadow(self): self['ipdb_scope'] = 'shadow' return self class RulesDict(dict): def __init__(self, ipdb): self.ipdb = ipdb self.lock = threading.Lock() self._event_map = { 'RTM_NEWRULE': self.load_netlink, 'RTM_DELRULE': self.load_netlink, } def _register(self): for msg in self.ipdb.nl.get_rules(family=AF_INET): self.load_netlink(msg) for msg in self.ipdb.nl.get_rules(family=AF_INET6): self.load_netlink(msg) def __getitem__(self, key): with self.lock: if isinstance(key, RuleKey): return super(RulesDict, self).__getitem__(key) elif isinstance(key, tuple): return super(RulesDict, self).__getitem__(RuleKey(*key)) elif isinstance(key, int): for k in self.keys(): if key == k[2]: return super(RulesDict, self).__getitem__(k) elif isinstance(key, dict): for v in self.values(): for k in key: if key[k] != v.get(k, None): break else: return v def add(self, spec=None, **kwarg): ''' Create a rule from a dictionary ''' spec = dict(spec or kwarg) # action and priority are parts of the key, so # they must be specified if 'priority' not in spec: spec['priority'] = 32000 if 'table' in spec: spec['action'] = FR_ACT_NAMES['FR_ACT_TO_TBL'] elif 'goto' in spec: spec['action'] = FR_ACT_NAMES['FR_ACT_GOTO'] if 'family' not in spec: spec['family'] = AF_INET rule = Rule(self.ipdb) rule.update(spec) # setup the scope with rule._direct_state: rule['ipdb_scope'] = 'create' # rule.begin() for key, value in spec.items(): rule[key] = value self[rule.make_key(spec)] = rule return rule def load_netlink(self, msg): if not isinstance(msg, fibmsg): return key = Rule.make_key(msg) # RTM_DELRULE if msg['event'] == 'RTM_DELRULE': try: # locate the record record = self[key] # delete the record if 
record['ipdb_scope'] not in ('locked', 'shadow'): del self[key] with record._direct_state: record['ipdb_scope'] = 'detached' except Exception as e: # just ignore this failure for now log.debug("delrule failed for %s", e) return # RTM_NEWRULE if key not in self: self[key] = Rule(self.ipdb) self[key].load_netlink(msg) return self[key] spec = [{'name': 'rules', 'class': RulesDict, 'kwarg': {}}] pyroute2-0.7.11/pyroute2/ipdb/transactional.py000066400000000000000000000402031455030217500213150ustar00rootroot00000000000000''' ''' import logging import threading from pyroute2.common import Dotkeys, uuid32 from pyroute2.ipdb.exceptions import CommitException from pyroute2.ipdb.linkedset import LinkedSet # How long should we wait on EACH commit() checkpoint: for ipaddr, # ports etc. That's not total commit() timeout. SYNC_TIMEOUT = 5 log = logging.getLogger(__name__) class State(object): def __init__(self, lock=None): self.lock = lock or threading.Lock() self.flag = 0 def acquire(self): self.lock.acquire() self.flag += 1 def release(self): if self.flag < 1: raise RuntimeError('release unlocked state') self.flag -= 1 self.lock.release() def is_set(self): return self.flag def __enter__(self): self.acquire() return self def __exit__(self, exc_type, exc_value, traceback): self.release() def update(f): def decorated(self, *argv, **kwarg): if self._mode == 'snapshot': # short-circuit with self._write_lock: return f(self, True, *argv, **kwarg) elif self._mode == 'readonly': raise RuntimeError('can not change readonly object') with self._write_lock: direct = self._direct_state.is_set() if not direct: # 1. 'implicit': begin transaction, if there is none if self._mode == 'implicit': if not self.current_tx: self.begin() # 2. 
require open transaction for 'explicit' type elif self._mode == 'explicit': if not self.current_tx: raise TypeError('start a transaction first') # do not support other modes else: raise TypeError('transaction mode not supported') # now that the transaction _is_ open return f(self, direct, *argv, **kwarg) decorated.__doc__ = f.__doc__ return decorated def with_transaction(f): def decorated(self, direct, *argv, **kwarg): if direct: f(self, *argv, **kwarg) else: transaction = self.current_tx f(transaction, *argv, **kwarg) return self return update(decorated) class Transactional(Dotkeys): ''' Utility class that implements common transactional logic. ''' _fields = [] _virtual_fields = [] _fields_cmp = {} _linked_sets = [] _nested = [] def __init__(self, ipdb=None, mode=None, parent=None, uid=None): # if ipdb is not None: self.nl = ipdb.nl self.ipdb = ipdb else: self.nl = None self.ipdb = None # self._parent = None if parent is not None: self._mode = mode or parent._mode self._parent = parent elif ipdb is not None: self._mode = mode or ipdb.mode else: self._mode = mode or 'implicit' # self.nlmsg = None self.uid = uid or uuid32() self.last_error = None self._commit_hooks = [] self._sids = [] self._ts = threading.local() self._snapshots = {} self.global_tx = {} self._targets = {} self._local_targets = {} self._write_lock = threading.RLock() self._direct_state = State(self._write_lock) self._linked_sets = self._linked_sets or set() # for i in self._fields: Dotkeys.__setitem__(self, i, None) @property def ro(self): return self.pick(detached=False, readonly=True) def register_commit_hook(self, hook): ''' ''' self._commit_hooks.append(hook) def unregister_commit_hook(self, hook): ''' ''' with self._write_lock: for cb in tuple(self._commit_hooks): if hook == cb: self._commit_hooks.pop(self._commit_hooks.index(cb)) ## # Object serialization: dump, pick def dump(self, not_none=True): ''' ''' with self._write_lock: res = {} for key in self: if self[key] is not None and key[0] != 
'_': if isinstance(self[key], Transactional): res[key] = self[key].dump() elif isinstance(self[key], LinkedSet): res[key] = tuple(self[key]) else: res[key] = self[key] return res def pick(self, detached=True, uid=None, parent=None, readonly=False): ''' Get a snapshot of the object. Can be of two types: * detached=True -- (default) "true" snapshot * detached=False -- keep ip addr set updated from OS Please note, that "updated" doesn't mean "in sync". The reason behind this logic is that snapshots can be used as transactions. ''' with self._write_lock: res = self.__class__( ipdb=self.ipdb, mode='snapshot', parent=parent, uid=uid ) for key, value in self.items(): if self[key] is not None: if key in self._fields: res[key] = self[key] for key in self._linked_sets: res[key] = type(self[key])(self[key]) if not detached: self[key].connect(res[key]) if readonly: res._mode = 'readonly' return res ## # Context management: enter, exit def __enter__(self): if self._mode == 'readonly': return self elif self._mode not in ('implicit', 'explicit'): raise TypeError('context managers require a transactional mode') if not self.current_tx: self.begin() return self def __exit__(self, exc_type, exc_value, traceback): # apply transaction only if there was no error if self._mode == 'readonly': return elif exc_type is None: try: self.commit() except Exception as e: self.last_error = e raise ## # Implicit object transfomations def __repr__(self): res = {} for i in tuple(self): if self[i] is not None: res[i] = self[i] return res.__repr__() ## # Object ops: +, -, /, ... 
def __sub__(self, vs): # create result res = {} with self._direct_state: # simple keys for key in self: if key in self._fields: if (key not in vs) or (self[key] != vs[key]): res[key] = self[key] for key in self._linked_sets: diff = type(self[key])(self[key] - vs[key]) if diff: res[key] = diff else: res[key] = set() for key in self._nested: res[key] = self[key] - vs[key] return res def __floordiv__(self, vs): left = {} right = {} with self._direct_state: with vs._direct_state: for key in set(tuple(self.keys()) + tuple(vs.keys())): if self.get(key, None) != vs.get(key, None): left[key] = self.get(key) right[key] = vs.get(key) continue if key not in self: right[key] = vs[key] elif key not in vs: left[key] = self[key] for key in self._linked_sets: ldiff = type(self[key])(self[key] - vs[key]) rdiff = type(vs[key])(vs[key] - self[key]) if ldiff: left[key] = ldiff else: left[key] = set() if rdiff: right[key] = rdiff else: right[key] = set() for key in self._nested: left[key], right[key] = self[key] // vs[key] return left, right ## # Methods to be overloaded def detach(self): pass def load(self, data): pass def commit(self, *args, **kwarg): pass def last_snapshot_id(self): return self._sids[-1] def invalidate(self): # on failure, invalidate the interface and detach it # from the parent # 0. obtain lock on IPDB, to avoid deadlocks # ... all the DB updates will wait with self.ipdb.exclusive: # 1. drop the IPRoute() link self.nl = None # 2. clean up ipdb self.detach() # 3. invalidate the interface with self._direct_state: for i in tuple(self.keys()): del self[i] self['ipdb_scope'] = 'invalid' # 4. 
the rest self._mode = 'invalid' ## # Snapshot methods def revert(self, sid): with self._write_lock: assert sid in self._snapshots self.local_tx[sid] = self._snapshots[sid] self.global_tx[sid] = self._snapshots[sid] self.current_tx = self._snapshots[sid] self._sids.remove(sid) del self._snapshots[sid] return self def snapshot(self, sid=None): ''' Create new snapshot ''' if self._parent: raise RuntimeError("Can't init snapshot from a nested object") if (self.ipdb is not None) and self.ipdb._stop: raise RuntimeError("Can't create snapshots on released IPDB") t = self.pick(detached=True, uid=sid) self._snapshots[t.uid] = t self._sids.append(t.uid) for key, value in t.items(): if isinstance(value, Transactional): value.snapshot(sid=t.uid) return t.uid def last_snapshot(self): if not self._sids: raise TypeError('create a snapshot first') return self._snapshots[self._sids[-1]] ## # Current tx def _set_current_tx(self, tx): with self._write_lock: self._ts.current = tx def _get_current_tx(self): ''' The current active transaction (thread-local) ''' with self._write_lock: if not hasattr(self._ts, 'current'): self._ts.current = None return self._ts.current current_tx = property(_get_current_tx, _set_current_tx) ## # Local tx registry def _get_local_tx(self): with self._write_lock: if not hasattr(self._ts, 'tx'): self._ts.tx = {} return self._ts.tx local_tx = property(_get_local_tx) ## # Transaction ops: begin, review, drop def begin(self): ''' Start new transaction ''' if self._parent is not None: self._parent.begin() else: return self._begin() def _begin(self, tid=None): if (self.ipdb is not None) and self.ipdb._stop: raise RuntimeError("Can't start transaction on released IPDB") t = self.pick(detached=False, uid=tid) self.local_tx[t.uid] = t self.global_tx[t.uid] = t if self.current_tx is None: self.current_tx = t for key, value in t.items(): if isinstance(value, Transactional): # start transaction on a nested object value._begin(tid=t.uid) # link transaction to own one 
t[key] = value.global_tx[t.uid] return t.uid def review(self, tid=None): ''' Review the changes made in the transaction `tid` or in the current active transaction (thread-local) ''' if self.current_tx is None: raise TypeError('start a transaction first') tid = tid or self.current_tx.uid if self.get('ipdb_scope') == 'create': if self.current_tx is not None: prime = self.current_tx else: log.warning('the "create" scope without transaction') prime = self return dict( [(x[0], x[1]) for x in prime.items() if x[1] is not None] ) with self._write_lock: added = self.global_tx[tid] - self removed = self - self.global_tx[tid] for key in self._linked_sets: added['-%s' % (key)] = removed[key] added['+%s' % (key)] = added[key] del added[key] return added def drop(self, tid=None): ''' Drop a transaction. If tid is not specified, drop the current one. ''' with self._write_lock: if tid is None: tx = self.current_tx if tx is None: raise TypeError("no transaction") else: tx = self.global_tx[tid] if self.current_tx == tx: self.current_tx = None # detach linked sets for key in self._linked_sets: if tx[key] in self[key].links: self[key].disconnect(tx[key]) for key, value in self.items(): if isinstance(value, Transactional): try: value.drop(tx.uid) except KeyError: pass # finally -- delete the transaction del self.local_tx[tx.uid] del self.global_tx[tx.uid] ## # Property ops: set/get/delete @update def __setitem__(self, direct, key, value): if not direct: if self.get(key) == value: return # automatically set target on the active transaction, # which must be started prior to that call transaction = self.current_tx transaction[key] = value if value is not None: transaction._targets[key] = threading.Event() else: # set the item Dotkeys.__setitem__(self, key, value) # update on local targets with self._write_lock: if key in self._local_targets: func = self._fields_cmp.get(key, lambda x, y: x == y) if func(value, self._local_targets[key].value): self._local_targets[key].set() # cascade 
update on nested targets for tn in tuple(self.global_tx.values()): if (key in tn._targets) and (key in tn): if self._fields_cmp.get(key, lambda x, y: x == y)( value, tn[key] ): tn._targets[key].set() @update def __delitem__(self, direct, key): # firstly set targets self[key] = None # then continue with delete if not direct: transaction = self.current_tx if key in transaction: del transaction[key] else: Dotkeys.__delitem__(self, key) def option(self, key, value): self[key] = value return self def unset(self, key): del self[key] return self def wait_all_targets(self): for key, target in self._targets.items(): if key not in self._virtual_fields: target.wait(SYNC_TIMEOUT) if not target.is_set(): raise CommitException('target %s is not set' % key) def wait_target(self, key, timeout=SYNC_TIMEOUT): self._local_targets[key].wait(SYNC_TIMEOUT) with self._write_lock: return self._local_targets.pop(key).is_set() def set_target(self, key, value): with self._write_lock: self._local_targets[key] = threading.Event() self._local_targets[key].value = value if self.get(key) == value: self._local_targets[key].set() return self def mirror_target(self, key_from, key_to): with self._write_lock: self._local_targets[key_to] = self._local_targets[key_from] return self def set(self, key, value): self[key] = value return self pyroute2-0.7.11/pyroute2/ipdb/utils.py000066400000000000000000000003361455030217500176160ustar00rootroot00000000000000import os import subprocess def test_reachable_icmp(host): with open(os.devnull, 'w') as devnull: return subprocess.check_call( ['ping', '-c', '1', host], stdout=devnull, stderr=devnull ) pyroute2-0.7.11/pyroute2/iproute/000077500000000000000000000000001455030217500166535ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/iproute/__init__.py000066400000000000000000000114701455030217500207670ustar00rootroot00000000000000# -*- coding: utf-8 -*- ''' Classes ------- The RTNL API is provided by the class `RTNL_API`. 
It is a mixin class that works on top of any RTNL-compatible socket, so several classes with almost the same API are available: * `IPRoute` -- simple RTNL API * `NetNS` -- RTNL API in a network namespace * `IPBatch` -- RTNL packet compiler * `RemoteIPRoute` -- run RTNL remotely (no deployment required) Responses as lists ------------------ The netlink socket implementation in the pyroute2 is agnostic to particular netlink protocols, and always returns a list of messages as the response to a request sent to the kernel:: with IPRoute() as ipr: # this request returns one match eth0 = ipr.link_lookup(ifname='eth0') len(eth0) # -> 1, if exists, else 0 # but that one returns a set of up = ipr.link_lookup(operstate='UP') len(up) # -> k, where 0 <= k <= [interface count] Thus, always expect a list in the response, running any `IPRoute()` netlink request. NLMSG_ERROR responses ~~~~~~~~~~~~~~~~~~~~~ Some kernel subsystems return `NLMSG_ERROR` in response to any request. It is OK as long as `nlmsg["header"]["error"] is None`. Otherwise an exception will be raised by the parser. So if instead of an exception you get a `NLMSG_ERROR` message, it means `error == 0`, the same as `$? == 0` in bash. How to work with messages ~~~~~~~~~~~~~~~~~~~~~~~~~ Every netlink message contains header, fields and NLAs (netlink attributes). Every NLA is a netlink message... (see "recursion"). And the library provides parsed messages according to this scheme. Every RTNL message contains: * `nlmsg['header']` -- parsed header * `nlmsg['attrs']` -- NLA chain (parsed on demand) * 0 .. k data fields, e.g. `nlmsg['flags']` etc. * `nlmsg.header` -- the header fields spec * `nlmsg.fields` -- the data fields spec * `nlmsg.nla_map` -- NLA spec An important parser feature is that NLAs are parsed on demand, when someone tries to access them. Otherwise the parser doesn't waste CPU cycles. The NLA chain is a list-like structure, not a dictionary. 
The netlink standard doesn't require NLAs to be unique within one message:: {'attrs': [('IFLA_IFNAME', 'lo'), # [1] ('IFLA_TXQLEN', 1), ('IFLA_OPERSTATE', 'UNKNOWN'), ('IFLA_LINKMODE', 0), ('IFLA_MTU', 65536), ('IFLA_GROUP', 0), ('IFLA_PROMISCUITY', 0), ('IFLA_NUM_TX_QUEUES', 1), ('IFLA_NUM_RX_QUEUES', 1), ('IFLA_CARRIER', 1), ...], 'change': 0, 'event': 'RTM_NEWLINK', # [2] 'family': 0, 'flags': 65609, 'header': {'error': None, # [3] 'flags': 2, 'length': 1180, 'pid': 28233, 'sequence_number': 257, # [4] 'type': 16}, # [5] 'ifi_type': 772, 'index': 1} # [1] every NLA is parsed upon access # [2] this field is injected by the RTNL parser # [3] if not None, an exception will be raised # [4] more details in the netlink description # [5] 16 == RTM_NEWLINK To access fields:: msg['index'] == 1 To access one NLA:: msg.get_attr('IFLA_CARRIER') == 1 When an NLA with the specified name is not present in the chain, `get_attr()` returns `None`. To get the list of all NLAs of that name, use `get_attrs()`. A real example with NLA hierarchy, take notice of `get_attr()` and `get_attrs()` usage:: # for macvlan interfaces there may be several # IFLA_MACVLAN_MACADDR NLA provided, so use # get_attrs() to get all the list, not only # the first one (msg .get_attr('IFLA_LINKINFO') # one NLA .get_attr('IFLA_INFO_DATA') # one NLA .get_attrs('IFLA_MACVLAN_MACADDR')) # a list of The protocol itself has no limit for number of NLAs of the same type in one message, that's why we can not make a dictionary from them -- unlike PF_ROUTE messages. 
''' import sys from pyroute2 import config from pyroute2.iproute.linux import RTNL_API, IPBatch # compatibility fix -- LNST: from pyroute2.netlink.rtnl import ( RTM_DELADDR, RTM_DELLINK, RTM_GETADDR, RTM_GETLINK, RTM_NEWADDR, RTM_NEWLINK, ) if sys.platform.startswith('emscripten'): from pyroute2.iproute.ipmock import ChaoticIPRoute, IPRoute, RawIPRoute elif sys.platform.startswith('win'): from pyroute2.iproute.windows import ChaoticIPRoute, IPRoute, RawIPRoute elif config.uname[0][-3:] == 'BSD': from pyroute2.iproute.bsd import ChaoticIPRoute, IPRoute, RawIPRoute else: from pyroute2.iproute.linux import ChaoticIPRoute, IPRoute, RawIPRoute classes = [RTNL_API, IPBatch, IPRoute, RawIPRoute, ChaoticIPRoute] constants = [ RTM_GETLINK, RTM_NEWLINK, RTM_DELLINK, RTM_GETADDR, RTM_NEWADDR, RTM_DELADDR, ] pyroute2-0.7.11/pyroute2/iproute/bsd.py000066400000000000000000000244441455030217500200050ustar00rootroot00000000000000''' The library provides very basic RTNL API for BSD systems via protocol emulation. Only getters are supported yet, no setters. BSD employs PF_ROUTE sockets to send notifications about network object changes, but the protocol doesn not allow changing links/addresses/etc like Netlink. To change network setting one have to rely on system calls or external tools. Thus IPRoute on BSD systems is not as effective as on Linux, where all the changes are done via Netlink. The monitoring started with `bind()` is implemented as an implicit thread, started by the `bind()` call. This is done to have only one notification FD, used both for normal calls and notifications. This allows to use IPRoute objects in poll/select calls. On Linux systems RTNL API is provided by the netlink protocol, so no implicit threads are started by default to monitor the system updates. `IPRoute.bind(...)` may start the async cache thread, but only when asked explicitly:: # # Normal monitoring. Always starts monitoring thread on # FreeBSD / OpenBSD, no threads on Linux. 
# with IPRoute() as ipr: ipr.bind() ... # # Monitoring with async cache. Always starts cache thread # on Linux, ignored on FreeBSD / OpenBSD. # with IPRoute() as ipr: ipr.bind(async_cache=True) ... On all the supported platforms, be it Linux or BSD, the `IPRoute.recv(...)` method returns valid netlink RTNL raw binary payload and `IPRoute.get(...)` returns parsed RTNL messages. ''' import errno import os import select import struct import threading from pyroute2 import config from pyroute2.bsd.pf_route import IFF_VALUES from pyroute2.bsd.rtmsocket import RTMSocket from pyroute2.bsd.util import ARP, Ifconfig, Route from pyroute2.common import AddrPool, Namespace from pyroute2.netlink import NLM_F_DUMP, NLM_F_MULTI, NLM_F_REQUEST, NLMSG_DONE from pyroute2.netlink.proxy import NetlinkProxy from pyroute2.netlink.rtnl import ( RTM_GETADDR, RTM_GETLINK, RTM_GETNEIGH, RTM_GETROUTE, RTM_NEWADDR, RTM_NEWLINK, RTM_NEWNEIGH, RTM_NEWROUTE, ) from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.netlink.rtnl.ifinfmsg import IFF_NAMES, ifinfmsg from pyroute2.netlink.rtnl.marshal import MarshalRtnl from pyroute2.netlink.rtnl.ndmsg import ndmsg from pyroute2.netlink.rtnl.rtmsg import rtmsg try: import queue except ImportError: import Queue as queue class IPRoute(object): def __init__(self, *argv, **kwarg): if 'ssh' in kwarg: self._ssh = ['ssh', kwarg.pop('ssh')] else: self._ssh = [] async_qsize = kwarg.get('async_qsize') self._ifc = Ifconfig(cmd=self._ssh + ['ifconfig', '-a']) self._arp = ARP(cmd=self._ssh + ['arp', '-an']) self._route = Route(cmd=self._ssh + ['netstat', '-rn']) self.marshal = MarshalRtnl() self.target = kwarg.get('target') or 'localhost' send_ns = Namespace( self, {'addr_pool': AddrPool(0x10000, 0x1FFFF), 'monitor': False} ) self._sproxy = NetlinkProxy(policy='return', nl=send_ns) self._mon_th = None self._rtm = None self._brd_socket = None self._pfdr, self._pfdw = os.pipe() # notify external poll/select self._ctlr, self._ctlw = os.pipe() # notify 
monitoring thread self._outq = queue.Queue(maxsize=async_qsize or config.async_qsize) self._system_lock = threading.Lock() self.closed = threading.Event() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def clone(self): return self def close(self, code=errno.ECONNRESET): with self._system_lock: if self.closed.is_set(): return if self._mon_th is not None: os.write(self._ctlw, b'\0') self._mon_th.join() self._rtm.close() if code > 0: self._outq.put(struct.pack('IHHQIQQ', 28, 2, 0, 0, code, 0, 0)) os.write(self._pfdw, b'\0') for ep in (self._pfdr, self._pfdw, self._ctlr, self._ctlw): try: os.close(ep) except OSError: pass self.closed.set() def bind(self, *argv, **kwarg): with self._system_lock: if self._mon_th is not None: return if self._ssh: return self._mon_th = threading.Thread( target=self._monitor_thread, name='PF_ROUTE monitoring' ) self._mon_th.setDaemon(True) self._mon_th.start() def _monitor_thread(self): # Monitoring thread to convert arriving PF_ROUTE data into # the netlink format, enqueue it and notify poll/select. self._rtm = RTMSocket(output='netlink') inputs = [self._rtm.fileno(), self._ctlr] outputs = [] while True: try: events, _, _ = select.select(inputs, outputs, inputs) except: continue for fd in events: if fd == self._ctlr: # Main thread <-> monitor thread protocol is # pretty simple: discard the data and terminate # the monitor thread. os.read(self._ctlr, 1) return else: # Read the data from the socket and queue it msg = self._rtm.get() if msg is not None: msg.encode() self._outq.put(msg.data) # Notify external poll/select os.write(self._pfdw, b'\0') def fileno(self): # Every time when some new data arrives, one should write # into self._pfdw one byte to kick possible poll/select. # # Resp. recv() discards one byte from self._pfdr each call. 
return self._pfdr def get(self): data = self.recv() return self.marshal.parse(data) def recv(self, bufsize=None): os.read(self._pfdr, 1) return self._outq.get() def getsockopt(self, *argv, **kwarg): return 1024 * 1024 def sendto_gate(self, msg, addr): # # handle incoming netlink requests # # sendto_gate() receives single RTNL messages as objects # cmd = msg['header']['type'] flags = msg['header']['flags'] seq = msg['header']['sequence_number'] # work only on dump requests for now if flags != NLM_F_REQUEST | NLM_F_DUMP: return # if cmd == RTM_GETLINK: rtype = RTM_NEWLINK ret = self.get_links() elif cmd == RTM_GETADDR: rtype = RTM_NEWADDR ret = self.get_addr() elif cmd == RTM_GETROUTE: rtype = RTM_NEWROUTE ret = self.get_routes() elif cmd == RTM_GETNEIGH: rtype = RTM_NEWNEIGH ret = self.get_neighbours() # # set response type and finalize the message for r in ret: r['header']['type'] = rtype r['header']['flags'] = NLM_F_MULTI r['header']['sequence_number'] = seq # r = type(msg)() r['header']['type'] = NLMSG_DONE r['header']['sequence_number'] = seq ret.append(r) data = b'' for r in ret: r.encode() data += r.data self._outq.put(data) os.write(self._pfdw, b'\0') # 8<--------------------------------------------------------------- # def dump(self, groups=None): ''' Iterate all the objects -- links, routes, addresses etc. 
''' for method in ( self.get_links, self.get_addr, self.get_neighbours, self.get_routes, ): for msg in method(): yield msg # 8<--------------------------------------------------------------- def get_links(self, *argv, **kwarg): ret = [] data = self._ifc.run() parsed = self._ifc.parse(data) for name, spec in parsed['links'].items(): msg = ifinfmsg().load(spec) msg['header']['type'] = RTM_NEWLINK msg['header']['target'] = self.target del msg['value'] flags = msg['flags'] new_flags = 0 for value, name in IFF_VALUES.items(): if value & flags and name in IFF_NAMES: new_flags |= IFF_NAMES[name] msg['flags'] = new_flags ret.append(msg) return ret def get_addr(self, *argv, **kwarg): ret = [] data = self._ifc.run() parsed = self._ifc.parse(data) for name, specs in parsed['addrs'].items(): for spec in specs: msg = ifaddrmsg().load(spec) msg['header']['type'] = RTM_NEWADDR msg['header']['target'] = self.target del msg['value'] ret.append(msg) return ret def get_neighbours(self, *argv, **kwarg): ifc = self._ifc.parse(self._ifc.run()) arp = self._arp.parse(self._arp.run()) ret = [] for spec in arp: if spec['ifname'] not in ifc['links']: continue spec['ifindex'] = ifc['links'][spec['ifname']]['index'] msg = ndmsg().load(spec) msg['header']['type'] = RTM_NEWNEIGH msg['header']['target'] = self.target del msg['value'] ret.append(msg) return ret def get_routes(self, *argv, **kwarg): ifc = self._ifc.parse(self._ifc.run()) rta = self._route.parse(self._route.run()) ret = [] for spec in rta: if spec['ifname'] not in ifc['links']: continue idx = ifc['links'][spec['ifname']]['index'] spec['attrs'].append(['RTA_OIF', idx]) msg = rtmsg().load(spec) msg['header']['type'] = RTM_NEWROUTE msg['header']['target'] = self.target del msg['value'] ret.append(msg) return ret class RawIPRoute(IPRoute): pass class ChaoticIPRoute: def __init__(self, *argv, **kwarg): raise NotImplementedError() 
pyroute2-0.7.11/pyroute2/iproute/ipmock.py000066400000000000000000000561161455030217500205200ustar00rootroot00000000000000import copy import errno import queue import socket import struct from itertools import count from pyroute2.lab import LAB_API from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.nlsocket import NetlinkSocketBase, Stats from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.netlink.rtnl.marshal import MarshalRtnl from pyroute2.netlink.rtnl.rtmsg import rtmsg from pyroute2.requests.address import AddressFieldFilter, AddressIPRouteFilter from pyroute2.requests.link import LinkFieldFilter from pyroute2.requests.main import RequestProcessor from pyroute2.requests.route import RouteFieldFilter interface_counter = count(3) class MockLink: def __init__( self, index, ifname='', address='00:00:00:00:00:00', broadcast='ff:ff:ff:ff:ff:ff', perm_address=None, flags=1, rx_bytes=0, tx_bytes=0, rx_packets=0, tx_packets=0, mtu=0, qdisc='noqueue', kind=None, link=None, vlan_id=None, master=0, br_max_age=0, br_forward_delay=0, alt_ifname_list=None, ): self.index = index self.ifname = ifname self.flags = flags self.address = address self.broadcast = broadcast self.perm_address = perm_address self.rx_bytes = rx_bytes self.tx_bytes = tx_bytes self.rx_packets = rx_packets self.tx_packets = tx_packets self.mtu = mtu self.qdisc = qdisc self.kind = kind self.link = link self.vlan_id = vlan_id self.master = master self.br_max_age = br_max_age self.br_forward_delay = br_forward_delay self.alt_ifname_list = alt_ifname_list or [] def export(self): ret = { 'attrs': [ ['IFLA_IFNAME', self.ifname], ['IFLA_TXQLEN', 1000], ['IFLA_OPERSTATE', 'UNKNOWN'], ['IFLA_LINKMODE', 0], ['IFLA_MTU', self.mtu], ['IFLA_GROUP', 0], ['IFLA_PROMISCUITY', 0], ['IFLA_NUM_TX_QUEUES', 1], ['IFLA_GSO_MAX_SEGS', 65535], ['IFLA_GSO_MAX_SIZE', 65536], ['IFLA_GRO_MAX_SIZE', 65536], ['IFLA_NUM_RX_QUEUES', 1], 
['IFLA_CARRIER', 1], ['IFLA_QDISC', self.qdisc], ['IFLA_CARRIER_CHANGES', 0], ['IFLA_CARRIER_UP_COUNT', 0], ['IFLA_CARRIER_DOWN_COUNT', 0], ['IFLA_PROTO_DOWN', 0], [ 'IFLA_MAP', { 'base_addr': 0, 'dma': 0, 'irq': 0, 'mem_end': 0, 'mem_start': 0, 'port': 0, }, ], ['IFLA_ADDRESS', self.address], ['IFLA_BROADCAST', self.broadcast], [ 'IFLA_STATS64', { 'collisions': 0, 'multicast': 0, 'rx_bytes': self.rx_bytes, 'rx_compressed': 0, 'rx_crc_errors': 0, 'rx_dropped': 0, 'rx_errors': 0, 'rx_fifo_errors': 0, 'rx_frame_errors': 0, 'rx_length_errors': 0, 'rx_missed_errors': 0, 'rx_over_errors': 0, 'rx_packets': self.rx_packets, 'tx_aborted_errors': 0, 'tx_bytes': self.tx_bytes, 'tx_carrier_errors': 0, 'tx_compressed': 0, 'tx_dropped': 0, 'tx_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_packets': self.tx_packets, 'tx_window_errors': 0, }, ], [ 'IFLA_STATS', { 'collisions': 0, 'multicast': 0, 'rx_bytes': self.rx_bytes, 'rx_compressed': 0, 'rx_crc_errors': 0, 'rx_dropped': 0, 'rx_errors': 0, 'rx_fifo_errors': 0, 'rx_frame_errors': 0, 'rx_length_errors': 0, 'rx_missed_errors': 0, 'rx_over_errors': 0, 'rx_packets': self.rx_packets, 'tx_aborted_errors': 0, 'tx_bytes': self.tx_bytes, 'tx_carrier_errors': 0, 'tx_compressed': 0, 'tx_dropped': 0, 'tx_errors': 0, 'tx_fifo_errors': 0, 'tx_heartbeat_errors': 0, 'tx_packets': self.tx_packets, 'tx_window_errors': 0, }, ], ['IFLA_XDP', {'attrs': [['IFLA_XDP_ATTACHED', None]]}], ( 'IFLA_PERM_ADDRESS', self.perm_address if self.perm_address else self.address, ), [ 'IFLA_AF_SPEC', { 'attrs': [ [ 'AF_INET', { 'accept_local': 0, 'accept_redirects': 1, 'accept_source_route': 0, 'arp_accept': 0, 'arp_announce': 0, 'arp_ignore': 0, 'arp_notify': 0, 'arpfilter': 0, 'bootp_relay': 0, 'dummy': 65672, 'force_igmp_version': 0, 'forwarding': 1, 'log_martians': 0, 'mc_forwarding': 0, 'medium_id': 0, 'nopolicy': 1, 'noxfrm': 1, 'promote_secondaries': 1, 'proxy_arp': 0, 'proxy_arp_pvlan': 0, 'route_localnet': 0, 'rp_filter': 2, 
'secure_redirects': 1, 'send_redirects': 1, 'shared_media': 1, 'src_vmark': 0, 'tag': 0, }, ] ] }, ], ], 'change': 0, 'event': 'RTM_NEWLINK', 'family': 0, 'flags': self.flags, 'header': { 'error': None, 'flags': 2, 'length': 1364, 'pid': 303471, 'sequence_number': 260, 'stats': Stats(qsize=0, delta=0, delay=0), 'target': 'localhost', 'type': 16, }, 'ifi_type': 772, 'index': self.index, 'state': 'up' if self.flags & 1 else 'down', } linkinfo = None infodata = {'attrs': []} if self.kind is not None: linkinfo = {'attrs': [('IFLA_INFO_KIND', self.kind)]} if self.kind not in (None, 'dummy'): linkinfo['attrs'].append(('IFLA_INFO_DATA', infodata)) if self.kind == 'vlan': infodata['attrs'].append(('IFLA_VLAN_ID', self.vlan_id)) ret['attrs'].append(('IFLA_LINK', self.link)) if self.kind == 'bridge': infodata['attrs'].extend( ( ('IFLA_BR_MAX_AGE', self.br_max_age), ('IFLA_BR_FORWARD_DELAY', self.br_forward_delay), ) ) if linkinfo is not None: ret['attrs'].append(('IFLA_LINKINFO', linkinfo)) if self.master != 0: ret['attrs'].append(('IFLA_MASTER', self.master)) return ret class MockAddress: def __init__( self, index, address, prefixlen, broadcast=None, label=None, family=2, local=None, **kwarg, ): self.address = address self.local = local self.broadcast = broadcast self.prefixlen = prefixlen self.index = index self.label = label self.family = family def export(self): ret = { 'family': self.family, 'prefixlen': self.prefixlen, 'flags': 0, 'scope': 0, 'index': self.index, 'attrs': [ ('IFA_ADDRESS', self.address), ('IFA_LOCAL', self.local if self.local else self.address), ('IFA_FLAGS', 512), ( 'IFA_CACHEINFO', { 'ifa_preferred': 3476, 'ifa_valid': 3476, 'cstamp': 138655779, 'tstamp': 141288674, }, ), ], 'header': { 'length': 88, 'type': 20, 'flags': 2, 'sequence_number': 256, 'pid': 320994, 'error': None, 'target': 'localhost', 'stats': Stats(qsize=0, delta=0, delay=0), }, 'event': 'RTM_NEWADDR', } if self.label is not None: ret['attrs'].append(('IFA_LABEL', self.label)) if 
self.broadcast is not None: ret['attrs'].append(('IFA_BROADCAST', self.broadcast)) return ret class MockRoute: def __init__( self, dst, oif, gateway=None, prefsrc=None, family=2, dst_len=24, table=254, scope=253, proto=2, route_type=1, **kwarg, ): self.dst = dst self.gateway = gateway self.prefsrc = prefsrc self.oif = oif self.family = family self.dst_len = dst_len self.table = table self.scope = scope self.proto = proto self.route_type = route_type self.priority = kwarg.get('priority', 0) self.tos = kwarg.get('tos', 0) self._type = kwarg.get('type', 2) def export(self): ret = { 'family': self.family, 'dst_len': self.dst_len, 'src_len': 0, 'tos': self.tos, 'table': self.table if self.table <= 255 else 252, 'proto': self.proto, 'scope': self.scope, 'type': self._type, 'flags': 0, 'attrs': [('RTA_TABLE', self.table), ('RTA_OIF', self.oif)], 'header': { 'length': 60, 'type': 24, 'flags': 2, 'sequence_number': 255, 'pid': 325359, 'error': None, 'target': 'localhost', 'stats': Stats(qsize=0, delta=0, delay=0), }, 'event': 'RTM_NEWROUTE', } if self.dst is not None: ret['attrs'].append(('RTA_DST', self.dst)) if self.prefsrc is not None: ret['attrs'].append(('RTA_PREFSRC', self.prefsrc)) if self.gateway is not None: ret['attrs'].append(('RTA_GATEWAY', self.gateway)) if self.priority > 0: ret['attrs'].append(('RTA_PRIORITY', self.priority)) return ret presets = { 'default': { 'links': [ MockLink( index=1, ifname='lo', address='00:00:00:00:00:00', broadcast='00:00:00:00:00:00', rx_bytes=43309665, tx_bytes=43309665, rx_packets=173776, tx_packets=173776, mtu=65536, qdisc='noqueue', ), MockLink( index=2, ifname='eth0', address='52:54:00:72:58:b2', broadcast='ff:ff:ff:ff:ff:ff', rx_bytes=175340, tx_bytes=175340, rx_packets=10251, tx_packets=10251, mtu=1500, qdisc='fq_codel', ), ], 'addr': [ MockAddress( index=1, label='lo', address='127.0.0.1', broadcast='127.255.255.255', prefixlen=8, ), MockAddress( index=2, label='eth0', address='192.168.122.28', broadcast='192.168.122.255', 
prefixlen=24, ), ], 'routes': [ MockRoute( dst=None, gateway='192.168.122.1', oif=2, dst_len=0, table=254, scope=0, ), MockRoute(dst='192.168.122.0', oif=2, dst_len=24, table=254), MockRoute( dst='127.0.0.0', oif=1, dst_len=8, table=255, route_type=2 ), MockRoute( dst='127.0.0.1', oif=1, dst_len=32, table=255, route_type=2 ), MockRoute( dst='127.255.255.255', oif=1, dst_len=32, table=255, route_type=3, ), MockRoute( dst='192.168.122.28', oif=2, dst_len=32, table=255, route_type=2, ), MockRoute( dst='192.168.122.255', oif=2, dst_len=32, table=255, route_type=3, ), ], }, 'netns': { 'links': [ MockLink( index=1, ifname='lo', address='00:00:00:00:00:00', broadcast='00:00:00:00:00:00', rx_bytes=43309665, tx_bytes=43309665, rx_packets=173776, tx_packets=173776, mtu=65536, qdisc='noqueue', ) ], 'addr': [ MockAddress( index=1, label='lo', address='127.0.0.1', broadcast='127.255.255.255', prefixlen=8, ) ], 'routes': [ MockRoute( dst='127.0.0.0', oif=1, dst_len=8, table=255, route_type=2 ), MockRoute( dst='127.0.0.1', oif=1, dst_len=32, table=255, route_type=2 ), MockRoute( dst='127.255.255.255', oif=1, dst_len=32, table=255, route_type=3, ), ], }, } class IPRoute(LAB_API, NetlinkSocketBase): def __init__(self, *argv, **kwarg): super().__init__() self.marshal = MarshalRtnl() self.target = kwarg.get('target') self.preset = copy.deepcopy( presets[kwarg['preset'] if 'preset' in kwarg else 'default'] ) self.buffer_queue = queue.Queue(maxsize=512) self.input_from_buffer_queue = True def bind(self, async_cache=True, clone_socket=True): pass def dump(self, groups=None): for method in (self.get_links, self.get_addr, self.get_routes): for msg in method(): yield msg def _get_dump(self, dump, msg_class): for data in dump: loader = msg_class() loader.load(data.export()) loader.encode() msg = msg_class() msg.data = loader.data msg.decode() if self.target is not None: msg['header']['target'] = self.target yield msg def _match(self, mode, obj, spec): keys = { 'address': ['address', 
'prefixlen', 'index', 'family'], 'link': ['index', 'ifname'], 'route': ['dst', 'dst_len', 'oif', 'priority'], } check = False for key in keys[mode]: if key in spec: check = True if spec[key] != getattr(obj, key): return False if not check: return False return True def addr(self, command, **spec): if command == 'dump': return self.get_addr() request = RequestProcessor(context=spec, prime=spec) request.apply_filter(AddressFieldFilter()) request.apply_filter(AddressIPRouteFilter(command)) request.finalize() address = None for address in self.preset['addr']: if self._match('address', address, request): if command == 'add': raise NetlinkError(errno.EEXIST, 'address exists') break else: if command == 'del': raise NetlinkError(errno.ENOENT, 'address does not exist') address = MockAddress(**request) if command == 'add': for link in self.preset['links']: if link.index == request['index']: break else: raise NetlinkError(errno.ENOENT, 'link not found') address.label = link.ifname self.preset['addr'].append(address) for msg in self._get_dump([address], ifaddrmsg): msg.encode() self.buffer_queue.put(msg.data) elif command == 'del': self.preset['addr'].remove(address) for msg in self._get_dump([address], ifaddrmsg): msg['header']['type'] = 21 msg['event'] = 'RTM_DELADDR' msg.encode() self.buffer_queue.put(msg.data) return self._get_dump([address], ifaddrmsg) def link(self, command, **spec): if command == 'dump': return self.get_links() if 'state' in spec: spec['flags'] = 1 if spec.pop('state') == 'up' else 0 request = RequestProcessor(context=spec, prime=spec) request.apply_filter(LinkFieldFilter()) request.finalize() for interface in self.preset['links']: if self._match('link', interface, request): if command == 'add': raise NetlinkError(errno.EEXIST, 'interface exists') break else: index = next(interface_counter) if 'address' not in request: request['address'] = f'00:11:22:33:44:{index:02}' if 'index' not in request: request['index'] = index if 'tflags' in request: del 
request['tflags'] if 'target' in request: del request['target'] interface = MockLink(**request) if command == 'add': self.preset['links'].append(interface) for msg in self._get_dump([interface], ifinfmsg): msg.encode() self.buffer_queue.put(msg.data) elif command == 'set': for key, value in request.items(): if hasattr(interface, key): setattr(interface, key, value) for msg in self._get_dump([interface], ifinfmsg): msg.encode() self.buffer_queue.put(msg.data) return self._get_dump([interface], ifinfmsg) def route(self, command, **spec): if command == 'dump': return self.get_routes() request = RequestProcessor(context=spec, prime=spec) request.apply_filter(RouteFieldFilter()) request.finalize() for route in self.preset['routes']: if self._match('route', route, request): if command == 'add': raise NetlinkError(errno.EEXIST, 'route exists') break else: if command == 'del': raise NetlinkError(errno.ENOENT, 'route does not exist') if 'tflags' in request: del request['tflags'] if 'target' in request: del request['target'] if 'multipath' in request: del request['multipath'] if 'metrics' in request: del request['metrics'] if 'deps' in request: del request['deps'] if 'oif' not in request: (gateway,) = struct.unpack( '>I', socket.inet_aton(request['gateway']) ) for route in self.preset['routes']: if route.dst is None: continue (dst,) = struct.unpack('>I', socket.inet_aton(route.dst)) if (gateway & (0xFFFFFFFF << (32 - route.dst_len))) == dst: request['oif'] = route.oif break else: raise NetlinkError(errno.ENOENT, 'no route to the gateway') route = MockRoute(**request) if command == 'add': self.preset['routes'].append(route) for msg in self._get_dump([route], rtmsg): msg.encode() self.buffer_queue.put(msg.data) elif command == 'set': for key, value in request.items(): if hasattr(route, key): setattr(route, key, value) for msg in self._get_dump([route], rtmsg): msg.encode() self.buffer_queue.put(msg.data) elif command == 'del': self.preset['routes'].remove(route) for msg in 
self._get_dump([route], rtmsg): msg['header']['type'] = 25 msg['event'] = 'RTM_DELROUTE' msg.encode() self.buffer_queue.put(msg.data) return self._get_dump([route], rtmsg) def get_addr(self): return self._get_dump(self.preset['addr'], ifaddrmsg) def get_links(self): return self._get_dump(self.preset['links'], ifinfmsg) def get_routes(self): return self._get_dump(self.preset['routes'], rtmsg) class ChaoticIPRoute: def __init__(self, *argv, **kwarg): raise NotImplementedError() class RawIPRoute: def __init__(self, *argv, **kwarg): raise NotImplementedError() pyroute2-0.7.11/pyroute2/iproute/linux.py000066400000000000000000002425601455030217500203750ustar00rootroot00000000000000# -*- coding: utf-8 -*- import logging import os import time import warnings from functools import partial from itertools import chain from socket import AF_INET, AF_INET6, AF_UNSPEC from pyroute2 import config from pyroute2.common import AF_MPLS, basestring from pyroute2.config import AF_BRIDGE from pyroute2.lab import LAB_API from pyroute2.netlink import ( NLM_F_ACK, NLM_F_APPEND, NLM_F_ATOMIC, NLM_F_CREATE, NLM_F_DUMP, NLM_F_ECHO, NLM_F_EXCL, NLM_F_REPLACE, NLM_F_REQUEST, NLM_F_ROOT, NLMSG_ERROR, ) from pyroute2.netlink.exceptions import ( NetlinkDumpInterrupted, NetlinkError, SkipInode, ) from pyroute2.netlink.rtnl import ( RTM_DELADDR, RTM_DELLINK, RTM_DELLINKPROP, RTM_DELNEIGH, RTM_DELQDISC, RTM_DELROUTE, RTM_DELRULE, RTM_DELTCLASS, RTM_DELTFILTER, RTM_GETADDR, RTM_GETLINK, RTM_GETNEIGH, RTM_GETNEIGHTBL, RTM_GETNSID, RTM_GETQDISC, RTM_GETROUTE, RTM_GETRULE, RTM_GETSTATS, RTM_GETTCLASS, RTM_GETTFILTER, RTM_NEWADDR, RTM_NEWLINK, RTM_NEWLINKPROP, RTM_NEWNEIGH, RTM_NEWNETNS, RTM_NEWNSID, RTM_NEWQDISC, RTM_NEWROUTE, RTM_NEWRULE, RTM_NEWTCLASS, RTM_NEWTFILTER, RTM_SETLINK, RTMGRP_IPV4_IFADDR, RTMGRP_IPV4_ROUTE, RTMGRP_IPV4_RULE, RTMGRP_IPV6_IFADDR, RTMGRP_IPV6_ROUTE, RTMGRP_IPV6_RULE, RTMGRP_LINK, RTMGRP_MPLS_ROUTE, RTMGRP_NEIGH, TC_H_ROOT, ndmsg, rt_proto, rt_scope, rt_type, ) from 
pyroute2.netlink.rtnl.fibmsg import fibmsg from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.netlink.rtnl.ifstatsmsg import ifstatsmsg from pyroute2.netlink.rtnl.iprsocket import ( ChaoticIPRSocket, IPBatchSocket, IPRSocket, ) from pyroute2.netlink.rtnl.ndtmsg import ndtmsg from pyroute2.netlink.rtnl.nsidmsg import nsidmsg from pyroute2.netlink.rtnl.nsinfmsg import nsinfmsg from pyroute2.netlink.rtnl.riprsocket import RawIPRSocket from pyroute2.netlink.rtnl.rtmsg import rtmsg from pyroute2.netlink.rtnl.tcmsg import plugins as tc_plugins from pyroute2.netlink.rtnl.tcmsg import tcmsg from pyroute2.requests.address import AddressFieldFilter, AddressIPRouteFilter from pyroute2.requests.bridge import ( BridgeFieldFilter, BridgeIPRouteFilter, BridgePortFieldFilter, ) from pyroute2.requests.link import LinkFieldFilter, LinkIPRouteFilter from pyroute2.requests.main import RequestProcessor from pyroute2.requests.neighbour import ( NeighbourFieldFilter, NeighbourIPRouteFilter, ) from pyroute2.requests.route import RouteFieldFilter, RouteIPRouteFilter from pyroute2.requests.rule import RuleFieldFilter, RuleIPRouteFilter from .parsers import default_routes DEFAULT_TABLE = 254 log = logging.getLogger(__name__) def get_dump_filter(kwarg): if 'match' in kwarg: return kwarg.pop('match'), kwarg else: new_kwarg = {} if 'family' in kwarg: new_kwarg['family'] = kwarg.pop('family') return kwarg, new_kwarg def transform_handle(handle): if isinstance(handle, basestring): (major, minor) = [int(x if x else '0', 16) for x in handle.split(':')] handle = (major << 8 * 2) | minor return handle class RTNL_API: ''' `RTNL_API` should not be instantiated by itself. It is intended to be used as a mixin class. 
Following classes use `RTNL_API`: * `IPRoute` -- RTNL API to the current network namespace * `NetNS` -- RTNL API to another network namespace * `IPBatch` -- RTNL compiler * `ShellIPR` -- RTNL via standard I/O, runs IPRoute in a shell It is an old-school API, that provides access to rtnetlink as is. It helps you to retrieve and change almost all the data, available through rtnetlink:: from pyroute2 import IPRoute ipr = IPRoute() # create an interface ipr.link('add', ifname='brx', kind='bridge') # lookup the index dev = ipr.link_lookup(ifname='brx')[0] # bring it down ipr.link('set', index=dev, state='down') # change the interface MAC address and rename it just for fun ipr.link('set', index=dev, address='00:11:22:33:44:55', ifname='br-ctrl') # add primary IP address ipr.addr('add', index=dev, address='10.0.0.1', mask=24, broadcast='10.0.0.255') # add secondary IP address ipr.addr('add', index=dev, address='10.0.0.2', mask=24, broadcast='10.0.0.255') # bring it up ipr.link('set', index=dev, state='up') ''' def __init__(self, *argv, **kwarg): if 'netns_path' in kwarg: self.netns_path = kwarg['netns_path'] else: self.netns_path = config.netns_path super().__init__(*argv, **kwarg) if not self.nlm_generator: def filter_messages(*argv, **kwarg): return tuple(self._genmatch(*argv, **kwarg)) self._genmatch = self.filter_messages self.filter_messages = filter_messages def make_request_type(self, command, command_map): if isinstance(command, basestring): return (lambda x: (x[0], self.make_request_flags(x[1])))( command_map[command] ) elif isinstance(command, int): return command, self.make_request_flags('create') elif isinstance(command, (list, tuple)): return command else: raise TypeError('allowed command types: int, str, list, tuple') def make_request_flags(self, mode): flags = { 'dump': NLM_F_REQUEST | NLM_F_DUMP, 'get': NLM_F_REQUEST | NLM_F_ACK, 'req': NLM_F_REQUEST | NLM_F_ACK, } flags['create'] = flags['req'] | NLM_F_CREATE | NLM_F_EXCL flags['append'] = flags['req'] | 
NLM_F_CREATE | NLM_F_APPEND flags['change'] = flags['req'] | NLM_F_REPLACE flags['replace'] = flags['change'] | NLM_F_CREATE return flags[mode] | ( NLM_F_ECHO if (self.config['nlm_echo'] and mode not in ('get', 'dump')) else 0 ) def filter_messages(self, dump_filter, msgs): ''' Filter messages using `dump_filter`. The filter might be a callable, then it will be called for every message in the list. Or it might be a dict, where keys are used to get values from messages, and dict values are used to match the message. The method might be called directly. It is also used by calls like `ipr.link('dump', ....)`, where keyword arguments work as `dump_filter` for `ipr.filter_messages()`. A callable `dump_filter` must return True or False: .. code-block:: python # get all links with names starting with eth: # ipr.filter_messages( lambda x: x.get_attr('IFLA_IFNAME').startswith('eth'), ipr.link('dump') ) A dict `dump_filter` can have callables as values: .. code-block:: python # get all links with names starting with eth, and # MAC address in a database: # ipr.filter_messages( { 'ifname': lambda x: x.startswith('eth'), 'address': lambda x: x in database, }, ipr.link('dump') ) ... or constants to compare with: .. code-block:: python # get all links in state up: # ipr.filter_message({'state': 'up'}, ipr.link('dump')) ''' # filtered results, the generator version for msg in msgs: if hasattr(dump_filter, '__call__'): if dump_filter(msg): yield msg elif isinstance(dump_filter, dict): matches = [] for key in dump_filter: # get the attribute if isinstance(key, str): nkey = (key,) elif isinstance(key, tuple): nkey = key else: continue value = msg.get_nested(*nkey) if value is not None and callable(dump_filter[key]): matches.append(dump_filter[key](value)) else: matches.append(dump_filter[key] == value) if all(matches): yield msg # 8<--------------------------------------------------------------- # def dump(self, groups=None): ''' Dump network objects. 
On OpenBSD: * get_links() * get_addr() * get_neighbours() * get_routes() On Linux: * get_links() * get_addr() * get_neighbours() * get_vlans() * dump FDB * IPv4 and IPv6 rules ''' ## # Well, it's the Linux API, why OpenBSD / FreeBSD here? # # 'Cause when you run RemoteIPRoute, it uses this class, # and the code may be run on BSD systems as well, though # BSD systems have only subset of the API # if self.uname[0] == 'OpenBSD': groups_map = { 1: [ self.get_links, self.get_addr, self.get_neighbours, self.get_routes, ] } else: groups_map = { RTMGRP_LINK: [ self.get_links, self.get_vlans, partial(self.fdb, 'dump'), ], RTMGRP_IPV4_IFADDR: [partial(self.get_addr, family=AF_INET)], RTMGRP_IPV6_IFADDR: [partial(self.get_addr, family=AF_INET6)], RTMGRP_NEIGH: [self.get_neighbours], RTMGRP_IPV4_ROUTE: [partial(self.get_routes, family=AF_INET)], RTMGRP_IPV6_ROUTE: [partial(self.get_routes, family=AF_INET6)], RTMGRP_MPLS_ROUTE: [partial(self.get_routes, family=AF_MPLS)], RTMGRP_IPV4_RULE: [partial(self.get_rules, family=AF_INET)], RTMGRP_IPV6_RULE: [partial(self.get_rules, family=AF_INET6)], } for group, methods in groups_map.items(): if group & (groups if groups is not None else self.groups): for method in methods: for msg in method(): yield msg def poll(self, method, command, timeout=10, interval=0.2, **spec): ''' Run `method` with a positional argument `command` and keyword arguments `**spec` every `interval` seconds, but not more than `timeout`, until it returns a result which doesn't evaluate to `False`. Example: .. 
code-block:: python # create a bridge interface and wait for it: # spec = { 'ifname': 'br0', 'kind': 'bridge', 'state': 'up', 'br_stp_state': 1, } ipr.link('add', **spec) ret = ipr.poll(ipr.link, 'dump', **spec) assert ret[0].get('ifname') == 'br0' assert ret[0].get('state') == 'up' assert ret[0].get(('linkinfo', 'data', 'br_stp_state')) == 1 ''' ctime = time.time() ret = tuple() while ctime + timeout > time.time(): try: ret = method(command, **spec) if ret: return ret time.sleep(interval) except NetlinkDumpInterrupted: pass raise TimeoutError() # 8<--------------------------------------------------------------- # # Listing methods # def get_qdiscs(self, index=None): ''' Get all queue disciplines for all interfaces or for specified one. ''' msg = tcmsg() msg['family'] = AF_UNSPEC ret = self.nlm_request(msg, RTM_GETQDISC) if index is None: return tuple(ret) else: return [x for x in ret if x['index'] == index] def get_filters(self, index=0, handle=0, parent=0): ''' Get filters for specified interface, handle and parent. ''' msg = tcmsg() msg['family'] = AF_UNSPEC msg['index'] = index msg['handle'] = transform_handle(handle) msg['parent'] = transform_handle(parent) return tuple(self.nlm_request(msg, RTM_GETTFILTER)) def get_classes(self, index=0): ''' Get classes for specified interface. ''' msg = tcmsg() msg['family'] = AF_UNSPEC msg['index'] = index return tuple(self.nlm_request(msg, RTM_GETTCLASS)) def get_vlans(self, **kwarg): ''' Dump available vlan info on bridge ports ''' # IFLA_EXT_MASK, extended info mask # # include/uapi/linux/rtnetlink.h # 1 << 0 => RTEXT_FILTER_VF # 1 << 1 => RTEXT_FILTER_BRVLAN # 1 << 2 => RTEXT_FILTER_BRVLAN_COMPRESSED # 1 << 3 => RTEXT_FILTER_SKIP_STATS # # maybe place it as mapping into ifinfomsg.py? # dump_filter, kwarg = get_dump_filter(kwarg) return self.link( 'dump', family=AF_BRIDGE, ext_mask=2, match=dump_filter ) def get_links(self, *argv, **kwarg): ''' Get network interfaces. By default returns all interfaces. 
        Arguments vector can contain interface indices or a special
        keyword 'all'::

            ip.get_links()
            ip.get_links('all')
            ip.get_links(1, 2, 3)

            interfaces = [1, 2, 3]
            ip.get_links(*interfaces)
        '''
        result = []
        links = argv or [0]
        if links[0] == 'all':  # compat syntax
            links = [0]
        if links[0] == 0:
            # no explicit indices -> dump all the links
            cmd = 'dump'
        else:
            cmd = 'get'
        for index in links:
            if index > 0:
                kwarg['index'] = index
            result.extend(self.link(cmd, **kwarg))
        return result

    def get_neighbours(self, family=AF_UNSPEC, match=None, **kwarg):
        '''
        Dump ARP cache records.

        The `family` keyword sets the family for the request:
        e.g. `AF_INET` or `AF_INET6` for arp cache, `AF_BRIDGE`
        for fdb.

        If other keyword arguments not empty, they are used as
        filter. Also, one can explicitly set filter as a function
        with the `match` parameter.

        Examples::

            # get neighbours on the 3rd link:
            ip.get_neighbours(ifindex=3)

            # get a particular record by dst:
            ip.get_neighbours(dst='172.16.0.1')

            # get fdb records:
            ip.get_neighbours(AF_BRIDGE)

            # and filter them by a function:
            ip.get_neighbours(AF_BRIDGE, match=lambda x: x['state'] == 2)
        '''
        return self.neigh('dump', family=family, match=match or kwarg)

    def get_ntables(self, family=AF_UNSPEC):
        '''
        Get neighbour tables
        '''
        msg = ndtmsg()
        msg['family'] = family
        return tuple(self.nlm_request(msg, RTM_GETNEIGHTBL))

    def get_addr(self, family=AF_UNSPEC, match=None, **kwarg):
        '''
        Dump addresses.

        If family is not specified, both AF_INET and AF_INET6
        addresses will be dumped::

            # get all addresses
            ip.get_addr()

        It is possible to apply filters on the results::

            # get addresses for the 2nd interface
            ip.get_addr(index=2)

            # get addresses with IFA_LABEL == 'eth0'
            ip.get_addr(label='eth0')

            # get all the subnet addresses on the interface, identified
            # by broadcast address (should be explicitly specified upon
            # creation)
            ip.get_addr(index=2, broadcast='192.168.1.255')

        A custom predicate can be used as a filter::

            ip.get_addr(match=lambda x: x['index'] == 1)
        '''
        return self.addr('dump', family=family, match=match or kwarg)

    def get_rules(self, family=AF_UNSPEC, match=None, **kwarg):
        '''
        Get all rules. By default return all rules. To explicitly
        request the IPv4 rules use `family=AF_INET`.

        Example::

            ip.get_rules()  # get all the rules for all families
            ip.get_rules(family=AF_INET6)  # get only IPv6 rules
        '''
        return self.rule(
            (RTM_GETRULE, NLM_F_REQUEST | NLM_F_ROOT | NLM_F_ATOMIC),
            family=family,
            match=match or kwarg,
        )

    def get_routes(self, family=255, match=None, **kwarg):
        '''
        Get all routes. You can specify the table. There
        are up to 4294967295 routing classes (tables), and the
        kernel returns all the routes on each request. So the
        routine filters routes from full output. Note the number
        of tables is increased from 255 in Linux 2.6+.

        Example::

            ip.get_routes()  # get all the routes for all families
            ip.get_routes(family=AF_INET6)  # get only IPv6 routes
            ip.get_routes(table=254)  # get routes from 254 table

        The default family=255 is a hack. Despite the specs,
        the kernel returns only IPv4 routes for AF_UNSPEC family.
        But it returns all the routes for all the families
        if one uses an invalid value here. Hack but true.
        And let's hope the kernel team will not fix this bug.
        '''
        # get a particular route?
        if isinstance(kwarg.get('dst'), str):
            # a single route lookup by destination
            return self.route('get', dst=kwarg['dst'])
        else:
            return self.route('dump', family=family, match=match or kwarg)

    # 8<---------------------------------------------------------------

    # 8<---------------------------------------------------------------
    @staticmethod
    def open_file(path):
        '''Open a file (read only) and return its (fd, inode).'''
        fd = os.open(path, os.O_RDONLY)
        inode = os.fstat(fd).st_ino
        return (fd, inode)

    @staticmethod
    def close_file(fd):
        '''Close a file that was previously opened with open_file().'''
        os.close(fd)

    @staticmethod
    def get_pid():
        '''Return the PID of the current process.'''
        return os.getpid()

    def register_link_kind(self, path=None, pkg=None, module=None):
        # register a custom link kind parser, see ifinfmsg.ifinfo
        return ifinfmsg.ifinfo.register_link_kind(path, pkg, module)

    def unregister_link_kind(self, kind):
        # remove a previously registered link kind parser
        return ifinfmsg.ifinfo.unregister_link_kind(kind)

    def list_link_kind(self):
        # list all the registered link kind parsers
        return ifinfmsg.ifinfo.list_link_kind()

    #
    # List NetNS info
    #
    def _dump_one_ns(self, path, registry):
        # Build one RTM_NEWNETNS-like message for the netns bound at
        # `path`; raises SkipInode for duplicates and unreadable paths.
        item = nsinfmsg()
        item['netnsid'] = 0xFFFFFFFF  # default netnsid "unknown"
        nsfd = 0
        info = nsidmsg()
        msg = nsidmsg()
        try:
            (nsfd, inode) = self.open_file(path)
            item['inode'] = inode
            #
            # if the inode is registered, skip it
            #
            if item['inode'] in registry:
                raise SkipInode()
            registry.add(item['inode'])
            #
            # request NETNSA_NSID
            #
            # may not work on older kernels ( <4.20 ?)
            #
            msg['attrs'] = [('NETNSA_FD', nsfd)]
            try:
                for info in self.nlm_request(
                    msg, RTM_GETNSID, NLM_F_REQUEST
                ):
                    # response to nlm_request() is a list or a generator,
                    # that's why loop
                    item['netnsid'] = info.get_attr('NETNSA_NSID')
                    break
            except Exception:
                # best effort: leave netnsid as "unknown" on failure
                pass
            item['attrs'] = [('NSINFO_PATH', path)]
        except OSError as e:
            raise SkipInode(e.errno)
        finally:
            # always release the netns fd, if it was opened
            if nsfd > 0:
                self.close_file(nsfd)
        item['header']['type'] = RTM_NEWNETNS
        item['header']['target'] = self.target
        item['event'] = 'RTM_NEWNETNS'
        return item

    def _dump_dir(self, path, registry):
        # yield one netns info message per entry under `path`
        for name in os.listdir(path):
            # strictly speaking, there is no need to use os.sep,
            # since the code is not portable outside of Linux
            nspath = '%s%s%s' % (path, os.sep, name)
            try:
                yield self._dump_one_ns(nspath, registry)
            except SkipInode:
                pass

    def _dump_proc(self, registry):
        # yield one netns info message per process in /proc
        for name in os.listdir('/proc'):
            try:
                int(name)
            except ValueError:
                # not a pid directory
                continue
            try:
                yield self._dump_one_ns('/proc/%s/ns/net' % name, registry)
            except SkipInode:
                pass

    def get_netnsid(self, nsid=None, pid=None, fd=None, target_nsid=None):
        '''Return a dict containing the result of a RTM_GETNSID query.
        This loosely corresponds to the "ip netns list-id" command.
        '''
        msg = nsidmsg()
        if nsid is not None:
            msg['attrs'].append(('NETNSA_NSID', nsid))
        if pid is not None:
            msg['attrs'].append(('NETNSA_PID', pid))
        if fd is not None:
            msg['attrs'].append(('NETNSA_FD', fd))
        if target_nsid is not None:
            msg['attrs'].append(('NETNSA_TARGET_NSID', target_nsid))
        response = self.nlm_request(msg, RTM_GETNSID, NLM_F_REQUEST)
        # only the first response message is used
        for r in response:
            return {
                'nsid': r.get_attr('NETNSA_NSID'),
                'current_nsid': r.get_attr('NETNSA_CURRENT_NSID'),
            }
        return None

    def get_netns_info(self, list_proc=False):
        '''
        A prototype method to list available netns and associated
        interfaces. A bit weird to have it here and not under
        `pyroute2.netns`, but it uses RTNL to get all the info.
        '''
        #
        # register all the ns inodes, not to repeat items in the output
        #
        registry = set()
        #
        # fetch veth peers
        #
        peers = {}
        for peer in self.get_links():
            netnsid = peer.get_attr('IFLA_LINK_NETNSID')
            if netnsid is not None:
                if netnsid not in peers:
                    peers[netnsid] = []
                peers[netnsid].append(peer.get_attr('IFLA_IFNAME'))
        #
        # chain iterators:
        #
        # * one iterator for every item in self.path
        # * one iterator for /proc/<pid>/ns/net
        #
        views = []
        for path in self.netns_path:
            views.append(self._dump_dir(path, registry))
        if list_proc:
            views.append(self._dump_proc(registry))
        #
        # iterate all the items
        #
        for view in views:
            try:
                for item in view:
                    #
                    # remove uninitialized 'value' field
                    #
                    del item['value']
                    #
                    # fetch peers for that ns
                    #
                    for peer in peers.get(item['netnsid'], []):
                        item['attrs'].append(('NSINFO_PEER', peer))
                    yield item
            except OSError:
                # a whole view may be unreadable; skip it silently
                pass

    def set_netnsid(self, nsid=None, pid=None, fd=None):
        '''Assigns an id to a peer netns using RTM_NEWNSID query.
        The kernel chooses an unique id if nsid is omitted.
        This corresponds to the "ip netns set" command.
        '''
        msg = nsidmsg()
        if nsid is None or nsid < 0:
            # kernel auto select
            msg['attrs'].append(('NETNSA_NSID', 4294967295))
        else:
            msg['attrs'].append(('NETNSA_NSID', nsid))
        if pid is not None:
            msg['attrs'].append(('NETNSA_PID', pid))
        if fd is not None:
            msg['attrs'].append(('NETNSA_FD', fd))
        return self.nlm_request(msg, RTM_NEWNSID, NLM_F_REQUEST | NLM_F_ACK)

    # 8<---------------------------------------------------------------

    # 8<---------------------------------------------------------------
    #
    # Shortcuts
    #
    def get_default_routes(self, family=AF_UNSPEC, table=DEFAULT_TABLE):
        '''
        Get default routes
        '''
        msg = rtmsg()
        msg['family'] = family
        routes = self.nlm_request(
            msg,
            msg_type=RTM_GETROUTE,
            msg_flags=NLM_F_DUMP | NLM_F_REQUEST,
            parser=default_routes,
        )
        if table is None:
            return routes
        else:
            # userspace filter by the routing table
            return self.filter_messages({'table': table}, routes)

    def link_lookup(self, match=None, **kwarg):
        '''
        Lookup interface index (indeces) by first level NLA
        value.

        Example::

            ip.link_lookup(address="52:54:00:9d:4e:3d")
            ip.link_lookup(ifname="lo")
            ip.link_lookup(operstate="UP")

        Please note, that link_lookup() returns list, not one
        value.
        '''
        if kwarg and set(kwarg) < {'index', 'ifname', 'altname'}:
            # shortcut for index and ifname
            try:
                for link in self.link('get', **kwarg):
                    return [link['index']]
            except NetlinkError:
                # no such interface
                return []
        else:
            # otherwise fallback to the userspace filter
            return [
                link['index']
                for link in self.get_links(match=match or kwarg)
            ]

    # 8<---------------------------------------------------------------

    # 8<---------------------------------------------------------------
    #
    # Shortcuts to flush RTNL objects
    #
    def flush_routes(self, *argv, **kwarg):
        '''
        Flush routes -- purge route records from a table.
        Arguments are the same as for `get_routes()`
        routine. Actually, this routine implements a pipe from
        `get_routes()` to `nlm_request()`.
        '''
        ret = []
        for route in self.get_routes(*argv, **kwarg):
            self.put(route, msg_type=RTM_DELROUTE, msg_flags=NLM_F_REQUEST)
            ret.append(route)
        return ret

    def flush_addr(self, *argv, **kwarg):
        '''
        Flush IP addresses.

        Examples::

            # flush all addresses on the interface with index 2:
            ipr.flush_addr(index=2)

            # flush all addresses with IFA_LABEL='eth0':
            ipr.flush_addr(label='eth0')
        '''
        flags = NLM_F_CREATE | NLM_F_REQUEST
        ret = []
        for addr in self.get_addr(*argv, **kwarg):
            self.put(addr, msg_type=RTM_DELADDR, msg_flags=flags)
            ret.append(addr)
        return ret

    def flush_rules(self, *argv, **kwarg):
        '''
        Flush rules. Please keep in mind, that by default the function
        operates on **all** rules of **all** families. To work only on
        IPv4 rules, one should explicitly specify `family=AF_INET`.

        Examples::

            # flush all IPv4 rule with priorities above 5 and below 32000
            ipr.flush_rules(family=AF_INET,
                            priority=lambda x: 5 < x < 32000)

            # flush all IPv6 rules that point to table 250:
            ipr.flush_rules(family=socket.AF_INET6, table=250)
        '''
        flags = NLM_F_CREATE | NLM_F_REQUEST
        ret = []
        for rule in self.get_rules(*argv, **kwarg):
            self.put(rule, msg_type=RTM_DELRULE, msg_flags=flags)
            ret.append(rule)
        return ret

    # 8<---------------------------------------------------------------

    # 8<---------------------------------------------------------------
    #
    # Extensions to low-level functions
    #
    def brport(self, command, **kwarg):
        '''
        Set bridge port parameters. Example::

            idx = ip.link_lookup(ifname='eth0')
            ip.brport("set", index=idx, unicast_flood=0, cost=200)
            ip.brport("show", index=idx)

        Possible keywords are NLA names for the `protinfo_bridge` class,
        without the prefix and in lower letters.
        '''
        if command == 'set':
            # "set" is routed through link() with kind=bridge_slave
            linkkwarg = dict()
            linkkwarg['index'] = kwarg.pop('index', 0)
            linkkwarg['kind'] = 'bridge_slave'
            for key in kwarg:
                linkkwarg[key] = kwarg[key]
            return self.link(command, **linkkwarg)
        if (command in ('dump', 'show')) and ('match' not in kwarg):
            # dump/show without explicit match: filter by the kwargs
            match = kwarg
        else:
            match = kwarg.pop('match', None)

        command_map = {
            'dump': (RTM_GETLINK, 'dump'),
            'show': (RTM_GETLINK, 'dump'),
        }
        (command, msg_flags) = self.make_request_type(command, command_map)

        msg = ifinfmsg()
        msg['index'] = kwarg.get('index', 0)
        msg['family'] = AF_BRIDGE
        protinfo = (
            RequestProcessor(context=match, prime=match)
            .apply_filter(BridgePortFieldFilter(command))
            .finalize()
        )
        msg['attrs'].append(
            ('IFLA_PROTINFO', {'attrs': protinfo['attrs']}, 0x8000)
        )
        ret = self.nlm_request(msg, msg_type=command, msg_flags=msg_flags)
        if match is not None:
            ret = self.filter_messages(match, ret)

        if self.nlm_generator and not msg_flags & NLM_F_DUMP == NLM_F_DUMP:
            # non-dump responses are materialized even in generator mode
            ret = tuple(ret)

        return ret

    def vlan_filter(self, command, **kwarg):
        '''
        Vlan filters is another approach to support vlans in Linux.
        Before vlan filters were introduced, there was only one way
        to bridge vlans: one had to create vlan interfaces and
        then add them as ports::

                    +------+      +----------+
            net --> | eth0 | <--> | eth0.500 | <---+
                    +------+      +----------+     |
                                                   v
                    +------+                    +-----+
            net --> | eth1 |                    | br0 |
                    +------+                    +-----+
                                                   ^
                    +------+      +----------+     |
            net --> | eth2 | <--> | eth2.500 | <---+
                    +------+      +----------+

        It means that one has to create as many bridges, as there were
        vlans. Vlan filters allow to bridge together underlying
        interfaces and create vlans already on the bridge::

            # v500 label shows which interfaces have vlan filter

                    +------+ v500
            net --> | eth0 | <-------+
                    +------+         |
                                     v
                    +------+      +-----+    +---------+
            net --> | eth1 | <--> | br0 |<-->| br0v500 |
                    +------+      +-----+    +---------+
                                     ^
                    +------+ v500    |
            net --> | eth2 | <-------+
                    +------+

        In this example vlan 500 will be allowed only on ports `eth0`
        and `eth2`, though all three eth nics are bridged.

        Some example code::

            # create bridge
            ip.link("add",
                    ifname="br0",
                    kind="bridge")

            # attach a port
            ip.link("set",
                    index=ip.link_lookup(ifname="eth0")[0],
                    master=ip.link_lookup(ifname="br0")[0])

            # set vlan filter
            ip.vlan_filter("add",
                           index=ip.link_lookup(ifname="eth0")[0],
                           vlan_info={"vid": 500})

            # create vlan interface on the bridge
            ip.link("add",
                    ifname="br0v500",
                    kind="vlan",
                    link=ip.link_lookup(ifname="br0")[0],
                    vlan_id=500)

            # set all UP
            ip.link("set",
                    index=ip.link_lookup(ifname="br0")[0],
                    state="up")
            ip.link("set",
                    index=ip.link_lookup(ifname="br0v500")[0],
                    state="up")
            ip.link("set",
                    index=ip.link_lookup(ifname="eth0")[0],
                    state="up")

            # set IP address
            ip.addr("add",
                    index=ip.link_lookup(ifname="br0v500")[0],
                    address="172.16.5.2",
                    mask=24)

        Now all the traffic to the network 172.16.5.2/24 will go
        to vlan 500 only via ports that have such vlan filter.

        Required arguments for `vlan_filter()`: `index` and `vlan_info`.
        Vlan info dict::

            ip.vlan_filter('add',
                           index=<ifindex>,
                           vlan_info={'vid': <int>,
                                      'pvid': <bool>,
                                      'flags': int or list})

        More details:
            * kernel:Documentation/networking/switchdev.txt
            * pyroute2.netlink.rtnl.ifinfmsg:... vlan_info

        Setting PVID or specifying a range will specify the approprate
        flags.

        One can specify `flags` as int or as a list of flag names:

        * `master` == 0x1
        * `pvid` == 0x2
        * `untagged` == 0x4
        * `range_begin` == 0x8
        * `range_end` == 0x10
        * `brentry` == 0x20

        E.g.::

            {'vid': 20, 'pvid': true}

            # is equal to
            {'vid': 20, 'flags': ['pvid', 'untagged']}

            # is equal to
            {'vid': 20, 'flags': 6}

            # range
            {'vid': '100-199'}

        Required arguments for `vlan_filter()`: `index` and
        `vlan_tunnel_info`. Vlan tunnel info dict::

            ip.vlan_filter('add',
                           index=<ifindex>,
                           vlan_tunnel_info={'vid': <int or str>,
                                             'id': <int or str>})

        vlan_tunnel_info appears to only use the 'range_begin' and
        'range_end' flags from vlan_info. Specifying a range will
        automatically send the needed flags.

        Example::

            {'vid': 20, 'id': 20}
            {'vid': '200-299', 'id': '200-299'}

        The above directives can be combined as in the example::

            ip.vlan_filter('add',
                           index=7,
                           vlan_info={'vid': 600},
                           vlan_tunnel_info={'vid': 600,
                                             'id': 600})

        Commands:

        **add**

        Add vlan filter to a bridge port. Example::

            ip.vlan_filter("add", index=2, vlan_info={"vid": 200})

        **del**

        Remove vlan filter from a bridge port. Example::

            ip.vlan_filter("del", index=2, vlan_info={"vid": 200})
        '''
        command_map = {
            'add': (RTM_SETLINK, 'req'),
            'del': (RTM_DELLINK, 'req'),
        }
        kwarg['family'] = AF_BRIDGE
        # route the request through link() with the bridge filters
        kwarg['kwarg_filter'] = [
            BridgeFieldFilter(),
            BridgeIPRouteFilter(command),
        ]
        (command, flags) = self.make_request_type(command, command_map)
        return tuple(self.link((command, flags), **kwarg))

    def fdb(self, command, **kwarg):
        '''
        Bridge forwarding database management.

        More details:
            * kernel:Documentation/networking/switchdev.txt
            * pyroute2.netlink.rtnl.ndmsg

        **add**

        Add a new FDB record. Works in the same way as ARP cache
        management, but some additional NLAs can be used::

            # simple FDB record
            #
            ip.fdb('add',
                   ifindex=ip.link_lookup(ifname='br0')[0],
                   lladdr='00:11:22:33:44:55',
                   dst='10.0.0.1')

            # specify vlan
            # NB: vlan should exist on the device, use
            # `vlan_filter()`
            #
            ip.fdb('add',
                   ifindex=ip.link_lookup(ifname='br0')[0],
                   lladdr='00:11:22:33:44:55',
                   dst='10.0.0.1',
                   vlan=200)

            # specify vxlan id and port
            # NB: works only for vxlan devices, use
            # `link("add", kind="vxlan", ...)`
            #
            # if port is not specified, the default one is used
            # by the kernel.
            #
            # if vni (vxlan id) is equal to the device vni,
            # the kernel doesn't report it back
            #
            ip.fdb('add',
                   ifindex=ip.link_lookup(ifname='vx500')[0],
                   lladdr='00:11:22:33:44:55',
                   dst='10.0.0.1',
                   port=5678,
                   vni=600)

            # or specify src_vni for a vlan-aware vxlan device
            ip.fdb('add',
                   ifindex=ip.link_lookup(ifname='vx500')[0],
                   lladdr='00:11:22:33:44:55',
                   dst='10.0.0.1',
                   port=5678,
                   src_vni=600)

        **append**

        Append a new FDB record. The same syntax as for **add**.
        **del**

        Remove an existing FDB record. The same syntax as for **add**.

        **dump**

        Dump all the FDB records. If any `**kwarg` is provided,
        results will be filtered::

            # dump all the records
            ip.fdb('dump')

            # show only specific lladdr, dst, vlan etc.
            ip.fdb('dump', lladdr='00:11:22:33:44:55')
            ip.fdb('dump', dst='10.0.0.1')
            ip.fdb('dump', vlan=200)
        '''
        dump_filter = None
        if command == 'dump':
            dump_filter, kwarg = get_dump_filter(kwarg)

        kwarg['family'] = AF_BRIDGE
        # nud -> state
        if 'nud' in kwarg:
            kwarg['state'] = kwarg.pop('nud')
        if (command in ('add', 'del', 'append')) and not (
            kwarg.get('state', 0) & ndmsg.states['noarp']
        ):
            # state must contain noarp in add / del / append
            kwarg['state'] = kwarg.pop('state', 0) | ndmsg.states['noarp']
            # other assumptions
            if not kwarg.get('state', 0) & (
                ndmsg.states['permanent'] | ndmsg.states['reachable']
            ):
                # permanent (default) or reachable
                kwarg['state'] |= ndmsg.states['permanent']
            if not kwarg.get('flags', 0) & (
                ndmsg.flags['self'] | ndmsg.flags['master']
            ):
                # self (default) or master
                kwarg['flags'] = kwarg.get('flags', 0) | ndmsg.flags['self']
        #
        if dump_filter is not None:
            kwarg['match'] = dump_filter
        return self.neigh(command, **kwarg)

    # 8<---------------------------------------------------------------
    #
    # General low-level configuration methods
    #
    def neigh(self, command, **kwarg):
        '''
        Neighbours operations, same as `ip neigh` or `bridge fdb`

        **add**

        Add a neighbour record, e.g.::

            from pyroute2 import IPRoute
            from pyroute2.netlink.rtnl import ndmsg

            # add a permanent record on veth0
            idx = ip.link_lookup(ifname='veth0')[0]
            ip.neigh('add',
                     dst='172.16.45.1',
                     lladdr='00:11:22:33:44:55',
                     ifindex=idx,
                     state=ndmsg.states['permanent'])

        **set**

        Set an existing record or create a new one, if it doesn't
        exist. The same as above, but the command is "set"::

            ip.neigh('set',
                     dst='172.16.45.1',
                     lladdr='00:11:22:33:44:55',
                     ifindex=idx,
                     state=ndmsg.states['permanent'])

        **change**

        Change an existing record. If the record doesn't exist, fail.

        **del**

        Delete an existing record.

        **dump**

        Dump all the records in the NDB::

            ip.neigh('dump')

        **get**

        Get specific record (dst and ifindex are mandatory). Available
        only on recent kernel::

            ip.neigh('get',
                     dst='172.16.45.1',
                     ifindex=idx)
        '''
        command_map = {
            'add': (RTM_NEWNEIGH, 'create'),
            'set': (RTM_NEWNEIGH, 'replace'),
            'replace': (RTM_NEWNEIGH, 'replace'),
            'change': (RTM_NEWNEIGH, 'change'),
            'del': (RTM_DELNEIGH, 'req'),
            'remove': (RTM_DELNEIGH, 'req'),
            'delete': (RTM_DELNEIGH, 'req'),
            'dump': (RTM_GETNEIGH, 'dump'),
            'get': (RTM_GETNEIGH, 'get'),
            'append': (RTM_NEWNEIGH, 'append'),
        }
        dump_filter = None
        msg = ndmsg.ndmsg()
        if command == 'dump':
            dump_filter, kwarg = get_dump_filter(kwarg)
        request = (
            RequestProcessor(context=kwarg, prime=kwarg)
            .apply_filter(NeighbourFieldFilter())
            .apply_filter(NeighbourIPRouteFilter(command))
            .finalize()
        )
        msg_type, msg_flags = self.make_request_type(command, command_map)

        # fill the fields
        for field in msg.fields:
            if (
                command == "dump"
                and self.strict_check
                and field[0] == "ifindex"
            ):
                # is dump & strict_check, leave ifindex for NLA
                continue
            msg[field[0]] = request.pop(field[0], 0)
        # remaining request keys become NLAs
        for key, value in request.items():
            nla = ndmsg.ndmsg.name2nla(key)
            if msg.valid_nla(nla) and value is not None:
                msg['attrs'].append([nla, value])
        ret = self.nlm_request(msg, msg_type=msg_type, msg_flags=msg_flags)
        if command == 'dump' and dump_filter:
            ret = self.filter_messages(dump_filter, ret)

        if self.nlm_generator and not msg_flags & NLM_F_DUMP == NLM_F_DUMP:
            # non-dump responses are materialized even in generator mode
            ret = tuple(ret)

        return ret

    def link(self, command, **kwarg):
        '''
        Link operations.

        Keywords to set up ifinfmsg fields:

        * index -- interface index
        * family -- AF_BRIDGE for bridge operations, otherwise 0
        * flags -- device flags
        * change -- change mask

        All other keywords will be translated to NLA names, e.g.
        `mtu -> IFLA_MTU`, `af_spec -> IFLA_AF_SPEC` etc. You can
        provide a complete NLA structure or let filters do it for
        you.
E.g., these pairs show equal statements:: # set device MTU ip.link("set", index=x, mtu=1000) ip.link("set", index=x, IFLA_MTU=1000) # add vlan device ip.link("add", ifname="test", kind="dummy") ip.link("add", ifname="test", IFLA_LINKINFO={'attrs': [['IFLA_INFO_KIND', 'dummy']]}) Filters are implemented in the `pyroute2.iproute.req` module. You can contribute your own if you miss shortcuts. Commands: **add** To create an interface, one should specify the interface kind:: ip.link("add", ifname="test", kind="dummy") The kind can be any of those supported by kernel. It can be `dummy`, `bridge`, `bond` etc. On modern kernels one can specify even interface index:: ip.link("add", ifname="br-test", kind="bridge", index=2345) Specific type notes: â–ē geneve Create GENEVE tunnel:: ip.link("add", ifname="genx", kind="geneve", geneve_id=42, geneve_remote="172.16.0.101") Support for GENEVE over IPv6 is also included; use `geneve_remote6` to configure a remote IPv6 address. â–ē gre Create GRE tunnel:: ip.link("add", ifname="grex", kind="gre", gre_local="172.16.0.1", gre_remote="172.16.0.101", gre_ttl=16) The keyed GRE requires explicit iflags/oflags specification:: ip.link("add", ifname="grex", kind="gre", gre_local="172.16.0.1", gre_remote="172.16.0.101", gre_ttl=16, gre_ikey=10, gre_okey=10, gre_iflags=32, gre_oflags=32) Support for GRE over IPv6 is also included; use `kind=ip6gre` and `ip6gre_` as the prefix for its values. â–ē ipip Create ipip tunnel:: ip.link("add", ifname="tun1", kind="ipip", ipip_local="172.16.0.1", ipip_remote="172.16.0.101", ipip_ttl=16) Support for sit and ip6tnl is also included; use `kind=sit` and `sit_` as prefix for sit tunnels, and `kind=ip6tnl` and `ip6tnl_` prefix for ip6tnl tunnels. â–ē macvlan Macvlan interfaces act like VLANs within OS. The macvlan driver provides an ability to add several MAC addresses on one interface, where every MAC address is reflected with a virtual interface in the system. 
In some setups macvlan interfaces can replace bridge interfaces, providing more simple and at the same time high-performance solution:: ip.link("add", ifname="mvlan0", kind="macvlan", link=ip.link_lookup(ifname="em1")[0], macvlan_mode="private").commit() Several macvlan modes are available: "private", "vepa", "bridge", "passthru". Ususally the default is "vepa". â–ē macvtap Almost the same as macvlan, but creates also a character tap device:: ip.link("add", ifname="mvtap0", kind="macvtap", link=ip.link_lookup(ifname="em1")[0], macvtap_mode="vepa").commit() Will create a device file `"/dev/tap%s" % index` â–ē tuntap Possible `tuntap` keywords: * `mode` — "tun" or "tap" * `uid` — integer * `gid` — integer * `ifr` — dict of tuntap flags (see ifinfmsg:... tuntap_data) Create a tap interface:: ip.link("add", ifname="tap0", kind="tuntap", mode="tap") Tun/tap interfaces are created using `ioctl()`, but the library provides a transparent way to manage them using netlink API. â–ē veth To properly create `veth` interface, one should specify `peer` also, since `veth` interfaces are created in pairs:: # simple call ip.link("add", ifname="v1p0", kind="veth", peer="v1p1") # set up specific veth peer attributes ip.link("add", ifname="v1p0", kind="veth", peer={"ifname": "v1p1", "net_ns_fd": "test_netns"}) â–ē vlan VLAN interfaces require additional parameters, `vlan_id` and `link`, where `link` is a master interface to create VLAN on:: ip.link("add", ifname="v100", kind="vlan", link=ip.link_lookup(ifname="eth0")[0], vlan_id=100) There is a possibility to create also 802.1ad interfaces:: # create external vlan 802.1ad, s-tag ip.link("add", ifname="v100s", kind="vlan", link=ip.link_lookup(ifname="eth0")[0], vlan_id=100, vlan_protocol=0x88a8) # create internal vlan 802.1q, c-tag ip.link("add", ifname="v200c", kind="vlan", link=ip.link_lookup(ifname="v100s")[0], vlan_id=200, vlan_protocol=0x8100) â–ē vrf VRF interfaces (see linux/Documentation/networking/vrf.txt):: ip.link("add", 
ifname="vrf-foo", kind="vrf", vrf_table=42) â–ē vxlan VXLAN interfaces are like VLAN ones, but require a bit more parameters:: ip.link("add", ifname="vx101", kind="vxlan", vxlan_link=ip.link_lookup(ifname="eth0")[0], vxlan_id=101, vxlan_group='239.1.1.1', vxlan_ttl=16) All possible vxlan parameters are listed in the module `pyroute2.netlink.rtnl.ifinfmsg:... vxlan_data`. â–ē ipoib IPoIB driver provides an ability to create several ip interfaces on one interface. IPoIB interfaces requires the following parameter: `link` : The master interface to create IPoIB on. The following parameters can also be provided: * `pkey`- Inifiniband partition key the ip interface is associated with * `mode`- Underlying infiniband transport mode. One of: ['datagram' ,'connected'] * `umcast`- If set(1), multicast group membership for this interface is handled by user space. Example:: ip.link("add", ifname="ipoib1", kind="ipoib", link=ip.link_lookup(ifname="ib0")[0], pkey=10) **set** Set interface attributes:: # get interface index x = ip.link_lookup(ifname="eth0")[0] # put link down ip.link("set", index=x, state="down") # rename and set MAC addr ip.link("set", index=x, address="00:11:22:33:44:55", name="bala") # set MTU and TX queue length ip.link("set", index=x, mtu=1000, txqlen=2000) # bring link up ip.link("set", index=x, state="up") Seting bridge or tunnel attributes require `kind` to be specified in order to properly encode `IFLA_LINKINFO`:: ip.link("set", index=x, kind="bridge", br_forward_delay=2000) ip.link("set", index=x, kind="gre", gre_local="10.0.0.1", gre_remote="10.1.0.103") Keyword "state" is reserved. 
        State can be "up" or "down", it is a shortcut::

            state="up":   flags=1, mask=1
            state="down": flags=0, mask=0

        SR-IOV virtual function setup::

            # get PF index
            x = ip.link_lookup(ifname="eth0")[0]
            # setup macaddr
            ip.link("set",
                    index=x,                          # PF index
                    vf={"vf": 0,                      # VF index
                        "mac": "00:11:22:33:44:55"})  # address
            # setup vlan
            ip.link("set",
                    index=x,           # PF index
                    vf={"vf": 0,       # VF index
                        "vlan": 100})  # the simplest case
            # setup QinQ
            ip.link("set",
                    index=x,                           # PF index
                    vf={"vf": 0,                       # VF index
                        "vlan": [{"vlan": 100,         # vlan id
                                  "proto": 0x88a8},    # 802.1ad
                                 {"vlan": 200,         # vlan id
                                  "proto": 0x8100}]})  # 802.1q

        **update**

        Almost the same as `set`, except it uses different flags
        and message type. Mostly does the same, but in some cases
        differs. If you're not sure what to use, use `set`.

        **del**

        Destroy the interface::

            ip.link("del", index=ip.link_lookup(ifname="dummy0")[0])

        **dump**

        Dump info for all interfaces

        **get**

        Get specific interface info::

            ip.link("get", index=ip.link_lookup(ifname="br0")[0])

        Get extended attributes like SR-IOV setup::

            ip.link("get", index=3, ext_mask=1)
        '''
        command_map = {
            'set': (RTM_NEWLINK, 'req'),
            'update': (RTM_SETLINK, 'create'),
            'add': (RTM_NEWLINK, 'create'),
            'del': (RTM_DELLINK, 'req'),
            'property_add': (RTM_NEWLINKPROP, 'append'),
            'property_del': (RTM_DELLINKPROP, 'req'),
            'remove': (RTM_DELLINK, 'req'),
            'delete': (RTM_DELLINK, 'req'),
            'dump': (RTM_GETLINK, 'dump'),
            'get': (RTM_GETLINK, 'get'),
        }

        dump_filter = None
        request = {}
        msg = ifinfmsg()
        if command == 'dump':
            dump_filter, kwarg = get_dump_filter(kwarg)

        if kwarg:
            # either use the caller-provided filter chain, or the
            # default link field + iproute filters
            if kwarg.get('kwarg_filter'):
                filters = kwarg['kwarg_filter']
            else:
                filters = [LinkFieldFilter(), LinkIPRouteFilter(command)]
            request = RequestProcessor(context=kwarg, prime=kwarg)
            for rfilter in filters:
                request.apply_filter(rfilter)
            request.finalize()

        msg_type, msg_flags = self.make_request_type(command, command_map)

        # fill the ifinfmsg fields
        for field in msg.fields:
            msg[field[0]] = request.pop(field[0], 0)

        # attach NLA
        for key, value in request.items():
            nla = type(msg).name2nla(key)
            if msg.valid_nla(nla) and value is not None:
                msg['attrs'].append([nla, value])

        ret = self.nlm_request(msg, msg_type=msg_type, msg_flags=msg_flags)

        if command == 'dump' and dump_filter is not None:
            if isinstance(dump_filter, dict):
                # normalize a dict filter through the same filter chain
                dump_filter = (
                    RequestProcessor(context=dump_filter, prime=dump_filter)
                    .apply_filter(LinkFieldFilter())
                    .apply_filter(LinkIPRouteFilter('dump'))
                    .finalize()
                )
            ret = self.filter_messages(dump_filter, ret)

        if self.nlm_generator and not msg_flags & NLM_F_DUMP == NLM_F_DUMP:
            # non-dump responses are materialized even in generator mode
            ret = tuple(ret)

        return ret

    def addr(self, command, *argv, **kwarg):
        '''
        Address operations

        * command -- add, delete, replace, dump
        * index -- device index
        * address -- IPv4 or IPv6 address
        * mask -- address mask
        * family -- socket.AF_INET for IPv4 or socket.AF_INET6 for IPv6
        * scope -- the address scope, see /etc/iproute2/rt_scopes
        * kwarg -- dictionary, any ifaddrmsg field or NLA

        Later the method signature will be changed to::

            def addr(self, command, match=None, **kwarg):
                # the method body

        So only keyword arguments (except of the command) will be
        accepted. The reason for this change is an unification of API.

        Example::

            idx = 62
            ip.addr('add', index=idx, address='10.0.0.1', mask=24)
            ip.addr('add', index=idx, address='10.0.0.2', mask=24)

        With more NLAs::

            # explicitly set broadcast address
            ip.addr('add', index=idx,
                    address='10.0.0.3',
                    broadcast='10.0.0.255',
                    prefixlen=24)

            # make the secondary address visible to ifconfig: add label
            ip.addr('add', index=idx,
                    address='10.0.0.4',
                    broadcast='10.0.0.255',
                    prefixlen=24,
                    label='eth0:1')

        Configure p2p address on an interface::

            ip.addr('add', index=idx,
                    address='10.1.1.2', mask=24,
                    local='10.1.1.1')
        '''
        if command in ('get', 'set'):
            # get/set are not supported for addresses
            return []
        ##
        # This block will be deprecated in a short term
        if argv:
            warnings.warn(
                'positional arguments for IPRoute.addr() are deprecated, '
                'use keyword arguments',
                DeprecationWarning,
            )
            converted_argv = zip(
                ('index', 'address', 'prefixlen', 'family', 'scope', 'match'),
                argv,
            )
            kwarg.update(converted_argv)
        if 'mask' in kwarg:
            warnings.warn(
                'usage of mask is deprecated, use prefixlen instead',
                DeprecationWarning,
            )
        command_map = {
            'add': (RTM_NEWADDR, 'create'),
            'del': (RTM_DELADDR, 'req'),
            'remove': (RTM_DELADDR, 'req'),
            'delete': (RTM_DELADDR, 'req'),
            'replace': (RTM_NEWADDR, 'replace'),
            'dump': (RTM_GETADDR, 'dump'),
        }
        dump_filter = None
        msg = ifaddrmsg()
        if command == 'dump':
            dump_filter, kwarg = get_dump_filter(kwarg)

        request = (
            RequestProcessor(context=kwarg, prime=kwarg)
            .apply_filter(AddressFieldFilter())
            .apply_filter(AddressIPRouteFilter(command))
            .finalize()
        )
        msg_type, msg_flags = self.make_request_type(command, command_map)

        for field in msg.fields:
            if field[0] != 'flags':  # Flags are supplied as NLA
                msg[field[0]] = request.pop(field[0], 0)

        # work on NLA
        for key, value in request.items():
            nla = ifaddrmsg.name2nla(key)
            if msg.valid_nla(nla) and value is not None:
                msg['attrs'].append([nla, value])

        ret = self.nlm_request(
            msg,
            msg_type=msg_type,
            msg_flags=msg_flags,
            terminate=lambda x: x['header']['type'] == NLMSG_ERROR,
        )
        if command == 'dump' and dump_filter is not None:
            ret = self.filter_messages(dump_filter, ret)

        if self.nlm_generator and not msg_flags & NLM_F_DUMP == NLM_F_DUMP:
            # non-dump responses are materialized even in generator mode
            ret = tuple(ret)

        return ret

    def tc(self, command, kind=None, index=0, handle=0, **kwarg):
        '''
        "Swiss knife" for traffic control. With the method you can
        add, delete or modify qdiscs, classes and filters.

        * command -- add or delete qdisc, class, filter.
        * kind -- a string identifier -- "sfq", "htb", "u32" and so on.
        * handle -- integer or string

        Command can be one of ("add", "del", "add-class", "del-class",
        "add-filter", "del-filter") (see `commands` dict in the code).

        Handle notice: traditional iproute2 notation, like "1:0",
        actually represents two parts in one four-bytes integer::

            1:0    ->  0x10000
            1:1    ->  0x10001
            ff:0   ->  0xff0000
            ffff:1 ->  0xffff0001

        Target notice: if your target is a class/qdisc that applies an
        algorithm that can only apply to upstream traffic profile, but
        your keys variable explicitly references a match that is only
        relevant for upstream traffic, the kernel will reject the
        filter. Unless you're dealing with devices like IMQs

        For pyroute2 tc() you can use both forms: integer like
        0xffff0000 or string like 'ffff:0000'. By default, handle is 0,
        so you can add simple classless queues w/o need to specify
        handle. Ingress queue causes handle to be 0xffff0000.

        So, to set up sfq queue on interface 1, the function call
        will be like that::

            ip = IPRoute()
            ip.tc("add", "sfq", 1)

        Instead of string commands ("add", "del"...), you can use also
        module constants, `RTM_NEWQDISC`, `RTM_DELQDISC` and so on::

            ip = IPRoute()
            flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL
            ip.tc((RTM_NEWQDISC, flags), "sfq", 1)

        It should be noted that "change", "change-class" and
        "change-filter" work like "replace", "replace-class" and
        "replace-filter", except they will fail if the node doesn't
        exist (while it would have been created by "replace").
This is not the same behaviour as with "tc" where "change" can be used to modify the value of some options while leaving the others unchanged. However, as not all entities support this operation, we believe the "change" commands as implemented here are more useful. Also available "modules" (returns tc plugins dict) and "help" commands:: help(ip.tc("modules")["htb"]) print(ip.tc("help", "htb")) ''' if command == 'set': return if command == 'modules': return tc_plugins if command == 'help': p = tc_plugins.get(kind) if p is not None and hasattr(p, '__doc__'): return p.__doc__ else: return 'No help available' command_map = { 'add': (RTM_NEWQDISC, 'create'), 'del': (RTM_DELQDISC, 'req'), 'remove': (RTM_DELQDISC, 'req'), 'delete': (RTM_DELQDISC, 'req'), 'change': (RTM_NEWQDISC, 'change'), 'replace': (RTM_NEWQDISC, 'replace'), 'add-class': (RTM_NEWTCLASS, 'create'), 'del-class': (RTM_DELTCLASS, 'req'), 'change-class': (RTM_NEWTCLASS, 'change'), 'replace-class': (RTM_NEWTCLASS, 'replace'), 'add-filter': (RTM_NEWTFILTER, 'create'), 'del-filter': (RTM_DELTFILTER, 'req'), 'change-filter': (RTM_NEWTFILTER, 'change'), 'replace-filter': (RTM_NEWTFILTER, 'replace'), } if command == 'del': if index == 0: index = [ x['index'] for x in self.get_links() if x['index'] != 1 ] if isinstance(index, (list, tuple, set)): return list(chain(*(self.tc('del', index=x) for x in index))) command, flags = self.make_request_type(command, command_map) msg = tcmsg() # transform handle, parent and target, if needed: handle = transform_handle(handle) for item in ('parent', 'target', 'default'): if item in kwarg and kwarg[item] is not None: kwarg[item] = transform_handle(kwarg[item]) msg['index'] = index msg['handle'] = handle if 'info' in kwarg: msg['info'] = kwarg['info'] opts = kwarg.get('opts', None) ## # # if kind in tc_plugins: p = tc_plugins[kind] msg['parent'] = kwarg.pop('parent', getattr(p, 'parent', 0)) if hasattr(p, 'fix_msg'): p.fix_msg(msg, kwarg) if kwarg: if command in (RTM_NEWTCLASS, 
RTM_DELTCLASS): opts = p.get_class_parameters(kwarg) else: opts = p.get_parameters(kwarg) else: msg['parent'] = kwarg.get('parent', TC_H_ROOT) if kind is not None: msg['attrs'].append(['TCA_KIND', kind]) if opts is not None: msg['attrs'].append(['TCA_OPTIONS', opts]) return tuple(self.nlm_request(msg, msg_type=command, msg_flags=flags)) def route(self, command, **kwarg): ''' Route operations. Keywords to set up rtmsg fields: * dst_len, src_len -- destination and source mask(see `dst` below) * tos -- type of service * table -- routing table * proto -- `redirect`, `boot`, `static` (see `rt_proto`) * scope -- routing realm * type -- `unicast`, `local`, etc. (see `rt_type`) `pyroute2/netlink/rtnl/rtmsg.py` rtmsg.nla_map: * table -- routing table to use (default: 254) * gateway -- via address * prefsrc -- preferred source IP address * dst -- the same as `prefix` * iif -- incoming traffic interface * oif -- outgoing traffic interface etc. One can specify mask not as `dst_len`, but as a part of `dst`, e.g.: `dst="10.0.0.0/24"`. Commands: **add** Example:: ipr.route("add", dst="10.0.0.0/24", gateway="192.168.0.1") ... More `route()` examples. 
Blackhole route:: ipr.route( "add", dst="10.0.0.0/24", type="blackhole", ) Create a route with metrics:: ipr.route( "add", dst="172.16.0.0/24", gateway="10.0.0.10", metrics={ "mtu": 1400, "hoplimit": 16, }, ) Multipath route:: ipr.route( "add", dst="10.0.0.0/24", multipath=[ {"gateway": "192.168.0.1", "hops": 2}, {"gateway": "192.168.0.2", "hops": 1}, {"gateway": "192.168.0.3"}, ], ) MPLS lwtunnel on eth0:: ipr.route( "add", dst="10.0.0.0/24", oif=ip.link_lookup(ifname="eth0"), encap={ "type": "mpls", "labels": "200/300", }, ) IPv6 next hop for IPv4 dst:: ipr.route( "add", prefsrc="10.127.30.4", dst="172.16.0.0/24", via={"family": AF_INET6, "addr": "fe80::1337"}, oif=ipr.link_lookup(ifname="eth0"), table=100, ) Create MPLS route: push label:: # $ sudo modprobe mpls_router # $ sudo sysctl net.mpls.platform_labels=1024 ipr.route( "add", family=AF_MPLS, oif=ipr.link_lookup(ifname="eth0"), dst=0x200, newdst=[0x200, 0x300], ) MPLS multipath:: ipr.route( "add", dst="10.0.0.0/24", table=20, multipath=[ { "gateway": "192.168.0.1", "encap": {"type": "mpls", "labels": 200}, }, { "ifindex": ipr.link_lookup(ifname="eth0"), "encap": {"type": "mpls", "labels": 300}, }, ], ) MPLS target can be int, string, dict or list:: "labels": 300 # simple label "labels": "300" # the same "labels": (200, 300) # stacked "labels": "200/300" # the same # explicit label definition "labels": { "bos": 1, "label": 300, "tc": 0, "ttl": 16, } Create SEG6 tunnel encap mode (kernel >= 4.10):: ipr.route( "add", dst="2001:0:0:10::2/128", oif=idx, encap={ "type": "seg6", "mode": "encap", "segs": "2000::5,2000::6", }, ) Create SEG6 tunnel inline mode (kernel >= 4.10):: ipr.route( "add", dst="2001:0:0:10::2/128", oif=idx, encap={ "type": "seg6", "mode": "inline", "segs": ["2000::5", "2000::6"], }, ) Create SEG6 tunnel inline mode with hmac (kernel >= 4.10):: ipr.route( "add", dst="2001:0:0:22::2/128", oif=idx, encap={ "type": "seg6", "mode": "inline", "segs": "2000::5,2000::6,2000::7,2000::8", "hmac": 0xf, 
}, ) Create SEG6 tunnel with ip4ip6 encapsulation (kernel >= 4.14):: ipr.route( "add", dst="172.16.0.0/24", oif=idx, encap={ "type": "seg6", "mode": "encap", "segs": "2000::5,2000::6", }, ) Create SEG6LOCAL tunnel End.DX4 action (kernel >= 4.14):: ipr.route( "add", dst="2001:0:0:10::2/128", oif=idx, encap={ "type": "seg6local", "action": "End.DX4", "nh4": "172.16.0.10", }, ) Create SEG6LOCAL tunnel End.DT6 action (kernel >= 4.14):: ipr.route( "add", dst="2001:0:0:10::2/128", oif=idx, encap={ "type": "seg6local", "action": "End.DT6", "table": "10", }, ) Create SEG6LOCAL tunnel End.DT4 action (kernel >= 5.11):: # $ sudo modprobe vrf # $ sudo sysctl -w net.vrf.strict_mode=1 ipr.link( "add", ifname="vrf-foo", kind="vrf", vrf_table=10, ) ipr.route( "add", dst="2001:0:0:10::2/128", oif=idx, encap={ "type": "seg6local", "action": "End.DT4", "vrf_table": 10, }, ) Create SEG6LOCAL tunnel End.DT46 action (kernel >= 5.14):: # $ sudo modprobe vrf # $ sudo sysctl -w net.vrf.strict_mode=1 ip.link('add', ifname='vrf-foo', kind='vrf', vrf_table=10) ip.route('add', dst='2001:0:0:10::2/128', oif=idx, encap={'type': 'seg6local', 'action': 'End.DT46', 'vrf_table': 10}) Create SEG6LOCAL tunnel End.B6 action (kernel >= 4.14):: ipr.route( "add", dst="2001:0:0:10::2/128", oif=idx, encap={ "type": "seg6local", "action": "End.B6", "srh": {"segs": "2000::5,2000::6"}, }, ) Create SEG6LOCAL tunnel End.B6 action with hmac (kernel >= 4.14):: ipr.route( "add", dst="2001:0:0:10::2/128", oif=idx, encap={ "type": "seg6local", "action": "End.B6", "srh": { "segs": "2000::5,2000::6", "hmac": 0xf, }, }, ) **change**, **replace**, **append** Commands `change`, `replace` and `append` have the same meanings as in ip-route(8): `change` modifies only existing route, while `replace` creates a new one, if there is no such route yet. `append` allows to create an IPv6 multipath route. **del** Remove the route. The same syntax as for **add**. **get** Get route by spec. **dump** Dump all routes. 
''' # transform kwarg if command in ('add', 'set', 'replace', 'change', 'append'): kwarg['proto'] = kwarg.get('proto', 'static') or 'static' kwarg['type'] = kwarg.get('type', 'unicast') or 'unicast' if 'match' not in kwarg and command in ('dump', 'show'): match = kwarg else: match = kwarg.pop('match', None) callback = kwarg.pop('callback', None) request = ( RequestProcessor(context=kwarg, prime=kwarg) .apply_filter(RouteFieldFilter()) .apply_filter(RouteIPRouteFilter(command)) .finalize() ) kwarg = request command_map = { 'add': (RTM_NEWROUTE, 'create'), 'set': (RTM_NEWROUTE, 'replace'), 'replace': (RTM_NEWROUTE, 'replace'), 'change': (RTM_NEWROUTE, 'change'), 'append': (RTM_NEWROUTE, 'append'), 'del': (RTM_DELROUTE, 'req'), 'remove': (RTM_DELROUTE, 'req'), 'delete': (RTM_DELROUTE, 'req'), 'get': (RTM_GETROUTE, 'get'), 'show': (RTM_GETROUTE, 'dump'), 'dump': (RTM_GETROUTE, 'dump'), } (command, flags) = self.make_request_type(command, command_map) msg = rtmsg() # table is mandatory without strict_check; by default == 254 # if table is not defined in kwarg, save it there # also for nla_attr. 
Do not set it in strict_check, use # NLA instead if not self.strict_check: table = kwarg.get('table', 254) msg['table'] = table if table <= 255 else 252 msg['family'] = kwarg.pop('family', AF_INET) msg['scope'] = kwarg.pop('scope', rt_scope['universe']) msg['dst_len'] = kwarg.pop('dst_len', None) or kwarg.pop('mask', 0) msg['src_len'] = kwarg.pop('src_len', 0) msg['tos'] = kwarg.pop('tos', 0) msg['flags'] = kwarg.pop('flags', 0) msg['type'] = kwarg.pop('type', rt_type['unspec']) msg['proto'] = kwarg.pop('proto', rt_proto['unspec']) msg['attrs'] = [] if msg['family'] == AF_MPLS: for key in tuple(kwarg): if key not in ('dst', 'newdst', 'via', 'multipath', 'oif'): kwarg.pop(key) for key in kwarg: nla = rtmsg.name2nla(key) if nla == 'RTA_DST' and not kwarg[key]: continue if kwarg[key] is not None: msg['attrs'].append([nla, kwarg[key]]) # fix IP family, if needed if msg['family'] in (AF_UNSPEC, 255): if key == 'multipath' and len(kwarg[key]) > 0: hop = kwarg[key][0] attrs = hop.get('attrs', []) for attr in attrs: if attr[0] == 'RTA_GATEWAY': msg['family'] = ( AF_INET6 if attr[1].find(':') >= 0 else AF_INET ) break ret = self.nlm_request( msg, msg_type=command, msg_flags=flags, callback=callback ) if match: if isinstance(match, dict): match = ( RequestProcessor(context=match, prime=match) .apply_filter(RouteFieldFilter(add_defaults=False)) .apply_filter(RouteIPRouteFilter('dump')) .finalize() ) ret = self.filter_messages(match, ret) if self.nlm_generator and not flags & NLM_F_DUMP == NLM_F_DUMP: ret = tuple(ret) return ret def rule(self, command, **kwarg): ''' Rule operations - command — add, delete - table — 0 < table id < 253 - priority — 0 < rule's priority < 32766 - action — type of rule, default 'FR_ACT_NOP' (see fibmsg.py) - rtscope — routing scope, default RT_SCOPE_UNIVERSE `(RT_SCOPE_UNIVERSE|RT_SCOPE_SITE|\ RT_SCOPE_LINK|RT_SCOPE_HOST|RT_SCOPE_NOWHERE)` - family — rule's family (socket.AF_INET (default) or socket.AF_INET6) - src — IP source for Source Based 
(Policy Based) routing's rule - dst — IP for Destination Based (Policy Based) routing's rule - src_len — Mask for Source Based (Policy Based) routing's rule - dst_len — Mask for Destination Based (Policy Based) routing's rule - iifname — Input interface for Interface Based (Policy Based) routing's rule - oifname — Output interface for Interface Based (Policy Based) routing's rule - uid_range — Range of user identifiers, a string like "1000:1234" - dport_range — Range of destination ports, a string like "80-120" - sport_range — Range of source ports, as a string like "80-120" All packets route via table 10:: # 32000: from all lookup 10 # ... ip.rule('add', table=10, priority=32000) Default action:: # 32001: from all lookup 11 unreachable # ... iproute.rule('add', table=11, priority=32001, action='FR_ACT_UNREACHABLE') Use source address to choose a routing table:: # 32004: from 10.64.75.141 lookup 14 # ... iproute.rule('add', table=14, priority=32004, src='10.64.75.141') Use dst address to choose a routing table:: # 32005: from 10.64.75.141/24 lookup 15 # ... iproute.rule('add', table=15, priority=32005, dst='10.64.75.141', dst_len=24) Match fwmark:: # 32006: from 10.64.75.141 fwmark 0xa lookup 15 # ... 
iproute.rule('add', table=15, priority=32006, dst='10.64.75.141', fwmark=10) ''' if command == 'set': return if 'match' not in kwarg and command == 'dump': match = kwarg else: match = kwarg.pop('match', None) request = ( RequestProcessor(context=kwarg, prime=kwarg) .apply_filter(RuleFieldFilter()) .apply_filter(RuleIPRouteFilter(command)) .finalize() ) command_map = { 'add': (RTM_NEWRULE, 'create'), 'del': (RTM_DELRULE, 'req'), 'remove': (RTM_DELRULE, 'req'), 'delete': (RTM_DELRULE, 'req'), 'dump': (RTM_GETRULE, 'dump'), } command, flags = self.make_request_type(command, command_map) msg = fibmsg() table = request.get('table', 0) msg['table'] = table if table <= 255 else 252 for key in ('family', 'src_len', 'dst_len', 'action', 'tos', 'flags'): msg[key] = request.pop(key, 0) msg['attrs'] = [] for key in request: if command == RTM_GETRULE and self.strict_check: if key in ("match", "priority"): continue nla = fibmsg.name2nla(key) if request[key] is not None: msg['attrs'].append([nla, request[key]]) ret = self.nlm_request(msg, msg_type=command, msg_flags=flags) if match: if isinstance(match, dict): match = ( RequestProcessor(context=match, prime=match) .apply_filter(RuleFieldFilter()) .apply_filter(RuleIPRouteFilter('dump')) .finalize() ) ret = self.filter_messages(match, ret) if self.nlm_generator and not flags & NLM_F_DUMP == NLM_F_DUMP: ret = tuple(ret) return ret def stats(self, command, **kwarg): ''' Stats prototype. 
''' if (command == 'dump') and ('match' not in kwarg): match = kwarg else: match = kwarg.pop('match', None) command_map = { 'dump': (RTM_GETSTATS, 'dump'), 'get': (RTM_GETSTATS, 'get'), } command, flags = self.make_request_type(command, command_map) msg = ifstatsmsg() msg['filter_mask'] = kwarg.get('filter_mask', 31) msg['ifindex'] = kwarg.get('ifindex', 0) ret = self.nlm_request(msg, msg_type=command, msg_flags=flags) if match is not None: ret = self.filter_messages(match, ret) if self.nlm_generator and not flags & NLM_F_DUMP == NLM_F_DUMP: ret = tuple(ret) return ret # 8<--------------------------------------------------------------- class IPBatch(RTNL_API, IPBatchSocket): ''' Netlink requests compiler. Does not send any requests, but instead stores them in the internal binary buffer. The contents of the buffer can be used to send batch requests, to test custom netlink parsers and so on. Uses `RTNL_API` and provides all the same API as normal `IPRoute` objects:: # create the batch compiler ipb = IPBatch() # compile requests into the internal buffer ipb.link("add", index=550, ifname="test", kind="dummy") ipb.link("set", index=550, state="up") ipb.addr("add", index=550, address="10.0.0.2", mask=24) # save the buffer data = ipb.batch # reset the buffer ipb.reset() ... # send the buffer IPRoute().sendto(data, (0, 0)) ''' pass class IPRoute(LAB_API, RTNL_API, IPRSocket): ''' Regular ordinary utility class, see RTNL API for the list of methods. ''' pass class RawIPRoute(RTNL_API, RawIPRSocket): ''' The same as `IPRoute`, but does not use the netlink proxy. Thus it can not manage e.g. tun/tap interfaces. ''' pass class ChaoticIPRoute(RTNL_API, ChaoticIPRSocket): ''' IPRoute interface for chaotic tests - raising exceptions randomly. 
import struct

from pyroute2.netlink import NLMSG_DONE, nlmsg
from pyroute2.netlink.rtnl.rtmsg import rtmsg


def default_routes(data, offset, length):
    '''
    Only for RTM_NEWROUTE.

    This parser returns:
    * rtmsg() -- only for default routes (no RTA_DST)
    * nlmsg() -- NLMSG_DONE
    * None for any other messages

    :param data: raw netlink data buffer
    :param offset: offset of this message within `data`
    :param length: length of this message
    '''
    # get message header
    header = dict(
        zip(
            ('length', 'type', 'flags', 'sequence_number'),
            struct.unpack_from('IHHI', data, offset),
        )
    )
    header['error'] = None
    if header['type'] == NLMSG_DONE:
        msg = nlmsg()
        msg['header'] = header
        msg.length = msg['header']['length']
        return msg

    # skip to NLA: offset + nlmsg header + rtmsg data
    cursor = offset + 28
    # iterate NLA, if meet RTA_DST -- return None (not a default route)
    while cursor < offset + length:
        nla_length, nla_type = struct.unpack_from('HH', data, cursor)
        if nla_length < 4:
            # malformed NLA: the length must cover at least the 4-byte
            # NLA header itself; without this guard the cursor would
            # stop advancing and the loop would spin forever -- bail
            # out to the full decode below instead
            break
        nla_length = (nla_length + 3) & ~3  # align to 4 bytes
        cursor += nla_length
        if nla_type == 1:  # RTA_DST
            return

    # no RTA_DST, a default route -- spend time to decode using the
    # standard routine
    msg = rtmsg(data, offset=offset)
    msg.decode()
    msg['header']['error'] = None  # required
    return msg
'''

import ctypes
import os
from socket import AF_INET

from pyroute2.common import AddrPool, Namespace, dqn2int
from pyroute2.netlink import NLM_F_DUMP, NLM_F_MULTI, NLM_F_REQUEST, NLMSG_DONE
from pyroute2.netlink.proxy import NetlinkProxy
from pyroute2.netlink.rtnl import (
    RTM_GETADDR,
    RTM_GETLINK,
    RTM_GETNEIGH,
    RTM_GETROUTE,
    RTM_NEWADDR,
    RTM_NEWLINK,
    RTM_NEWNEIGH,
    RTM_NEWROUTE,
)
from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg
from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg
from pyroute2.netlink.rtnl.marshal import MarshalRtnl

# WinAPI IP_ADAPTER_INFO field size limits
MAX_ADAPTER_NAME_LENGTH = 256
MAX_ADAPTER_DESCRIPTION_LENGTH = 128
MAX_ADAPTER_ADDRESS_LENGTH = 8


# ctypes mirror of the WinAPI IP_ADDRESS_STRING structure; declared
# empty first so the self-referential `Next` pointer can be defined
class IP_ADDRESS_STRING(ctypes.Structure):
    pass


PIP_ADDRESS_STRING = ctypes.POINTER(IP_ADDRESS_STRING)
IP_ADDRESS_STRING._fields_ = [
    ('Next', PIP_ADDRESS_STRING),
    ('IpAddress', ctypes.c_byte * 16),
    ('IpMask', ctypes.c_byte * 16),
    ('Context', ctypes.c_ulong),
]


# ctypes mirror of the WinAPI IP_ADAPTER_INFO structure
class IP_ADAPTER_INFO(ctypes.Structure):
    pass


PIP_ADAPTER_INFO = ctypes.POINTER(IP_ADAPTER_INFO)
IP_ADAPTER_INFO._fields_ = [
    ('Next', PIP_ADAPTER_INFO),
    ('ComboIndex', ctypes.c_ulong),
    ('AdapterName', ctypes.c_byte * (256 + 4)),
    ('Description', ctypes.c_byte * (128 + 4)),
    ('AddressLength', ctypes.c_uint),
    ('Address', ctypes.c_ubyte * 8),
    ('Index', ctypes.c_ulong),
    ('Type', ctypes.c_uint),
    ('DhcpEnabled', ctypes.c_uint),
    ('CurrentIpAddress', PIP_ADDRESS_STRING),
    ('IpAddressList', IP_ADDRESS_STRING),
    ('GatewayList', IP_ADDRESS_STRING),
    ('DhcpServer', IP_ADDRESS_STRING),
    ('HaveWins', ctypes.c_byte),
    ('PrimaryWinsServer', IP_ADDRESS_STRING),
    ('SecondaryWinsServer', IP_ADDRESS_STRING),
    ('LeaseObtained', ctypes.c_ulong),
    ('LeaseExpires', ctypes.c_ulong),
]


class IPRoute(object):
    # Proof-of-concept RTNL-compatible facade on top of WinAPI calls;
    # no real netlink socket exists behind this object.

    def __init__(self, *argv, **kwarg):
        self.marshal = MarshalRtnl()
        send_ns = Namespace(
            self, {'addr_pool': AddrPool(0x10000, 0x1FFFF), 'monitor': False}
        )
        self._sproxy = NetlinkProxy(policy='return', nl=send_ns)
        self.target = kwarg.get('target') or 'localhost'

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def clone(self):
        # no underlying socket to duplicate -- reuse the same object
        return self

    def close(self, code=None):
        pass

    def bind(self, *argv, **kwarg):
        pass

    def getsockopt(self, *argv, **kwarg):
        # emulate a large receive buffer size
        return 1024 * 1024

    def sendto_gate(self, msg, addr):
        #
        # handle incoming netlink requests
        #
        # sendto_gate() receives single RTNL messages as objects
        #
        cmd = msg['header']['type']
        flags = msg['header']['flags']
        seq = msg['header']['sequence_number']
        # work only on dump requests for now
        if flags != NLM_F_REQUEST | NLM_F_DUMP:
            return
        #
        if cmd == RTM_GETLINK:
            rtype = RTM_NEWLINK
            ret = self.get_links()
        elif cmd == RTM_GETADDR:
            rtype = RTM_NEWADDR
            ret = self.get_addr()
        elif cmd == RTM_GETROUTE:
            rtype = RTM_NEWROUTE
            ret = self.get_routes()
        elif cmd == RTM_GETNEIGH:
            rtype = RTM_NEWNEIGH
            ret = self.get_neighbours()
        # NOTE(review): any other cmd leaves `rtype`/`ret` unbound and
        # raises UnboundLocalError below -- confirm it is unreachable
        #
        # set response type and finalize the message
        for r in ret:
            r['header']['type'] = rtype
            r['header']['flags'] = NLM_F_MULTI
            r['header']['sequence_number'] = seq
        #
        r = type(msg)()
        r['header']['type'] = NLMSG_DONE
        r['header']['sequence_number'] = seq
        ret.append(r)

        data = b''
        for r in ret:
            r.encode()
            data += r.data
        # NOTE(review): `self._outq` and `self._pfdw` are not defined
        # in this class -- presumably provided by a companion event
        # loop / mixin; verify before relying on this path
        self._outq.put(data)
        os.write(self._pfdw, b'\0')

    def _GetAdaptersInfo(self):
        # call WinAPI GetAdaptersInfo() and translate the adapter list
        # into RTNL messages: ifinfmsg for links, ifaddrmsg for addrs
        ret = {'interfaces': [], 'addresses': []}
        # prepare buffer; 15000 bytes is a fixed guess -- NOTE(review):
        # GetAdaptersInfo can report a required size via buf_len, the
        # return code is not checked here
        buf = ctypes.create_string_buffer(15000)
        buf_len = ctypes.c_ulong(15000)
        (
            ctypes.windll.iphlpapi.GetAdaptersInfo(
                ctypes.byref(buf), ctypes.byref(buf_len)
            )
        )
        adapter = IP_ADAPTER_INFO.from_address(ctypes.addressof(buf))
        while True:
            # first 6 octets of the hardware address as aa:bb:cc:...
            mac = ':'.join(['%02x' % x for x in adapter.Address][:6])
            ifname = ctypes.string_at(
                ctypes.addressof(adapter.AdapterName)
            ).decode('utf-8')
            spec = {
                'index': adapter.Index,
                'attrs': (['IFLA_ADDRESS', mac], ['IFLA_IFNAME', ifname]),
            }
            msg = ifinfmsg().load(spec)
            msg['header']['target'] = self.target
            msg['header']['type'] = RTM_NEWLINK
            del msg['value']
            ret['interfaces'].append(msg)

            # walk the linked list of addresses of this adapter
            ipaddr = adapter.IpAddressList
            while True:
                addr = ctypes.string_at(
                    ctypes.addressof(ipaddr.IpAddress)
                ).decode('utf-8')
                mask = ctypes.string_at(
                    ctypes.addressof(ipaddr.IpMask)
                ).decode('utf-8')
                spec = {
                    'index': adapter.Index,
                    'family': AF_INET,
                    'prefixlen': dqn2int(mask),
                    'attrs': (
                        ['IFA_ADDRESS', addr],
                        ['IFA_LOCAL', addr],
                        ['IFA_LABEL', ifname],
                    ),
                }
                msg = ifaddrmsg().load(spec)
                msg['header']['target'] = self.target
                msg['header']['type'] = RTM_NEWADDR
                del msg['value']
                ret['addresses'].append(msg)
                if ipaddr.Next:
                    ipaddr = ipaddr.Next.contents
                else:
                    break
            if adapter.Next:
                adapter = adapter.Next.contents
            else:
                break
        return ret

    def dump(self, groups=None):
        # yield all known RTNL objects: links, addresses, neighbours,
        # routes -- in that order
        for method in (
            self.get_links,
            self.get_addr,
            self.get_neighbours,
            self.get_routes,
        ):
            for msg in method():
                yield msg

    def get_links(self, *argv, **kwarg):
        '''
        Get network interfaces list::

            >>> pprint(ipr.get_links())
            [{'attrs': (['IFLA_ADDRESS', '52:54:00:7a:8a:49'],
                        ['IFLA_IFNAME',
                         '{F444467B-3549-455D-81F2-AB617C7421AB}']),
              'change': 0,
              'family': 0,
              'flags': 0,
              'header': {},
              'ifi_type': 0,
              'index': 7}]
        '''
        return self._GetAdaptersInfo()['interfaces']

    def get_addr(self, *argv, **kwarg):
        '''
        Get IP addresses::

            >>> pprint(ipr.get_addr())
            [{'attrs': (['IFA_ADDRESS', '192.168.122.81'],
                        ['IFA_LOCAL', '192.168.122.81'],
                        ['IFA_LABEL',
                         '{F444467B-3549-455D-81F2-AB617C7421AB}']),
              'family': <AddressFamily.AF_INET: 2>,
              'flags': 0,
              'header': {},
              'index': 7,
              'prefixlen': 24,
              'scope': 0}]
        '''
        return self._GetAdaptersInfo()['addresses']

    def get_neighbours(self, *argv, **kwarg):
        # not implemented on Windows yet -- always an empty list
        ret = []
        return ret

    def get_routes(self, *argv, **kwarg):
        # not implemented on Windows yet -- always an empty list
        ret = []
        return ret


class RawIPRoute(IPRoute):
    pass


class ChaoticIPRoute:
    def __init__(self, *argv, **kwarg):
        raise NotImplementedError()
pyroute2-0.7.11/pyroute2/ipset.py000066400000000000000000000571031455030217500166700ustar00rootroot00000000000000'''
ipset support.

This module is tested with hash:ip, hash:net, list:set and several
other ipset structures (like hash:net,iface). There is no guarantee
that this module is working with all available ipset modules.
It supports almost all kernel commands (create, destroy, flush,
rename, swap, test...)
'''

import errno
import socket

from pyroute2.common import basestring
from pyroute2.netlink import (
    NETLINK_NETFILTER,
    NLM_F_ACK,
    NLM_F_DUMP,
    NLM_F_EXCL,
    NLM_F_REQUEST,
    NLMSG_ERROR,
)
from pyroute2.netlink.exceptions import IPSetError, NetlinkError
from pyroute2.netlink.nfnetlink import NFNL_SUBSYS_IPSET
from pyroute2.netlink.nfnetlink.ipset import (
    IPSET_CMD_ADD,
    IPSET_CMD_CREATE,
    IPSET_CMD_DEL,
    IPSET_CMD_DESTROY,
    IPSET_CMD_FLUSH,
    IPSET_CMD_GET_BYINDEX,
    IPSET_CMD_GET_BYNAME,
    IPSET_CMD_HEADER,
    IPSET_CMD_LIST,
    IPSET_CMD_PROTOCOL,
    IPSET_CMD_RENAME,
    IPSET_CMD_SWAP,
    IPSET_CMD_TEST,
    IPSET_CMD_TYPE,
    IPSET_ERR_BUSY,
    IPSET_ERR_COMMENT,
    IPSET_ERR_COUNTER,
    IPSET_ERR_EXIST,
    IPSET_ERR_EXIST_SETNAME2,
    IPSET_ERR_FIND_TYPE,
    IPSET_ERR_INVALID_CIDR,
    IPSET_ERR_INVALID_FAMILY,
    IPSET_ERR_INVALID_MARKMASK,
    IPSET_ERR_INVALID_NETMASK,
    IPSET_ERR_IPADDR_IPV4,
    IPSET_ERR_IPADDR_IPV6,
    IPSET_ERR_MAX_SETS,
    IPSET_ERR_PROTOCOL,
    IPSET_ERR_REFERENCED,
    IPSET_ERR_SKBINFO,
    IPSET_ERR_TIMEOUT,
    IPSET_ERR_TYPE_MISMATCH,
    IPSET_FLAG_IFACE_WILDCARD,
    IPSET_FLAG_PHYSDEV,
    IPSET_FLAG_WITH_COMMENT,
    IPSET_FLAG_WITH_COUNTERS,
    IPSET_FLAG_WITH_FORCEADD,
    IPSET_FLAG_WITH_SKBINFO,
    ipset_msg,
)
from pyroute2.netlink.nlsocket import NetlinkSocket


def _nlmsg_error(msg):
    # True if the message is a netlink error message; used as the
    # `terminate` predicate of nlm_request()
    return msg['header']['type'] == NLMSG_ERROR


class PortRange(object):
    """A simple container for port range with optional protocol

    Note that optional protocol parameter is not supported by all
    kernel ipset modules using ports. On the other hand, it's
    sometimes mandatory to set it (like for hash:net,port ipsets)

    Example::

        udp_proto = socket.getprotobyname("udp")
        port_range = PortRange(1000, 2000, protocol=udp_proto)
        ipset.create("foo", stype="hash:net,port")
        ipset.add("foo", ("192.0.2.0/24", port_range), etype="net,port")
        ipset.test("foo", ("192.0.2.0/24", port_range), etype="net,port")
    """

    def __init__(self, begin, end, protocol=None):
        self.begin = begin
        self.end = end
        self.protocol = protocol


class PortEntry(object):
    """A simple container for port entry with optional protocol"""

    def __init__(self, port, protocol=None):
        self.port = port
        self.protocol = protocol


class IPSet(NetlinkSocket):
    '''
    NFNetlink socket (family=NETLINK_NETFILTER).

    Implements API to the ipset functionality.
    '''

    # netlink message policy: which parser to use per command
    policy = {
        IPSET_CMD_PROTOCOL: ipset_msg,
        IPSET_CMD_LIST: ipset_msg,
        IPSET_CMD_TYPE: ipset_msg,
        IPSET_CMD_HEADER: ipset_msg,
        IPSET_CMD_GET_BYNAME: ipset_msg,
        IPSET_CMD_GET_BYINDEX: ipset_msg,
    }

    # maps entry-type tokens to NLA names; tuple keys carry the
    # 1-based position of the IP within a multi-dimensional entry
    attr_map = {
        'iface': 'IPSET_ATTR_IFACE',
        'mark': 'IPSET_ATTR_MARK',
        'set': 'IPSET_ATTR_NAME',
        'mac': 'IPSET_ATTR_ETHER',
        'port': 'IPSET_ATTR_PORT',
        ('ip_from', 1): 'IPSET_ATTR_IP_FROM',
        ('ip_from', 2): 'IPSET_ATTR_IP2',
        ('cidr', 1): 'IPSET_ATTR_CIDR',
        ('cidr', 2): 'IPSET_ATTR_CIDR2',
        ('ip_to', 1): 'IPSET_ATTR_IP_TO',
        ('ip_to', 2): 'IPSET_ATTR_IP2_TO',
    }

    def __init__(self, version=None, attr_revision=None, nfgen_family=2):
        super(IPSet, self).__init__(family=NETLINK_NETFILTER)
        policy = dict(
            [
                (x | (NFNL_SUBSYS_IPSET << 8), y)
                for (x, y) in self.policy.items()
            ]
        )
        self.register_policy(policy)
        self._nfgen_family = nfgen_family
        if version is None:
            # probe the kernel for the supported protocol version
            msg = self.get_proto_version()
            version = msg[0].get_attr('IPSET_ATTR_PROTOCOL')
        self._proto_version = version
        self._attr_revision = attr_revision

    def request(
        self,
        msg,
        msg_type,
        msg_flags=NLM_F_REQUEST | NLM_F_DUMP,
        terminate=None,
    ):
        # send one ipset request and collect replies, translating
        # NetlinkError into the ipset-specific exception
        msg['nfgen_family'] = self._nfgen_family
        try:
            return tuple(
                self.nlm_request(
                    msg,
                    msg_type | (NFNL_SUBSYS_IPSET << 8),
                    msg_flags,
                    terminate=terminate,
                )
            )
        except NetlinkError as err:
            raise _IPSetError(err.code, cmd=msg_type)

    def headers(self, name, **kwargs):
        '''
        Get headers of the named ipset. It can be used to test if one
        ipset exists, since it returns a no such file or directory.
        '''
        return self._list_or_headers(IPSET_CMD_HEADER, name=name, **kwargs)

    def get_proto_version(self, version=6):
        '''
        Get supported protocol version by kernel.

        version parameter allow to set mandatory (but unused?)
        IPSET_ATTR_PROTOCOL netlink attribute in the request.
        '''
        msg = ipset_msg()
        msg['attrs'] = [['IPSET_ATTR_PROTOCOL', version]]
        return self.request(msg, IPSET_CMD_PROTOCOL)

    def list(self, *argv, **kwargs):
        '''
        List installed ipsets. If `name` is provided, list
        the named ipset or return an empty list.

        Be warned: netlink does not return an error if given name does
        not exit, you will receive an empty list.
        '''
        if argv:
            kwargs['name'] = argv[0]
        return self._list_or_headers(IPSET_CMD_LIST, **kwargs)

    def _list_or_headers(self, cmd, name=None, flags=None):
        # shared implementation of list() and headers()
        msg = ipset_msg()
        msg['attrs'] = [['IPSET_ATTR_PROTOCOL', self._proto_version]]
        if name is not None:
            msg['attrs'].append(['IPSET_ATTR_SETNAME', name])
        if flags is not None:
            msg['attrs'].append(['IPSET_ATTR_FLAGS', flags])
        return self.request(msg, cmd)

    def destroy(self, name=None):
        '''
        Destroy one (when name is set) or all ipset (when name is None)
        '''
        msg = ipset_msg()
        msg['attrs'] = [['IPSET_ATTR_PROTOCOL', self._proto_version]]
        if name is not None:
            msg['attrs'].append(['IPSET_ATTR_SETNAME', name])
        return self.request(
            msg,
            IPSET_CMD_DESTROY,
            msg_flags=NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL,
            terminate=_nlmsg_error,
        )

    def create(
        self,
        name,
        stype='hash:ip',
        family=socket.AF_INET,
        exclusive=True,
        counters=False,
        comment=False,
        maxelem=None,
        forceadd=False,
        hashsize=None,
        timeout=None,
        bitmap_ports_range=None,
        size=None,
        skbinfo=False,
    ):
        '''
        Create an ipset `name` of type `stype`, by default
        `hash:ip`.

        Common ipset options are supported:

        * exclusive -- if set, raise an error if the ipset exists
        * counters -- enable data/packets counters
        * comment -- enable comments capability
        * maxelem -- max size of the ipset
        * forceadd -- you should refer to the ipset manpage
        * hashsize -- size of the hashtable (if any)
        * timeout -- enable and set a default value for entries
          (if not None)
        * bitmap_ports_range -- set the specified inclusive portrange
          for the bitmap ipset structure (0, 65536)
        * size -- Size of the list:set, the default is 8
        * skbinfo -- enable skbinfo capability
        '''
        excl_flag = NLM_F_EXCL if exclusive else 0
        msg = ipset_msg()
        # accumulate capability flags into one CADT bitmask
        cadt_flags = 0
        if counters:
            cadt_flags |= IPSET_FLAG_WITH_COUNTERS
        if comment:
            cadt_flags |= IPSET_FLAG_WITH_COMMENT
        if forceadd:
            cadt_flags |= IPSET_FLAG_WITH_FORCEADD
        if skbinfo:
            cadt_flags |= IPSET_FLAG_WITH_SKBINFO
        if stype == 'bitmap:port' and bitmap_ports_range is None:
            raise ValueError('Missing value bitmap_ports_range')
        data = {'attrs': []}
        if cadt_flags:
            data['attrs'] += [['IPSET_ATTR_CADT_FLAGS', cadt_flags]]
        if maxelem is not None:
            data['attrs'] += [['IPSET_ATTR_MAXELEM', maxelem]]
        if hashsize is not None:
            data['attrs'] += [["IPSET_ATTR_HASHSIZE", hashsize]]
        elif size is not None and stype == 'list:set':
            data['attrs'] += [['IPSET_ATTR_SIZE', size]]
        if timeout is not None:
            data['attrs'] += [["IPSET_ATTR_TIMEOUT", timeout]]
        if bitmap_ports_range is not None and stype == 'bitmap:port':
            # Set the bitmap range A bitmap type of set
            # can store up to 65536 entries
            if isinstance(bitmap_ports_range, PortRange):
                data['attrs'] += [
                    ['IPSET_ATTR_PORT_FROM', bitmap_ports_range.begin]
                ]
                data['attrs'] += [
                    ['IPSET_ATTR_PORT_TO', bitmap_ports_range.end]
                ]
            else:
                # plain (begin, end) sequence
                data['attrs'] += [
                    ['IPSET_ATTR_PORT_FROM', bitmap_ports_range[0]]
                ]
                data['attrs'] += [
                    ['IPSET_ATTR_PORT_TO', bitmap_ports_range[1]]
                ]
        if self._attr_revision is None:
            # Get the last revision supported by kernel
            revision = self.get_supported_revisions(stype)[1]
        else:
            revision = self._attr_revision
        msg['attrs'] = [
            ['IPSET_ATTR_PROTOCOL', self._proto_version],
            ['IPSET_ATTR_SETNAME', name],
            ['IPSET_ATTR_TYPENAME', stype],
            ['IPSET_ATTR_FAMILY', family],
            ['IPSET_ATTR_REVISION', revision],
            ["IPSET_ATTR_DATA", data],
        ]
        return self.request(
            msg,
            IPSET_CMD_CREATE,
            msg_flags=NLM_F_REQUEST | NLM_F_ACK | excl_flag,
            terminate=_nlmsg_error,
        )

    @staticmethod
    def _family_to_version(family):
        # translate a socket address family into the matching
        # nested-address NLA name; AF_UNSPEC means "no address"
        if family is not None:
            if family == socket.AF_INET:
                return 'IPSET_ATTR_IPADDR_IPV4'
            elif family == socket.AF_INET6:
                return 'IPSET_ATTR_IPADDR_IPV6'
            elif family == socket.AF_UNSPEC:
                return None
            raise TypeError('unknown family')

    def _entry_to_data_attrs(self, entry, etype, ip_version):
        # encode one user-supplied entry into a list of NLA pairs,
        # according to the comma-separated entry type `etype`
        attrs = []
        ip_count = 0

        if etype == 'set':
            attrs += [['IPSET_ATTR_NAME', entry]]
            return attrs

        # We support string (for one element, and for users calling this
        # function like a command line), and tuple/list
        if isinstance(entry, basestring):
            entry = entry.split(',')
        if isinstance(entry, (int, PortRange, PortEntry)):
            entry = [entry]

        for e, t in zip(entry, etype.split(',')):
            if t in ('ip', 'net'):
                ip_count += 1
                if t == 'net':
                    if '/' in e:
                        # CIDR notation: "addr/prefixlen"
                        e, cidr = e.split('/')
                        attrs += [
                            [self.attr_map[('cidr', ip_count)], int(cidr)]
                        ]
                    elif '-' in e:
                        # range notation: "addr_from-addr_to"
                        e, to = e.split('-')
                        attrs += [
                            [
                                self.attr_map[('ip_to', ip_count)],
                                {'attrs': [[ip_version, to]]},
                            ]
                        ]
                attrs += [
                    [
                        self.attr_map[('ip_from', ip_count)],
                        {'attrs': [[ip_version, e]]},
                    ]
                ]
            elif t == "port":
                if isinstance(e, PortRange):
                    attrs += [['IPSET_ATTR_PORT_FROM', e.begin]]
                    attrs += [['IPSET_ATTR_PORT_TO', e.end]]
                    if e.protocol is not None:
                        attrs += [['IPSET_ATTR_PROTO', e.protocol]]
                elif isinstance(e, PortEntry):
                    attrs += [['IPSET_ATTR_PORT', e.port]]
                    if e.protocol is not None:
                        attrs += [['IPSET_ATTR_PROTO', e.protocol]]
                else:
                    attrs += [[self.attr_map[t], e]]
            else:
                attrs += [[self.attr_map[t], e]]

        return attrs

    def _add_delete_test(
        self,
        name,
        entry,
        family,
        cmd,
        exclusive,
        comment=None,
        timeout=None,
        etype="ip",
        packets=None,
        bytes=None,
        skbmark=None,
        skbprio=None,
        skbqueue=None,
        wildcard=False,
        physdev=False,
    ):
        # shared implementation for add(), delete() and test():
        # build the IPSET_ATTR_DATA payload and send `cmd`
        excl_flag = NLM_F_EXCL if exclusive else 0
        adt_flags = 0
        if wildcard:
            adt_flags |= IPSET_FLAG_IFACE_WILDCARD
        if physdev:
            adt_flags |= IPSET_FLAG_PHYSDEV

        ip_version = self._family_to_version(family)
        data_attrs = self._entry_to_data_attrs(entry, etype, ip_version)
        if comment is not None:
            data_attrs += [
                ["IPSET_ATTR_COMMENT", comment],
                ["IPSET_ATTR_CADT_LINENO", 0],
            ]
        if timeout is not None:
            data_attrs += [["IPSET_ATTR_TIMEOUT", timeout]]
        if bytes is not None:
            data_attrs += [["IPSET_ATTR_BYTES", bytes]]
        if packets is not None:
            data_attrs += [["IPSET_ATTR_PACKETS", packets]]
        if skbmark is not None:
            data_attrs += [["IPSET_ATTR_SKBMARK", skbmark]]
        if skbprio is not None:
            data_attrs += [["IPSET_ATTR_SKBPRIO", skbprio]]
        if skbqueue is not None:
            data_attrs += [["IPSET_ATTR_SKBQUEUE", skbqueue]]
        if adt_flags:
            data_attrs += [["IPSET_ATTR_CADT_FLAGS", adt_flags]]
        msg = ipset_msg()
        msg['attrs'] = [
            ['IPSET_ATTR_PROTOCOL', self._proto_version],
            ['IPSET_ATTR_SETNAME', name],
            ['IPSET_ATTR_DATA', {'attrs': data_attrs}],
        ]

        return self.request(
            msg,
            cmd,
            msg_flags=NLM_F_REQUEST | NLM_F_ACK | excl_flag,
            terminate=_nlmsg_error,
        )

    def add(
        self,
        name,
        entry,
        family=socket.AF_INET,
        exclusive=True,
        comment=None,
        timeout=None,
        etype="ip",
        skbmark=None,
        skbprio=None,
        skbqueue=None,
        wildcard=False,
        **kwargs
    ):
        '''
        Add a member to the ipset.

        etype is the entry type that you add to the ipset. It's related
        to the ipset type. For example, use "ip" for one hash:ip or
        bitmap:ip ipset.

        When your ipset store a tuple, like "hash:net,iface", you must
        use a comma a separator (etype="net,iface")

        entry is a string for "ip" and "net" objects. For ipset with
        several dimensions, you must use a tuple (or a list) of objects.

        "port" type is specific, since you can use integer of
        specialized containers like :class:`PortEntry` and
        :class:`PortRange`

        Examples::

            ipset = IPSet()
            ipset.create("foo", stype="hash:ip")
            ipset.add("foo", "198.51.100.1", etype="ip")

            ipset = IPSet()
            ipset.create("bar", stype="bitmap:port",
                         bitmap_ports_range=(1000, 2000))
            ipset.add("bar", 1001, etype="port")
            ipset.add("bar", PortRange(1500, 2000), etype="port")

            ipset = IPSet()
            import socket
            protocol = socket.getprotobyname("tcp")
            ipset.create("foobar", stype="hash:net,port")
            port_entry = PortEntry(80, protocol=protocol)
            ipset.add("foobar", ("198.51.100.0/24", port_entry),
                      etype="net,port")

        wildcard option enable kernel wildcard matching on interface
        name for net,iface entries.
        '''
        return self._add_delete_test(
            name,
            entry,
            family,
            IPSET_CMD_ADD,
            exclusive,
            comment=comment,
            timeout=timeout,
            etype=etype,
            skbmark=skbmark,
            skbprio=skbprio,
            skbqueue=skbqueue,
            wildcard=wildcard,
            **kwargs
        )

    def delete(
        self, name, entry, family=socket.AF_INET, exclusive=True, etype="ip"
    ):
        '''
        Delete a member from the ipset.

        See :func:`add` method for more information on etype.
        '''
        return self._add_delete_test(
            name, entry, family, IPSET_CMD_DEL, exclusive, etype=etype
        )

    def test(self, name, entry, family=socket.AF_INET, etype="ip"):
        '''
        Test if entry is part of an ipset

        See :func:`add` method for more information on etype.
        '''
        try:
            self._add_delete_test(
                name, entry, family, IPSET_CMD_TEST, False, etype=etype
            )
            return True
        except IPSetError as e:
            # IPSET_ERR_EXIST here means "entry not in the set"
            if e.code == IPSET_ERR_EXIST:
                return False
            raise e

    def swap(self, set_a, set_b):
        '''
        Swap two ipsets. They must have compatible content type.
        '''
        msg = ipset_msg()
        # NOTE(review): the second set name travels in the
        # IPSET_ATTR_TYPENAME slot -- the kernel's IPSET_ATTR_SETNAME2
        # presumably shares the attribute id; verify against the
        # nfnetlink ipset headers
        msg['attrs'] = [
            ['IPSET_ATTR_PROTOCOL', self._proto_version],
            ['IPSET_ATTR_SETNAME', set_a],
            ['IPSET_ATTR_TYPENAME', set_b],
        ]
        return self.request(
            msg,
            IPSET_CMD_SWAP,
            msg_flags=NLM_F_REQUEST | NLM_F_ACK,
            terminate=_nlmsg_error,
        )

    def flush(self, name=None):
        '''
        Flush all ipsets. When name is set, flush only this ipset.
        '''
        msg = ipset_msg()
        msg['attrs'] = [['IPSET_ATTR_PROTOCOL', self._proto_version]]
        if name is not None:
            msg['attrs'].append(['IPSET_ATTR_SETNAME', name])
        return self.request(
            msg,
            IPSET_CMD_FLUSH,
            msg_flags=NLM_F_REQUEST | NLM_F_ACK,
            terminate=_nlmsg_error,
        )

    def rename(self, name_src, name_dst):
        '''
        Rename the ipset.
        '''
        msg = ipset_msg()
        # NOTE(review): as in swap(), the destination name goes into
        # the IPSET_ATTR_TYPENAME slot -- verify the attr id aliasing
        msg['attrs'] = [
            ['IPSET_ATTR_PROTOCOL', self._proto_version],
            ['IPSET_ATTR_SETNAME', name_src],
            ['IPSET_ATTR_TYPENAME', name_dst],
        ]
        return self.request(
            msg,
            IPSET_CMD_RENAME,
            msg_flags=NLM_F_REQUEST | NLM_F_ACK,
            terminate=_nlmsg_error,
        )

    def _get_set_by(self, cmd, value):
        # shared implementation of get_set_byname()/get_set_byindex()
        # Check that IPSet version is supported
        if self._proto_version < 7:
            raise NotImplementedError()

        msg = ipset_msg()
        if cmd == IPSET_CMD_GET_BYNAME:
            msg['attrs'] = [
                ['IPSET_ATTR_PROTOCOL', self._proto_version],
                ['IPSET_ATTR_SETNAME', value],
            ]
        if cmd == IPSET_CMD_GET_BYINDEX:
            msg['attrs'] = [
                ['IPSET_ATTR_PROTOCOL', self._proto_version],
                ['IPSET_ATTR_INDEX', value],
            ]
        return self.request(msg, cmd)

    def get_set_byname(self, name):
        '''
        Get a set by its name
        '''
        return self._get_set_by(IPSET_CMD_GET_BYNAME, name)

    def get_set_byindex(self, index):
        '''
        Get a set by its index
        '''
        return self._get_set_by(IPSET_CMD_GET_BYINDEX, index)

    def get_supported_revisions(self, stype, family=socket.AF_INET):
        '''
        Return minimum and maximum of revisions supported by the kernel.

        Each ipset module (like hash:net, hash:ip, etc) has several
        revisions. Newer revisions often have more features or more
        performances. Thanks to this call, you can ask the kernel
        the list of supported revisions.

        You can manually set/force revisions used in IPSet constructor.
Example:: ipset = IPSet() ipset.get_supported_revisions("hash:net") ipset.get_supported_revisions("hash:net,port,net") ''' msg = ipset_msg() msg['attrs'] = [ ['IPSET_ATTR_PROTOCOL', self._proto_version], ['IPSET_ATTR_TYPENAME', stype], ['IPSET_ATTR_FAMILY', family], ] response = self.request( msg, IPSET_CMD_TYPE, msg_flags=NLM_F_REQUEST | NLM_F_ACK, terminate=_nlmsg_error, ) min_revision = response[0].get_attr("IPSET_ATTR_PROTOCOL_MIN") max_revision = response[0].get_attr("IPSET_ATTR_REVISION") return min_revision, max_revision class _IPSetError(IPSetError): ''' Proxy class to not import all specifics ipset code in exceptions.py Out of the ipset module, a caller should use parent class instead ''' def __init__(self, code, msg=None, cmd=None): if code in self.base_map: msg = self.base_map[code] elif cmd in self.cmd_map: error_map = self.cmd_map[cmd] if code in error_map: msg = error_map[code] super(_IPSetError, self).__init__(code, msg) base_map = { IPSET_ERR_PROTOCOL: "Kernel error received:" " ipset protocol error", IPSET_ERR_INVALID_CIDR: "The value of the CIDR parameter of" " the IP address is invalid", IPSET_ERR_TIMEOUT: "Timeout cannot be used: set was created" " without timeout support", IPSET_ERR_IPADDR_IPV4: "An IPv4 address is expected, but" " not received", IPSET_ERR_IPADDR_IPV6: "An IPv6 address is expected, but" " not received", IPSET_ERR_COUNTER: "Packet/byte counters cannot be used:" " set was created without counter support", IPSET_ERR_COMMENT: "Comment string is too long!", IPSET_ERR_SKBINFO: "Skbinfo mapping cannot be used: " " set was created without skbinfo support", } c_map = { errno.EEXIST: "Set cannot be created: set with the same" " name already exists", IPSET_ERR_FIND_TYPE: "Kernel error received: " "set type not supported", IPSET_ERR_MAX_SETS: "Kernel error received: maximal number of" " sets reached, cannot create more.", IPSET_ERR_INVALID_NETMASK: "The value of the netmask parameter" " is invalid", IPSET_ERR_INVALID_MARKMASK: "The value 
of the markmask parameter" " is invalid", IPSET_ERR_INVALID_FAMILY: "Protocol family not supported by the" " set type", } destroy_map = { IPSET_ERR_BUSY: "Set cannot be destroyed: it is in use" " by a kernel component" } r_map = { IPSET_ERR_EXIST_SETNAME2: "Set cannot be renamed: a set with the" " new name already exists", IPSET_ERR_REFERENCED: "Set cannot be renamed: it is in use by" " another system", } s_map = { IPSET_ERR_EXIST_SETNAME2: "Sets cannot be swapped: the second set" " does not exist", IPSET_ERR_TYPE_MISMATCH: "The sets cannot be swapped: their type" " does not match", } a_map = { IPSET_ERR_EXIST: "Element cannot be added to the set: it's" " already added" } del_map = { IPSET_ERR_EXIST: "Element cannot be deleted from the set:" " it's not added" } cmd_map = { IPSET_CMD_CREATE: c_map, IPSET_CMD_DESTROY: destroy_map, IPSET_CMD_RENAME: r_map, IPSET_CMD_SWAP: s_map, IPSET_CMD_ADD: a_map, IPSET_CMD_DEL: del_map, } pyroute2-0.7.11/pyroute2/iwutil.py000066400000000000000000000555431455030217500170670ustar00rootroot00000000000000# -*- coding: utf-8 -*- ''' IW module ========= Experimental wireless module — nl80211 support. Disclaimer ---------- Unlike IPRoute, which is mostly usable, though is far from complete yet, the IW module is in the very initial state. Neither the module itself, nor the message class cover the nl80211 functionality reasonably enough. So if you're going to use it, brace yourself — debug is coming. Messages -------- nl80211 messages are defined here:: pyroute2/netlink/nl80211/__init__.py Pls notice NLAs of type `hex`. On the early development stage `hex` allows to inspect incoming data as a hex dump and, occasionally, even make requests with such NLAs. But it's not a production way. The type `hex` in the NLA definitions means that this particular NLA is not handled yet properly. If you want to use some NLA which is defined as `hex` yet, pls find out a specific type, patch the message class and submit your pull request on github. 
If you're not familiar with NLA types, take a look at RTNL definitions:: pyroute2/netlink/rtnl/ndmsg.py and so on. Communication with the kernel ----------------------------- There are several methods of the communication with the kernel. * `sendto()` — lowest possible, send a raw binary data * `put()` — send a netlink message * `nlm_request()` — send a message, return the response * `get()` — get a netlink message * `recv()` — get a raw binary data from the kernel There are no errors on `put()` usually. Any `permission denied`, any `invalid value` errors are returned from the kernel with netlink also. So if you do `put()`, but don't do `get()`, be prepared to miss errors. The preferred method for the communication is `nlm_request()`. It tracks the message ID, returns the corresponding response. In the case of errors `nlm_request()` raises an exception. To get the response on any operation with nl80211, use flag `NLM_F_ACK`. Reverse it ---------- If you're too lazy to read the kernel sources, but still need something not implemented here, you can use reverse engineering on a reference implementation. E.g.:: # strace -e trace=network -f -x -s 4096 \\ iw phy phy0 interface add test type monitor Will dump all the netlink traffic between the program `iw` and the kernel. Three first packets are the generic netlink protocol discovery, you can ignore them. All that follows, is the nl80211 traffic:: sendmsg(3, {msg_name(12)={sa_family=AF_NETLINK, ... }, msg_iov(1)=[{"\\x30\\x00\\x00\\x00\\x1b\\x00\\x05 ...", 48}], msg_controllen=0, msg_flags=0}, 0) = 48 recvmsg(3, {msg_name(12)={sa_family=AF_NETLINK, ... }, msg_iov(1)=[{"\\x58\\x00\\x00\\x00\\x1b\\x00\\x00 ...", 16384}], msg_controllen=0, msg_flags=0}, 0) = 88 ... With `-s 4096` you will get the full dump. 
Then copy the strings from `msg_iov` to a file, let's say `data`, and run the decoder:: $ pwd /home/user/Projects/pyroute2 $ export PYTHONPATH=`pwd` $ python scripts/decoder.py pyroute2.netlink.nl80211.nl80211cmd data You will get the session decoded:: {'attrs': [['NL80211_ATTR_WIPHY', 0], ['NL80211_ATTR_IFNAME', 'test'], ['NL80211_ATTR_IFTYPE', 6]], 'cmd': 7, 'header': {'flags': 5, 'length': 48, 'pid': 3292542647, 'sequence_number': 1430426434, 'type': 27}, 'reserved': 0, 'version': 0} {'attrs': [['NL80211_ATTR_IFINDEX', 23811], ['NL80211_ATTR_IFNAME', 'test'], ['NL80211_ATTR_WIPHY', 0], ['NL80211_ATTR_IFTYPE', 6], ['NL80211_ATTR_WDEV', 4], ['NL80211_ATTR_MAC', 'a4:4e:31:43:1c:7c'], ['NL80211_ATTR_GENERATION', '02:00:00:00']], 'cmd': 7, 'header': {'flags': 0, 'length': 88, 'pid': 3292542647, 'sequence_number': 1430426434, 'type': 27}, 'reserved': 0, 'version': 1} Now you know, how to do a request and what you will get as a response. Sample collected data is in the `scripts` directory. Submit changes -------------- Please do not hesitate to submit the changes on github. Without your patches this module will not evolve. 
''' import logging from pyroute2.netlink import NLM_F_ACK, NLM_F_DUMP, NLM_F_REQUEST from pyroute2.netlink.nl80211 import ( BSS_STATUS_NAMES, CHAN_WIDTH, IFTYPE_NAMES, NL80211, NL80211_NAMES, SCAN_FLAGS_NAMES, nl80211cmd, ) log = logging.getLogger(__name__) class IW(NL80211): def __init__(self, *argv, **kwarg): # get specific groups kwarg if 'groups' in kwarg: groups = kwarg['groups'] del kwarg['groups'] else: groups = None # get specific async kwarg if 'async' in kwarg: # FIXME # raise deprecation error after 0.5.3 # log.warning( 'use "async_cache" instead of "async", ' '"async" is a keyword from Python 3.7' ) kwarg['async_cache'] = kwarg.pop('async') if 'async_cache' in kwarg: async_cache = kwarg.pop('async_cache') else: async_cache = False # align groups with async_cache if groups is None: groups = ~0 if async_cache else 0 # continue with init super(IW, self).__init__(*argv, **kwarg) # do automatic bind # FIXME: unfortunately we can not omit it here self.bind(groups, async_cache=async_cache) def del_interface(self, dev): ''' Delete a virtual interface - dev — device index ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_DEL_INTERFACE'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', dev]] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def add_interface(self, ifname, iftype, dev=None, phy=0): ''' Create a virtual interface - ifname — name of the interface to create - iftype — interface type to create - dev — device index - phy — phy index One should specify `dev` (device index) or `phy` (phy index). If no one specified, phy == 0. `iftype` can be integer or string: 1. adhoc 2. station 3. ap 4. ap_vlan 5. wds 6. monitor 7. mesh_point 8. p2p_client 9. p2p_go 10. p2p_device 11. 
ocb ''' # lookup the interface type iftype = IFTYPE_NAMES.get(iftype, iftype) if not isinstance(iftype, int): raise TypeError('iftype must be int') msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_NEW_INTERFACE'] msg['attrs'] = [ ['NL80211_ATTR_IFNAME', ifname], ['NL80211_ATTR_IFTYPE', iftype], ] if dev is not None: msg['attrs'].append(['NL80211_ATTR_IFINDEX', dev]) elif phy is not None: msg['attrs'].append(['NL80211_ATTR_WIPHY', phy]) else: raise TypeError('no device specified') self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def list_dev(self): ''' Get list of all wifi network interfaces ''' return self.get_interfaces_dump() def list_wiphy(self): ''' Get list of all phy devices ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_WIPHY'] return self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) def _get_phy_name(self, attr): return 'phy%i' % attr.get_attr('NL80211_ATTR_WIPHY') def _get_frequency(self, attr): return attr.get_attr('NL80211_ATTR_WIPHY_FREQ') or 0 def get_interfaces_dict(self): ''' Get interfaces dictionary ''' ret = {} for wif in self.get_interfaces_dump(): chan_width = wif.get_attr('NL80211_ATTR_CHANNEL_WIDTH') freq = self._get_frequency(wif) if chan_width is not None else 0 wifname = wif.get_attr('NL80211_ATTR_IFNAME') ret[wifname] = [ wif.get_attr('NL80211_ATTR_IFINDEX'), self._get_phy_name(wif), wif.get_attr('NL80211_ATTR_MAC'), freq, chan_width, ] return ret def get_interfaces_dump(self): ''' Get interfaces dump ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_INTERFACE'] return self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) def get_interface_by_phy(self, attr): ''' Get interface by phy ( use x.get_attr('NL80211_ATTR_WIPHY') ) ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_INTERFACE'] msg['attrs'] = [['NL80211_ATTR_WIPHY', attr]] return self.nlm_request( msg, msg_type=self.prid, 
msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) def get_interface_by_ifindex(self, ifindex): ''' Get interface by ifindex ( use x.get_attr('NL80211_ATTR_IFINDEX') ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_INTERFACE'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] return self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST ) def get_stations(self, ifindex): ''' Get stations by ifindex ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_STATION'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] return self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) def join_ibss( self, ifindex, ssid, freq, bssid=None, channel_fixed=False, width=None, center=None, center2=None, ): ''' Connect to network by ssid - ifindex - IFINDEX of the interface to perform the connection - ssid - Service set identification - freq - Frequency in MHz - bssid - The MAC address of target interface - channel_fixed: Boolean flag - width - Channel width - center - Central frequency of the 40/80/160 MHz channel - center2 - Center frequency of second segment if 80P80 If the flag of channel_fixed is True, one should specify both the width and center of the channel `width` can be integer of string: 0. 20_noht 1. 20 2. 40 3. 80 4. 80p80 5. 160 6. 5 7. 
10 ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_JOIN_IBSS'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', ifindex], ['NL80211_ATTR_SSID', ssid], ['NL80211_ATTR_WIPHY_FREQ', freq], ] if channel_fixed: msg['attrs'].append(['NL80211_ATTR_FREQ_FIXED', None]) width = CHAN_WIDTH.get(width, width) if not isinstance(width, int): raise TypeError('width must be int') if width in [2, 3, 5] and center: msg['attrs'].append(['NL80211_ATTR_CHANNEL_WIDTH', width]) msg['attrs'].append(['NL80211_ATTR_CENTER_FREQ1', center]) elif width == 4 and center and center2: msg['attrs'].append(['NL80211_ATTR_CHANNEL_WIDTH', width]) msg['attrs'].append(['NL80211_ATTR_CENTER_FREQ1', center]) msg['attrs'].append(['NL80211_ATTR_CENTER_FREQ2', center2]) elif width in [0, 1, 6, 7]: msg['attrs'].append(['NL80211_ATTR_CHANNEL_WIDTH', width]) else: raise TypeError('No channel specified') if bssid is not None: msg['attrs'].append(['NL80211_ATTR_MAC', bssid]) self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def leave_ibss(self, ifindex): ''' Leave the IBSS -- the IBSS is determined by the network interface ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_LEAVE_IBSS'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def authenticate(self, ifindex, bssid, ssid, freq, auth_type=0): ''' Send an Authentication management frame. ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_AUTHENTICATE'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', ifindex], ['NL80211_ATTR_MAC', bssid], ['NL80211_ATTR_SSID', ssid], ['NL80211_ATTR_WIPHY_FREQ', freq], ['NL80211_ATTR_AUTH_TYPE', auth_type], ] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def deauthenticate(self, ifindex, bssid, reason_code=0x01): ''' Send a Deauthentication management frame. 
''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_DEAUTHENTICATE'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', ifindex], ['NL80211_ATTR_MAC', bssid], ['NL80211_ATTR_REASON_CODE', reason_code], ] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def associate(self, ifindex, bssid, ssid, freq, info_elements=None): ''' Send an Association request frame. ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_ASSOCIATE'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', ifindex], ['NL80211_ATTR_MAC', bssid], ['NL80211_ATTR_SSID', ssid], ['NL80211_ATTR_WIPHY_FREQ', freq], ] if info_elements is not None: msg['attrs'].append(['NL80211_ATTR_IE', info_elements]) self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def disassociate(self, ifindex, bssid, reason_code=0x03): ''' Send a Disassociation management frame. ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_DISASSOCIATE'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', ifindex], ['NL80211_ATTR_MAC', bssid], ['NL80211_ATTR_REASON_CODE', reason_code], ] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def connect(self, ifindex, ssid, bssid=None): ''' Connect to the ap with ssid and bssid ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_CONNECT'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', ifindex], ['NL80211_ATTR_SSID', ssid], ] if bssid is not None: msg['attrs'].append(['NL80211_ATTR_MAC', bssid]) self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def disconnect(self, ifindex): ''' Disconnect the device ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_DISCONNECT'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def survey(self, ifindex): ''' Return the survey info. 
''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_SURVEY'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] return self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) def scan(self, ifindex, ssids=None, flush_cache=False): ''' Trigger scan and get results. Triggering scan usually requires root, and can take a couple of seconds. ''' # Prepare a second netlink socket to get the scan results. # The issue is that the kernel can send the results notification # before we get answer for the NL80211_CMD_TRIGGER_SCAN nsock = NL80211() nsock.bind() nsock.add_membership('scan') # send scan request msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_TRIGGER_SCAN'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] # If a list of SSIDs is provided, active scanning should be performed if ssids is not None: if isinstance(ssids, list): msg['attrs'].append(['NL80211_ATTR_SCAN_SSIDS', ssids]) scan_flags = 0 if flush_cache: # Flush the cache before scanning scan_flags |= SCAN_FLAGS_NAMES['NL80211_SCAN_FLAG_FLUSH'] msg['attrs'].append(['NL80211_ATTR_SCAN_FLAGS', scan_flags]) self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) # monitor the results notification on the secondary socket scanResultNotFound = True while scanResultNotFound: listMsg = nsock.get() for msg in listMsg: if msg["event"] == "NL80211_CMD_NEW_SCAN_RESULTS": scanResultNotFound = False break # close the secondary socket nsock.close() # request the results msg2 = nl80211cmd() msg2['cmd'] = NL80211_NAMES['NL80211_CMD_GET_SCAN'] msg2['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] return self.nlm_request( msg2, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) def get_associated_bss(self, ifindex): ''' Returns the same info like scan() does, but only about the currently associated BSS. Unlike scan(), it returns immediately and doesn't require root. 
''' # When getting scan results without triggering scan first, # you'll always get the information about currently associated BSS # # However, it may return other BSS, if last scan wasn't very # long time go msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_SCAN'] msg['attrs'] = [['NL80211_ATTR_IFINDEX', ifindex]] res = self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) for x in res: attr_bss = x.get_attr('NL80211_ATTR_BSS') if attr_bss is not None: status = attr_bss.get_attr('NL80211_BSS_STATUS') if status in ( BSS_STATUS_NAMES['associated'], BSS_STATUS_NAMES['ibss_joined'], ): return x return None def get_regulatory_domain(self, attr=None): ''' Get regulatory domain information. If attr specified, get regulatory domain information for this device ( use x.get_attr('NL80211_ATTR_WIPHY') ). ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_GET_REG'] flags = NLM_F_REQUEST if attr is None: flags |= NLM_F_DUMP else: msg['attrs'] = [['NL80211_ATTR_WIPHY', attr]] return self.nlm_request(msg, msg_type=self.prid, msg_flags=flags) def set_regulatory_domain(self, alpha2): ''' Set regulatory domain. ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_REQ_SET_REG'] msg['attrs'] = [['NL80211_ATTR_REG_ALPHA2', alpha2]] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def set_tx_power(self, dev, mode, mbm=None): ''' Set TX power of interface. 
- dev — device index - mode — TX power setting (0 - auto, 1 - limit, 2 - fixed) - mbm — TX power in mBm (dBm * 100) ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_SET_WIPHY'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', dev], ['NL80211_ATTR_WIPHY_TX_POWER_SETTING', mode], ] if mbm is not None: msg['attrs'].append(['NL80211_ATTR_WIPHY_TX_POWER_LEVEL', mbm]) self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def set_wiphy_netns_by_pid(self, wiphy, pid): ''' Set wiphy network namespace to process network namespace. ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_SET_WIPHY_NETNS'] msg['attrs'] = [ ['NL80211_ATTR_WIPHY', wiphy], ['NL80211_ATTR_PID', pid], ] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def set_wiphy_netns_by_fd(self, wiphy, netns_fd): ''' Set wiphy network namespace to namespace referenced by fd. ''' msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_SET_WIPHY_NETNS'] msg['attrs'] = [ ['NL80211_ATTR_WIPHY', wiphy], ['NL80211_ATTR_NETNS_FD', netns_fd], ] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def set_interface_type(self, ifindex, iftype): ''' Set interface type - ifindex — device index - iftype — interface type `iftype` can be integer or string: 1. adhoc 2. station 3. ap 4. ap_vlan 5. wds 6. monitor 7. mesh_point 8. p2p_client 9. p2p_go 10. p2p_device 11. 
ocb ''' iftype = IFTYPE_NAMES.get(iftype, iftype) if not isinstance(iftype, int): raise TypeError('iftype must be int') msg = nl80211cmd() msg['cmd'] = NL80211_NAMES['NL80211_CMD_SET_INTERFACE'] msg['attrs'] = [ ['NL80211_ATTR_IFINDEX', ifindex], ['NL80211_ATTR_IFTYPE', iftype], ] self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK ) def get_interface_type(self, ifindex) -> str: ''' return interface type name ''' dump = self.get_interface_by_ifindex(ifindex) type = None for d in dump: type = d.get_attr('NL80211_ATTR_IFTYPE') if type is not None: for key, value in IFTYPE_NAMES.items(): if value == type: res = key else: res = 'Not Found Type' return res pyroute2-0.7.11/pyroute2/lab.py000066400000000000000000000016761455030217500163060ustar00rootroot00000000000000import inspect try: from unittest import mock except ImportError: mock = None registry = [] use_mock = False class LAB_API: def __init__(self, *argv, **kwarg): super().__init__(*argv, **kwarg) if use_mock: if mock is None: # postpone ImportError # # unittest may not be available on embedded platforms, # but it is still used by IPRoute class; it is safe # to leave it in the minimal for now, just raise an # exception when being used # # Bug-Url: https://github.com/svinota/pyroute2/pull/1096 raise ImportError('unittest.mock not available') registry.append(self) for name, method in inspect.getmembers( self, predicate=inspect.ismethod ): setattr(self, name, mock.MagicMock(name=name, wraps=method)) pyroute2-0.7.11/pyroute2/loader.py000066400000000000000000000021631455030217500170060ustar00rootroot00000000000000import struct import sys ## # # Logging setup # # See the history: # * https://github.com/svinota/pyroute2/issues/246 # * https://github.com/svinota/pyroute2/issues/255 # * https://github.com/svinota/pyroute2/issues/270 # * https://github.com/svinota/pyroute2/issues/573 # * https://github.com/svinota/pyroute2/issues/601 # from pyroute2.config import log ## # # Windows platform 
specific: socket module monkey patching # # To use the library on Windows, run:: # pip install win-inet-pton # if sys.platform.startswith('win'): # noqa: E402 import win_inet_pton # noqa: F401 def init(): try: # probe, if the bytearray can be used in struct.unpack_from() struct.unpack_from('I', bytearray((1, 0, 0, 0)), 0) except Exception: if sys.version_info[0] < 3: # monkeypatch for old Python versions log.warning('patching struct.unpack_from()') def wrapped(fmt, buf, offset=0): return struct._u_f_orig(fmt, str(buf), offset) struct._u_f_orig = struct.unpack_from struct.unpack_from = wrapped else: raise pyroute2-0.7.11/pyroute2/minimal.py000066400000000000000000000034521455030217500171700ustar00rootroot00000000000000## # # This module contains all the public symbols from the library. # ## # # Version # try: from pyroute2.config.version import __version__ except ImportError: __version__ = 'unknown' from pyroute2.conntrack import Conntrack, ConntrackEntry from pyroute2.iproute import ChaoticIPRoute, IPBatch, IPRoute, RawIPRoute from pyroute2.iproute.ipmock import IPRoute as IPMock from pyroute2.iwutil import IW from pyroute2.netlink.devlink import DevlinkSocket from pyroute2.netlink.diag import DiagSocket from pyroute2.netlink.event.acpi_event import AcpiEventSocket from pyroute2.netlink.event.dquot import DQuotSocket from pyroute2.netlink.exceptions import ( ChaoticException, NetlinkDecodeError, NetlinkDumpInterrupted, NetlinkError, ) from pyroute2.netlink.generic import GenericNetlinkSocket from pyroute2.netlink.generic.l2tp import L2tp from pyroute2.netlink.generic.mptcp import MPTCP from pyroute2.netlink.generic.wireguard import WireGuard from pyroute2.netlink.ipq import IPQSocket from pyroute2.netlink.nfnetlink.nfctsocket import NFCTSocket from pyroute2.netlink.nfnetlink.nftsocket import NFTSocket from pyroute2.netlink.nl80211 import NL80211 from pyroute2.netlink.rtnl.iprsocket import IPRSocket from pyroute2.netlink.taskstats import TaskStats from 
pyroute2.netlink.uevent import UeventSocket modules = [ AcpiEventSocket, ChaoticException, ChaoticIPRoute, Conntrack, ConntrackEntry, DevlinkSocket, DiagSocket, DQuotSocket, IPBatch, IPMock, IPQSocket, IPRoute, IPRSocket, IW, GenericNetlinkSocket, L2tp, MPTCP, NetlinkError, NetlinkDecodeError, NetlinkDumpInterrupted, NFCTSocket, NFTSocket, NL80211, RawIPRoute, TaskStats, UeventSocket, WireGuard, ] __all__ = [] __all__.extend(modules) pyroute2-0.7.11/pyroute2/ndb/000077500000000000000000000000001455030217500157275ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ndb/__init__.py000066400000000000000000000000001455030217500200260ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ndb/auth_manager.py000066400000000000000000000051271455030217500207410ustar00rootroot00000000000000''' AAA concept ----------- AAA refers to Authentication, Authorization and Accounting. NDB provides a minimalistic API to integrate Authorization routines, leaving the rest -- Authentication and Accounting -- to the user. Some of NDB routines and RTNL object methods are guarded with a parametrized decorator. The decorator takes the only parameter `tag`:: @check_auth('obj:read') def __getitem__(self, key): ... @check_auth('obj:modify') def __setitem__(self, key, value): ... AuthManager ----------- The tag is checked by `AuthManager.check(...)` routine. The routine is the only method that must be provided by AuthManager-compatible objects, and must be defined as:: def check(self, obj, tag): # -> True: grant access to the tag # -> False: reject access # -> raise Exception(): reject access with a specific exception ... NDB module provides an example AuthManager:: from pyroute2 import NDB from pyroute2.ndb.auth_manager import AuthManager ndb = NDB(log='debug') am = AuthManager({'obj:list': False, # deny dump(), summary() 'obj:read': True, # permit reading RTNL attributes 'obj:modify': True}, # permit add_ip(), commit() etc. 
ndb.log.channel('auth')) ap = ndb.auth_proxy(am) ap.interfaces.summary() # <-- fails with PermissionError You can implement custom AuthManager classes, the only requirement -- they must provide `.check(self, obj, tag)` routine, which returns `True` or `False` or raises an exception. ''' class check_auth(object): def __init__(self, tag): self.tag = tag def __call__(self, f): def guard(obj, *argv, **kwarg): if not getattr(obj, '_init_complete', True): return f(obj, *argv, **kwarg) if not obj.auth_managers: raise PermissionError('access rejected') if all([x.check(obj, self.tag) for x in obj.auth_managers]): return f(obj, *argv, **kwarg) raise PermissionError('access rejected') guard.__doc__ = f.__doc__ return guard class AuthManager(object): def __init__(self, auth, log, policy=False): self.auth = auth self.log = log self.policy = policy self.exception = PermissionError def check(self, obj, tag): ret = self.policy if isinstance(self.auth, dict): ret = self.auth.get(tag, self.policy) if not ret and self.exception: raise self.exception('%s access rejected' % (tag,)) return ret pyroute2-0.7.11/pyroute2/ndb/cli.py000077500000000000000000000041631455030217500170570ustar00rootroot00000000000000#!/usr/bin/env python import argparse import json import sys from pyroute2.cli.console import Console from pyroute2.cli.server import Server try: from pyroute2.cli.auth.auth_keystone import OSAuthManager except ImportError: OSAuthManager = None try: from pyroute2.cli.auth.auth_radius import RadiusAuthManager except ImportError: RadiusAuthManager = None def run(): argp = argparse.ArgumentParser() for spec in ( ('-a', '[S] IP address to listen on'), ('-c', '[C] Command line to run'), ('-l', '[C,S] Log spec'), ('-m', 'Set mode (C,S)'), ('-p', '[S] Port to listen on'), ('-r', '[C] Load rc file'), ('-s', '[C,S] Load sources from a json file'), ('-x', '[S] Strict auth'), ): argp.add_argument(spec[0], help=spec[1]) argp.add_argument('script', nargs='*', help='script to run') args = 
argp.parse_args() commands = [] sources = None if args.s: with open(args.s, 'r') as f: sources = json.loads(f.read()) if args.m in ('S', 'server'): if args.p: port = int(args.p) else: port = 8080 auth_plugins = {} if OSAuthManager is not None: auth_plugins['keystone'] = OSAuthManager if RadiusAuthManager is not None: auth_plugins['radius:cleartext'] = RadiusAuthManager server = Server( address=args.a or 'localhost', port=port, log=args.l, sources=sources, auth_strict=args.x, auth_plugins=auth_plugins, ) server.serve_forever() return 0 else: console = Console(log=args.l, sources=sources) if args.r: console.loadrc(args.r) for script in args.script: console.loadrc(script) if args.c: commands.append(args.c) console.interact(readfunc=lambda x: commands.pop(0)) elif not args.script: console.interact() return 1 if console.session.errors > 0 else 0 if __name__ == '__main__': rcode = run() sys.exit(rcode) pyroute2-0.7.11/pyroute2/ndb/cluster.py000066400000000000000000000017531455030217500177700ustar00rootroot00000000000000import json import socket from pyroute2.common import basestring from .main import NDB from .transport import Messenger, Transport def init(config): if isinstance(config, basestring): config = json.loads(config) else: config = json.load(config) hostname = config['local'].get('hostname', socket.gethostname()) messenger = Messenger( config['local']['id'], Transport(config['local']['address'], config['local']['port']), ) for target in config['local'].get('targets', []): messenger.targets.add(target) if not messenger.targets: messenger.targets.add(hostname) for peer in config.get('peers', []): messenger.add_peer(*peer) sources = config['local'].get('sources') if sources is None: sources = [{'target': hostname, 'kind': 'local'}] return NDB( log=config.get('log', 'debug'), sources=sources, localhost=sources[0]['target'], messenger=messenger, ) 
pyroute2-0.7.11/pyroute2/ndb/compat.py000066400000000000000000000043161455030217500175700ustar00rootroot00000000000000def ipdb_interfaces_view(ndb): '''Provide read-only interfaces view with IPDB layout. In addition to standard NDB fields provides some IPDB specific fields. The method returns a simple dict structure, no background updates or system changes are supported. Please open a ticket on the project page if you are missing any attribute used in your project: https://github.com/svinota/pyroute2/issues ''' ret = {} for record in ndb.interfaces.dump(): interface = record._as_dict() interface['ipdb_scope'] = 'system' interface['ipdb_priority'] = 0 try: interface['ipaddr'] = tuple( ( (x.address, x.prefixlen) for x in ( ndb.addresses.dump().select_records(index=record.index) ) ) ) except: with ndb.addresses.summary() as report: report.select_records(ifname=f"{record.ifname}") interface['ipaddr'] = tuple( ((x.address, x.prefixlen) for x in report) ) try: interface['ports'] = tuple( ( x.index for x in ( ndb.interfaces.dump().select_records( master=record.index ) ) ) ) except: with ndb.interfaces.dump() as report: report.select_records(ifname=f"{record.ifname}") interface['ports'] = tuple((x.index for x in report)) try: interface['neighbours'] = tuple( ( x.dst for x in ( ndb.neighbours.dump().select_records( ifindex=record.index ) ) ) ) except: with ndb.neighbours.dump() as report: report.select_records(ifindex=record.index) interface['neighbours'] = tuple((x.dst for x in report)) ret[record.ifname] = interface return ret pyroute2-0.7.11/pyroute2/ndb/events.py000066400000000000000000000040231455030217500176040ustar00rootroot00000000000000import threading import time class SyncStart(Exception): pass class SchemaFlush(Exception): pass class SchemaReadLock(Exception): pass class SchemaReadUnlock(Exception): pass class SchemaGenericRequest(object): def __init__(self, response, *argv, **kwarg): self.response = response self.argv = argv self.kwarg = kwarg class 
MarkFailed(Exception): pass class DBMExitException(Exception): pass class ShutdownException(Exception): pass class RescheduleException(Exception): pass class InvalidateHandlerException(Exception): pass class State(object): events = None def __init__(self, prime=None, log=None, wait_list=None): wait_list = wait_list or [] self.events = [] self.log = log self.wait_list = {x: threading.Event() for x in wait_list} if prime is not None: self.load(prime) def wait(self, state, *argv, **kwarg): return self.wait_list[state].wait(*argv, **kwarg) def load(self, prime): self.events = [] for state in prime.events: self.events.append(state) def transition(self): if len(self.events) < 2: return None return (self.events[-2][1], self.events[-1][1]) def get(self): if not self.events: return None return self.events[-1][1] def set(self, state): for key in self.wait_list: if key == state: self.wait_list[key].set() else: self.wait_list[key].clear() if self.log is not None: self.log.debug(state) if self.events and self.events[-1][1] == state: self.events.pop() self.events.append((time.time(), state)) return state def __eq__(self, other): if not self.events: return False return self.events[-1][1] == other def __ne__(self, other): if not self.events: return True return self.events[-1][1] != other pyroute2-0.7.11/pyroute2/ndb/main.py000066400000000000000000000517231455030217500172350ustar00rootroot00000000000000''' .. testsetup:: from pyroute2 import NDB ndb = NDB(sources=[{'target': 'localhost', 'kind': 'IPMock'}]) .. testsetup:: netns from types import MethodType from pyroute2 import NDB ndb = NDB(sources=[{'target': 'localhost', 'kind': 'IPMock'}]) def add_mock_netns(self, netns): return self.add_orig(target=netns, kind='IPMock', preset='netns') ndb.sources.add_orig = ndb.sources.add ndb.sources.add = MethodType(add_mock_netns, ndb.sources) .. 
testcleanup:: * for key, value in tuple(globals().items()): if key.startswith('ndb') and hasattr(value, 'close'): value.close() NDB is a high level network management module. IT allows to manage interfaces, routes, addresses etc. of connected systems, containers and network namespaces. In a nutshell, NDB collects and aggregates netlink events in an SQL database, provides Python objects to reflect the system state, and applies changes back to the system. The database expects updates only from the sources, no manual SQL updates are expected normally. .. aafig:: :scale: 80 :textual: +----------------------------------------------------------------+ +----------------------------------------------------------------+ | +----------------------------------------------------------------+ | | | | | | | kernel | |-+ | |-+ +----------------------------------------------------------------+ | | ^ | ^ | `netlink events` | | | | | `inotify events` | | | | | `...` | | | | v v | v | +--------------+ +--------------+ +--------------+ | source | | source | | source |<--\\ +--------------+ +--------------+ +--------------+ | | | | | | | | | \\-----------------------+-----------------------/ | | | parsed netlink events | `NDB._event_queue` | | | v | +------------------------+ | | `NDB.__dbm__()` thread | | +------------------------+ | | | v | +-----------------------------+ | | `NDB.schema.load_netlink()` | | | `NDB.objects.*.load*()` | | +-----------------------------+ | | | v | +----------------------+ | | SQL database | | | `SQLite` | | | `PostgreSQL` | | +----------------------+ | | | | | V | +---------------+ | +---------------+ | | +---------------+ | | `RTNL_Object.apply()` | | NDB object: | | |-------------------------/ | `interface` | | | | `address` | | | | `route` | |-+ | `...` |-+ +---------------+ .. 
container:: aafig-caption object names on the diagram are clickable The goal of NDB is to provide an easy access to RTNL info and entities via Python objects, like `pyroute2.ndb.objects.interface` (see also: :ref:`ndbinterfaces`), `pyroute2.ndb.objects.route` (see also: :ref:`ndbroutes`) etc. These objects do not only reflect the system state for the time of their instantiation, but continuously monitor the system for relevant updates. The monitoring is done via netlink notifications, thus no polling. Also the objects allow to apply changes back to the system and rollback the changes. On the other hand it's too expensive to create Python objects for all the available RTNL entities, e.g. when there are hundreds of interfaces and thousands of routes. Thus NDB creates objects only upon request, when the user calls `.create()` to create new objects or runs `ndb.[selector]` (e.g. `ndb.interfaces['eth0']`) to access an existing object. To list existing RTNL entities NDB uses objects of the class `RecordSet` that `yield` individual `Record` objects for every entity (see also: :ref:`ndbreports`). An object of the `Record` class is immutable, doesn't monitor any updates, doesn't contain any links to other objects and essentially behaves like a simple named tuple. .. 
aafig:: :scale: 80 :textual: +---------------------+ | | | | | `NDB() instance` | | | | | +---------------------+ | | +-------------------+ +-------------------+ | +-------------------+ | |-----------+--------------------------+ | | | | | | | | | | | | | `View()` | | | | | | | |-+ | | | |-+ | | +-------------------+ | | +------------------+ +------------------+ | | | | | | | | | `.dump()` | | `.create()` | | `.summary()` | | `.__getitem__()` | | | | | | | | | +------------------+ +------------------+ | | | | v v +-------------------+ +------------------+ | | +------------------+ | | | +------------------+ | | | `RecordSet()` | | `Interface()` | | | | | | `Address()` | | | | | | `Route()` | | | +-------------------+ | `Neighbour()` | | | | | `Rule()` | |-+ | | ... |-+ v +------------------+ +-------------------+ +-------------------+ | +-------------------+ | | | `filter()` | | | | `select()` | | | | `transform()` | | | | `join()` | |-+ | ... |-+ +-------------------+ | v +-------------------+ +-------------------+ | +-------------------+ | | | | | | | | | | | `Record()` | | | | | |-+ | |-+ +-------------------+ .. container:: aafig-caption object names on the diagram are clickable Here are some simple NDB usage examples. More info see in the reference documentation below. Print all the interface names on the system, assume we have an NDB instance `ndb`: .. testcode:: for interface in ndb.interfaces.dump(): print(interface.ifname) .. testoutput:: lo eth0 Print the routing information in the CSV format: .. testcode:: for record in ndb.routes.summary().format('csv'): print(record) .. testoutput:: 'target','tflags','table','ifname','dst','dst_len','gateway' 'localhost',0,254,'eth0','',0,'192.168.122.1' 'localhost',0,254,'eth0','192.168.122.0',24, 'localhost',0,255,'lo','127.0.0.0',8, 'localhost',0,255,'lo','127.0.0.1',32, 'localhost',0,255,'lo','127.255.255.255',32, 'localhost',0,255,'eth0','192.168.122.28',32, 'localhost',0,255,'eth0','192.168.122.255',32, .. 
note:: More on report filtering and formatting: :ref:`ndbreports` Print IP addresses of interfaces in several network namespaces as: .. testcode:: netns nslist = ['netns01', 'netns02', 'netns03'] for nsname in nslist: ndb.sources.add(netns=nsname) report = ndb.addresses.summary() report.select_records(target=lambda x: x.startswith('netns')) report.select_fields('address', 'ifname', 'target') for line in report.format('json'): print(line) .. testoutput:: netns [ { "address": "127.0.0.1", "ifname": "lo", "target": "netns01" }, { "address": "127.0.0.1", "ifname": "lo", "target": "netns02" }, { "address": "127.0.0.1", "ifname": "lo", "target": "netns03" } ] Add an IP address on an interface: .. testcode:: with ndb.interfaces['eth0'] as eth0: eth0.add_ip('10.0.0.1/24') # ---> <--- NDB waits until the address setup Change an interface property: .. testcode:: with ndb.interfaces['eth0'] as eth0: eth0.set( state='up', address='00:11:22:33:44:55', ) # ---> <--- NDB waits here for the changes to be applied # the commit() is called automatically by the # context manager's __exit__() ''' import atexit import ctypes import ctypes.util import logging import logging.handlers import sys import threading from pyroute2 import config from pyroute2.common import basestring ## # NDB stuff from .auth_manager import AuthManager from .events import ShutdownException from .messages import cmsg from .schema import DBProvider from .task_manager import TaskManager from .transaction import Transaction from .view import SourcesView, View try: from urlparse import urlparse except ImportError: from urllib.parse import urlparse try: import queue except ImportError: import Queue as queue log = logging.getLogger(__name__) NDB_VIEWS_SPECS = ( ('interfaces', 'interfaces'), ('addresses', 'addresses'), ('routes', 'routes'), ('neighbours', 'neighbours'), ('af_bridge_fdb', 'fdb'), ('rules', 'rules'), ('netns', 'netns'), ('af_bridge_vlans', 'vlans'), ) class Log: def __init__(self, log_id=None): 
self.logger = None self.state = False self.log_id = log_id or id(self) self.logger = logging.getLogger('pyroute2.ndb.%s' % self.log_id) self.main = self.channel('main') def __call__(self, target=None, level=logging.INFO): if target is None: return self.logger is not None if self.logger is not None: for handler in tuple(self.logger.handlers): self.logger.removeHandler(handler) if target in ('off', False): if self.state: self.logger.setLevel(0) self.logger.addHandler(logging.NullHandler()) return if target in ('on', 'stderr'): handler = logging.StreamHandler() elif target == 'debug': handler = logging.StreamHandler() level = logging.DEBUG elif isinstance(target, basestring): url = urlparse(target) if not url.scheme and url.path: handler = logging.FileHandler(url.path) elif url.scheme == 'syslog': handler = logging.handlers.SysLogHandler( address=url.netloc.split(':') ) else: raise ValueError('logging scheme not supported') else: handler = target # set formatting only for new created logging handlers if handler is not target: fmt = '%(asctime)s %(levelname)8s %(name)s: %(message)s' formatter = logging.Formatter(fmt) handler.setFormatter(formatter) self.logger.addHandler(handler) self.logger.setLevel(level) @property def on(self): self.__call__(target='on') @property def off(self): self.__call__(target='off') def close(self): manager = self.logger.manager name = self.logger.name # the loggerDict can be huge, so don't # cache all the keys -- cache only the # needed ones purge_list = [] for logger in manager.loggerDict.keys(): if logger.startswith(name): purge_list.append(logger) # now shoot them one by one for logger in purge_list: del manager.loggerDict[logger] # don't force GC, leave it to the user del manager del name del purge_list def channel(self, name): return logging.getLogger('pyroute2.ndb.%s.%s' % (self.log_id, name)) def debug(self, *argv, **kwarg): return self.main.debug(*argv, **kwarg) def info(self, *argv, **kwarg): return self.main.info(*argv, **kwarg) 
def warning(self, *argv, **kwarg): return self.main.warning(*argv, **kwarg) def error(self, *argv, **kwarg): return self.main.error(*argv, **kwarg) def critical(self, *argv, **kwarg): return self.main.critical(*argv, **kwarg) class DeadEnd: def put(self, *argv, **kwarg): raise ShutdownException('shutdown in progress') class EventQueue: def __init__(self, *argv, **kwarg): self._bypass = self._queue = queue.Queue(*argv, **kwarg) def put(self, msg, source=None): return self._queue.put((source, msg)) def shutdown(self): self._queue = DeadEnd() def bypass(self, msg, source=None): return self._bypass.put((source, msg)) def get(self, *argv, **kwarg): return self._bypass.get(*argv, **kwarg) def qsize(self): return self._bypass.qsize() class AuthProxy: def __init__(self, ndb, auth_managers): self._ndb = ndb self._auth_managers = auth_managers for vtable, vname in NDB_VIEWS_SPECS: view = View(self._ndb, vtable, auth_managers=self._auth_managers) setattr(self, vname, view) class NDB: @property def nsmanager(self): return '%s/nsmanager' % self.localhost def __init__( self, sources=None, localhost='localhost', db_provider='sqlite3', db_spec=':memory:', db_cleanup=True, rtnl_debug=False, log=False, auto_netns=False, libc=None, ): if db_provider == 'postgres': db_provider = 'psycopg2' self.localhost = localhost self.schema = None self.libc = libc or ctypes.CDLL( ctypes.util.find_library('c'), use_errno=True ) self.log = Log(log_id=id(self)) self._db = None self._dbm_thread = None self._dbm_ready = threading.Event() self._dbm_shutdown = threading.Event() self._global_lock = threading.Lock() self._event_queue = EventQueue(maxsize=100) self.messenger = None # if log: if isinstance(log, basestring): self.log(log) elif isinstance(log, (tuple, list)): self.log(*log) elif isinstance(log, dict): self.log(**log) else: raise TypeError('wrong log spec format') # # fix sources prime if sources is None: if config.mock_iproute: sources = [{'target': 'localhost', 'kind': 'IPMock'}] else: 
sources = [ { 'target': self.localhost, 'kind': 'local', 'nlm_generator': 1, } ] if sys.platform.startswith('linux'): sources.append( {'target': self.nsmanager, 'kind': 'nsmanager'} ) elif not isinstance(sources, (list, tuple)): raise ValueError('sources format not supported') for spec in sources: if 'target' not in spec: spec['target'] = self.localhost break am = AuthManager( {'obj:list': True, 'obj:read': True, 'obj:modify': True}, self.log.channel('auth'), ) self.sources = SourcesView(self, auth_managers=[am]) self._call_registry = {} self._nl = sources atexit.register(self.close) self._dbm_ready.clear() self._dbm_error = None self.config = { 'provider': str(DBProvider(db_provider)), 'spec': db_spec, 'rtnl_debug': rtnl_debug, 'db_cleanup': db_cleanup, 'auto_netns': auto_netns, 'recordset_pipe': 'false', } self.task_manager = TaskManager(self) self._dbm_thread = threading.Thread( target=self.task_manager.run, name='NDB main loop' ) self._dbm_thread.daemon = True self._dbm_thread.start() self._dbm_ready.wait() if self._dbm_error is not None: raise self._dbm_error for vtable, vname in NDB_VIEWS_SPECS: view = View(self, vtable, auth_managers=[am]) setattr(self, vname, view) # self.query = Query(self.schema) def _get_view(self, table, chain=None, auth_managers=None): return View(self, table, chain, auth_managers) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def begin(self): return Transaction(self.log.channel('transaction')) def readonly(self): return self.auth_proxy( AuthManager( {'obj:list': True, 'obj:read': True, 'obj:modify': False}, self.log.channel('auth'), ) ) def auth_proxy(self, auth_manager): return AuthProxy(self, [auth_manager]) def close(self): with self._global_lock: if self._dbm_shutdown.is_set(): return else: self._dbm_shutdown.set() if hasattr(atexit, 'unregister'): atexit.unregister(self.close) else: try: atexit._exithandlers.remove((self.close, (), {})) except ValueError: pass # shutdown the 
_dbm_thread self._event_queue.shutdown() self._event_queue.bypass((cmsg(None, ShutdownException()),)) self._dbm_thread.join() # shutdown the logger -- free the resources self.log.close() def backup(self, spec): self.task_manager.db_backup(spec) def reload(self, kinds=None): for source in self.sources.values(): if kinds is not None and source.kind in kinds: source.restart() pyroute2-0.7.11/pyroute2/ndb/messages.py000066400000000000000000000003661455030217500201150ustar00rootroot00000000000000class cmsg(dict): def __init__(self, target, payload=None): self['header'] = {'target': target} self.payload = payload class cmsg_event(cmsg): pass class cmsg_failed(cmsg): pass class cmsg_sstart(cmsg): pass pyroute2-0.7.11/pyroute2/ndb/noipdb.py000066400000000000000000000120331455030217500175530ustar00rootroot00000000000000import logging from pyroute2.ndb.main import NDB log = logging.getLogger(__name__) class ObjectProxy(dict): def __init__(self, obj): self._obj = obj def __getattribute__(self, key): if key[:4] == 'set_': def set_value(value): self[key[4:]] = value return self return set_value try: return self[key] except KeyError: return super(ObjectProxy, self).__getattribute__(key) def __setattr__(self, key, value): if key == '_obj': super(ObjectProxy, self).__setattr__(key, value) else: super(ObjectProxy, self).__getattribute__('_obj')[key] = value def __getitem__(self, key): return super(ObjectProxy, self).__getattribute__('_obj')[key] def __setitem__(self, key, value): super(ObjectProxy, self).__getattribute__('_obj')[key] = value def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if hasattr(self, 'commit'): self.commit() def __repr__(self): return repr(super(ObjectProxy, self).__getattribute__('_obj')) def __contains__(self, key): return key in super(ObjectProxy, self).__getattribute__('_obj') def get_ndb_object(self): return self._obj def keys(self): return self._obj.keys() def items(self): return self._obj.items() def values(self): 
return self._obj.values() def __iter__(self): return self._obj.__iter__() @property def _mode(self): return 'implicit' class Interface(ObjectProxy): def add_ip(self, *argv, **kwarg): self._obj.add_ip(*argv, **kwarg) return self def del_ip(self, *argv, **kwarg): self._obj.del_ip(*argv, **kwarg) return self def add_port(self, *argv, **kwarg): self._obj.add_port(*argv, **kwarg) return self def del_port(self, *argv, **kwarg): self._obj.del_port(*argv, **kwarg) return self def commit(self, *argv, **kwarg): self._obj.commit(*argv, **kwarg) return self def up(self): self._obj.set('state', 'up') return self def down(self): self._obj.set('state', 'down') return self def remove(self): self._obj.remove() return self @property def if_master(self): return self._obj.get('master', None) @property def ipaddr(self): return tuple(self._obj.ipaddr.dump().select('address', 'prefixlen')) class Interfaces(ObjectProxy): text_create = ''' When `create().commit()` fails, the failed interface object behaves differently in IPDB and NDB. IPDB saves the failed object in the database, while the NDB database contains only the system reflection, and the failed object may stay only being referenced by a variable. `KeyError: 'object exists'` vs. `CreateException` ''' def __getitem__(self, key): return Interface(super(Interfaces, self).__getitem__(key)) def __iter__(self): return iter(self.keys()) def add(self, *argv, **kwarg): return self.create(*argv, **kwarg) def create(self, *argv, **kwarg): log.warning(self.text_create) return Interface(self._obj.create(*argv, **kwarg)) def keys(self): ret = [] for record in self._obj.dump(): ret += [record.ifname, record.index] return ret def has_key(self, key): return key in self.keys() class NoIPDB(object): text_create = ''' IPDB has a shortcut method to create interfaces: `ipdb.create(...)`. NDB has `create()` methods only under respective views: `ndb.interfaces.create(...)`, `ndb.addresses.create(...)` etc. 
''' text_nl = ''' Unlike IPDB, NDB can work with many netlink sources. The default one referenced as `localhost`:: # # these two statements are equivalent: # ndb.sources['localhost'].nl.get_links() ipdb.nl.get_links() ''' def __init__(self, *argv, **kwarg): if argv or kwarg: log.warning( '%s does not support IPDB parameters, ignoring', self.__class__.__name__, ) if len(argv) > 0 or 'nl' in kwarg: log.warning( '%s does not support shared netlink sources,' ' ignoring `nl` and starting with local IPRoute', self.__class__.__name__, ) self._ndb = NDB() self.interfaces = Interfaces(self._ndb.interfaces) @property def nl(self): log.warning(self.text_nl) return self._ndb.sources['localhost'].nl @property def ipaddr(self): ret = dict([(x.index, []) for x in self._ndb.interfaces.dump()]) for record in self._ndb.addresses.dump(): ret[record.index].append((record.address, record.prefixlen)) return ret def create(self, *argv, **kwarg): log.warning(self.text_create) return self.interfaces.create(*argv, **kwarg) def release(self): self._ndb.close() pyroute2-0.7.11/pyroute2/ndb/objects/000077500000000000000000000000001455030217500173605ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/ndb/objects/__init__.py000066400000000000000000001117331455030217500214770ustar00rootroot00000000000000''' General structure ================= The NDB objects are dictionary-like structures that represent network objects -- interfaces, routes, addresses etc. They support the common dict API, like item getting, setting, iteration through key and values. In addition to that, NDB object add specific calls, see the API section below. Most of the NDB object types store all attributes in a flat one level dictionary. Some, like multihop routes, implement nested structures. In addition to that, some objects like `Interface` provide views on the DB that list only related objects -- addresses, routes and neighbours. More on these topic see in the corresponding sections. 
NDB objects and RTNL API ======================== The dictionary fields represent RTNL messages fields and NLA names, and the objects are used as argument dictionaries to normal `IPRoute` methods like `link()` or `route()`. Thus everything described for the `IPRoute` methods is valid here as well. See also: :ref:`iproute` .. testsetup:: from pyroute2 import IPMock as IPRoute from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True .. testcode:: # create a vlan interface with IPRoute eth0 = 2 with IPRoute() as ipr: ipr.link("add", ifname="vlan1108", kind="vlan", link=eth0, vlan_id=1108) # same with NDB: with NDB(log="stderr") as ndb: vlan = ndb.interfaces.create( ifname="vlan1108", kind="vlan", link="eth0", vlan_id=1108, ) vlan.commit() Slightly simplifying, if a network object doesn't exist, NDB will run an RTNL method with "add" argument, if exists -- "set", and to remove an object NDB will call the method with "del" argument. API === ''' import collections import errno import json import threading import time import traceback import weakref from functools import partial from pyroute2 import cli from pyroute2.netlink.exceptions import NetlinkError from pyroute2.requests.main import RequestProcessor from ..auth_manager import AuthManager, check_auth from ..events import InvalidateHandlerException, State from ..messages import cmsg_event from ..report import Record RSLV_IGNORE = 0 RSLV_RAISE = 1 RSLV_NONE = 2 RSLV_DELETE = 3 def fallback_add(self, idx_req, req): # ignore all set/get for objects with incomplete idx_req if set(idx_req.keys()) != set(self.kspec): self.log.debug('ignore incomplete idx_req in the fallback') return # try to set the object ( self.ndb._event_queue.put( self.sources[self['target']].api(self.api, 'set', **req), source=self['target'], ) ) # try to get the object ( self.ndb._event_queue.put( self.sources[self['target']].api(self.api, 'get', **idx_req), source=self['target'], ) ) # reload the collected data self.load_sql() 
class RTNL_Object(dict): ''' The common base class for NDB objects -- interfaces, routes, rules addresses etc. Implements common logic for all the classes, like item setting, commit/rollback, RTNL event filters, loading values from the DB backend etc. ''' view = None # (optional) view to load values for the summary etc. utable = None # table to send updates to resolve_fields = [] key_extra_fields = [] hidden_fields = [] fields_cmp = {} fields_load_transform = {} field_filter = object rollback_chain = [] fallback_for = None schema = None event_map = None state = None log = None errors = None msg_class = None reverse_update = None _table = None _apply_script = None _apply_script_snapshots = [] _key = None _replace = None _replace_on_key_change = False _init_complete = False # 8<------------------------------------------------------------ # # Documented public properties section # @property def table(self): ''' Main reference table for the object. The SQL schema of this table is used to build the object key and to verify fields. Read-write property. ''' return self._table @table.setter def table(self, value): self._table = value @property def etable(self): ''' Effective table where the object actually fetches the data from. It is not always equal `self.table`, e.g. snapshot objects fetch the data from snapshot tables. Read-only property. ''' if self.ctxid: return '%s_%s' % (self.table, self.ctxid) else: return self.table @property def key(self): ''' Key of the object, used to build SQL requests to fetch the data from the DB. Read-write property. 
''' nkey = self._key or {} ret = collections.OrderedDict() for name in self.kspec: kname = self.iclass.nla2name(name) if kname in self: value = self[kname] if value is None and name in nkey: value = nkey[name] if isinstance(value, (list, tuple, dict)): value = json.dumps(value) ret[name] = value if len(ret) < len(self.kspec): for name in self.key_extra_fields: kname = self.iclass.nla2name(name) if self.get(kname): ret[name] = self[kname] return ret @key.setter def key(self, k): if not isinstance(k, dict): return for key, value in k.items(): if value is not None: dict.__setitem__(self, self.iclass.nla2name(key), value) # # 8<------------------------------------------------------------ # @classmethod def _count(cls, view): return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s' % view.table ) @classmethod def _dump_where(cls, view): return '', [] @classmethod def _sdump(cls, view, names, fnames): req = ''' SELECT %s FROM %s AS main ''' % ( fnames, cls.table, ) yield names where, values = cls._dump_where(view) for record in view.ndb.task_manager.db_fetch(req + where, values): yield record @classmethod def summary(cls, view): return cls._sdump( view, view.ndb.schema.compiled[cls.table]['norm_idx'], view.ndb.schema.compiled[cls.table]['knames'], ) @classmethod def dump(cls, view): return cls._sdump( view, view.ndb.schema.compiled[cls.table]['norm_names'], view.ndb.schema.compiled[cls.table]['fnames'], ) @classmethod def spec_normalize(cls, processed, spec): return processed @staticmethod def key_load_context(key, context): return key def __init__( self, view, key, iclass, ctxid=None, load=True, master=None, check=True, auth_managers=None, ): self.view = view self.ndb = view.ndb self.sources = view.ndb.sources self.master = master self.ctxid = ctxid self.schema = view.ndb.schema self.task_manager = view.ndb.task_manager self.changed = set() self.iclass = iclass self.utable = self.utable or self.table self.errors = [] self.atime = time.time() self.log = 
self.ndb.log.channel('rtnl_object') self.log.debug('init') if auth_managers is None: auth_managers = [AuthManager(None, self.ndb.log.channel('auth'))] self.auth_managers = auth_managers self.state = State() self.state.set('invalid') self.snapshot_deps = [] self.load_event = threading.Event() self.load_event.set() self.load_debug = False self.lock = threading.Lock() self.object_data = RequestProcessor( self.field_filter(), context=weakref.proxy(self) ) self.kspec = self.schema.compiled[self.table]['idx'] self.knorm = self.schema.compiled[self.table]['norm_idx'] self.spec = self.schema.compiled[self.table]['all_names'] self.names = self.schema.compiled[self.table]['norm_names'] self.names_count = [self.names.count(x) for x in self.names] self.last_save = None if self.event_map is None: self.event_map = {} self._apply_script = [] self.fallback_for = { 'add': {errno.EEXIST: fallback_add, errno.EAGAIN: None}, 'set': {errno.ENODEV: None}, 'del': { errno.ENODEV: None, # interfaces errno.ENOENT: None, # rules errno.ESRCH: None, # routes errno.EADDRNOTAVAIL: None, # addresses }, } if isinstance(key, dict): self.chain = key.pop('ndb_chain', None) create = key.pop('create', False) else: self.chain = None create = False exists = self.exists(key) ckey = self.complete_key(key) if create: if check & exists: raise KeyError('object exists') for name in key: self[self.iclass.nla2name(name)] = key[name] # FIXME -- merge with complete_key() if 'target' not in self: self.load_value('target', self.view.default_target) else: if not exists: raise KeyError('object does not exists') self.key = ckey if load: if ctxid is None: self.load_sql() else: self.load_sql(table=self.table) self._init_complete = True @classmethod def new_spec(cls, spec, context=None, localhost=None): if isinstance(spec, Record): spec = spec._as_dict() rp = RequestProcessor(cls.field_filter(), context=spec, prime=spec) if isinstance(context, dict): rp.update(context) if 'target' not in rp and localhost is not None: 
rp['target'] = localhost return cls.spec_normalize(rp, spec) @staticmethod def resolve(view, spec, fields, policy=RSLV_IGNORE): ''' Resolve specific fields e.g. convert port ifname into index. ''' for field in fields: reference = spec.get(field) try: if isinstance(reference, dict) and 'index' in reference: spec[field] = reference['index'] elif reference is not None and not isinstance(reference, int): spec[field] = view[reference]['index'] except (KeyError, TypeError): if policy == RSLV_RAISE: raise elif policy == RSLV_NONE: spec[field] = None elif policy == RSLV_DELETE: del spec[field] elif policy == RSLV_IGNORE: pass else: raise TypeError('unknown rslv policy') def mark_tflags(self, mark): pass def keys(self): return filter(lambda x: x not in self.hidden_fields, dict.keys(self)) def items(self): return filter( lambda x: x[0] not in self.hidden_fields, dict.items(self) ) @property def context(self): return {'target': self.get('target', self.ndb.localhost)} @classmethod def nla2name(self, name): return self.msg_class.nla2name(name) @classmethod def compare_record(self, left, right): pass @classmethod def name2nla(self, name): return self.msg_class.name2nla(name) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.commit() def __hash__(self): return id(self) @check_auth('obj:read') def __getitem__(self, key): return dict.__getitem__(self, key) @check_auth('obj:modify') def __setitem__(self, key, value): for nkey, nvalue in self.object_data.filter(key, value).items(): if self.get(nkey) == nvalue: continue if self.state == 'system' and nkey in self.knorm: if self._replace_on_key_change: self.log.debug( f'prepare replace {nkey} = {nvalue} in {self.key}' ) self._replace = type(self)( self.view, self.key, auth_managers=self.auth_managers ) self.state.set('replace') else: raise ValueError( 'attempt to change a key field (%s)' % nkey ) if nkey in ('net_ns_fd', 'net_ns_pid'): self.state.set('setns') if nvalue != self.get(nkey, None): if 
nkey != 'target': self.changed.add(nkey) dict.__setitem__(self, nkey, nvalue) def fields(self, *argv): # TODO: deprecate and move to show() Fields = collections.namedtuple('Fields', argv) return Fields(*[self[key] for key in argv]) def key_repr(self): return repr(self.key) @cli.change_pointer def create(self, **spec): ''' Create an RTNL object of the same type, and add it to the commit chain. The spec format depends on the object. The method allows to chain creation of multiple objects sharing the same context. .. code-block:: python ( ndb.interfaces['eth0'] # 1. .set(state="up") # 2. .ipaddr # 3. .create(address='10.0.0.1', prefixlen=24) # 4. <- create() .create(address='10.0.0.2', prefixlen=24) # 5. <- create() .commit() # 6. ) Here: 1. returns an interface object `eth0` 2. sets `state="up"` and returns the object itself 3. returns an address view, that uses `eth0` as the context 4. creates an IP address, the interface lookup is done via context 5. creates another IP address -- same type, same context 6. commits the changes in the order: (interface `state="up"`; address `10.0.0.1/24`; address `10.0.0.2/24`) ''' spec['create'] = True spec['ndb_chain'] = self return self.view[spec] @cli.show_result @check_auth('obj:read') def show(self, fmt=None): ''' Return the object in a specified format. The format may be specified with the keyword argument `format` or in the `ndb.config['show_format']`. TODO: document different formats ''' fmt = fmt or self.view.ndb.config.get('show_format', 'native') if fmt == 'native': return dict(self) else: out = collections.OrderedDict() for key in sorted(self): out[key] = self[key] return '%s\n' % json.dumps(out, indent=4, separators=(',', ': ')) def set(self, *argv, **kwarg): ''' Call formats: * `set(key, value)` * `set(key=value)` * `set(key1=value1, key2=value2)` .. 
code-block:: python with ndb.interfaces["eth0"] as eth0: eth0.set( mtu=1200, state='up', address='00:11:22:33:44:55', ) ''' if argv: self[argv[0]] = argv[1] elif kwarg: for key, value in kwarg.items(): self[key] = value return self def wtime(self, itn=1): return max(min(itn * 0.1, 1), self.view.ndb._event_queue.qsize() / 10) def register(self): # # Construct a weakref handler for events. # # If the referent doesn't exist, raise the # exception to remove the handler from the # chain. # def wr_handler(wr, fname, *argv): try: return getattr(wr(), fname)(*argv) except Exception: # check if the weakref became invalid if wr() is None: raise InvalidateHandlerException() raise wr = weakref.ref(self) for event, fname in self.event_map.items(): # # Do not trust the implicit scope and pass the # weakref explicitly via partial # ( self.ndb.task_manager.register_handler( event, partial(wr_handler, wr, fname) ) ) @check_auth('obj:modify') def snapshot(self, ctxid=None): ''' Create and return a snapshot of the object. The method creates corresponding SQL tables for the object itself and for detected dependencies. The snapshot tables will be removed as soon as the snapshot gets collected by the GC. ''' ctxid = ctxid or self.ctxid or id(self) if self._replace is None: key = self.key else: key = self._replace.key snp = type(self)( self.view, key, ctxid=ctxid, auth_managers=self.auth_managers ) self.ndb.task_manager.db_save_deps( ctxid, weakref.ref(snp), self.iclass ) snp.changed = set(self.changed) return snp def complete_key(self, key): ''' Try to complete the object key based on the provided fields. E.g.:: >>> ndb.interfaces['eth0'].complete_key({"ifname": "eth0"}) {'ifname': 'eth0', 'index': 2, 'target': u'localhost', 'tflags': 0} It is an internal method and is not supposed to be used externally. 
''' self.log.debug('complete key %s from table %s' % (key, self.etable)) fetch = [] if isinstance(key, Record): key = key._as_dict() else: key = dict(key) self.resolve( view=self.view, spec=key, fields=self.resolve_fields, policy=RSLV_DELETE, ) for name in self.kspec: if name not in key: fetch.append('f_%s' % name) if fetch: keys = [] values = [] for name, value in key.items(): nla_name = self.iclass.name2nla(name) if nla_name in self.spec: name = nla_name if value is not None and name in self.spec: keys.append('f_%s = %s' % (name, self.schema.plch)) values.append(value) spec = self.ndb.task_manager.db_fetchone( 'SELECT %s FROM %s WHERE %s' % (' , '.join(fetch), self.etable, ' AND '.join(keys)), values, ) if spec is None: self.log.debug('got none') return None for name, value in zip(fetch, spec): key[name[2:]] = value self.log.debug('got %s' % key) return key def exists(self, key): ''' Check if the object exists in the DB ''' return self.view.exists(key) @check_auth('obj:modify') def rollback(self, snapshot=None): ''' Try to rollback the object state using the snapshot provided as an argument or using `self.last_save`. 
''' if self._replace is not None: self.log.debug( 'rollback replace: %s :: %s' % (self.key, self._replace.key) ) new_replace = type(self)( self.view, self.key, auth_managers=self.auth_managers ) new_replace.state.set('remove') self.state.set('replace') self.update(self._replace) self._replace = new_replace self.log.debug('rollback: %s' % str(self.state.events)) snapshot = snapshot or self.last_save if snapshot == -1: return self.remove().apply() else: snapshot.state.set(self.state.get()) snapshot.rollback_chain = self._apply_script_snapshots snapshot.apply(rollback=True) for link, snp in snapshot.snapshot_deps: link.rollback(snapshot=snp) return self def clear(self): pass @property def clean(self): return ( self.state == 'system' and not self.changed and not self._apply_script ) @check_auth('obj:modify') def commit(self): ''' Commit the pending changes. If an exception is raised during `commit()`, automatically `rollback()` to the latest saved snapshot. ''' if self.clean: return self if self.chain: self.chain.commit() self.log.debug('commit: %s' % str(self.state.events)) # Is it a new object? if self.state == 'invalid': # Save values, try to apply save = dict(self) self.last_save = -1 try: return self.apply(mode='commit') except Exception as e_i: # Save the debug info e_i.trace = traceback.format_exc() # ACHTUNG! The routine doesn't clean up the system # # Drop all the values and rollback to the initial state for key in tuple(self.keys()): del self[key] for key in save: dict.__setitem__(self, key, save[key]) raise e_i # Continue with an existing object # The snapshot tables in the DB will be dropped as soon as the GC # collects the object. But in the case of an exception the `snp` # variable will be saved in the traceback, so the tables will be # available to debug. 
If the traceback will be saved somewhere then # the tables will never be dropped by the GC, so you can do it # manually by `ndb.task_manager.db_purge_snapshots()` -- to invalidate # all the snapshots and to drop the associated tables. self.last_save = self.snapshot() # Apply the changes try: self.apply(mode='commit') except Exception as e_c: # Rollback in the case of any error try: self.rollback() except Exception as e_r: e_c.chain = [e_r] if hasattr(e_r, 'chain'): e_c.chain.extend(e_r.chain) e_r.chain = None raise finally: if self.last_save is not None: (self.last_save.state.set(self.state.get())) if self._replace is not None: self._replace = None return self def remove(self): ''' Set the desired state to `remove`, so the next `apply()` call will delete the object from the system. ''' with self.lock: self.state.set('remove') return self def check(self): state_map = ( ('invalid', 'system'), ('remove', 'invalid'), ('setns', 'invalid'), ('setns', 'system'), ('replace', 'system'), ) self.load_sql() self.log.debug('check: %s' % str(self.state.events)) if self.state.transition() not in state_map: self.log.debug('check state: False') return False if self.changed: self.log.debug('check changed: %s' % (self.changed)) return False self.log.debug('check: True') return True def make_req(self, prime): req = dict(prime) for key in self.changed: req[key] = self[key] return req def make_idx_req(self, prime): return prime def get_count(self): conditions = [] values = [] for name in self.kspec: conditions.append('f_%s = %s' % (name, self.schema.plch)) values.append(self.get(self.iclass.nla2name(name), None)) return ( self.ndb.task_manager.db_fetchone( ''' SELECT count(*) FROM %s WHERE %s ''' % (self.table, ' AND '.join(conditions)), values, ) )[0] def hook_apply(self, method, **spec): pass @check_auth('obj:modify') def save_context(self): if self.state == 'invalid': self.last_save = -1 else: self.last_save = self.snapshot() return self @check_auth('obj:modify') def apply(self, 
rollback=False, req_filter=None, mode='apply'): ''' Apply the pending changes. If an exception is raised during `apply()`, no `rollback()` is called. No automatic snapshots are madre. In order to properly revert the changes, you have to run:: obj.save_context() try: obj.apply() except Exception: obj.rollback() ''' # Resolve the fields self.resolve( view=self.view, spec=self, fields=self.resolve_fields, policy=RSLV_RAISE, ) self.log.debug('events log: %s' % str(self.state.events)) self.log.debug('run apply') self.load_event.clear() self._apply_script_snapshots = [] # Load the current state try: self.task_manager.db_commit() except Exception: pass self.load_sql(set_state=False) if self.state == 'system' and self.get_count() == 0: state = self.state.set('invalid') else: state = self.state.get() # Create the request. prime = { x: self[x] for x in self.schema.compiled[self.table]['norm_idx'] if self.get(x) is not None } req = self.make_req(prime) idx_req = self.make_idx_req(prime) self.log.debug('apply req: %s' % str(req)) self.log.debug('apply idx_req: %s' % str(idx_req)) method = None # if state in ('invalid', 'replace'): for k, v in tuple(self.items()): if k not in req and v is not None: req[k] = v if self.master is not None: req = self.new_spec( req, self.master.context, self.ndb.localhost ) method = 'add' elif state == 'system': method = 'set' elif state == 'setns': method = 'set' elif state == 'remove': method = 'del' req = idx_req else: raise Exception('state transition not supported') self.log.debug(f'apply transition from: {state}') self.log.debug(f'apply method: {method}') if req_filter is not None: req = req_filter(req) first_call_success = False for itn in range(10): try: self.log.debug('API call %s (%s)' % (method, req)) (self.sources[self['target']].api(self.api, method, **req)) first_call_success = True (self.hook_apply(method, **req)) except NetlinkError as e: (self.log.debug('error: %s' % e)) if not first_call_success: self.log.debug('error on the first 
API call, escalate') raise ## # # FIXME: performance penalty # required now only in some NDA corner cases # must be moved to objects.neighbour # # ## if e.code in self.fallback_for[method]: self.log.debug('ignore error %s for %s' % (e.code, self)) if self.fallback_for[method][e.code] is not None: self.log.debug( 'run fallback %s (%s)' % (self.fallback_for[method][e.code], req) ) try: if isinstance( self.fallback_for[method][e.code], str ): self.sources[self['target']].api( self.api, self.fallback_for[method][e.code], **req, ) else: self.fallback_for[method][e.code]( self, idx_req, req ) except NetlinkError: pass else: raise e wtime = self.wtime(itn) mqsize = self.view.ndb._event_queue.qsize() nq = self.schema.stats.get(self['target']) if nq is not None: nqsize = nq.qsize else: nqsize = 0 self.log.debug( 'stats: apply %s {' 'objid %s, wtime %s, ' 'mqsize %s, nqsize %s' '}' % (method, id(self), wtime, mqsize, nqsize) ) if self.check(): self.log.debug('checked') break self.log.debug('check failed') self.load_event.wait(wtime) self.load_event.clear() else: self.log.debug('stats: %s apply %s fail' % (id(self), method)) if not self.use_db_resync(lambda x: x, self.check): self._apply_script = [] raise Exception('could not apply the changes') self.log.debug('stats: %s pass' % (id(self))) # if state == 'replace': self._replace.remove() self._replace.apply() # if rollback: # # Iterate all the snapshot tables and collect the diff for cls in self.view.classes.values(): if issubclass(type(self), cls) or issubclass(cls, type(self)): continue table = cls.table # comprare the tables diff = self.ndb.task_manager.db_fetch( ''' SELECT * FROM %s_%s EXCEPT SELECT * FROM %s ''' % (table, self.ctxid, table) ) for record in diff: record = dict( zip((self.schema.compiled[table]['all_names']), record) ) key = dict( [ x for x in record.items() if x[0] in self.schema.compiled[table]['idx'] ] ) key['create'] = True try: obj = self.view.template(key, table) except KeyError: continue 
obj.load_sql(ctxid=self.ctxid) obj.state.set('invalid') obj.register() try: obj.apply() except Exception as e: self.errors.append((time.time(), obj, e)) for obj in reversed(self.rollback_chain): obj.rollback() else: apply_script = self._apply_script self._apply_script = [] for op, kwarg in apply_script: kwarg['self'] = self kwarg['mode'] = mode ret = self.use_db_resync( lambda x: not isinstance(x, KeyError), op, tuple(), kwarg ) if not isinstance(ret, list): ret = [ret] for obj in ret: if isinstance(obj, Exception): raise obj elif obj is not None: self._apply_script_snapshots.append(obj) return self def use_db_resync(self, criteria, method, argv=None, kwarg=None): ret = None argv = argv or [] kwarg = kwarg or {} self.log.debug(f'criteria {criteria}') self.log.debug(f'method {method}, {argv}, {kwarg}') for attempt in range(3): ret = method(*argv, **kwarg) self.log.debug(f'ret {ret}') if criteria(ret): self.log.debug('criteria matched') return ret self.log.debug(f'resync the DB attempt {attempt}') self.ndb.task_manager.db_flush(self['target']) self.load_event.clear() ( self.ndb._event_queue.put( self.sources[self['target']].api('dump'), source=self['target'], ) ) ( self.ndb._event_queue.put( (cmsg_event(self['target'], self.load_event),), source=self['target'], ) ) self.load_event.wait(self.wtime(1)) self.load_event.clear() return ret def update(self, data): for key, value in data.items(): self.load_value(key, value) def update_from_sql(self, spec): ''' A bit special case: we can have several fields with non unique names. ''' for key, count, value in zip(self.names, self.names_count, spec): if count == 1 or value is not None: self.load_value(key, value) def load_direct(self, key, value): super(RTNL_Object, self).__setitem__(key, value) def load_value(self, key, value): ''' Load a value and clean up the `self.changed` set if the loaded value matches the expectation. 
''' if key in self.fields_load_transform: value = self.fields_load_transform[key](value) if self.load_debug: self.log.debug('load %s: %s' % (key, value)) if key not in self.changed: dict.__setitem__(self, key, value) elif self.get(key) == value: self.changed.remove(key) elif key in self.fields_cmp and self.fields_cmp[key](self, value): self.changed.remove(key) elif self.load_debug: self.log.debug( 'discard %s: %s (expected %s)' % (key, value, self.get(key)) ) def load_sql(self, table=None, ctxid=None, set_state=True): ''' Load the data from the database. ''' if not self.key: return if table is None: if ctxid is None: table = self.etable else: table = '%s_%s' % (self.table, ctxid) keys = [] values = [] for name, value in self.key.items(): keys.append('f_%s = %s' % (name, self.schema.plch)) if isinstance(value, (list, tuple, dict)): value = json.dumps(value) values.append(value) spec = self.ndb.task_manager.db_fetchone( 'SELECT * FROM %s WHERE %s' % (table, ' AND '.join(keys)), values ) self.log.debug('load_sql load: %s' % str(spec)) self.log.debug('load_sql names: %s' % str(self.names)) if set_state: with self.lock: if spec is None: if self.state != 'invalid': # No such object (anymore) self.state.set('invalid') self.changed = set() elif self.state not in ('remove', 'setns'): self.update_from_sql(spec) self.state.set('system') return spec def load_rtnlmsg(self, target, event): ''' Check if the RTNL event matches the object and load the data from the database if it does. ''' # TODO: partial match (object rename / restore) # ... 
# full match for norm, name in zip(self.knorm, self.kspec): value = self.get(norm) if name == 'target': if value != target: return elif name == 'tflags': continue elif value not in (event.get_attr(name), event.get(norm)): return self.log.debug('load_rtnl: %s' % str(event.get('header'))) if event['header'].get('type', 0) % 2: self.state.set('invalid') self.changed = set() else: self.load_sql() self.load_event.set() pyroute2-0.7.11/pyroute2/ndb/objects/address.py000066400000000000000000000215041455030217500213610ustar00rootroot00000000000000''' .. testsetup:: * from socket import AF_INET from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True ndb = NDB() .. testcleanup:: * ndb.close() Using the global view ===================== The `addresses` view provides access to all the addresses registered in the DB, as well as methods to create and remove them: .. testcode:: eth0 = ndb.interfaces['eth0'] # create an address ndb.addresses.create( address='10.0.0.1', prefixlen=24, index=eth0['index'], ).commit() # remove it with ndb.addresses['10.0.0.1/24'] as addr: addr.remove() # list addresses for record in ndb.addresses.summary(): print(record) .. testoutput:: :hide: ('localhost', 0, 'lo', '127.0.0.1', 8) ('localhost', 0, 'eth0', '192.168.122.28', 24) Using ipaddr views ================== Interfaces also provide address views as subsets of the global address view: .. testcode:: with ndb.interfaces['eth0'] as eth0: for record in eth0.ipaddr.summary(): print(record) .. testoutput:: ('localhost', 0, 'eth0', '192.168.122.28', 24) It is possible use the same API as with the global address view: .. testcode:: with ndb.interfaces['eth0'] as eth0: eth0.ipaddr.create( address='10.0.0.1', prefixlen=24 # index is implied ).commit() for record in ndb.addresses.summary(): print(record) .. 
testoutput:: ('localhost', 0, 'lo', '127.0.0.1', 8) ('localhost', 0, 'eth0', '10.0.0.1', 24) ('localhost', 0, 'eth0', '192.168.122.28', 24) Using interface methods ======================= Interfaces provide also simple methods to manage addresses: .. testcode:: with ndb.interfaces['eth0'] as eth0: eth0.del_ip('192.168.122.28/24') # remove an existing address eth0.del_ip(family=AF_INET) # ... or remove all IPv4 addresses eth0.add_ip('10.0.0.1/24') # add a new IP address eth0.add_ip(address='10.0.0.2', prefixlen=24) # ... or using keywords eth0.set('state', 'up') with ndb.addresses.summary() as report: report.select_records(ifname='eth0') for address in report: print(address) .. testoutput:: ('localhost', 0, 'eth0', '10.0.0.1', 24) ('localhost', 0, 'eth0', '10.0.0.2', 24) Functions `add_ip()` and `del_ip()` return the interface object, so they can be chained as in the example above, and the final `commit()` will commit all the changes in the chain. The keywords to `del_ip()` are the same object field names that may be used in the selectors or report filters: .. testcode:: with ndb.interfaces['eth0'] as eth0: eth0.del_ip(prefixlen=24) # remove all addresses with mask /24 A match function that may be passed to the `del_ip()` is the same as for `addresses.dump().select_records()`, and it gets a named tuple as the argument. The fields are named in the same way as address objects fields. So if you want to filter addresses by a pattern or the `prefixlen` field with a match function, you may use: .. testcode:: x1 with ndb.interfaces['eth0'] as eth0: eth0.add_ip('10.0.0.1/25') with ndb.interfaces['eth0'] as eth0: eth0.del_ip(lambda x: x.address.startswith('192.168')) eth0.del_ip(lambda x: x.prefixlen == 25) An empty `del_ip()` removes all the IP addresses on the interface: .. testcode:: x2 with ndb.interfaces['eth0'] as eth0: eth0.del_ip() # flush all the IP:s Accessing one address details ============================= Access an address as a separate RTNL object: .. 
testcode:: x3 print(ndb.addresses['192.168.122.28/24']) .. testoutput:: x3 :hide: {'target': 'localhost', 'address': '192.168.122.28', 'prefixlen': 24, \ 'tflags': 0, 'family': 2, 'index': 2, 'local': '192.168.122.28', \ 'flags': 512, 'scope': 0, 'label': 'eth0', 'broadcast': '192.168.122.255', \ 'anycast': None, 'multicast': None} Please notice that address objects are read-only, you may not change them, only remove old ones, and create new. ''' from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.requests.address import AddressFieldFilter from ..objects import RTNL_Object def load_ifaddrmsg(schema, target, event): # # bypass # schema.load_netlink('addresses', target, event) # # last address removal should trigger routes flush # Bug-Url: https://github.com/svinota/pyroute2/issues/849 # if event['header']['type'] % 2 and event.get('index'): # # check IPv4 addresses on the interface # addresses = schema.execute( ''' SELECT * FROM addresses WHERE f_target = %s AND f_index = %s AND f_family = 2 ''' % (schema.plch, schema.plch), (target, event['index']), ).fetchmany() if not len(addresses): schema.execute( ''' DELETE FROM routes WHERE f_target = %s AND f_RTA_OIF = %s OR f_RTA_IIF = %s ''' % (schema.plch, schema.plch, schema.plch), (target, event['index'], event['index']), ) ifaddr_spec = ( ifaddrmsg.sql_schema() .unique_index('family', 'prefixlen', 'index', 'IFA_ADDRESS', 'IFA_LOCAL') .foreign_key( 'interfaces', ('f_target', 'f_tflags', 'f_index'), ('f_target', 'f_tflags', 'f_index'), ) ) init = { 'specs': [['addresses', ifaddr_spec]], 'classes': [['addresses', ifaddrmsg]], 'event_map': {ifaddrmsg: [load_ifaddrmsg]}, } class Address(RTNL_Object): table = 'addresses' msg_class = ifaddrmsg field_filter = AddressFieldFilter api = 'addr' @classmethod def _count(cls, view): if view.chain: return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s WHERE f_index = %s' % (view.table, view.ndb.schema.plch), [view.chain['index']], ) else: return 
view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s' % view.table ) @classmethod def _dump_where(cls, view): if view.chain: plch = view.ndb.schema.plch where = ''' WHERE main.f_target = %s AND main.f_index = %s ''' % ( plch, plch, ) values = [view.chain['target'], view.chain['index']] else: where = '' values = [] return (where, values) @classmethod def summary(cls, view): req = ''' SELECT main.f_target, main.f_tflags, intf.f_IFLA_IFNAME, main.f_IFA_ADDRESS, main.f_prefixlen FROM addresses AS main INNER JOIN interfaces AS intf ON main.f_index = intf.f_index AND main.f_target = intf.f_target ''' yield ('target', 'tflags', 'ifname', 'address', 'prefixlen') where, values = cls._dump_where(view) for record in view.ndb.task_manager.db_fetch(req + where, values): yield record def mark_tflags(self, mark): plch = (self.schema.plch,) * 3 self.schema.execute( ''' UPDATE interfaces SET f_tflags = %s WHERE f_index = %s AND f_target = %s ''' % plch, (mark, self['index'], self['target']), ) def __init__(self, *argv, **kwarg): kwarg['iclass'] = ifaddrmsg self.event_map = {ifaddrmsg: "load_rtnlmsg"} super(Address, self).__init__(*argv, **kwarg) @staticmethod def compare_record(left, right): if isinstance(right, str): return right == left['address'] or right == '%s/%i' % ( left['address'], left['prefixlen'], ) @classmethod def spec_normalize(cls, processed, spec): ''' Address key normalization:: { ... } -> { ... } "10.0.0.1/24" -> {"address": "10.0.0.1", "prefixlen": 24} ''' if isinstance(spec, str): processed['address'] = spec return processed def key_repr(self): return '%s/%s %s/%s' % ( self.get('target', ''), self.get('label', self.get('index', '')), self.get('local', self.get('address', '')), self.get('prefixlen', ''), ) pyroute2-0.7.11/pyroute2/ndb/objects/interface.py000066400000000000000000001033341455030217500216760ustar00rootroot00000000000000''' .. 
testsetup:: from pyroute2 import IPMock as IPRoute from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True .. testsetup:: preset_1 from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True ndb = NDB( sources=[ {'target': 'localhost', 'kind': 'IPMock'}, {'target': 'worker1.sample.com', 'kind': 'IPMock'}, {'target': 'worker2.sample.com', 'kind': 'IPMock'}, ] ) .. testsetup:: preset_br0_1 from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True ndb = NDB() ndb.interfaces.create(ifname='eth1', kind='dummy').commit() ndb.interfaces.create(ifname='br0', kind='bridge').commit() ndb.interfaces.create(ifname='bond0', kind='bond').commit() .. testsetup:: preset_br0_2 from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True ndb = NDB() ndb.interfaces.create(ifname='br0', kind='bridge').commit() ndb.interfaces['br0'].add_port('eth0').commit() List interfaces =============== List interface keys: .. testcode:: with NDB(log='on') as ndb: for key in ndb.interfaces: print(key) .. testoutput:: :hide: ('localhost', 0, 0, 772, 1, 1, 0, '00:00:00:00:00:00', \ '00:00:00:00:00:00', 'lo', 65536, None, 'noqueue', None, 1000, 'UNKNOWN', 0, \ None, None, None, 0, None, 0, 1, 1, 1, 0, None, None, 0, 65535, 65536, None, \ None, None, 0, 0, None, None, None, None, None, None, 65536, None, None, \ 'up', None, None, None, None, None, None, None, None, '[]') ('localhost', 0, 0, 772, 2, 1, 0, '52:54:00:72:58:b2', \ 'ff:ff:ff:ff:ff:ff', 'eth0', 1500, None, 'fq_codel', None, 1000, 'UNKNOWN', \ 0, None, None, None, 0, None, 0, 1, 1, 1, 0, None, None, 0, 65535, 65536, \ None, None, None, 0, 0, None, None, None, None, None, None, 65536, None, \ None, 'up', None, None, None, None, None, None, None, None, '[]') NDB views support some dict methods: `items()`, `values()`, `keys()`: .. 
testcode:: with NDB(log='on') as ndb: for key, nic in ndb.interfaces.items(): nic.set('state', 'up') nic.commit() Get interface objects ===================== The keys may be used as selectors to get interface objects: .. testcode:: with NDB() as ndb: for key in ndb.interfaces: print(ndb.interfaces[key]) .. testoutput:: :hide: :options: +ELLIPSIS ... Also possible selector formats are `dict()` and simple string. The latter means the interface name: .. testcode:: preset_1 eth0 = ndb.interfaces['eth0'] Dict selectors are necessary to get interfaces by other properties: .. testcode:: preset_1 wrk1_eth0 = ndb.interfaces[{'target': 'worker1.sample.com', 'ifname': 'eth0'}] wrk2_eth0 = ndb.interfaces[{'target': 'worker2.sample.com', 'address': '52:54:00:72:58:b2'}] Change nic properties ===================== Changing MTU and MAC address: .. testcode:: preset_1 with ndb.interfaces['eth0'] as eth0: eth0['mtu'] = 1248 eth0['address'] = '00:11:22:33:44:55' # --> <-- eth0.commit() is called by the context manager One can change a property either using the assignment statement, or using the `.set()` routine: .. testcode:: preset_1 # same code with ndb.interfaces['eth0'] as eth0: eth0.set('mtu', 1248) eth0.set('address', '00:11:22:33:44:55') Create virtual interfaces ========================= Create a bridge and add a port, `eth0`: .. testcode:: preset_1 with ndb.interfaces.create(ifname='br0', kind='bridge') as br0: br0.add_port('eth0') Bridge and bond ports ===================== Add bridge and bond ports one can use specific API: .. testcode:: preset_br0_1 with ndb.interfaces['br0'] as br0: br0.add_port('eth0') br0.add_port('eth1') br0.set('br_max_age', 1024) br0.set('br_forward_delay', 1500) with ndb.interfaces['bond0'] as bond0: bond0.add_port('eth0') bond0.add_port('eth1') To remove a port: .. testcode:: preset_br0_2 with ndb.interfaces['br0'] as br0: br0.del_port('eth0') Or by setting the master property on a port, in the same way as with `IPRoute`: .. 
testcode:: preset_br0_1 index = ndb.interfaces['br0']['index'] # add a port to a bridge with ndb.interfaces['eth0'] as eth0: eth0.set('master', index) # remove a port from a bridge with ndb.interfaces['eth0'] as eth0: eth0.set('master', 0) ''' import errno import json import traceback from pyroute2.common import basestring from pyroute2.config import AF_BRIDGE from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.netlink.rtnl.p2pmsg import p2pmsg from pyroute2.requests.link import LinkFieldFilter from ..auth_manager import AuthManager, check_auth from ..objects import RTNL_Object def load_ifinfmsg(schema, target, event): # # link goes down: flush all related routes # if not event['flags'] & 1: schema.execute( 'DELETE FROM routes WHERE ' 'f_target = %s AND ' 'f_RTA_OIF = %s OR f_RTA_IIF = %s' % (schema.plch, schema.plch, schema.plch), (target, event['index'], event['index']), ) # # ignore wireless updates # if event.get_attr('IFLA_WIRELESS'): return # # IFLA_PROP_LIST, IFLA_ALT_IFNAME # prop_list = event.get('IFLA_PROP_LIST') event['alt_ifname_list'] = [] if prop_list is not None: for ifname in prop_list.altnames(): event['alt_ifname_list'].append(ifname) # # AF_BRIDGE events # if event['family'] == AF_BRIDGE: # schema.load_netlink('af_bridge_ifs', target, event) try: vlans = event.get_attr('IFLA_AF_SPEC').get_attrs( 'IFLA_BRIDGE_VLAN_INFO' ) except AttributeError: # AttributeError: 'NoneType' object has no attribute 'get_attrs' # -- vlan filters not supported return # flush the old vlans info schema.execute( ''' DELETE FROM af_bridge_vlans WHERE f_target = %s AND f_index = %s ''' % (schema.plch, schema.plch), (target, event['index']), ) for v in vlans: v['index'] = event['index'] v['header'] = {'type': event['header']['type']} schema.load_netlink('af_bridge_vlans', target, v) return schema.load_netlink('interfaces', target, event) # # load ifinfo, if exists # if not event['header'].get('type', 0) % 2: 
linkinfo = event.get_attr('IFLA_LINKINFO') if linkinfo is not None: iftype = linkinfo.get_attr('IFLA_INFO_KIND') table = 'ifinfo_%s' % iftype if iftype == 'gre': ifdata = linkinfo.get_attr('IFLA_INFO_DATA') local = ifdata.get_attr('IFLA_GRE_LOCAL') remote = ifdata.get_attr('IFLA_GRE_REMOTE') p2p = p2pmsg() p2p['index'] = event['index'] p2p['family'] = 2 p2p['attrs'] = [('P2P_LOCAL', local), ('P2P_REMOTE', remote)] schema.load_netlink('p2p', target, p2p) elif iftype == 'veth': link = event.get_attr('IFLA_LINK') ifname = event.get_attr('IFLA_IFNAME') # for veth interfaces, IFLA_LINK points to # the peer -- but NOT in automatic updates if (not link) and ( (target,) in schema.fetch('SELECT f_target FROM SOURCES') ): schema.log.debug('reload veth %s' % event['index']) try: update = schema.sources[target].api( 'link', 'get', index=event['index'] ) update = tuple(update)[0] return schema.load_netlink( 'interfaces', target, update ) except NetlinkError as e: if e.code == errno.ENODEV: schema.log.debug(f"interface has gone: {ifname}") if table in schema.spec: ifdata = linkinfo.get_attr('IFLA_INFO_DATA') if ifdata is not None: ifdata['header'] = {} ifdata['index'] = event['index'] schema.load_netlink(table, target, ifdata) ip_tunnels = ('gre', 'gretap', 'ip6gre', 'ip6gretap', 'ip6tnl', 'sit', 'ipip') schema_ifinfmsg = ( ifinfmsg.sql_schema().push('alt_ifname_list', 'TEXT').unique_index('index') ) schema_brinfmsg = ( ifinfmsg.sql_schema() .unique_index('index') .foreign_key( 'interface', ('f_target', 'f_tflags', 'f_index'), ('f_target', 'f_tflags', 'f_index'), ) ) schema_p2pmsg = ( p2pmsg.sql_schema() .unique_index('index') .foreign_key( 'interfaces', ('f_target', 'f_tflags', 'f_index'), ('f_target', 'f_tflags', 'f_index'), ) ) schema_af_bridge_vlans = ( ifinfmsg.af_spec_bridge.vlan_info.sql_schema() .push('index', 'INTEGER') .unique_index('vid', 'index') .foreign_key( 'af_bridge_ifs', ('f_target', 'f_tflags', 'f_index'), ('f_target', 'f_tflags', 'f_index'), ) ) init = { 
'specs': [ ['interfaces', schema_ifinfmsg], ['af_bridge_ifs', schema_ifinfmsg], ['af_bridge_vlans', schema_af_bridge_vlans], ['p2p', schema_p2pmsg], ], 'classes': [ ['interfaces', ifinfmsg], ['af_bridge_ifs', ifinfmsg], ['vlans', ifinfmsg], ['af_bridge_vlans', ifinfmsg.af_spec_bridge.vlan_info], ['p2p', p2pmsg], ], 'event_map': {ifinfmsg: [load_ifinfmsg]}, } ifinfo_names = ( 'bridge', 'bond', 'vlan', 'vxlan', 'gre', 'gretap', 'ip6gre', 'ip6gretap', 'ip6tnl', 'ipip', 'ipvlan', 'sit', 'macvlan', 'macvtap', 'tun', 'vrf', 'vti', 'vti6', ) supported_ifinfo = {x: ifinfmsg.ifinfo.data_map[x] for x in ifinfo_names} # # load supported ifinfo # for name, data in supported_ifinfo.items(): name = 'ifinfo_%s' % name init['classes'].append([name, data]) schema = ( data.sql_schema() .push('index', 'BIGINT') .unique_index('index') .foreign_key( 'interfaces', ('f_target', 'f_tflags', 'f_index'), ('f_target', 'f_tflags', 'f_index'), ) ) init['specs'].append([name, schema]) def _cmp_master(self, value): if self['master'] == value: return True elif self['master'] == 0 and value is None: dict.__setitem__(self, 'master', None) return True return False class Vlan(RTNL_Object): table = 'af_bridge_vlans' msg_class = ifinfmsg.af_spec_bridge.vlan_info api = 'vlan_filter' @classmethod def _count(cls, view): if view.chain: return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s WHERE f_index = %s' % (view.table, view.ndb.schema.plch), [view.chain['index']], ) else: return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s' % view.table ) @classmethod def _dump_where(cls, view): if view.chain: plch = view.ndb.schema.plch where = ''' WHERE main.f_target = %s AND main.f_index = %s ''' % ( plch, plch, ) values = [view.chain['target'], view.chain['index']] else: where = '' values = [] return (where, values) @classmethod def summary(cls, view): req = ''' SELECT main.f_target, main.f_tflags, main.f_vid, intf.f_IFLA_IFNAME FROM af_bridge_vlans AS main INNER JOIN interfaces AS intf 
ON main.f_index = intf.f_index AND main.f_target = intf.f_target ''' yield ('target', 'tflags', 'vid', 'ifname') where, values = cls._dump_where(view) for record in view.ndb.task_manager.db_fetch(req + where, values): yield record @staticmethod def compare_record(left, right): if isinstance(right, int): return right == left['vid'] def __init__(self, *argv, **kwarg): kwarg['iclass'] = ifinfmsg.af_spec_bridge.vlan_info if 'auth_managers' not in kwarg or kwarg['auth_managers'] is None: kwarg['auth_managers'] = [] log = argv[0].ndb.log.channel('vlan auth') kwarg['auth_managers'].append( AuthManager( {'obj:read': True, 'obj:list': True, 'obj:modify': False}, log ) ) super(Vlan, self).__init__(*argv, **kwarg) def make_req(self, prime): ret = {} if 'index' in self: ret['index'] = self['index'] ret['vlan_info'] = {'vid': self['vid']} if 'flags' in self: ret['vlan_info']['flags'] = self['flags'] return ret def make_idx_req(self, prime): return self.make_req(prime) class Interface(RTNL_Object): table = 'interfaces' msg_class = ifinfmsg api = 'link' key_extra_fields = ['IFLA_IFNAME'] resolve_fields = ['vxlan_link', 'link', 'master'] fields_cmp = {'master': _cmp_master} fields_load_transform = { 'alt_ifname_list': lambda x: list(json.loads(x or '[]')) } field_filter = LinkFieldFilter @classmethod def _count(cls, view): if view.chain: return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s WHERE f_IFLA_MASTER = %s' % (view.table, view.ndb.schema.plch), [view.chain['index']], ) else: return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s' % view.table ) @classmethod def _dump_where(cls, view): if view.chain: plch = view.ndb.schema.plch where = ''' WHERE f_target = %s AND f_IFLA_MASTER = %s ''' % ( plch, plch, ) values = [view.chain['target'], view.chain['index']] else: where = 'WHERE f_index != 0' values = [] return (where, values) @classmethod def summary(cls, view): req = ''' SELECT f_target, f_tflags, f_index, f_IFLA_IFNAME, f_IFLA_ADDRESS, f_flags, 
f_IFLA_INFO_KIND FROM interfaces ''' yield ( 'target', 'tflags', 'index', 'ifname', 'address', 'flags', 'kind', ) where, values = cls._dump_where(view) for record in view.ndb.task_manager.db_fetch(req + where, values): yield record def mark_tflags(self, mark): plch = (self.schema.plch,) * 3 self.schema.execute( ''' UPDATE interfaces SET f_tflags = %s WHERE f_index = %s AND f_target = %s ''' % plch, (mark, self['index'], self['target']), ) def __init__(self, *argv, **kwarg): kwarg['iclass'] = ifinfmsg self.event_map = {ifinfmsg: "load_rtnlmsg"} self._alt_ifname_orig = set() dict.__setitem__(self, 'alt_ifname_list', list()) dict.__setitem__(self, 'state', 'unknown') warnings = [] if isinstance(argv[1], dict): if 'reuse' in argv[1]: warnings.append('ignore IPDB-specific `reuse` keyword') del argv[1]['reuse'] if argv[1].get('create') and 'ifname' not in argv[1]: raise Exception('specify at least ifname') # type specific cases if argv[1].get('kind') == 'tuntap': # translate custom tuntap format into the native tun warnings.append('translated tuntap ifinfo into tun, no flags') argv[1]['kind'] = 'tun' if argv[1].get('mode') == 'tun': argv[1]['tun_type'] = 1 elif argv[1].get('mode') == 'tap': argv[1]['tun_type'] = 2 else: raise TypeError('tun type error') del argv[1]['mode'] if 'uid' in argv[1]: argv[1]['tun_owner'] = argv[1].pop('uid') if 'gid' in argv[1]: argv[1]['tun_owner'] = argv[1].pop('gid') super(Interface, self).__init__(*argv, **kwarg) for line in warnings: self.log.warning(line) @property def ipaddr(self): return self.view.ndb._get_view('addresses', chain=self) @property def ports(self): return self.view.ndb._get_view('interfaces', chain=self) @property def routes(self): return self.view.ndb._get_view('routes', chain=self) @property def neighbours(self): return self.view.ndb._get_view('neighbours', chain=self) @property def vlans(self): return self.view.ndb._get_view('af_bridge_vlans', chain=self) @property def context(self): ctx = {} if self.get('target'): 
ctx['target'] = self['target'] if self.get('index'): ctx['index'] = self['index'] return ctx @classmethod def compare_record(self, left, right): # specific compare if isinstance(right, basestring): return right == left['ifname'] or right == left['address'] @check_auth('obj:modify') def add_vlan(self, spec): def do_add_vlan(self, mode, spec): try: method = getattr(self.vlan.create(spec), mode) return [method()] except Exception as e_s: e_s.trace = traceback.format_stack() return [e_s] self._apply_script.append((do_add_vlan, {'spec': spec})) return self @check_auth('obj:modify') def del_vlan(self, spec): def do_del_vlan(self, mode, spec): try: method = getattr(self.vlan[spec].remove(), mode) return [method()] except Exception as e_s: e_s.trace = traceback.format_stack() return [e_s] self._apply_script.append((do_del_vlan, {'spec': spec})) return self @check_auth('obj:modify') def add_neighbour(self, spec=None, **kwarg): spec = spec or kwarg def do_add_neighbour(self, mode, spec): try: method = getattr(self.neighbours.create(spec), mode) return [method()] except Exception as e_s: e_s.trace = traceback.format_stack() return [e_s] self._apply_script.append((do_add_neighbour, {'spec': spec})) return self @check_auth('obj:modify') def del_neighbour(self, spec=None, **kwarg): spec = spec or dict(kwarg) def do_del_neighbour(self, mode, spec): ret = [] if isinstance(spec, basestring): specs = [spec] elif callable(spec): specs = self.ipaddr.dump() specs.select_records(spec) else: specs = self.ipaddr.dump() specs.select_records(**spec) for sp in specs: try: method = getattr(self.neighbours.locate(sp).remove(), mode) ret.append(method()) except KeyError: pass except Exception as e_s: e_s.trace = traceback.format_stack() ret.append(e_s) if not ret: ret = KeyError('no neighbour records matched') return ret self._apply_script.append((do_del_neighbour, {'spec': spec})) return self @check_auth('obj:modify') def add_ip(self, spec=None, **kwarg): spec = spec or kwarg def 
do_add_ip(self, mode, spec): try: method = getattr(self.ipaddr.create(spec), mode) return [method()] except Exception as e_s: e_s.trace = traceback.format_stack() return [e_s] self._apply_script.append((do_add_ip, {'spec': spec})) return self @check_auth('obj:modify') def del_ip(self, spec=None, **kwarg): spec = spec or kwarg def do_del_ip(self, mode, spec): ret = [] if isinstance(spec, basestring): specs = [spec] elif callable(spec): specs = self.ipaddr.dump() specs.select_records(spec) else: specs = self.ipaddr.dump() specs.select_records(**spec) for sp in specs: try: method = getattr(self.ipaddr.locate(sp).remove(), mode) ret.append(method()) except KeyError: pass except Exception as e_s: e_s.trace = traceback.format_stack() ret.append(e_s) if not ret: ret = KeyError('no address records matched') return ret self._apply_script.append((do_del_ip, {'spec': spec})) return self @check_auth('obj:modify') def add_port(self, spec): def do_add_port(self, mode, spec): try: port = self.view[spec] if port['target'] != self['target']: raise ValueError('target must be the same') port['master'] = self['index'] getattr(port, mode)() return [port] except Exception as e_s: e_s.trace = traceback.format_stack() return [e_s] self._apply_script.append((do_add_port, {'spec': spec})) return self @check_auth('obj:modify') def del_port(self, spec): def do_del_port(self, mode, spec): try: port = self.view[spec] if port['master'] != self['index']: raise ValueError('wrong port master index') if port['target'] != self['target']: raise ValueError('target must be the same') port['master'] = 0 getattr(port, mode)() return [port] except Exception as e_s: e_s.trace = traceback.format_stack() return [e_s] self._apply_script.append((do_del_port, {'spec': spec})) return self @check_auth('obj:modify') def add_altname(self, ifname): new_list = set(self['alt_ifname_list']) new_list.add(ifname) self['alt_ifname_list'] = list(new_list) @check_auth('obj:modify') def del_altname(self, ifname): new_list = 
set(self['alt_ifname_list']) new_list.remove(ifname) self['alt_ifname_list'] = list(new_list) @check_auth('obj:modify') def __setitem__(self, key, value): if key == 'peer': dict.__setitem__(self, key, value) elif key == 'target' and self.state == 'invalid': dict.__setitem__(self, key, value) elif key == 'net_ns_fd' and self.state == 'invalid': dict.__setitem__(self, 'target', value) elif ( key == 'target' and self.get('target') and self['target'] != value ): super(Interface, self).__setitem__('net_ns_fd', value) else: super(Interface, self).__setitem__(key, value) @classmethod def spec_normalize(cls, processed, spec): ''' Interface key normalization:: { ... } -> { ... } "eth0" -> {"ifname": "eth0", ...} 1 -> {"index": 1, ...} ''' if isinstance(spec, basestring): processed['ifname'] = spec elif isinstance(spec, int): processed['index'] = spec return processed def complete_key(self, key): if isinstance(key, dict): ret_key = key else: ret_key = {'target': self.ndb.localhost} if isinstance(key, basestring): ret_key['ifname'] = key elif isinstance(key, int): ret_key['index'] = key return super(Interface, self).complete_key(ret_key) def is_peer(self, other): '''Evaluate whether the given interface "points at" this one.''' if other['kind'] == 'vlan': return ( other['target'] == self['target'] and other['link'] == self['index'] ) elif other['kind'] == 'vxlan': return ( other['target'] == self['target'] and other['vxlan_link'] == self['index'] ) elif other['kind'] == self['kind'] == 'veth': other_link = other.get('link') if other_link != self['index']: return False other_link_netnsid = other.get('link_netnsid') if other_link_netnsid is not None: self_source = self.sources[self['target']] other_source = other.sources[other['target']] info = other_source.api( 'get_netnsid', pid=self_source.api('get_pid'), target_nsid=other_link_netnsid, ) return info['current_nsid'] == other_link_netnsid return self['target'] == other['target'] def set_xdp_fd(self, fd): 
self.sources[self['target']].api( 'link', 'set', index=self['index'], xdp_fd=fd ) def snapshot(self, ctxid=None): # 1. make own snapshot snp = super(Interface, self).snapshot(ctxid=ctxid) # 2. collect dependencies and store in self.snapshot_deps for spec in self.ndb.interfaces.getmany( {'IFLA_MASTER': self['index']} ): # bridge ports link = type(self)( self.view, spec, auth_managers=self.auth_managers ) snp.snapshot_deps.append((link, link.snapshot())) for spec in self.ndb.interfaces.getmany({'IFLA_LINK': self['index']}): link = type(self)( self.view, spec, auth_managers=self.auth_managers ) # vlans & veth if self.is_peer(link) and not link.is_peer(self): snp.snapshot_deps.append((link, link.snapshot())) # return the root node return snp def make_req(self, prime): req = super(Interface, self).make_req(prime) # # --> link('set', ...) if self.state == 'system': req['master'] = self['master'] # # FIXME: make type plugins? kind = self['kind'] if kind in ip_tunnels: req['kind'] = kind for key in self: if ( key.startswith(f'{kind}_') and key not in req and self[key] ): req[key] = self[key] return req @check_auth('obj:modify') def apply_altnames(self, alt_ifname_setup): alt_ifname_remove = set(self['alt_ifname_list']) - alt_ifname_setup alt_ifname_add = alt_ifname_setup - set(self['alt_ifname_list']) for ifname in alt_ifname_remove: self.sources[self['target']].api( 'link', 'property_del', index=self['index'], altname=ifname ) for ifname in alt_ifname_add: self.sources[self['target']].api( 'link', 'property_add', index=self['index'], altname=ifname ) self.load_from_system() self.load_sql(set_state=False) if set(self['alt_ifname_list']) != alt_ifname_setup: raise Exception('could not setup alt ifnames') @check_auth('obj:modify') def apply(self, rollback=False, req_filter=None, mode='apply'): # translate string link references into numbers for key in ('link', 'master'): if key in self and isinstance(self[key], basestring): self[key] = self.ndb.interfaces[self[key]]['index'] 
setns = self.state.get() == 'setns' remove = self.state.get() == 'remove' alt_ifname_setup = set(self['alt_ifname_list']) if 'alt_ifname_list' in self.changed: self.changed.remove('alt_ifname_list') try: super(Interface, self).apply(rollback, req_filter, mode) if setns: self.load_value('target', self['net_ns_fd']) dict.__setitem__(self, 'net_ns_fd', None) spec = self.load_sql() if spec: self.state.set('system') if not remove: self.apply_altnames(alt_ifname_setup) except NetlinkError as e: if ( e.code == 95 and self.get('master') is not None and self.get('master') > 0 and self.state == 'invalid' ): # # on some old kernels it is impossible to create # interfaces with master set; attempt to do it in # two steps def req_filter(req): return dict( [ x for x in req.items() if not x[0].startswith('master') ] ) self.apply(rollback, req_filter, mode) self.apply(rollback, None, mode) elif ( e.code == 95 and self.get('br_vlan_filtering') is not None and self.get('br_vlan_filtering') == 0 ): # # if vlan filtering is not enabled, then the parameter # is reported by netlink, but not accepted upon bridge # creation, so simply strip it def req_filter(req): return dict( [ x for x in req.items() if not x[0].startswith('br_vlan_') ] ) self.apply(rollback, req_filter, mode) else: raise if ('net_ns_fd' in self.get('peer', {})) and ( self['peer']['net_ns_fd'] in self.view.ndb.sources ): # wait for the peer in net_ns_fd, only if the netns # is connected to the NDB instance self.view.wait( target=self['peer']['net_ns_fd'], ifname=self['peer']['ifname'], timeout=5, ) return self def hook_apply(self, method, **spec): if method == 'set': if self['kind'] == 'bridge': keys = filter(lambda x: x.startswith('br_'), self.changed) if keys: req = { 'index': self['index'], 'kind': 'bridge', 'family': AF_BRIDGE, } for key in keys: req[key] = self[key] self.sources[self['target']].api(self.api, method, **req) # FIXME: make a reasonable shortcut for this self.load_from_system() elif self['kind'] in 
ip_tunnels and self['state'] == 'down': # force reading attributes for tunnels in the down state self.load_from_system() elif method == 'add': if self['kind'] == 'tun': self.load_sql() self.load_event.wait(0.1) if 'index' not in self: raise NetlinkError(errno.EAGAIN) update = self.sources[self['target']].api( self.api, 'get', index=self['index'] ) self.ndb._event_queue.put(update) def load_from_system(self): ( self.ndb._event_queue.put( self.sources[self['target']].api( self.api, 'get', index=self['index'] ) ) ) def load_sql(self, *argv, **kwarg): spec = super(Interface, self).load_sql(*argv, **kwarg) if spec: tname = 'ifinfo_%s' % self['kind'] if tname in self.schema.compiled: names = self.schema.compiled[tname]['norm_names'] spec = self.ndb.task_manager.db_fetchone( 'SELECT * from %s WHERE f_index = %s' % (tname, self.schema.plch), (self['index'],), ) if spec: self.update(dict(zip(names, spec))) return spec def load_rtnlmsg(self, *argv, **kwarg): super(Interface, self).load_rtnlmsg(*argv, **kwarg) def key_repr(self): return '%s/%s' % ( self.get('target', ''), self.get('ifname', self.get('index', '')), ) pyroute2-0.7.11/pyroute2/ndb/objects/neighbour.py000066400000000000000000000114351455030217500217200ustar00rootroot00000000000000import errno from pyroute2.common import basestring from pyroute2.config import AF_BRIDGE from pyroute2.netlink.rtnl.ndmsg import ndmsg from pyroute2.requests.neighbour import NeighbourFieldFilter from ..events import RescheduleException from ..objects import RTNL_Object def load_ndmsg(schema, target, event): # # ignore events with ifindex == 0 # if event['ifindex'] == 0: return # if event.get_attr('NDA_IFINDEX') is None: event['attrs'].append(('NDA_IFINDEX', event['ifindex'])) # # AF_BRIDGE events # if event['family'] == AF_BRIDGE: # # bypass for now # try: schema.load_netlink('af_bridge_fdb', target, event, propagate=True) except Exception: raise RescheduleException() else: schema.load_netlink('neighbours', target, event) ndmsg_schema 
= ( ndmsg.sql_schema() .unique_index('ifindex', 'NDA_DST', 'NDA_VLAN') .constraint('NDA_DST', "NOT NULL DEFAULT ''") .constraint('NDA_VLAN', "NOT NULL DEFAULT 0") .foreign_key( 'interfaces', ('f_target', 'f_tflags', 'f_ifindex'), ('f_target', 'f_tflags', 'f_index'), ) ) brmsg_schema = ( ndmsg.sql_schema() .unique_index('ifindex', 'NDA_LLADDR', 'NDA_DST', 'NDA_VLAN') .constraint('NDA_LLADDR', "NOT NULL DEFAULT ''") .constraint('NDA_DST', "NOT NULL DEFAULT ''") .constraint('NDA_VLAN', "NOT NULL DEFAULT 0") .foreign_key( 'interfaces', ('f_target', 'f_tflags', 'f_ifindex'), ('f_target', 'f_tflags', 'f_index'), ) ) init = { 'specs': [['neighbours', ndmsg_schema], ['af_bridge_fdb', brmsg_schema]], 'classes': [['neighbours', ndmsg], ['af_bridge_fdb', ndmsg]], 'event_map': {ndmsg: [load_ndmsg]}, } def fallback_add(self, idx_req, req): ( self.ndb._event_queue.put( self.sources[self['target']].api(self.api, 'dump'), source=self['target'], ) ) self.load_sql() class Neighbour(RTNL_Object): table = 'neighbours' msg_class = ndmsg field_filter = NeighbourFieldFilter api = 'neigh' @classmethod def _count(cls, view): if view.chain: return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s WHERE f_ifindex = %s' % (view.table, view.ndb.schema.plch), [view.chain['index']], ) else: return view.ndb.task_manager.db_fetchone( 'SELECT count(*) FROM %s' % view.table ) @classmethod def _dump_where(cls, view): if view.chain: plch = view.ndb.schema.plch where = ''' WHERE main.f_target = %s AND main.f_ifindex = %s ''' % ( plch, plch, ) values = [view.chain['target'], view.chain['index']] else: where = '' values = [] return (where, values) @classmethod def summary(cls, view): req = f''' SELECT main.f_target, main.f_tflags, intf.f_IFLA_IFNAME, main.f_NDA_LLADDR, main.f_NDA_DST FROM {cls.table} AS main INNER JOIN interfaces AS intf ON main.f_ifindex = intf.f_index AND main.f_target = intf.f_target ''' yield ('target', 'tflags', 'ifname', 'lladdr', 'dst') where, values = 
cls._dump_where(view) for record in view.ndb.task_manager.db_fetch(req + where, values): yield record def __init__(self, *argv, **kwarg): kwarg['iclass'] = ndmsg self.event_map = {ndmsg: "load_rtnlmsg"} super(Neighbour, self).__init__(*argv, **kwarg) self.fallback_for['add'][errno.EEXIST] = fallback_add def complete_key(self, key): if isinstance(key, dict): ret_key = key else: ret_key = {'target': self.ndb.localhost} if isinstance(key, basestring): ret_key['NDA_DST'] = key return super(Neighbour, self).complete_key(ret_key) def make_req(self, prime): req = super(Neighbour, self).make_req(prime) if 'vlan' in req and req['vlan'] == 0: req.pop('vlan') return req class FDBRecord(Neighbour): table = 'af_bridge_fdb' msg_class = ndmsg api = 'fdb' @classmethod def summary(cls, view): for record in super(FDBRecord, cls).summary(view): yield record[:-1] def make_idx_req(self, prime): req = super(FDBRecord, self).make_req(prime) if 'NDA_VLAN' in req and req['NDA_VLAN'] == 0: req.pop('NDA_VLAN') return req pyroute2-0.7.11/pyroute2/ndb/objects/netns.py000066400000000000000000000036711455030217500210700ustar00rootroot00000000000000import warnings from pyroute2 import netns from pyroute2.common import basestring from pyroute2.netlink.rtnl.nsinfmsg import nsinfmsg from pyroute2.requests.netns import NetNSFieldFilter from ..objects import RTNL_Object def load_nsinfmsg(schema, target, event): # # check if there is corresponding source # netns_path = event.get_attr('NSINFO_PATH') if netns_path is None: schema.log.debug('ignore %s %s' % (target, event)) return if schema.config['auto_netns']: warnings.warn( 'automatic netns sourcing is being refactored', DeprecationWarning ) schema.load_netlink('netns', target, event) schema = nsinfmsg.sql_schema().unique_index('NSINFO_PATH') init = { 'specs': [['netns', schema]], 'classes': [['netns', nsinfmsg]], 'event_map': {nsinfmsg: [load_nsinfmsg]}, } class NetNS(RTNL_Object): table = 'netns' msg_class = nsinfmsg table_alias = 'n' api = 'netns' 
field_filter = NetNSFieldFilter def __init__(self, *argv, **kwarg): kwarg['iclass'] = nsinfmsg self.event_map = {nsinfmsg: "load_rtnlmsg"} super(NetNS, self).__init__(*argv, **kwarg) @classmethod def spec_normalize(cls, processed, spec): if isinstance(spec, basestring): processed['path'] = spec path = netns._get_netnspath(processed['path']) # on Python3 _get_netnspath() returns bytes, not str, so # we have to decode it here in order to avoid issues with # cache keys and DB inserts if hasattr(path, 'decode'): path = path.decode('utf-8') processed['path'] = path return processed def __setitem__(self, key, value): if self.state == 'system': raise ValueError('attempt to change a readonly object') if key == 'path': value = netns._get_netnspath(value).decode('utf-8') return super(NetNS, self).__setitem__(key, value) pyroute2-0.7.11/pyroute2/ndb/objects/route.py000066400000000000000000000703331455030217500210760ustar00rootroot00000000000000''' .. testsetup:: from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True ndb = NDB() .. testsetup:: tables from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True ndb = NDB() ndb.routes.create( dst='1.1.1.1/32', gateway='127.0.0.10', oif=1, table=101 ).commit() ndb.routes.create( dst='1.1.1.2/32', gateway='127.0.0.10', oif=1, table=5001 ).commit() ndb.routes.create( dst='1.1.1.3/32', gateway='127.0.0.10', oif=1, table=5002 ).commit() .. testsetup:: metrics from pyroute2 import NDB from pyroute2 import config config.mock_iproute = True ndb = NDB() ndb.routes.create( dst='10.0.0.0/24', gateway='127.0.0.10' ).commit() Simple routes ============= Ordinary routes management is really simple: .. 
testcode:: # create a route ndb.routes.create( dst='10.0.0.0/24', gateway='192.168.122.1' ).commit() # retrieve a route and change it with ndb.routes['10.0.0.0/24'] as route: route.set(gateway='192.168.122.10') # remove a route with ndb.routes['10.0.0.0/24'] as route: route.remove() Multiple routing tables ======================= But Linux systems have more than one routing table: .. doctest:: tables >>> set((x.table for x in ndb.routes.summary())) {101, 5001, 5002, 254, 255} The main routing table is 254. All the routes people mostly work with are in that table. To address routes in other routing tables, you can use dict specs: .. testcode:: ndb.routes.create( dst='10.0.0.0/24', gateway='192.168.122.1', table=101 ).commit() with ndb.routes[{'table': 101, 'dst': '10.0.0.0/24'}] as route: route.set('gateway', '192.168.122.10') route.set('priority', 500) with ndb.routes[{'table': 101, 'dst': '10.0.0.0/24'}] as route: route.remove() Route metrics ============= `route['metrics']` attribute provides a dictionary-like object that reflects route metrics like hop limit, mtu etc: .. 
testcode:: metrics # set up all metrics from a dictionary with ndb.routes['10.0.0.0/24'] as route: route.set('metrics', {'mtu': 1500, 'hoplimit': 20}) # fix individual metrics with ndb.routes['10.0.0.0/24']['metrics'] as metrics: metrics.set('mtu', 1500) metrics.set('hoplimit', 20) MPLS routes =========== See here: :ref:`mpls` ''' import json import struct import time import uuid from collections import OrderedDict from functools import partial from socket import AF_INET, inet_pton from pyroute2.common import AF_MPLS, basestring from pyroute2.netlink.rtnl.rtmsg import LWTUNNEL_ENCAP_MPLS, nh, rtmsg from pyroute2.requests.common import MPLSTarget from pyroute2.requests.route import RouteFieldFilter from ..auth_manager import check_auth from ..objects import RTNL_Object from ..report import Record _dump_rt = ['main.f_%s' % x[0] for x in rtmsg.sql_schema()][:-2] _dump_nh = ['nh.f_%s' % x[0] for x in nh.sql_schema()][:-2] F_RTA_MULTIPATH = 1 F_RTA_ENCAP = 2 F_RTA_METRICS = 4 def get_route_id(schema, target, event): keys = ['f_target = %s' % schema.plch] values = [target] for key in schema.indices['routes']: keys.append('f_%s = %s' % (key, schema.plch)) values.append(event.get(key) or event.get_attr(key)) # spec = 'WHERE %s' % ' AND '.join(keys) s_req = 'SELECT f_route_id FROM routes %s' % spec # # get existing route_id for route_id in schema.execute(s_req, values).fetchall(): # # if exists return route_id[0][0] # # or create a new route_id return str(uuid.uuid4()) def load_rtmsg(schema, target, event): route_id = None post = [] # fix RTA_TABLE rta_table = event.get_attr('RTA_TABLE', -1) if rta_table == -1: event['attrs'].append(['RTA_TABLE', 254]) # # manage gc marks on related routes # # only for automatic routes: # - table 254 (main) # - proto 2 (kernel) # - scope 253 (link) elif ( (event.get_attr('RTA_TABLE') == 254) and (event['proto'] == 2) and (event['scope'] == 253) and (event['family'] == AF_INET) ): evt = event['header']['type'] # # set f_gc_mark = timestamp 
def rtmsg_gc_mark(schema, target, event, gc_mark=None):
    '''Set or clear the garbage collection mark on automatic routes.

    For every kernel route on the same output interface whose gateway
    lies within the network of the route from the event, ``f_gc_mark``
    is set to ``gc_mark`` (a timestamp for "del" events) or cleared
    (``gc_mark is None`` for "new" events).
    '''
    if gc_mark is None:
        # clearing pass: only touch routes that are currently marked
        gc_clause = ' AND f_gc_mark IS NOT NULL'
    else:
        gc_clause = ''
    #
    # select all routes for that OIF where f_gc_mark is not null
    #
    key_fields = ','.join(['f_%s' % x for x in schema.indices['routes']])
    key_query = ' AND '.join(
        ['f_%s = %s' % (x, schema.plch) for x in schema.indices['routes']]
    )
    # NOTE(review): fetchmany() with no argument returns only
    # cursor.arraysize rows (1 by default on sqlite3) -- confirm
    # whether fetchall() is intended here
    routes = schema.execute(
        '''
        SELECT %s,f_RTA_GATEWAY FROM routes
        WHERE f_target = %s AND f_RTA_OIF = %s
              AND f_RTA_GATEWAY IS NOT NULL %s
              AND f_family = 2
        '''
        % (key_fields, schema.plch, schema.plch, gc_clause),
        (target, event.get_attr('RTA_OIF')),
    ).fetchmany()
    #
    # get the route's RTA_DST and calculate the network
    #
    addr = event.get_attr('RTA_DST')
    # netmask for the destination prefix, truncated to 32 bits
    mask = (0xFFFFFFFF << (32 - event['dst_len'])) & 0xFFFFFFFF
    net = struct.unpack('>I', inet_pton(AF_INET, addr))[0] & mask
    #
    # now iterate all the routes from the query above and
    # mark those with matching RTA_GATEWAY
    #
    for route in routes:
        # get route GW
        gw = route[-1]
        try:
            # bugfix: compare the *network parts* -- apply the netmask
            # to the gateway address; the previous code masked with the
            # network address itself (``& net``), which also matched
            # unrelated gateways, e.g. 11.0.0.1 for net 10.0.0.0/8
            gwnet = struct.unpack('>I', inet_pton(AF_INET, gw))[0] & mask
            if gwnet == net:
                schema.execute(
                    'UPDATE routes SET f_gc_mark = %s '
                    'WHERE f_target = %s AND %s'
                    % (schema.plch, schema.plch, key_query),
                    (gc_mark, target) + route[:-1],
                )
        except Exception as e:
            schema.log.error('gc_mark event: %s' % (event,))
            schema.log.error('gc_mark: %s : %s' % (e, route))
class Via(OrderedDict):
    '''Gateway spec for routes using ``RTA_VIA``: family + address.

    Compares equal to any dict carrying the same (or default)
    ``family``/``addr`` pair; anything that is not a dict compares
    unequal.
    '''

    def __init__(self, prime=None):
        # bypass OrderedDict.__init__, exactly as the original did
        super(OrderedDict, self).__init__()
        if prime is None:
            prime = {}
        if not isinstance(prime, dict):
            raise TypeError()
        self['family'] = prime.get('family', AF_INET)
        self['addr'] = prime.get('addr', '0.0.0.0')

    def __eq__(self, right):
        if not isinstance(right, dict):
            return False
        families_match = self['family'] == right.get('family', AF_INET)
        addrs_match = self['addr'] == right.get('addr', '0.0.0.0')
        return families_match and addrs_match

    def __repr__(self):
        return repr(dict(self))
main.f_dst_len, main.f_RTA_GATEWAY FROM main INNER JOIN interfaces AS intf ON main.f_rta_oif = intf.f_index AND main.f_target = intf.f_target ''' yield ( 'target', 'tflags', 'table', 'ifname', 'dst', 'dst_len', 'gateway', ) where, values = cls._dump_where(view) for record in view.ndb.task_manager.db_fetch(req + where, values): yield record @classmethod def dump(cls, view): req = ''' SELECT main.f_target,main.f_tflags,%s FROM routes AS main LEFT JOIN nh AS nh ON main.f_route_id = nh.f_route_id AND main.f_target = nh.f_target ''' % ','.join( ['%s' % x for x in _dump_rt + _dump_nh + ['main.f_route_id']] ) header = ( ['target', 'tflags'] + [rtmsg.nla2name(x[7:]) for x in _dump_rt] + ['nh_%s' % nh.nla2name(x[5:]) for x in _dump_nh] + ['metrics', 'encap'] ) yield header plch = view.ndb.schema.plch where, values = cls._dump_where(view) for record in view.ndb.task_manager.db_fetch(req + where, values): route_id = record[-1] record = list(record[:-1]) if route_id is not None: # # fetch metrics metrics = tuple( view.ndb.task_manager.db_fetch( ''' SELECT * FROM metrics WHERE f_route_id = %s ''' % (plch,), (route_id,), ) ) if metrics: ret = {} names = view.ndb.schema.compiled['metrics']['norm_names'] for k, v in zip(names, metrics[0]): if v is not None and k not in ( 'target', 'route_id', 'tflags', ): ret[k] = v record.append(json.dumps(ret)) else: record.append(None) # # fetch encap enc_mpls = tuple( view.ndb.task_manager.db_fetch( ''' SELECT * FROM enc_mpls WHERE f_route_id = %s ''' % (plch,), (route_id,), ) ) if enc_mpls: record.append(enc_mpls[0][2]) else: record.append(None) else: record.extend((None, None)) yield record @classmethod def spec_normalize(cls, processed, spec): if isinstance(spec, basestring): processed['dst'] = spec return processed @classmethod def compare_record(self, left, right): if isinstance(right, str): return right == f'{left["dst"]}/{left["dst_len"]}' def _cmp_target(key, self, right): right = [MPLSTarget(x) for x in json.loads(right)] return 
all([x[0] == x[1] for x in zip(self[key], right)]) def _cmp_via(self, right): return self['via'] == Via(json.loads(right)) def _cmp_encap(self, right): return all([x[0] == x[1] for x in zip(self.get('encap', []), right)]) fields_cmp = { 'dst': partial(_cmp_target, 'dst'), 'src': partial(_cmp_target, 'src'), 'newdst': partial(_cmp_target, 'newdst'), 'encap': _cmp_encap, 'via': _cmp_via, } def mark_tflags(self, mark): plch = (self.schema.plch,) * 4 self.schema.execute( ''' UPDATE interfaces SET f_tflags = %s WHERE (f_index = %s OR f_index = %s) AND f_target = %s ''' % plch, (mark, self['iif'], self['oif'], self['target']), ) def __init__(self, *argv, **kwarg): kwarg['iclass'] = rtmsg self.event_map = {rtmsg: "load_rtnlmsg"} dict.__setitem__(self, 'multipath', []) dict.__setitem__(self, 'metrics', MetricsStub(self)) dict.__setitem__(self, 'deps', 0) super(Route, self).__init__(*argv, **kwarg) def complete_key(self, key): ret_key = {} if isinstance(key, basestring): ret_key['dst'] = key elif isinstance(key, (Record, tuple, list)): return super(Route, self).complete_key(key) elif isinstance(key, dict): ret_key.update(key) else: raise TypeError('unsupported key type') if 'target' not in ret_key: ret_key['target'] = self.ndb.localhost ## # previously here was a code that injected the default # table == 254 into the key: # # table = ret_key.get('table', ret_key.get('RTA_TABLE', 254)) # if 'table' not in ret_key: # ret_key['table'] = table # # the issue with the code is that self.exists() didn't use # it, thus it was possible to get self.exists() == True and # at the same time loading from the DB resulted in an empty # record # # probably more correct behaviour would be to raise KeyError # if a route spec has no table defined, and the route is # in another table than 254; but for now routes['ipaddr/mask'] # returns records even outside of the main table if isinstance(ret_key.get('dst_len'), basestring): ret_key['dst_len'] = int(ret_key['dst_len']) if 
isinstance(ret_key.get('dst'), basestring): if ret_key.get('dst') == 'default': ret_key['dst'] = '' ret_key['dst_len'] = 0 elif '/' in ret_key['dst']: ret_key['dst'], ret_key['dst_len'] = ret_key['dst'].split('/') if ret_key.get('family', 0) == AF_MPLS: for field in ('dst', 'src', 'newdst', 'via'): value = ret_key.get(field, key.get(field, None)) if isinstance(value, (list, tuple, dict)): ret_key[field] = json.dumps(value) return super(Route, self).complete_key(ret_key) @property def clean(self): clean = True for s in (self['metrics'],) + tuple(self['multipath']): if hasattr(s, 'changed'): clean &= len(s.changed) == 0 return clean & super(Route, self).clean def make_req(self, prime): req = dict(prime) for key in self.changed: req[key] = self[key] if self['multipath']: req['multipath'] = self['multipath'] if self['metrics']: req['metrics'] = self['metrics'] if self.get('encap') and self.get('encap_type'): req['encap'] = { 'type': self['encap_type'], 'labels': self['encap'], } if self.get('gateway'): req['gateway'] = self['gateway'] return req @check_auth('obj:modify') def __setitem__(self, key, value): if key == 'route_id': raise ValueError('route_id is read only') elif key == 'multipath': super(Route, self).__setitem__('multipath', []) for mp in value: mp = dict(mp) if self.state == 'invalid': mp['create'] = True obj = NextHop( self, self.view, mp, auth_managers=self.auth_managers ) obj.state.set(self.state.get()) self['multipath'].append(obj) if key in self.changed: self.changed.remove(key) elif key == 'metrics': value = dict(value) if not isinstance(self['metrics'], Metrics): value['create'] = True obj = Metrics( self, self.view, value, auth_managers=self.auth_managers ) obj.state.set(self.state.get()) super(Route, self).__setitem__('metrics', obj) if key in self.changed: self.changed.remove(key) elif self.get('family', 0) == AF_MPLS and key in ( 'dst', 'src', 'newdst', ): if isinstance(value, (dict, int)): value = [value] na = [] target = None for label in 
value: target = MPLSTarget(label) target['bos'] = 0 na.append(target) target['bos'] = 1 super(Route, self).__setitem__(key, na) else: super(Route, self).__setitem__(key, value) @check_auth('obj:modify') def apply(self, rollback=False, req_filter=None, mode='apply'): if ( (self.get('table') == 255) and (self.get('family') == 10) and (self.get('proto') == 2) ): # skip automatic ipv6 routes with proto kernel return self else: if self.get('family', AF_INET) == AF_MPLS and not self.get('dst'): dict.__setitem__(self, 'dst', [MPLSTarget()]) return super(Route, self).apply(rollback, req_filter, mode) def load_sql(self, *argv, **kwarg): super(Route, self).load_sql(*argv, **kwarg) # transform MPLS if self.get('family', 0) == AF_MPLS: for field in ('newdst', 'dst', 'src', 'via'): value = self.get(field, None) if isinstance(value, basestring) and value != '': if field == 'via': na = json.loads(value) else: na = [MPLSTarget(x) for x in json.loads(value)] dict.__setitem__(self, field, na) # # fetch encap deps if self['deps'] & F_RTA_ENCAP: for _ in range(5): enc = tuple( self.task_manager.db_fetch( 'SELECT * FROM enc_mpls WHERE f_route_id = %s' % (self.schema.plch,), (self['route_id'],), ) ) if enc: na = [MPLSTarget(x) for x in json.loads(enc[0][2])] self.load_value('encap', na) break time.sleep(0.1) else: self.log.error('no encap loaded for %s' % (self['route_id'],)) # # if not self.load_event.is_set(): return # # fetch metrics if self['deps'] & F_RTA_METRICS: for _ in range(5): metrics = tuple( self.task_manager.db_fetch( 'SELECT * FROM metrics WHERE f_route_id = %s' % (self.schema.plch,), (self['route_id'],), ) ) if metrics: self['metrics'] = Metrics( self, self.view, {'route_id': self['route_id']}, auth_managers=self.auth_managers, ) break time.sleep(0.1) else: self.log.error( 'no metrics loaded for %s' % (self['route_id'],) ) # # fetch multipath # # FIXME: use self['deps'] if 'nh_id' not in self and self.get('route_id') is not None: nhs = self.task_manager.db_fetch( 'SELECT 
class RouteSub:
    '''Mixin for route sub-objects (nexthops, metrics).

    Delegates transaction handling to the owning route, so committing
    or applying a sub-object commits/applies the route as a whole.
    Also acts as a context manager: leaving the ``with`` block commits
    the route.
    '''

    def commit(self):
        # a sub-object commit is a commit of the owning route
        return self.route.commit()

    def apply(self, rollback=False, req_filter=None, mode='apply'):
        # apply the owning route with the same arguments
        return self.route.apply(rollback, req_filter, mode)

    def set(self, key, value):
        '''Assign a field on the sub-object.'''
        self[key] = value

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.commit()
self.schema.execute( ''' UPDATE interfaces SET f_tflags = %s WHERE (f_index = %s OR f_index = %s) AND f_target = %s ''' % plch, (mark, self.route['iif'], self.route['oif'], self.route['target']), ) def __init__(self, route, *argv, **kwarg): self.route = route kwarg['iclass'] = rtmsg.metrics kwarg['check'] = False super(Metrics, self).__init__(*argv, **kwarg) pyroute2-0.7.11/pyroute2/ndb/objects/rule.py000066400000000000000000000046101455030217500207020ustar00rootroot00000000000000from collections import OrderedDict from pyroute2.common import get_address_family from pyroute2.netlink.rtnl.fibmsg import fibmsg from ..objects import RTNL_Object schema = fibmsg.sql_schema().unique_index( 'family', 'dst_len', 'src_len', 'tos', 'action', 'flags', 'FRA_DST', 'FRA_SRC', 'FRA_IIFNAME', 'FRA_GOTO', 'FRA_PRIORITY', 'FRA_FWMARK', 'FRA_FLOW', 'FRA_TUN_ID', 'FRA_SUPPRESS_IFGROUP', 'FRA_SUPPRESS_PREFIXLEN', 'FRA_TABLE', 'FRA_FWMASK', 'FRA_OIFNAME', 'FRA_L3MDEV', 'FRA_UID_RANGE', 'FRA_PROTOCOL', 'FRA_IP_PROTO', 'FRA_SPORT_RANGE', 'FRA_DPORT_RANGE', ) init = { 'specs': [['rules', schema]], 'classes': [['rules', fibmsg]], 'event_map': {fibmsg: ['rules']}, } class Rule(RTNL_Object): table = 'rules' msg_class = fibmsg api = 'rule' _replace_on_key_change = True @classmethod def summary(cls, view): req = ''' SELECT f_target, f_tflags, f_family, f_FRA_PRIORITY, f_action, f_FRA_TABLE FROM rules ''' yield ('target', 'tflags', 'family', 'priority', 'action', 'table') for record in view.ndb.task_manager.db_fetch(req): yield record def __init__(self, *argv, **kwarg): kwarg['iclass'] = fibmsg self._fields = [x[0] for x in fibmsg.fields] self.event_map = {fibmsg: "load_rtnlmsg"} key = argv[1] families = set( [ get_address_family(x) for x in (key.get('src'), key.get('dst')) if x != '' and x is not None ] ) if len(families) > 1: raise TypeError('src and dst must be of the same IP family') elif len(families) == 1: key['family'] = families.pop() super(Rule, self).__init__(*argv, **kwarg) def 
class Query(object):
    '''Cluster topology queries over the NDB schema.

    Every method returns a RecordSet describing nodes or edges of the
    cluster graph, rendered via :meth:`_formatter` either as raw
    tuples or as CSV text lines.
    '''

    def __init__(self, schema, fmt='raw'):
        self._schema = schema
        self._fmt = fmt

    def _formatter(self, cursor, fmt=None, header=None, transform=None):
        '''Render DB rows in the requested format ('raw' or 'csv').'''
        fmt = fmt or self._fmt
        if fmt == 'csv':
            if header:
                yield ','.join(header)
            for record in cursor:
                if transform:
                    record = transform(record)
                if isinstance(record, (list, tuple)):
                    yield ','.join(str(field) for field in record)
                else:
                    yield record
        elif fmt == 'raw':
            if header:
                yield header
            for record in cursor:
                yield transform(record) if transform else record
        else:
            raise TypeError('format not supported')

    def nodes(self, fmt=None):
        '''
        List all the nodes within the cluster.
        '''
        header = ('nodename',)
        cursor = self._schema.fetch(
            '''
            SELECT DISTINCT f_target FROM interfaces
            '''
        )
        return RecordSet(self._formatter(cursor, fmt, header))

    def p2p_edges(self, fmt=None):
        '''
        List point to point edges within the cluster, like GRE
        or PPP interfaces.
        '''
        header = ('left_node', 'right_node')
        cursor = self._schema.fetch(
            '''
            SELECT DISTINCT l.f_target, r.f_target
            FROM p2p AS l
            INNER JOIN p2p AS r
            ON l.f_p2p_local = r.f_p2p_remote
               AND l.f_target != r.f_target
            '''
        )
        return RecordSet(self._formatter(cursor, fmt, header))

    def l2_edges(self, fmt=None):
        '''
        List l2 links within the cluster, reconstructed from the
        ARP caches on the nodes. Works as follows:

        1. for every node take the ARP cache
        2. for every record in the cache reconstruct two triplets:
           the interface index -> the local interface name, and the
           neighbour lladdr -> the remote node and interface name

        Issues: does not filter out fake lladdr, so CARP interfaces
        produce fake l2 edges within the cluster.
        '''
        header = (
            'left_node',
            'left_ifname',
            'left_lladdr',
            'right_node',
            'right_ifname',
            'right_lladdr',
        )
        cursor = self._schema.fetch(
            '''
            SELECT DISTINCT
                j.f_target, j.f_IFLA_IFNAME, j.f_IFLA_ADDRESS,
                d.f_target, d.f_IFLA_IFNAME, j.f_NDA_LLADDR
            FROM
                (SELECT
                     n.f_target, i.f_IFLA_IFNAME,
                     i.f_IFLA_ADDRESS, n.f_NDA_LLADDR
                 FROM
                     neighbours AS n
                 INNER JOIN
                     interfaces AS i
                 ON
                     n.f_target = i.f_target
                     AND i.f_IFLA_ADDRESS != '00:00:00:00:00:00'
                     AND n.f_ifindex = i.f_index) AS j
            INNER JOIN
                interfaces AS d
            ON
                j.f_NDA_LLADDR = d.f_IFLA_ADDRESS
                AND j.f_target != d.f_target
            '''
        )
        return RecordSet(self._formatter(cursor, fmt, header))

    def l3_edges(self, fmt=None):
        '''
        List l3 edges. For every address on every node look if it
        is used as a gateway on remote nodes. Such cases are
        reported as l3 edges.

        Issues: does not report routes (edges) via point to point
        connections like GRE where local addresses are used as
        gateways. To be fixed.
        '''
        header = (
            'source_node',
            'gateway_node',
            'gateway_address',
            'dst',
            'dst_len',
        )
        cursor = self._schema.fetch(
            '''
            SELECT DISTINCT
                r.f_target, a.f_target, a.f_IFA_ADDRESS,
                r.f_RTA_DST, r.f_dst_len
            FROM
                addresses AS a
            INNER JOIN
                routes AS r
            ON
                r.f_target != a.f_target
                AND r.f_RTA_GATEWAY = a.f_IFA_ADDRESS
                AND r.f_RTA_GATEWAY NOT IN
                    (SELECT f_IFA_ADDRESS FROM addresses
                     WHERE f_target = r.f_target)
            '''
        )
        return RecordSet(self._formatter(cursor, fmt, header))
class Record:
    '''An immutable, named-tuple-like report row.

    Field values are reachable both by name indexing
    (``record['name']``) and by attribute access (``record.name``).
    Compares equal to other records, dicts, plain sequences, and --
    when a reference class is set -- to that class's string/int
    representation via ``compare_record()``.
    '''

    def __init__(self, names, values, ref_class=None):
        self._names = tuple(names)
        self._values = tuple(values)
        if len(self._names) != len(self._values):
            raise ValueError('names and values must have the same length')
        self._ref_class = ref_class

    def __getitem__(self, key):
        # scan from the end: with duplicate field names the last
        # occurrence wins; a missing key yields None
        idx = len(self._names)
        for name in reversed(self._names):
            idx -= 1
            if name == key:
                return self._values[idx]

    def __setitem__(self, *argv, **kwarg):
        raise TypeError('immutable object')

    def __getattribute__(self, key):
        # underscore names are real attributes, anything else is a field
        if key.startswith('_'):
            return object.__getattribute__(self, key)
        return self[key]

    def __setattr__(self, key, value):
        if not key.startswith('_'):
            raise TypeError('immutable object')
        return object.__setattr__(self, key, value)

    def __iter__(self):
        return iter(self._values)

    def __repr__(self):
        return repr(self._values)

    def __len__(self):
        return len(self._values)

    def _select_fields(self, *fields):
        '''Return a new Record restricted to the given fields.'''
        values = [self[field] for field in fields]
        return Record(fields, values, self._ref_class)

    def _transform_fields(self, **spec):
        '''Return a new Record with fields rewritten by callables.'''
        data = self._as_dict()
        for field, func in spec.items():
            data[field] = func(self)
        return Record(data.keys(), data.values(), self._ref_class)

    def _match(self, f=None, **spec):
        '''Match against a predicate or per-field constants/callables.'''
        if callable(f):
            return f(self)
        for field, expected in spec.items():
            current = self[field]
            if callable(expected):
                matched = expected(current)
            else:
                matched = current == expected
            if not matched:
                return False
        return True

    def _as_dict(self):
        return dict(zip(self._names, self._values))

    def __eq__(self, right):
        if hasattr(right, '_names'):
            names_ok = all(
                a == b for a, b in zip(self._names, right._names)
            )
            values_ok = all(
                a == b for a, b in zip(self._values, right._values)
            )
            return names_ok and values_ok
        if isinstance(right, dict):
            return all(
                self[key] == value for key, value in right.items()
            )
        if self._ref_class is not None and isinstance(right, (str, int)):
            return self._ref_class.compare_record(self, right)
        return all(a == b for a, b in zip(self._values, right))
class RecordSetConfig(dict):
    '''RecordSet configuration dictionary.

    String values are treated as JSON documents and decoded on
    assignment; only a dict may seed the config.
    '''

    def __init__(self, prime):
        if not isinstance(prime, dict):
            raise ValueError('only dict allowed')
        for key, value in prime.items():
            # route through __setitem__ to get JSON decoding
            self[key] = value

    def __setitem__(self, key, value):
        decoded = json.loads(value) if isinstance(value, str) else value
        return super().__setitem__(key, decoded)
testoutput:: 'target','tflags','ifname','address','prefixlen' 'localhost',0,'eth0','192.168.122.28',24 ''' self.filters.append(lambda x: x if x._match(f, **spec) else None) if self.config.get('recordset_pipe'): return RecordSet(self, config=self.config) @cli.show_result def transform_fields(self, **kwarg): ''' Transform fields with a function. Function must accept the record as the only argument: .. testcode:: report = ndb.addresses.summary() report.transform_fields( address=lambda r: f'{r.address}/{r.prefixlen}' ) report.select_fields('ifname', 'address') for line in report.format('csv'): print(line) .. testoutput:: 'ifname','address' 'lo','127.0.0.1/8' 'eth0','192.168.122.28/24' ''' self.filters.append(lambda x: x._transform_fields(**kwarg)) if self.config.get('recordset_pipe'): return RecordSet(self, config=self.config) @cli.show_result def transform(self, **kwarg): warnings.warn(deprecation_notice, DeprecationWarning) def g(): for record in self.generator: if isinstance(record, Record): values = [] names = record._names for name, value in zip(names, record._values): if name in kwarg: value = kwarg[name](value) values.append(value) record = Record(names, values, record._ref_class) yield record return RecordSet(g()) @cli.show_result def filter(self, f=None, **kwarg): warnings.warn(deprecation_notice, DeprecationWarning) def g(): for record in self.generator: m = True for key in kwarg: if kwarg[key] != getattr(record, key): m = False if m: if f is None: yield record elif f(record): yield record return RecordSet(g()) @cli.show_result def select(self, *argv): warnings.warn(deprecation_notice, DeprecationWarning) return self.fields(*argv) @cli.show_result def fields(self, *fields): warnings.warn(deprecation_notice, DeprecationWarning) def g(): for record in self.generator: yield record._select_fields(*fields) return RecordSet(g()) @cli.show_result def join(self, right, condition=lambda r1, r2: True, prefix=''): warnings.warn(deprecation_notice, DeprecationWarning) # 
fetch all the records from the right # ACHTUNG it may consume a lot of memory right = tuple(right) def g(): for r1 in self.generator: for r2 in right: if condition(r1, r2): n = tuple( chain( r1._names, ['%s%s' % (prefix, x) for x in r2._names], ) ) v = tuple(chain(r1._values, r2._values)) yield Record(n, v, r1._ref_class) return RecordSet(g()) @cli.show_result def format(self, kind): ''' Return an iterator over text lines in the chosen format. Supported formats: 'json', 'csv'. ''' if kind == 'json': return BaseRecordSet(format_json(self, headless=True)) elif kind == 'csv': return BaseRecordSet(format_csv(self, headless=True)) else: raise ValueError() def count(self): ''' Return number of records. The method exhausts the generator. ''' counter = 0 for record in self: counter += 1 return counter def __getitem__(self, key): return list(self)[key] pyroute2-0.7.11/pyroute2/ndb/schema.py000066400000000000000000000777621455030217500175640ustar00rootroot00000000000000''' Backends -------- NDB stores all the records in an SQL database. By default it uses the SQLite3 module, which is a part of the Python stdlib, so no extra packages are required:: # SQLite3 -- simple in-memory DB ndb = NDB() # SQLite3 -- same as above with explicit arguments ndb = NDB(db_provider='sqlite3', db_spec=':memory:') # SQLite3 -- file DB ndb = NDB(db_provider='sqlite3', db_spec='test.db') It is also possible to use a PostgreSQL database via psycopg2 module:: # PostgreSQL -- local DB ndb = NDB(db_provider='psycopg2', db_spec={'dbname': 'test'}) # PostgreSQL -- remote DB ndb = NDB(db_provider='psycopg2', db_spec={'dbname': 'test', 'host': 'db1.example.com'}) Database backup --------------- Built-in database backup is implemented now only for SQLite3 backend. For the PostgresSQL backend you have to use external utilities like `pg_dump`:: # create an NDB instance ndb = NDB() # the defaults: db_provider='sqlite3', db_spec=':memory:' ... 
# dump the DB to a file ndb.backup('backup.db') SQL schema ---------- By default NDB deletes the data from the DB upon exit. In order to preserve the data, use `NDB(db_cleanup=False, ...)` Here is an example schema (may be changed with releases):: List of relations Schema | Name | Type | Owner --------+------------------+-------+------- public | addresses | table | root public | af_bridge_fdb | table | root public | af_bridge_ifs | table | root public | af_bridge_vlans | table | root public | enc_mpls | table | root public | ifinfo_bond | table | root public | ifinfo_bridge | table | root public | ifinfo_gre | table | root public | ifinfo_gretap | table | root public | ifinfo_ip6gre | table | root public | ifinfo_ip6gretap | table | root public | ifinfo_ip6tnl | table | root public | ifinfo_ipip | table | root public | ifinfo_ipvlan | table | root public | ifinfo_macvlan | table | root public | ifinfo_macvtap | table | root public | ifinfo_sit | table | root public | ifinfo_tun | table | root public | ifinfo_vlan | table | root public | ifinfo_vrf | table | root public | ifinfo_vti | table | root public | ifinfo_vti6 | table | root public | ifinfo_vxlan | table | root public | interfaces | table | root public | metrics | table | root public | neighbours | table | root public | netns | table | root public | nh | table | root public | p2p | table | root public | routes | table | root public | rules | table | root public | sources | table | root public | sources_options | table | root (33 rows) rtnl=# select f_index, f_ifla_ifname from interfaces; f_index | f_ifla_ifname ---------+--------------- 1 | lo 2 | eth0 28 | ip_vti0 31 | ip6tnl0 32 | ip6_vti0 36445 | br0 11434 | dummy0 3 | eth1 (8 rows) rtnl=# select f_index, f_ifla_br_stp_state from ifinfo_bridge; f_index | f_ifla_br_stp_state ---------+--------------------- 36445 | 0 (1 row) Database upgrade ---------------- There is no DB schema upgrade from release to release. 
All the data stored in the DB is being fetched from the OS in the runtime, thus no persistence required. If you're using a PostgreSQL DB or a file based SQLite, simply drop all the tables from the DB, and NDB will create them from scratch on startup. ''' import enum import json import random import sqlite3 import sys import time import traceback from collections import OrderedDict from functools import partial from pyroute2 import config from pyroute2.common import basestring, uuid32 # from .objects import address, interface, neighbour, netns, route, rule try: import psycopg2 except ImportError: psycopg2 = None # # the order is important # plugins = [interface, address, neighbour, route, netns, rule] MAX_ATTEMPTS = 5 class DBProvider(enum.Enum): sqlite3 = 'sqlite3' psycopg2 = 'psycopg2' def __eq__(self, r): return str(self) == r def publish(f): if isinstance(f, str): def decorate(m): m.publish = f return m return decorate f.publish = True return f class DBDict(dict): def __init__(self, schema, table): self.schema = schema self.table = table @publish('get') def __getitem__(self, key): for (record,) in self.schema.fetch( f''' SELECT f_value FROM {self.table} WHERE f_key = {self.schema.plch} ''', (key,), ): return json.loads(record) raise KeyError(f'key {key} not found') @publish('set') def __setitem__(self, key, value): del self[key] self.schema.execute( f''' INSERT INTO {self.table} VALUES ({self.schema.plch}, {self.schema.plch}) ''', (key, json.dumps(value)), ) @publish('del') def __delitem__(self, key): self.schema.execute( f''' DELETE FROM {self.table} WHERE f_key = {self.schema.plch} ''', (key,), ) @publish def keys(self): for (key,) in self.schema.fetch(f'SELECT f_key FROM {self.table}'): yield key @publish def items(self): for key, value in self.schema.fetch( f'SELECT f_key, f_value FROM {self.table}' ): yield key, json.loads(value) @publish def values(self): for (value,) in self.schema.fetch(f'SELECT f_value FROM {self.table}'): yield json.loads(value) class 
DBSchema: connection = None event_map = None key_defaults = None snapshots = None # : spec = OrderedDict() classes = {} # # OBS: field names MUST go in the same order as in the spec, # that's for the load_netlink() to work correctly -- it uses # one loop to fetch both index and row values # indices = {} foreign_keys = {} def __init__(self, config, sources, event_map, log_channel): global plugins self.sources = sources self.config = DBDict(self, 'config') self.stats = {} self.connection = None self.cursor = None self.log = log_channel self.snapshots = {} self.key_defaults = {} self.event_map = {} # cache locally these variables so they will not be # loaded from SQL for every incoming message; this # means also that these variables can not be changed # in runtime self.rtnl_log = config['rtnl_debug'] self.provider = config['provider'] # for plugin in plugins: # # 1. spec # for name, spec in plugin.init['specs']: self.spec[name] = spec.as_dict() self.indices[name] = spec.index self.foreign_keys[name] = spec.foreign_keys # # 2. classes # for name, cls in plugin.init['classes']: self.classes[name] = cls # self.initdb(config) # for plugin in plugins: # emap = plugin.init['event_map'] # for etype, ehndl in emap.items(): handlers = [] for h in ehndl: if isinstance(h, basestring): handlers.append(partial(self.load_netlink, h)) else: handlers.append(partial(h, self)) self.event_map[etype] = handlers self.gctime = self.ctime = time.time() def initdb(self, config): if self.connection is not None: self.close() if config['provider'] == DBProvider.sqlite3: self.connection = sqlite3.connect(config['spec']) self.plch = '?' 
self.connection.execute('PRAGMA foreign_keys = ON') elif config['provider'] == DBProvider.psycopg2: self.connection = psycopg2.connect(**config['spec']) self.plch = '%s' else: raise TypeError('DB provider not supported') self.cursor = self.connection.cursor() # # compile request lines # self.compiled = {} for table in self.spec.keys(): self.compiled[table] = self.compile_spec( table, self.spec[table], self.indices[table] ) self.create_table(table) # # service tables # self.execute( ''' DROP TABLE IF EXISTS sources_options ''' ) self.execute( ''' DROP TABLE IF EXISTS sources ''' ) self.execute( ''' DROP TABLE IF EXISTS config ''' ) self.execute( ''' CREATE TABLE config (f_key TEXT PRIMARY KEY, f_value TEXT NOT NULL) ''' ) self.execute( ''' CREATE TABLE IF NOT EXISTS sources (f_target TEXT PRIMARY KEY, f_kind TEXT NOT NULL) ''' ) self.execute( ''' CREATE TABLE IF NOT EXISTS sources_options (f_target TEXT NOT NULL, f_name TEXT NOT NULL, f_type TEXT NOT NULL, f_value TEXT NOT NULL, FOREIGN KEY (f_target) REFERENCES sources(f_target) ON UPDATE CASCADE ON DELETE CASCADE) ''' ) for key, value in config.items(): self.config[key] = value def merge_spec(self, table1, table2, table, schema_idx): spec1 = self.compiled[table1] spec2 = self.compiled[table2] names = spec1['names'] + spec2['names'][:-1] all_names = spec1['all_names'] + spec2['all_names'][2:-1] norm_names = spec1['norm_names'] + spec2['norm_names'][2:-1] idx = ('target', 'tflags') + schema_idx f_names = ['f_%s' % x for x in all_names] f_set = ['f_%s = %s' % (x, self.plch) for x in all_names] f_idx = ['f_%s' % x for x in idx] f_idx_match = ['%s.%s = %s' % (table2, x, self.plch) for x in f_idx] plchs = [self.plch] * len(f_names) return { 'names': names, 'all_names': all_names, 'norm_names': norm_names, 'idx': idx, 'fnames': ','.join(f_names), 'plchs': ','.join(plchs), 'fset': ','.join(f_set), 'knames': ','.join(f_idx), 'fidx': ' AND '.join(f_idx_match), } def compile_spec(self, table, schema_names, schema_idx): # 
e.g.: index, flags, IFLA_IFNAME # names = [] # # same + two internal fields # all_names = ['target', 'tflags'] # # norm_names = ['target', 'tflags'] bclass = self.classes.get(table) for name in schema_names: names.append(name[-1]) all_names.append(name[-1]) iclass = bclass if len(name) > 1: for step in name[:-1]: imap = dict(iclass.nla_map) iclass = getattr(iclass, imap[step]) norm_names.append(iclass.nla2name(name[-1])) # # escaped names: f_index, f_flags, f_IFLA_IFNAME # # the reason: words like "index" are keywords in SQL # and we can not use them; neither can we change the # C structure # f_names = ['f_%s' % x for x in all_names] # # set the fields # # e.g.: f_flags = ?, f_IFLA_IFNAME = ? # # there are different placeholders: # ? -- SQLite3 # %s -- PostgreSQL # so use self.plch here # f_set = ['f_%s = %s' % (x, self.plch) for x in all_names] # # the set of the placeholders to use in the INSERT statements # plchs = [self.plch] * len(f_names) # # the index schema; use target and tflags in every index # idx = ('target', 'tflags') + schema_idx # # the same, escaped: f_target, f_tflags etc. # f_idx = ['f_%s' % x for x in idx] # # normalized idx names # norm_idx = [iclass.nla2name(x) for x in idx] # # match the index fields, fully qualified # # interfaces.f_index = ?, interfaces.f_IFLA_IFNAME = ? 
# # the same issue with the placeholders # f_idx_match = ['%s.%s = %s' % (table, x, self.plch) for x in f_idx] return { 'names': names, 'all_names': all_names, 'norm_names': norm_names, 'idx': idx, 'norm_idx': norm_idx, 'fnames': ','.join(f_names), 'plchs': ','.join(plchs), 'fset': ','.join(f_set), 'knames': ','.join(f_idx), 'fidx': ' AND '.join(f_idx_match), } @publish def add_nl_source(self, target, kind, spec): ''' A temprorary method, to be moved out ''' # flush self.execute( ''' DELETE FROM sources_options WHERE f_target = %s ''' % self.plch, (target,), ) self.execute( ''' DELETE FROM sources WHERE f_target = %s ''' % self.plch, (target,), ) # add self.execute( ''' INSERT INTO sources (f_target, f_kind) VALUES (%s, %s) ''' % (self.plch, self.plch), (target, kind), ) for key, value in spec.items(): vtype = 'int' if isinstance(value, int) else 'str' self.execute( ''' INSERT INTO sources_options (f_target, f_name, f_type, f_value) VALUES (%s, %s, %s, %s) ''' % (self.plch, self.plch, self.plch, self.plch), (target, key, vtype, value), ) def execute(self, *argv, **kwarg): try: # # FIXME: add logging # for _ in range(MAX_ATTEMPTS): try: self.cursor.execute(*argv, **kwarg) break except (sqlite3.InterfaceError, sqlite3.OperationalError) as e: self.log.debug('%s' % e) # # Retry on: # -- InterfaceError: Error binding parameter ... 
# -- OperationalError: SQL logic error # pass else: raise Exception('DB execute error: %s %s' % (argv, kwarg)) except Exception: raise finally: self.connection.commit() # no performance optimisation yet return self.cursor @publish def fetchone(self, *argv, **kwarg): for row in self.fetch(*argv, **kwarg): return row return None @publish def fetch(self, *argv, **kwarg): self.execute(*argv, **kwarg) while True: row_set = self.cursor.fetchmany() if not row_set: return for row in row_set: yield row @publish def backup(self, spec): if sys.version_info >= (3, 7) and self.provider == DBProvider.sqlite3: backup_connection = sqlite3.connect(spec) self.connection.backup(backup_connection) backup_connection.close() else: raise NotImplementedError() @publish def export(self, f='stdout'): close = False if f in ('stdout', 'stderr'): f = getattr(sys, f) elif isinstance(f, basestring): f = open(f, 'w') close = True try: for table in self.spec.keys(): f.write('\ntable %s\n' % table) for record in self.execute('SELECT * FROM %s' % table): f.write(' '.join([str(x) for x in record])) f.write('\n') if self.rtnl_log: f.write('\ntable %s_log\n' % table) for record in self.execute('SELECT * FROM %s_log' % table): f.write(' '.join([str(x) for x in record])) f.write('\n') finally: if close: f.close() def close(self): if self.config['spec'] != ':memory:': # simply discard in-memory sqlite db on exit self.purge_snapshots() self.connection.commit() self.connection.close() @publish def commit(self): self.connection.commit() def create_table(self, table): req = ['f_target TEXT NOT NULL', 'f_tflags BIGINT NOT NULL DEFAULT 0'] fields = [] self.key_defaults[table] = {} for field in self.spec[table].items(): # # Why f_? 
# 'Cause there are attributes like 'index' and such # names may not be used in SQL statements # field = (field[0][-1], field[1]) fields.append('f_%s %s' % field) req.append('f_%s %s' % field) if field[1].strip().startswith('TEXT'): self.key_defaults[table][field[0]] = '' else: self.key_defaults[table][field[0]] = 0 if table in self.foreign_keys: for key in self.foreign_keys[table]: spec = ( '(%s)' % ','.join(key['fields']), '%s(%s)' % (key['parent'], ','.join(key['parent_fields'])), ) req.append( 'FOREIGN KEY %s REFERENCES %s ' 'ON UPDATE CASCADE ' 'ON DELETE CASCADE ' % spec ) # # make a unique index for compound keys on # the parent table # # https://sqlite.org/foreignkeys.html # if len(key['fields']) > 1: idxname = 'uidx_%s_%s' % ( key['parent'], '_'.join(key['parent_fields']), ) self.execute( 'CREATE UNIQUE INDEX ' 'IF NOT EXISTS %s ON %s' % (idxname, spec[1]) ) req = ','.join(req) req = 'CREATE TABLE IF NOT EXISTS ' '%s (%s)' % (table, req) self.execute(req) index = ','.join( ['f_target', 'f_tflags'] + ['f_%s' % x for x in self.indices[table]] ) req = 'CREATE UNIQUE INDEX IF NOT EXISTS ' '%s_idx ON %s (%s)' % ( table, table, index, ) self.execute(req) # # create table for the transaction buffer: there go the system # updates while the transaction is not committed. 
# # w/o keys (yet) # # req = ['f_target TEXT NOT NULL', # 'f_tflags INTEGER NOT NULL DEFAULT 0'] # req = ','.join(req) # self.execute('CREATE TABLE IF NOT EXISTS ' # '%s_buffer (%s)' % (table, req)) # # create the log table, if required # if self.rtnl_log: req = [ 'f_tstamp BIGINT NOT NULL', 'f_target TEXT NOT NULL', 'f_event INTEGER NOT NULL', ] + fields req = ','.join(req) self.execute( 'CREATE TABLE IF NOT EXISTS ' '%s_log (%s)' % (table, req) ) def mark(self, target, mark): for table in self.spec: self.execute( ''' UPDATE %s SET f_tflags = %s WHERE f_target = %s ''' % (table, self.plch, self.plch), (mark, target), ) @publish def flush(self, target): for table in self.spec: self.execute( ''' DELETE FROM %s WHERE f_target = %s ''' % (table, self.plch), (target,), ) @publish def save_deps(self, ctxid, weak_ref, iclass): uuid = uuid32() obj = weak_ref() obj_k = obj.key idx = self.indices[obj.table] conditions = [] values = [] for key in idx: conditions.append('f_%s = %s' % (key, self.plch)) if key in obj_k: values.append(obj_k[key]) else: values.append(obj.get(iclass.nla2name(key))) # # save the old f_tflags value # tflags = self.execute( ''' SELECT f_tflags FROM %s WHERE %s ''' % (obj.table, ' AND '.join(conditions)), values, ).fetchone()[0] # # mark tflags for obj # obj.mark_tflags(uuid) # # f_tflags is used in foreign keys ON UPDATE CASCADE, so all # related records will be marked # for table in self.spec: self.log.debug('create snapshot %s_%s' % (table, ctxid)) # # create the snapshot table # self.execute( ''' CREATE TABLE IF NOT EXISTS %s_%s AS SELECT * FROM %s WHERE f_tflags IS NULL ''' % (table, ctxid, table) ) # # copy the data -- is it possible to do it in one step? 
# self.execute( ''' INSERT INTO %s_%s SELECT * FROM %s WHERE f_tflags = %s ''' % (table, ctxid, table, self.plch), [uuid], ) # # unmark all the data # obj.mark_tflags(tflags) for table in self.spec: self.execute( ''' UPDATE %s_%s SET f_tflags = %s ''' % (table, ctxid, self.plch), [tflags], ) self.snapshots['%s_%s' % (table, ctxid)] = weak_ref @publish def purge_snapshots(self): for table in tuple(self.snapshots): for _ in range(MAX_ATTEMPTS): try: if self.provider == DBProvider.sqlite3: self.execute('DROP TABLE %s' % table) elif self.provider == DBProvider.psycopg2: self.execute('DROP TABLE %s CASCADE' % table) self.connection.commit() del self.snapshots[table] break except sqlite3.OperationalError: # # Retry on: # -- OperationalError: database table is locked # time.sleep(random.random()) else: raise Exception('DB snapshot error') @publish def get(self, table, spec): # # Retrieve info from the DB # # ndb.interfaces.get({'ifname': 'eth0'}) # conditions = [] values = [] cls = self.classes[table] cspec = self.compiled[table] for key, value in spec.items(): if key not in cspec['all_names']: key = cls.name2nla(key) if key not in cspec['all_names']: raise KeyError('field name not found') conditions.append('f_%s = %s' % (key, self.plch)) values.append(value) req = 'SELECT * FROM %s WHERE %s' % (table, ' AND '.join(conditions)) for record in self.fetch(req, values): yield dict(zip(self.compiled[table]['all_names'], record)) def log_netlink(self, table, target, event, ctable=None): # # RTNL Logs # fkeys = self.compiled[table]['names'] fields = ','.join( ['f_tstamp', 'f_target', 'f_event'] + ['f_%s' % x for x in fkeys] ) pch = ','.join([self.plch] * (len(fkeys) + 3)) values = [ int(time.time() * 1000), target, event.get('header', {}).get('type', 0), ] for field in fkeys: value = event.get_attr(field) or event.get(field) if value is None and field in self.indices[ctable or table]: value = self.key_defaults[table][field] if isinstance(value, (dict, list, tuple, set)): value = 
json.dumps(value) values.append(value) self.execute( 'INSERT INTO %s_log (%s) VALUES (%s)' % (table, fields, pch), values, ) def load_netlink(self, table, target, event, ctable=None, propagate=False): # if self.rtnl_log: self.log_netlink(table, target, event, ctable) # # Update metrics # if 'stats' in event['header']: self.stats[target] = event['header']['stats'] # # Periodic jobs # if time.time() - self.gctime > config.gc_timeout: self.gctime = time.time() # clean dead snapshots after GC timeout for name, wref in tuple(self.snapshots.items()): if wref() is None: del self.snapshots[name] try: self.execute('DROP TABLE %s' % name) except Exception as e: self.log.debug( 'failed to remove table %s: %s' % (name, e) ) # clean marked routes self.execute( 'DELETE FROM routes WHERE ' '(f_gc_mark + 5) < %s' % self.plch, (int(time.time()),), ) # # The event type # if event['header'].get('type', 0) % 2: # # Delete an object # conditions = ['f_target = %s' % self.plch] values = [target] for key in self.indices[table]: conditions.append('f_%s = %s' % (key, self.plch)) value = event.get(key) or event.get_attr(key) if value is None: value = self.key_defaults[table][key] if isinstance(value, (dict, list, tuple, set)): value = json.dumps(value) values.append(value) self.execute( 'DELETE FROM %s WHERE' ' %s' % (table, ' AND '.join(conditions)), values, ) else: # # Create or set an object # # field values values = [target, 0] # index values ivalues = [target, 0] compiled = self.compiled[table] # a map of sub-NLAs nodes = {} # fetch values (exc. 
the first two columns) for fname, ftype in self.spec[table].items(): node = event # if the field is located in a sub-NLA if len(fname) > 1: # see if we tried to get it already if fname[:-1] not in nodes: # descend for steg in fname[:-1]: node = node.get_attr(steg) if node is None: break nodes[fname[:-1]] = node # lookup the sub-NLA in the map node = nodes[fname[:-1]] # the event has no such sub-NLA if node is None: values.append(None) continue # NLA have priority value = node.get_attr(fname[-1]) if value is None: value = node.get(fname[-1]) if value is None and fname[-1] in self.compiled[table]['idx']: value = self.key_defaults[table][fname[-1]] node['attrs'].append((fname[-1], value)) if isinstance(value, (dict, list, tuple, set)): value = json.dumps(value) if fname[-1] in compiled['idx']: ivalues.append(value) values.append(value) try: if self.provider == DBProvider.psycopg2: # # run UPSERT -- the DB provider must support it # ( self.execute( 'INSERT INTO %s (%s) VALUES (%s) ' 'ON CONFLICT (%s) ' 'DO UPDATE SET %s WHERE %s' % ( table, compiled['fnames'], compiled['plchs'], compiled['knames'], compiled['fset'], compiled['fidx'], ), (values + values + ivalues), ) ) # elif self.provider == DBProvider.sqlite3: # # SQLite3 >= 3.24 actually has UPSERT, but ... # # We can not use here INSERT OR REPLACE as well, since # it drops (almost always) records with foreign key # dependencies. Maybe a bug in SQLite3, who knows. 
# count = ( self.execute( ''' SELECT count(*) FROM %s WHERE %s ''' % (table, compiled['fidx']), ivalues, ).fetchone() )[0] if count == 0: self.execute( ''' INSERT INTO %s (%s) VALUES (%s) ''' % (table, compiled['fnames'], compiled['plchs']), values, ) else: self.execute( ''' UPDATE %s SET %s WHERE %s ''' % (table, compiled['fset'], compiled['fidx']), (values + ivalues), ) else: raise NotImplementedError() # except Exception as e: # if propagate: raise e # # A good question, what should we do here self.log.debug( 'load_netlink: %s %s %s' % (table, target, event) ) self.log.error('load_netlink: %s' % traceback.format_exc()) pyroute2-0.7.11/pyroute2/ndb/source.py000066400000000000000000000410761455030217500176110ustar00rootroot00000000000000''' Local RTNL ---------- Local RTNL source is a simple `IPRoute` instance. By default NDB starts with one local RTNL source names `localhost`:: >>> ndb = NDB() >>> ndb.sources.summary().format("json") [ { "name": "localhost", "spec": "{'target': 'localhost', 'nlm_generator': 1}", "state": "running" }, { "name": "localhost/nsmanager", "spec": "{'target': 'localhost/nsmanager'}", "state": "running" } ] >>> ndb.sources['localhost'] [running] The `localhost` RTNL source starts an additional async cache thread. The `nlm_generator` option means that instead of collections the `IPRoute` object returns generators, so `IPRoute` responses will not consume memory regardless of the RTNL objects number:: >>> ndb.sources['localhost'].nl.link('dump') See also: :ref:`iproute` Network namespaces ------------------ There are two ways to connect additional sources to an NDB instance. One is to specify sources when creating an NDB object:: ndb = NDB(sources=[{'target': 'localhost'}, {'netns': 'test01'}]) Another way is to call `ndb.sources.add()` method:: ndb.sources.add(netns='test01') This syntax: `{target': 'localhost'}` and `{'netns': 'test01'}` is the short form. 
The full form would be:: {'target': 'localhost', # the label for the DB 'kind': 'local', # use IPRoute class to start the source 'nlm_generator': 1} # {'target': 'test01', # the label 'kind': 'netns', # use NetNS class 'netns': 'test01'} # See also: :ref:`netns` Remote systems -------------- It is possible also to connect to remote systems using SSH. In order to use this kind of sources it is required to install the `mitogen `_ module. The `remote` kind of sources uses the `RemoteIPRoute` class. The short form:: ndb.sources.add(hostname='worker1.example.com') In some more extended form:: ndb.sources.add(**{'target': 'worker1.example.com', 'kind': 'remote', 'hostname': 'worker1.example.com', 'username': 'jenkins', 'check_host_keys': False}) See also: :ref:`remote` ''' import errno import importlib import queue import socket import struct import sys import threading import time import uuid from pyroute2.common import basestring from pyroute2.iproute import IPRoute from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.nlsocket import NetlinkSocketBase from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.remote import RemoteIPRoute from .events import ShutdownException, State from .messages import cmsg_event, cmsg_failed, cmsg_sstart if sys.platform.startswith('linux'): from pyroute2 import netns from pyroute2.netns.manager import NetNSManager from pyroute2.nslink.nslink import NetNS else: NetNS = None NetNSManager = None SOURCE_FAIL_PAUSE = 1 SOURCE_MAX_ERRORS = 3 class SourceProxy(object): def __init__(self, ndb, target): self.ndb = ndb self.events = queue.Queue() self.target = target def api(self, name, *argv, **kwarg): call_id = str(uuid.uuid4().hex) self.ndb._call_registry[call_id] = event = threading.Event() event.clear() ( self.ndb.messenger.emit( { 'type': 'api', 'target': self.target, 'call_id': call_id, 'name': name, 'argv': argv, 'kwarg': kwarg, } ) ) event.wait() response = self.ndb._call_registry.pop(call_id) if 'return' 
in response: return response['return'] elif 'exception' in response: raise response['exception'] class Source(dict): ''' The RNTL source. The source that is used to init the object must comply to IPRoute API, must support the async_cache. If the source starts additional threads, they must be joined in the source.close() ''' table_alias = 'src' dump_header = None summary_header = None view = None table = 'sources' vmap = { 'local': IPRoute, 'netns': NetNS, 'remote': RemoteIPRoute, 'nsmanager': NetNSManager, } def __init__(self, ndb, **spec): self.th = None self.nl = None self.ndb = ndb self.evq = self.ndb._event_queue # the target id -- just in case self.target = spec['target'] self.kind = spec.pop('kind', 'local') self.max_errors = spec.pop('max_errors', SOURCE_MAX_ERRORS) self.event = spec.pop('event') # RTNL API self.nl_prime = self.get_prime(self.kind) self.nl_kwarg = spec # if self.ndb.messenger is not None: self.ndb.messenger.targets.add(self.target) # self.errors_counter = 0 self.shutdown = threading.Event() self.started = threading.Event() self.lock = threading.RLock() self.shutdown_lock = threading.RLock() self.started.clear() self.log = ndb.log.channel('sources.%s' % self.target) self.state = State(log=self.log, wait_list=['running']) self.state.set('init') self.ndb.task_manager.db_add_nl_source(self.target, self.kind, spec) self.load_sql() @classmethod def _count(cls, view): return view.ndb.task_manager.db_fetchone( "SELECT count(*) FROM %s" % view.table ) @property def must_restart(self): if self.max_errors < 0 or self.errors_counter <= self.max_errors: return True return False @property def bind_arguments(self): return dict( filter( lambda x: x[1] is not None, ( ('async_cache', True), ('clone_socket', True), ('groups', self.nl_kwarg.get('groups')), ), ) ) def set_ready(self): try: if self.event is not None: self.evq.put( (cmsg_event(self.target, self.event),), source=self.target ) else: self.evq.put((cmsg_sstart(self.target),), source=self.target) 
except ShutdownException: self.state.set('stop') return False return True @classmethod def defaults(cls, spec): ret = dict(spec) defaults = {} if 'hostname' in spec: defaults['kind'] = 'remote' defaults['protocol'] = 'ssh' defaults['target'] = spec['hostname'] if 'netns' in spec: defaults['kind'] = 'netns' defaults['target'] = spec['netns'] ret['netns'] = netns._get_netnspath(spec['netns']) for key in defaults: if key not in ret: ret[key] = defaults[key] return ret def __repr__(self): if isinstance(self.nl_prime, NetlinkSocketBase): name = self.nl_prime.__class__.__name__ elif isinstance(self.nl_prime, type): name = self.nl_prime.__name__ return '[%s] <%s %s>' % (self.state.get(), name, self.nl_kwarg) @classmethod def nla2name(cls, name): return name @classmethod def name2nla(cls, name): return name @classmethod def summary(cls, view): yield ('state', 'name', 'spec') for key in view.keys(): yield (view[key].state.get(), key, '%s' % (view[key].nl_kwarg,)) @classmethod def dump(cls, view): return cls.summary(view) @classmethod def compare_record(self, left, right): # specific compare if isinstance(right, basestring): return right == left['name'] def get_prime(self, name): return self.vmap.get(self.kind, None) or getattr( importlib.import_module('pyroute2'), self.kind ) def api(self, name, *argv, **kwarg): for _ in range(100): # FIXME make a constant with self.lock: try: self.log.debug(f'source api run {name} {argv} {kwarg}') return getattr(self.nl, name)(*argv, **kwarg) except ( NetlinkError, AttributeError, ValueError, KeyError, TypeError, socket.error, struct.error, ): raise except Exception as e: # probably the source is restarting self.errors_counter += 1 self.log.debug(f'source api error: <{e}>') time.sleep(1) raise RuntimeError('api call failed') def fake_zero_if(self): url = 'https://github.com/svinota/pyroute2/issues/737' zero_if = ifinfmsg() zero_if['index'] = 0 zero_if['state'] = 'up' zero_if['flags'] = 1 zero_if['header']['flags'] = 2 
zero_if['header']['type'] = 16 zero_if['header']['target'] = self.target zero_if['event'] = 'RTM_NEWLINK' zero_if['attrs'] = [ ('IFLA_IFNAME', url), ('IFLA_ADDRESS', '00:00:00:00:00:00'), ] zero_if.encode() self.evq.put([zero_if], source=self.target) def receiver(self): # # The source thread routine -- get events from the # channel and forward them into the common event queue # # The routine exists on an event with error code == 104 # while self.state.get() != 'stop': if self.shutdown.is_set(): break with self.lock: if self.nl is not None: try: self.nl.close(code=0) except Exception as e: self.log.warning('source restart: %s' % e) try: self.state.set('connecting') if isinstance(self.nl_prime, type): spec = {} spec.update(self.nl_kwarg) if self.kind in ('nsmanager',): spec['libc'] = self.ndb.libc self.nl = self.nl_prime(**spec) else: raise TypeError('source channel not supported') self.state.set('loading') # self.nl.bind(**self.bind_arguments) # # Initial load -- enqueue the data # try: self.ndb.task_manager.db_flush(self.target) if self.kind in ('local', 'netns', 'remote'): self.fake_zero_if() self.evq.put(self.nl.dump(), source=self.target) finally: pass self.errors_counter = 0 except Exception as e: self.errors_counter += 1 self.started.set() self.state.set(f'failed, counter {self.errors_counter}') self.log.error(f'source error: {type(e)} {e}') try: self.evq.put( (cmsg_failed(self.target),), source=self.target ) except ShutdownException: self.state.set('stop') break if self.must_restart: self.log.debug('sleeping before restart') self.state.set('restart') self.shutdown.wait(SOURCE_FAIL_PAUSE) if self.shutdown.is_set(): self.log.debug('source shutdown') self.state.set('stop') break else: return self.set_ready() continue with self.lock: if self.state.get() == 'loading': if not self.set_ready(): break self.started.set() self.shutdown.clear() self.state.set('running') while self.state.get() not in ('stop', 'restart'): try: msg = tuple(self.nl.get()) except Exception 
as e: self.errors_counter += 1 self.log.error('source error: %s %s' % (type(e), e)) msg = None if self.must_restart: self.state.set('restart') else: self.state.set('stop') break code = 0 if msg and msg[0]['header']['error']: code = msg[0]['header']['error'].code if msg is None or code == errno.ECONNRESET: self.state.set('stop') break try: self.evq.put(msg, source=self.target) except ShutdownException: self.state.set('stop') break # thus we make sure that all the events from # this source are consumed by the main loop # in __dbm__() routine try: self.sync() self.log.debug('flush DB for the target') self.ndb.task_manager.db_flush(self.target) except ShutdownException: self.log.debug('shutdown handled by the main thread') pass self.state.set('stopped') def sync(self): self.log.debug('sync') sync = threading.Event() self.evq.put((cmsg_event(self.target, sync),), source=self.target) sync.wait() def start(self): # # Start source thread with self.lock: self.log.debug('starting the source') if (self.th is not None) and self.th.is_alive(): raise RuntimeError('source is running') self.th = threading.Thread( target=self.receiver, name='NDB event source: %s' % (self.target), ) self.th.start() return self def close(self, code=errno.ECONNRESET, sync=True): with self.shutdown_lock: if self.shutdown.is_set(): self.log.debug('already stopped') return self.log.debug('source shutdown') self.shutdown.set() if self.nl is not None: try: self.nl.close(code=code) except Exception as e: self.log.error('source close: %s' % e) if sync: if self.th is not None: self.th.join() self.th = None else: self.log.debug('receiver thread missing') def restart(self, reason='unknown'): with self.lock: with self.shutdown_lock: self.log.debug('restarting the source, reason <%s>' % (reason)) self.started.clear() try: self.close() if self.th: self.th.join() self.shutdown.clear() self.start() finally: pass self.started.wait() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): 
self.close() def load_sql(self): # spec = self.ndb.task_manager.db_fetchone( ''' SELECT * FROM sources WHERE f_target = %s ''' % self.ndb.schema.plch, (self.target,), ) self['target'], self['kind'] = spec for spec in self.ndb.task_manager.db_fetch( ''' SELECT * FROM sources_options WHERE f_target = %s ''' % self.ndb.schema.plch, (self.target,), ): f_target, f_name, f_type, f_value = spec self[f_name] = int(f_value) if f_type == 'int' else f_value pyroute2-0.7.11/pyroute2/ndb/task_manager.py000066400000000000000000000231201455030217500207330ustar00rootroot00000000000000import inspect import logging import queue import threading import time import traceback from functools import partial from pyroute2 import config from . import schema from .events import ( DBMExitException, InvalidateHandlerException, RescheduleException, ShutdownException, ) from .messages import cmsg, cmsg_event, cmsg_failed, cmsg_sstart log = logging.getLogger(__name__) def Events(*argv): for sequence in argv: if sequence is not None: for item in sequence: yield item class NDBConfig(dict): def __init__(self, task_manager): self.task_manager = task_manager def __getitem__(self, key): return self.task_manager.config_get(key) def __setitem__(self, key, value): return self.task_manager.config_set(key, value) def __delitem__(self, key): return self.task_manager.config_del(key) def keys(self): return self.task_manager.config_keys() def items(self): return self.task_manager.config_items() def values(self): return self.task_manager.config_values() class TaskManager: def __init__(self, ndb): self.ndb = ndb self.log = ndb.log self.event_map = {} self.event_queue = ndb._event_queue self.thread = None self.ctime = self.gctime = time.time() def register_handler(self, event, handler): if event not in self.event_map: self.event_map[event] = [] self.event_map[event].append(handler) def unregister_handler(self, event, handler): self.event_map[event].remove(handler) def default_handler(self, target, event): if 
isinstance(getattr(event, 'payload', None), Exception): raise event.payload log.debug('unsupported event ignored: %s' % type(event)) def check_sources_started(self, _locals, target, event): _locals['countdown'] -= 1 if _locals['countdown'] == 0: self.ndb._dbm_ready.set() def wrap_method(self, method): # # this wrapper will be published in the DBM thread # def _do_local_generator(target, request): try: for item in method(*request.argv, **request.kwarg): request.response.put(item) request.response.put(StopIteration()) except Exception as e: request.response.put(e) def _do_local_single(target, request): try: (request.response.put(method(*request.argv, **request.kwarg))) except Exception as e: (request.response.put(e)) # # this class will be used to map the requests # class cmsg_req(cmsg): def __init__(self, response, *argv, **kwarg): self['header'] = {'target': None} self.response = response self.argv = argv self.kwarg = kwarg # # this method will proxy the original one # def _do_dispatch_generator(self, *argv, **kwarg): if self.thread == id(threading.current_thread()): # same thread, run method locally for item in method(*argv, **kwarg): yield item else: # another thread, run via message bus response = queue.Queue() request = cmsg_req(response, *argv, **kwarg) self.event_queue.put((request,)) while True: item = response.get() if isinstance(item, StopIteration): return elif isinstance(item, Exception): raise item else: yield item def _do_dispatch_single(self, *argv, **kwarg): if self.thread == id(threading.current_thread()): # same thread, run method locally return method(*argv, **kwarg) else: # another thread, run via message bus response = queue.Queue(maxsize=1) request = cmsg_req(response, *argv, **kwarg) self.event_queue.put((request,)) ret = response.get() if isinstance(ret, Exception): raise ret else: return ret # # return the method spec to be announced # handler = _do_local_single proxy = _do_dispatch_single if inspect.isgeneratorfunction(method): handler = 
_do_local_generator proxy = _do_dispatch_generator return (cmsg_req, handler, proxy) def register_api(self, api_obj, prefix=''): for name in dir(api_obj): method = getattr(api_obj, name, None) if hasattr(method, 'publish'): if isinstance(method.publish, str): name = method.publish name = f'{prefix}{name}' event, handler, proxy = self.wrap_method(method) setattr(self, name, partial(proxy, self)) self.event_map[event] = [handler] def run(self): _locals = {'countdown': len(self.ndb._nl)} self.thread = id(threading.current_thread()) # init the events map event_map = { cmsg_event: [lambda t, x: x.payload.set()], cmsg_failed: [lambda t, x: (self.ndb.schema.mark(t, 1))], cmsg_sstart: [partial(self.check_sources_started, _locals)], } self.event_map = event_map try: self.ndb.schema = schema.DBSchema( self.ndb.config, self.ndb.sources, self.event_map, self.log.channel('schema'), ) self.register_api(self.ndb.schema, 'db_') self.register_api(self.ndb.schema.config, 'config_') self.ndb.bonfig = NDBConfig(self) except Exception as e: self.ndb._dbm_error = e self.ndb._dbm_ready.set() return for spec in self.ndb._nl: spec['event'] = None self.ndb.sources.add(**spec) for event, handlers in self.ndb.schema.event_map.items(): for handler in handlers: self.register_handler(event, handler) stop = False source = None reschedule = [] while not stop: source, events = self.event_queue.get() events = Events(events, reschedule) reschedule = [] try: for event in events: handlers = event_map.get( event.__class__, [self.default_handler] ) for handler in tuple(handlers): try: target = event['header']['target'] handler(target, event) except RescheduleException: if 'rcounter' not in event['header']: event['header']['rcounter'] = 0 if event['header']['rcounter'] < 3: event['header']['rcounter'] += 1 self.log.debug('reschedule %s' % (event,)) reschedule.append(event) else: self.log.error('drop %s' % (event,)) except InvalidateHandlerException: try: handlers.remove(handler) except Exception: 
self.log.error( 'could not invalidate ' 'event handler:\n%s' % traceback.format_exc() ) except ShutdownException: stop = True break except DBMExitException: return except Exception: self.log.error( 'could not load event:\n%s\n%s' % (event, traceback.format_exc()) ) if time.time() - self.gctime > config.gc_timeout: self.gctime = time.time() except Exception as e: self.log.error(f'exception <{e}> in source {source}') # restart the target try: self.log.debug(f'requesting source {source} restart') self.ndb.sources[source].state.set('restart') except KeyError: self.log.debug(f'key error for {source}') pass # release all the sources for target in tuple(self.ndb.sources.cache): source = self.ndb.sources.remove(target, sync=False) if source is not None and source.th is not None: self.log.debug(f'closing source {source}') source.close() if self.ndb.schema.config['db_cleanup']: self.log.debug('flush DB for the target %s' % target) self.ndb.schema.flush(target) else: self.log.debug('leave DB for debug') # close the database self.ndb.schema.commit() self.ndb.schema.close() # close the logging for handler in self.log.logger.handlers: handler.close() pyroute2-0.7.11/pyroute2/ndb/transaction.py000066400000000000000000000260611455030217500206330ustar00rootroot00000000000000''' One object ---------- All the changes done using one object are applied in the order defined by the corresponding object class. .. code-block:: python eth0 = ndb.interfaces["eth0"] eth0.add_ip(address="10.0.0.1", prefixlen=24) eth0.set(state="up") eth0.set(mtu=1400) eth0.commit() In the example above first the interface attributes like state, mtu, ifname etc. will be applied, and only then IP addresses, bridge ports and like that, regardless the order they are referenced before the `commit()` call. The order is ok for most of cases. But if not, one can control it by calling `commit()` in the required places, breaking one transaction into several sequential transactions. 
The simplest use case for external checks is to test if a remote IP is still reachable after the changes are applied:
If the subprocess doesn't finish within the timeout, it will be terminated with SIGTERM. SIGKILL is not used.
''' def __init__(self, command, log=None, timeout=None): if not isinstance(command, str): raise TypeError('command must be a non empty string') if not len(command) > 0: raise TypeError('command must be a non empty string') self.log = log or global_log self.command = command self.args = shlex.split(command) self.timeout = timeout self.return_code = None self.out = None self.err = None self.lock = threading.Lock() def commit(self): with self.lock: self.args[0] = shutil.which(self.args[0]) if self.args[0] is None: raise FileNotFoundError() process = subprocess.Popen( self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) try: self.log.debug(f'process check {self.args}') self.out, self.err = process.communicate(timeout=self.timeout) self.log.debug(f'process output: {self.out}') self.log.debug(f'process stderr: {self.err}') except subprocess.TimeoutExpired: self.log.debug('process timeout expired') process.terminate() process.stdout.close() process.stderr.close() finally: self.return_code = process.wait() if self.return_code != 0: raise CheckProcessException('CheckProcess failed') def rollback(self): pass def __repr__(self): return f'[{self.command}]' class PingAddress(CheckProcess): def __init__(self, address, log=None, timeout=1): super(PingAddress, self).__init__( f'ping -c 1 -W {timeout} {address}', log=log ) class Not: ''' Negate the `CheckProcess` results. If `CheckProcess.commit()` succeeds, raise CheckProcessException, and vice versa, if `CheckProcess.commit()` fails, return success. ''' def __init__(self, transaction): self.tx = transaction def commit(self): success = True try: self.tx.commit() except Exception: success = False if success: raise CheckProcessException(f'{self.tx} succeeded') def rollback(self): pass class Transaction: ''' `Transaction` class is an independent utility class. Being designed to be used with NDB object transactions, it may be used with any object implementing commit/rollback protocol, see `commit()` method. 
The class supports the context manager protocol and `Transaction` objects may be used in `with` statements: .. code-block:: python with Transaction() as tx: tx.push(obj0) # enqueue objects tx.push(obj1) # --> <-- run commit() for every object in self.queue # # if any commit() fails, run rollback() for every # executed commit() in the reverse order NDB provides a utility method to create `Transaction` objects: .. code-block:: python with ndb.begin() as tx: tx.push(ndb.interfaces["eth0"].set(state="up")) tx.push(ndb.interfaces["eth1"].set(state="up")) ''' def __init__(self, log=None): self.queue = [] self.event = threading.Event() self.event.clear() self.log = global_log or log self.log.debug('begin transaction') def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: self.commit() def push(self, *argv): ''' Push objects to the transaction queue. One may use any number of positional arguments: .. code-block:: python tx.push(obj0) tx.push(obj0, obj1, obj2) tx.push(*[obj0, obj1, obj2]) ''' for obj in argv: self.log.debug('queue %s' % type(obj)) self.queue.append(obj) return self def append(self, obj): ''' Append one object to the queue. ''' self.log.debug('queue %s' % type(obj)) self.push(obj) return self def pop(self, index=-1): ''' Pop an object from the queue. If an index is not specified, pop the last (the rightmost) object. ''' self.log.debug('pop %s' % index) self.queue.pop(index) return self def insert(self, index, obj): ''' Insert an object into the queue. The position index is required. ''' self.log.debug('insert %i %s' % (index, type(obj))) self.queue.insert(index, obj) return self def cancel(self): ''' Cancel the transaction and empty the queue. ''' self.log.debug('cancel transaction') self.queue = [] return self def wait(self, timeout=None): ''' Wait until the transaction to be successfully committed. ''' return self.event.wait(timeout) def done(self): ''' Check if the done event is set. 
import pickle
import select
import socket
import struct
import time
import uuid


class IdCache(dict):
    """Message-id -> timestamp map, bounded by periodic cleanup."""

    def invalidate(self):
        # drop entries older than 60 seconds
        current_time = time.time()
        collect_time = current_time - 60
        for mid, meta in tuple(self.items()):
            if meta < collect_time:
                self.pop(mid)

    def __setitem__(self, key, value):
        # keep the cache reasonably small
        if len(self) > 100:
            self.invalidate()
        dict.__setitem__(self, key, value)


class Peer(object):
    """An outgoing connection to a remote messenger."""

    def __init__(self, remote_id, local_id, address, port, cache):
        self.address = address
        self.port = port
        self.socket = None
        self.remote_id = remote_id
        self.local_id = local_id
        self.cache = cache
        self.version = 0
        self.last_exception_time = 0

    @property
    def connected(self):
        return self.socket is not None

    def __repr__(self):
        # bug fix: the branches were swapped, reporting connected
        # peers as "not connected" and vice versa
        if self.connected:
            connected = 'connected'
        else:
            connected = 'not connected'
        return '[%s-%s] %s:%s [%s]' % (
            self.local_id,
            self.remote_id,
            self.address,
            self.port,
            connected,
        )

    def hello(self):
        # generate a unique message id and cache it, so the broadcast
        # HELLO will not be dispatched back to this node
        while True:
            message_id = str(uuid.uuid4().hex)
            if message_id not in self.cache:
                self.cache[message_id] = time.time()
                break
        data = pickle.dumps(
            {'type': 'system', 'id': message_id, 'data': 'HELLO'}
        )
        self.send(data)

    def send(self, data):
        # frame format: 12 byte header ('III': length, version,
        # sender id) followed by the pickled payload
        length = len(data)
        data = struct.pack('III', length, self.version, self.local_id) + data
        if self.socket is None:
            # do not retry a failed connect more often than
            # once in 5 seconds
            if time.time() - self.last_exception_time < 5:
                return
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                self.socket.connect((self.address, self.port))
                self.hello()
            except Exception:
                self.last_exception_time = time.time()
                self.socket = None
                return
        try:
            self.socket.send(data)
        except Exception:
            # drop the connection; it will be re-established on the
            # next send() attempt
            try:
                self.socket.close()
            except Exception:
                pass
            self.socket = None

    def close(self):
        self.socket.close()


class Transport(object):
    """Accept incoming connections and fan messages out to peers."""

    def __init__(self, address, port):
        self.peers = []
        self.address = address
        self.port = port
        self.version = 0
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1048576)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((self.address, self.port))
        self.socket.listen(16)
        self.stream_endpoints = []

    def add_peer(self, peer):
        self.peers.append(peer)

    def send(self, data, exclude=None):
        exclude = exclude or []
        ret = []
        for peer in self.peers:
            if peer.remote_id not in exclude:
                ret.append(peer.send(data))
        return ret

    def get(self):
        while True:
            fds = [self.socket] + self.stream_endpoints
            [rlist, wlist, xlist] = select.select(fds, [], fds)
            for fd in xlist:
                if fd in self.stream_endpoints:
                    self.stream_endpoints.pop(
                        self.stream_endpoints.index(fd)
                    )
            for fd in rlist:
                if fd == self.socket:
                    # a new incoming connection
                    new_fd, raddr = self.socket.accept()
                    self.stream_endpoints.append(new_fd)
                else:
                    # bug fix: the frame header is 12 bytes --
                    # struct.calcsize('III'), see Peer.send() -- not 8;
                    # reading only 8 bytes made the unpack below fail
                    data = fd.recv(12)
                    if len(data) == 0:
                        # EOF -- drop the endpoint
                        self.stream_endpoints.pop(
                            self.stream_endpoints.index(fd)
                        )
                        continue
                    length, version, remote_id = struct.unpack('III', data)
                    if version != self.version:
                        continue
                    data = b''
                    while len(data) < length:
                        data += fd.recv(length - len(data))
                    return data, remote_id

    def close(self):
        self.socket.close()


class Messenger(object):
    """Deduplicate, filter and forward messages between peers."""

    def __init__(self, local_id, transport=None):
        self.local_id = local_id
        self.transport = transport or Transport('127.0.0.1', 5680)
        self.targets = set()
        self.id_cache = IdCache()

    def __iter__(self):
        return self

    def __next__(self):
        while True:
            msg = self.handle()
            if msg is not None:
                return msg

    def handle(self):
        data, remote_id = self.transport.get()
        # NOTE(security): pickle.loads() on data from the network is
        # unsafe; this transport must only connect trusted peers
        message = pickle.loads(data)

        if message['id'] in self.id_cache:
            # discard a duplicate message
            return None
        # bug fix: record the id *before* handling the message; the
        # original cached it only after the system-message branch, so
        # system messages were never cached and a forwarded copy could
        # circulate between peers forever
        self.id_cache[message['id']] = time.time()

        if message['type'] == 'system':
            # forward system messages
            self.transport.send(data, exclude=[remote_id])
            return message

        if (
            message['type'] == 'transport'
            and message['target'] in self.targets
        ):
            # ignore DB updates with the same target
            message = None
        elif (
            message['type'] == 'api'
            and message['target'] not in self.targets
        ):
            # ignore API messages with other targets
            message = None
        self.transport.send(data, exclude=[remote_id])
        return message

    def emit(self, message):
        # assign a unique id before broadcasting
        while True:
            message_id = '%s-%s' % (
                message.get('target', '-'),
                uuid.uuid4().hex,
            )
            if message_id not in self.id_cache:
                self.id_cache[message_id] = time.time()
                break
        message['id'] = message_id
        return self.transport.send(pickle.dumps(message))

    def add_peer(self, remote_id, address, port):
        peer = Peer(remote_id, self.local_id, address, port, self.id_cache)
        self.transport.add_peer(peer)
class TmpHandler:
    """Context manager that registers an event handler on enter and
    removes it again on exit."""

    def __init__(self, ndb, event, handler):
        self.ndb = ndb
        self.event = event
        self.handler = handler

    def __enter__(self):
        event_class = self.ndb.schema.classes[self.event]
        self.ndb.task_manager.register_handler(event_class, self.handler)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        event_class = self.ndb.schema.classes[self.event]
        self.ndb.task_manager.unregister_handler(event_class, self.handler)
OrderedDict() self.classes['interfaces'] = Interface self.classes['addresses'] = Address self.classes['neighbours'] = Neighbour self.classes['af_bridge_fdb'] = FDBRecord self.classes['routes'] = Route self.classes['rules'] = Rule self.classes['netns'] = NetNS self.classes['af_bridge_vlans'] = Vlan def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass @property def default_target(self): if self.table == 'netns': return self.ndb.nsmanager else: return self.ndb.localhost @property def context(self): if self.chain is not None: return self.chain.context else: return {} def getmany(self, spec, table=None): return self.ndb.task_manager.db_get(table or self.table, spec) def getone(self, spec, table=None): for obj in self.getmany(spec, table): return obj @cli.change_pointer @check_auth('obj:read') def get(self, spec=None, table=None, **kwarg): spec = spec or kwarg try: return self.__getitem__(spec, table) except KeyError: return None def template(self, key, table=None): if self.chain: context = self.chain.context else: context = {} iclass = self.classes[table or self.table] spec = iclass.new_spec(key, context, self.default_target) return iclass( self, spec, load=False, master=self.chain, auth_managers=self.auth_managers, ) @cli.change_pointer @check_auth('obj:modify') def create(self, *argspec, **kwspec): iclass = self.classes[self.table] if self.chain: context = self.chain.context else: context = {} spec = iclass.new_spec( kwspec or argspec[0], context, self.default_target ) if self.chain: spec['ndb_chain'] = self.chain spec['create'] = True return self[spec] @cli.change_pointer @check_auth('obj:modify') def add(self, *argspec, **kwspec): self.log.warning( '''\n The name add() will be removed in future releases, use create() instead. If you believe that the idea to rename is wrong, please file your opinion to the project's bugtracker. 
    @check_auth('obj:read')
    def wait(self, **spec):
        # Block until an object matching `spec` appears (action='add',
        # the default) or disappears (action='remove').
        #
        # timeout: seconds to wait; -1 (default) waits forever
        # action: 'add' or 'remove'
        # raises TimeoutError when the timeout expires
        ret = None
        timeout = spec.pop('timeout', -1)
        action = spec.pop('action', 'add')
        ctime = time.time()

        # install a limited events queue -- for a possible
        # immediate reaction
        evq = queue.Queue(maxsize=100)

        def handler(evq, target, event):
            # ignore the "queue full" exception
            #
            # if we miss some events here, nothing bad happens: we just
            # load them from the DB after a timeout, falling back to
            # the DB polling
            #
            # the most important here is not to allocate too much memory
            try:
                evq.put_nowait((target, event))
            except queue.Full:
                pass

        with TmpHandler(self.ndb, self.event, partial(handler, evq)):
            while True:
                # poll the DB first; the event queue below only serves
                # to wake this loop up early
                ret = self.get(spec)
                if (ret and action == 'add') or (
                    ret is None and action == 'remove'
                ):
                    return ret
                try:
                    # wake up at least once a second to re-check the DB
                    target, msg = evq.get(timeout=1)
                except queue.Empty:
                    pass
                if timeout > -1:
                    if ctime + timeout < time.time():
                        raise TimeoutError()
    @check_auth('obj:read')
    def __getitem__(self, key, table=None):
        # Return an RTNL object for the key: either the cached object
        # or a freshly created one loaded from the DB.
        ret = self.template(key, table)

        # rtnl_object.key() returns a dictionary that can not
        # be used as a cache key. Create here a tuple from it.
        # The key order guaranteed by the dictionary.
        cache_key = tuple(ret.key.items())

        rtime = time.time()

        # Iterate all the cache to remove unused and clean
        # (without any started transaction) objects.
        for ckey in tuple(self.cache):
            # Skip the current cache_key to avoid extra
            # cache del/add records in the logs
            if ckey == cache_key:
                continue

            # 1. Remove only expired items
            # 2. The number of changed rtnl_object fields must
            #    be 0 which means that no transaction is started
            # 3. The number of referrers must be > 1, the first
            #    one is the cache itself <- this op is expensive!
            #
            # NOTE(review): `gc.get_referrers(...)` is used only for its
            # truthiness here, which does not match the "> 1" wording
            # above -- confirm whether the referrer count should be
            # compared explicitly
            if (
                rtime - self.cache[ckey].atime > config.cache_expire
                and self.cache[ckey].clean
                and gc.get_referrers(self.cache[ckey])
            ):
                self.log.debug('cache del %s' % (ckey,))
                self.cache.pop(ckey, None)

        if cache_key in self.cache:
            self.log.debug('cache hit %s' % (cache_key,))
            # Explicitly get rid of the created object
            del ret
            # The object from the cache has already
            # registered callbacks, simply return it
            ret = self.cache[cache_key]
            ret.atime = rtime
            return ret
        else:
            # Cache only existing objects
            if self.exists(key):
                ret.load_sql()
                self.log.debug('cache add %s' % (cache_key,))
                self.cache[cache_key] = ret

        ret.register()
        return ret
class SourcesView(View):
    """A specialized view for NDB sources.

    Unlike other views it keeps Source objects directly in its cache
    and serves lookups from that cache instead of querying the DB.
    """

    def __init__(self, ndb, auth_managers=None):
        super(SourcesView, self).__init__(ndb, 'sources')
        self.classes['sources'] = Source
        self.cache = {}
        self.proxy = {}
        self.lock = threading.Lock()
        if auth_managers is None:
            auth_managers = []
        self.auth_managers = auth_managers

    def async_add(self, **spec):
        # Start the source and return immediately, without waiting
        # for its first event.
        spec = dict(Source.defaults(spec))
        self.cache[spec['target']] = Source(self.ndb, **spec).start()
        return self.cache[spec['target']]

    def add(self, **spec):
        # Start a source; unless the caller provides its own 'event',
        # block until the source reports its first event.
        spec = dict(Source.defaults(spec))
        target = spec['target']
        if target in self:
            raise KeyError(f'source {target} exists')
        if 'event' not in spec:
            sync = True
            spec['event'] = threading.Event()
        else:
            sync = False
        self.cache[spec['target']] = Source(self.ndb, **spec).start()
        if sync:
            self.cache[spec['target']].event.wait()
        return self.cache[spec['target']]

    def remove(self, target, code=errno.ECONNRESET, sync=True):
        # Stop and drop a source; raises KeyError for unknown targets.
        if target not in self:
            raise KeyError(f'source {target} does not exist')
        with self.lock:
            if target in self.cache:
                source = self.cache[target]
                source.close(code=code, sync=sync)
                return self.cache.pop(target)

    @check_auth('obj:list')
    def keys(self):
        # Iterate cached targets only; no DB access involved.
        for key in self.cache:
            yield key

    def _keys(self, iclass):
        return ['target', 'kind']

    def wait(self, **spec):
        # wait() makes no sense for sources
        raise NotImplementedError()

    def _summary(self, *argv, **kwarg):
        return self._dump(*argv, **kwarg)

    def __getitem__(self, key, table=None):
        # Accept either a target string or a dict with a 'target' key.
        if isinstance(key, basestring):
            target = key
        elif isinstance(key, dict) and 'target' in key.keys():
            target = key['target']
        else:
            raise KeyError()

        if target in self.cache:
            return self.cache[target]
        elif target in self.proxy:
            return self.proxy[target]
        else:
            # unknown target: hand out (and memoize) a proxy object
            proxy = SourceProxy(self.ndb, target)
            self.proxy[target] = proxy
            return proxy
Netlink is an asynchronous protocol -- it means, that the packet order doesn't matter and is not guaranteed. But responses to a request are always marked with the same sequence number, so you can treat it as a cookie. Please keep in mind, that a netlink request can initiate a cascade of events, and netlink messages from these events can carry sequence number == 0. E.g., it is so when you remove a primary IP addr from an interface, when `promote_secondaries` sysctl is set. Beside of incapsulated headers and other protocol-specific data, netlink messages can carry NLA (netlink attributes). NLA structure is as follows:: NLA header: uint16 length uint16 type NLA data: data-specific struct # optional: NLA NLA ... So, NLA structures can be nested, forming a tree. Complete structure of a netlink packet:: nlmsg header: uint32 length uint16 type uint16 flags uint32 sequence number uint32 pid [ optional protocol-specific data ] [ optional NLA tree ] More information about netlink protocol you can find in the man pages. pyroute2 and netlink ==================== packets ~~~~~~~ To simplify the development, pyroute2 provides an easy way to describe packet structure. As an example, you can take the ifaddrmsg description -- `pyroute2/netlink/rtnl/ifaddrmsg.py`. To describe a packet, you need to inherit from `nlmsg` class:: from pyroute2.netlink import nlmsg class foo_msg(nlmsg): fields = ( ... ) nla_map = ( ... ) NLA are described in the same way, but the parent class should be `nla`, instead of `nlmsg`. And yes, it is important to use the proper parent class -- it affects the header structure. fields attribute ~~~~~~~~~~~~~~~~ The `fields` attribute describes the structure of the protocol-specific data. It is a tuple of tuples, where each member contains a field name and its data format. Field data format should be specified as for Python `struct` module. 
E.g., ifaddrmsg structure:: struct ifaddrmsg { __u8 ifa_family; __u8 ifa_prefixlen; __u8 ifa_flags; __u8 ifa_scope; __u32 ifa_index; }; should be described as follows:: class ifaddrmsg(nlmsg): fields = (('family', 'B'), ('prefixlen', 'B'), ('flags', 'B'), ('scope', 'B'), ('index', 'I')) Format strings are passed directly to the `struct` module, so you can use all the notations like `>I`, `16s` etc. All fields are parsed from the stream separately, so if you want to explicitly fix alignemt, as if it were C struct, use the `pack` attribute:: class tstats(nla): pack = 'struct' fields = (('version', 'H'), ('ac_exitcode', 'I'), ('ac_flag', 'B'), ...) Explicit padding bytes also can be used, when struct packing doesn't work well:: class ipq_mode_msg(nlmsg): pack = 'struct' fields = (('value', 'B'), ('__pad', '7x'), ('range', 'I'), ('__pad', '12x')) nla_map attribute ~~~~~~~~~~~~~~~~~ The `nla_map` attribute is a tuple of NLA descriptions. Each description is also a tuple in two different forms: either two fields, name and format, or three fields: type, name and format. Please notice, that the format field is a string name of corresponding NLA class:: class ifaddrmsg(nlmsg): ... nla_map = (('IFA_UNSPEC', 'hex'), ('IFA_ADDRESS', 'ipaddr'), ('IFA_LOCAL', 'ipaddr'), ...) This code will create mapping, where IFA_ADDRESS NLA will be of type 1 and IFA_LOCAL -- of type 2, etc. Both NLA will be decoded as IP addresses (class `ipaddr`). IFA_UNSPEC will be of type 0, and if it will be in the NLA tree, it will be just dumped in hex. NLA class names are should be specified as strings, since they are resolved in runtime. 
There are several pre-defined NLA types, that you will get with `nla` class: - `none` -- ignore this NLA - `flag` -- boolean flag NLA (no payload; NLA exists = True) - `uint8`, `uint16`, `uint32`, `uint64` -- unsigned int - `be8`, `be16`, `be32`, `be64` -- big-endian unsigned int - `ipaddr` -- IP address, IPv4 or IPv6 - `ip4addr` -- only IPv4 address type - `ip6addr` -- only IPv6 address type - `target` -- a univeral target (IPv4, IPv6, MPLS) - `l2addr` -- MAC address - `lladdr` -- link layer address (MAC, IPv4, IPv6) - `hex` -- hex dump as a string -- useful for debugging - `cdata` -- a binary data - `string` -- UTF-8 string - `asciiz` -- zero-terminated ASCII string, no decoding - `array` -- array of simple types (uint8, uint16 etc.) Please refer to `pyroute2/netlink/__init__.py` for details. You can also make your own NLA descriptions:: class ifaddrmsg(nlmsg): ... nla_map = (... ('IFA_CACHEINFO', 'cacheinfo'), ...) class cacheinfo(nla): fields = (('ifa_preferred', 'I'), ('ifa_valid', 'I'), ('cstamp', 'I'), ('tstamp', 'I')) Custom NLA descriptions should be defined in the same class, where they are used. explicit NLA type ids ~~~~~~~~~~~~~~~~~~~~~ Also, it is possible to use not autogenerated type numbers, as for ifaddrmsg, but specify them explicitly:: class iw_event(nla): ... nla_map = ((0x8B00, 'SIOCSIWCOMMIT', 'hex'), (0x8B01, 'SIOCGIWNAME', 'hex'), (0x8B02, 'SIOCSIWNWID', 'hex'), (0x8B03, 'SIOCGIWNWID', 'hex'), ...) Here you can see custom NLA type numbers -- 0x8B00, 0x8B01 etc. It is not permitted to mix these two forms in one class: you should use ether autogenerated type numbers (two fields tuples), or explicit numbers (three fields typles). nla map adapters ~~~~~~~~~~~~~~~~ If the default declarative NLA map is not flexible enough, one can use a custom map adapter. In order to do so, one should define at least one function to return `pyroute2.netlink.NlaSpec()`, and one optional function to tell the parser if the attribute is supported. 
The simplest definition only to decode packets: .. code-block:: python from pyroute2.netlink import NlaMapAdapter, NlaSpec, nlmsg def my_flexible_nla_spec(key): return NlaSpec(nlmsg_atoms.hex, key, f'NLA_CLASS_{key}') class my_msg(nlmsg): nla_map = NlaMapAdapter(my_flexible_nla_spec) # example result [ { 'attrs': [ ('NLA_CLASS_1', '00:00:00:00'), ('NLA_CLASS_5', '00:00:00:00'), ], 'header': { ... }, }, ] In this example the same routine is used both for decoding and encoding workflows, but the workflows are not equal, thus the example will fail on encoding. Still the example may be useful if you don't plan to encode packets of this type. The decoding workflow will pass an integer as the `key` for NLA type, while the encoding workflow passes a string as the `key` for NLA name. To correctly handle both workflows, you can use either the `key` type discrimination, or the explicit declaration syntax: .. code-block:: python # discriminate workflows by the key type def my_flexible_nla_spec(key): if isinstance(key, int): # decoding workflow ... else: # encoding workflow ... class my_msg(nlmsg): nla_map = NlaMapAdapter(my_flexible_nla_spec) .. code-block:: python # declarate separate workflows def my_flexible_nla_spec_encode(key): # receives a string -- nla type name ... def my_flexible_nla_spec_decode(key): # receives an int -- nla type id ... class my_msg(nlmsg): nla_map = { 'decode': NlaMapAdapter(my_flexible_nla_spec_decode), 'encode': NlaMapAdapter(my_flexible_nla_spec_encode), } array types ~~~~~~~~~~~ There are different array-like NLA types in the kernel, and some of them are covered by pyroute2. An array of simple type elements:: # declaration nla_map = (('NLA_TYPE', 'array(uint8)'), ...) # data layout +======+======+---------------------------- | len | type | uint8 | uint8 | uint 8 | ... 
+======+======+---------------------------- # decoded {'attrs': [['NLA_TYPE', (2, 3, 4, 5, ...)], ...], ...} An array of NLAs:: # declaration nla_map = (('NLA_TYPE', '*type'), ...) # data layout +=======+=======+-----------------------+-----------------------+-- | len | type* | len | type | payload | len | type | payload | ... +=======+=======+-----------------------+-----------------------+-- # type* -- in that case the type is OR'ed with NLA_F_NESTED # decoded {'attrs': [['NLA_TYPE', [payload, payload, ...]], ...], ...} parsed netlink message ~~~~~~~~~~~~~~~~~~~~~~ Netlink messages are represented by pyroute2 as dictionaries as follows:: {'header': {'pid': ..., 'length: ..., 'flags': ..., 'error': None, # if you are lucky 'type': ..., 'sequence_number': ...}, # fields attributes 'field_name1': value, ... 'field_nameX': value, # nla tree 'attrs': [['NLA_NAME1', value], ... ['NLA_NAMEX', value], ['NLA_NAMEY', {'field_name1': value, ... 'field_nameX': value, 'attrs': [['NLA_NAME.... ]]}]]} As an example, a message from the wireless subsystem about new scan event:: {'index': 4, 'family': 0, '__align': 0, 'header': {'pid': 0, 'length': 64, 'flags': 0, 'error': None, 'type': 16, 'sequence_number': 0}, 'flags': 69699, 'ifi_type': 1, 'event': 'RTM_NEWLINK', 'change': 0, 'attrs': [['IFLA_IFNAME', 'wlp3s0'], ['IFLA_WIRELESS', {'attrs': [['SIOCGIWSCAN', '00:00:00:00:00:00:00:00:00:00:00:00']]}]]} One important detail is that NLA chain is represented as a list of elements `['NLA_TYPE', value]`, not as a dictionary. The reason is that though in the kernel *usually* NLA chain is a dictionary, the netlink protocol by itself doesn't require elements of each type to be unique. In a message there may be several NLA of the same type. encoding and decoding algo ~~~~~~~~~~~~~~~~~~~~~~~~~~ The message encoding works as follows: 1. Reserve space for the message header (if there is) 2. Iterate defined `fields`, encoding values with `struct.pack()` 3. 
Iterate NLA from the `attrs` field, looking up types in `nla_map` 4. Encode the header Since every NLA is also an `nlmsg` object, there is a recursion. The decoding process is a bit simpler: 1. Decode the header 2. Iterate `fields`, decoding values with `struct.unpack()` 3. Iterate NLA until the message ends If the `fields` attribute is an empty list, the step 2 will be skipped. The step 3 will be skipped in the case of the empty `nla_map`. If both attributes are empty lists, only the header will be encoded/decoded. create and send messages ~~~~~~~~~~~~~~~~~~~~~~~~ Using high-level interfaces like `IPRoute` or `IPDB`, you will never need to manually construct and send netlink messages. But in the case you really need it, it is simple as well. Having a description class, like `ifaddrmsg` from above, you need to: - instantiate it - fill the fields - encode the packet - send the encoded data The code:: from pyroute2.netlink import NLM_F_REQUEST from pyroute2.netlink import NLM_F_ACK from pyroute2.netlink import NLM_F_CREATE from pyroute2.netlink import NLM_F_EXCL from pyroute2.iproute import RTM_NEWADDR from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg ## # add an addr to an interface # # create the message msg = ifaddrmsg() # fill the protocol-specific fields msg['index'] = index # index of the interface msg['family'] = AF_INET # address family msg['prefixlen'] = 24 # the address mask msg['scope'] = scope # see /etc/iproute2/rt_scopes # attach NLA -- it MUST be a list / mutable msg['attrs'] = [['IFA_LOCAL', '192.168.0.1'], ['IFA_ADDRESS', '192.162.0.1']] # fill generic netlink fields msg['header']['sequence_number'] = nonce # an unique seq number msg['header']['pid'] = os.getpid() msg['header']['type'] = RTM_NEWADDR msg['header']['flags'] = NLM_F_REQUEST |\\ NLM_F_ACK |\\ NLM_F_CREATE |\\ NLM_F_EXCL # encode the packet msg.encode() # send the buffer nlsock.sendto(msg.data, (0, 0)) Please notice, that NLA list *MUST* be mutable. 
''' import io import logging import struct import sys import threading import traceback import types import weakref from collections import OrderedDict from socket import AF_INET, AF_INET6, AF_UNSPEC, inet_ntop, inet_pton from pyroute2.common import AF_MPLS, basestring, hexdump from pyroute2.netlink.exceptions import ( NetlinkDecodeError, NetlinkError, NetlinkNLADecodeError, ) log = logging.getLogger(__name__) # make pep8 happy _ne = NetlinkError # reexport for compatibility _de = NetlinkDecodeError # class NotInitialized(Exception): pass ## # That's a hack for the code linter, which works under # Python3, see unicode reference in the code below if sys.version[0] == '3': unicode = str NLMSG_MIN_TYPE = 0x10 GENL_NAMSIZ = 16 # length of family name GENL_MIN_ID = NLMSG_MIN_TYPE GENL_MAX_ID = 1023 GENL_ADMIN_PERM = 0x01 GENL_CMD_CAP_DO = 0x02 GENL_CMD_CAP_DUMP = 0x04 GENL_CMD_CAP_HASPOL = 0x08 # # List of reserved static generic netlink identifiers: # GENL_ID_GENERATE = 0 GENL_ID_CTRL = NLMSG_MIN_TYPE # # Controller # CTRL_CMD_UNSPEC = 0x0 CTRL_CMD_NEWFAMILY = 0x1 CTRL_CMD_DELFAMILY = 0x2 CTRL_CMD_GETFAMILY = 0x3 CTRL_CMD_NEWOPS = 0x4 CTRL_CMD_DELOPS = 0x5 CTRL_CMD_GETOPS = 0x6 CTRL_CMD_NEWMCAST_GRP = 0x7 CTRL_CMD_DELMCAST_GRP = 0x8 CTRL_CMD_GETMCAST_GRP = 0x9 # unused CTRL_CMD_GETPOLICY = 0xA CTRL_ATTR_UNSPEC = 0x0 CTRL_ATTR_FAMILY_ID = 0x1 CTRL_ATTR_FAMILY_NAME = 0x2 CTRL_ATTR_VERSION = 0x3 CTRL_ATTR_HDRSIZE = 0x4 CTRL_ATTR_MAXATTR = 0x5 CTRL_ATTR_OPS = 0x6 CTRL_ATTR_MCAST_GROUPS = 0x7 CTRL_ATTR_POLICY = 0x8 CTRL_ATTR_OP_POLICY = 0x9 CTRL_ATTR_OP = 0xA CTRL_ATTR_OP_UNSPEC = 0x0 CTRL_ATTR_OP_ID = 0x1 CTRL_ATTR_OP_FLAGS = 0x2 CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 NL_ATTR_TYPE_INVALID = 0 NL_ATTR_TYPE_FLAG = 1 NL_ATTR_TYPE_U8 = 2 NL_ATTR_TYPE_U16 = 3 NL_ATTR_TYPE_U32 = 4 NL_ATTR_TYPE_U64 = 5 NL_ATTR_TYPE_S8 = 6 NL_ATTR_TYPE_S16 = 7 NL_ATTR_TYPE_S32 = 8 NL_ATTR_TYPE_S64 = 9 NL_ATTR_TYPE_BINARY = 10 
NL_ATTR_TYPE_STRING = 11 NL_ATTR_TYPE_NUL_STRING = 12 NL_ATTR_TYPE_NESTED = 13 NL_ATTR_TYPE_NESTED_ARRAY = 14 NL_ATTR_TYPE_BITFIELD32 = 15 NL_POLICY_TYPE_ATTR_UNSPEC = 0 NL_POLICY_TYPE_ATTR_TYPE = 1 NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 2 NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 3 NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 4 NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 5 NL_POLICY_TYPE_ATTR_MIN_LENGTH = 6 NL_POLICY_TYPE_ATTR_MAX_LENGTH = 7 NL_POLICY_TYPE_ATTR_POLICY_IDX = 8 NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 9 NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 10 NL_POLICY_TYPE_ATTR_PAD = 11 NL_POLICY_TYPE_ATTR_MASK = 12 # Different Netlink families # NETLINK_ROUTE = 0 # Routing/device hook NETLINK_UNUSED = 1 # Unused number NETLINK_USERSOCK = 2 # Reserved for user mode socket protocols NETLINK_FIREWALL = 3 # Firewalling hook NETLINK_SOCK_DIAG = 4 # INET socket monitoring NETLINK_NFLOG = 5 # netfilter/iptables ULOG NETLINK_XFRM = 6 # ipsec NETLINK_SELINUX = 7 # SELinux event notifications NETLINK_ISCSI = 8 # Open-iSCSI NETLINK_AUDIT = 9 # auditing NETLINK_FIB_LOOKUP = 10 NETLINK_CONNECTOR = 11 NETLINK_NETFILTER = 12 # netfilter subsystem NETLINK_IP6_FW = 13 NETLINK_DNRTMSG = 14 # DECnet routing messages NETLINK_KOBJECT_UEVENT = 15 # Kernel messages to userspace NETLINK_GENERIC = 16 # leave room for NETLINK_DM (DM Events) NETLINK_SCSITRANSPORT = 18 # SCSI Transports # NLA flags NLA_F_NESTED = 1 << 15 NLA_F_NET_BYTEORDER = 1 << 14 # Netlink message flags values (nlmsghdr.flags) # NLM_F_REQUEST = 1 # It is request message. 
NLM_F_MULTI = 2 # Multipart message, terminated by NLMSG_DONE NLM_F_ACK = 4 # Reply with ack, with zero or error code NLM_F_ECHO = 8 # Echo this request NLM_F_DUMP_INTR = 0x10 # Dump was inconsistent due to sequence change NLM_F_DUMP_FILTERED = 0x20 # Dump was filtered as requested # Modifiers to GET request NLM_F_ROOT = 0x100 # specify tree root NLM_F_MATCH = 0x200 # return all matching NLM_F_ATOMIC = 0x400 # atomic GET NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH # Modifiers to NEW request NLM_F_REPLACE = 0x100 # Override existing NLM_F_EXCL = 0x200 # Do not touch, if it exists NLM_F_CREATE = 0x400 # Create, if it does not exist NLM_F_APPEND = 0x800 # Add to end of list NLM_F_CAPPED = 0x100 NLM_F_ACK_TLVS = 0x200 NLMSG_NOOP = 0x1 # Nothing NLMSG_ERROR = 0x2 # Error NLMSG_DONE = 0x3 # End of a dump NLMSG_OVERRUN = 0x4 # Data lost NLMSG_CONTROL = 0xE # Custom message type for messaging control NLMSG_TRANSPORT = 0xF # Custom message type for NL as a transport NLMSG_MIN_TYPE = 0x10 # < 0x10: reserved control messages NLMSG_MAX_LEN = 0xFFFF # Max message length mtypes = { 1: 'NLMSG_NOOP', 2: 'NLMSG_ERROR', 3: 'NLMSG_DONE', 4: 'NLMSG_OVERRUN', } IPRCMD_NOOP = 0 IPRCMD_STOP = 1 IPRCMD_ACK = 2 IPRCMD_ERR = 3 IPRCMD_REGISTER = 4 IPRCMD_RELOAD = 5 IPRCMD_ROUTE = 6 IPRCMD_CONNECT = 7 IPRCMD_DISCONNECT = 8 IPRCMD_SERVE = 9 IPRCMD_SHUTDOWN = 10 IPRCMD_SUBSCRIBE = 11 IPRCMD_UNSUBSCRIBE = 12 IPRCMD_PROVIDE = 13 IPRCMD_REMOVE = 14 IPRCMD_DISCOVER = 15 IPRCMD_UNREGISTER = 16 SOL_NETLINK = 270 NETLINK_ADD_MEMBERSHIP = 1 NETLINK_DROP_MEMBERSHIP = 2 NETLINK_PKTINFO = 3 NETLINK_BROADCAST_ERROR = 4 NETLINK_NO_ENOBUFS = 5 NETLINK_RX_RING = 6 NETLINK_TX_RING = 7 NETLINK_LISTEN_ALL_NSID = 8 NETLINK_EXT_ACK = 11 NETLINK_GET_STRICT_CHK = 12 clean_cbs = threading.local() # Cached results for some struct operations. # No cache invalidation required. 
class NlaMapAdapter:
    '''
    Adapt a pair of callables to the NLA map protocol.

    `api_get(key)` must return an NLA spec dict (see `NlaSpec`);
    `api_contains(key)` tells the parser whether `key` is handled,
    and by default every key is accepted.
    '''

    def __init__(self, api_get, api_contains=lambda x: True):
        self.api_get = api_get
        self.api_contains = api_contains
        # the owning message type; assigned later by compile_nla()
        self.types = None

    def __contains__(self, key):
        # delegate the membership test to the user callback
        return self.api_contains(key)

    def __getitem__(self, key):
        spec = self.api_get(key)
        nla_class = spec['class']
        if isinstance(nla_class, str):
            # a string spec is a class name to be resolved
            # against the owning message type
            spec['class'] = getattr(self.types, nla_class)
        return spec
    def __init__(
        self, data=None, offset=0, length=None, parent=None, init=None
    ):
        '''
        Initialize a netlink message or NLA instance.

        :param data: binary buffer to decode from / encode into
        :param offset: start of this message within `data`
        :param length: message length, if known in advance
        :param parent: enclosing message, for nested NLAs
        :param init: extra init data from the NLA map, if any
        '''
        # NOTE(review): cache_jit is declared global but never used
        # in this method -- looks vestigial
        global cache_jit
        dict.__init__(self)
        for i in self.fields:
            self[i[0]] = 0  # FIXME: only for number values
        self._buf = None
        self.data = data or bytearray()
        self.offset = offset
        self.length = length or 0
        # the chain of sibling NLAs of the same type, see nla()
        self.chain = [self]
        if parent is not None:
            # some structures use parents, some not,
            # so don't create cycles without need
            self.parent = parent if self.own_parent else weakref.proxy(parent)
        else:
            self.parent = None
        self.decoded = False
        self._nla_init = init
        self._nla_array = False
        self._nla_flags = self.nla_flags
        self['attrs'] = []
        self.value = NotInitialized
        # work only on non-empty mappings; the compiled NLA maps are
        # cached on the class, so compile_nla() runs once per class
        if self.nla_map and not self.__class__.__compiled_nla:
            self.compile_nla()
        if self.header:
            self['header'] = {}
    def register_clean_cb(self, cb):
        '''
        Register a cleanup callback, keyed by this message's
        sequence number.

        The registration is always delegated to the topmost parent,
        so nested NLAs register on the enclosing message. Callbacks
        are executed and dropped by `unregister_clean_cb()`.

        :param cb: a zero-argument callable
        '''
        global clean_cbs
        if self.parent is not None:
            return self.parent.register_clean_cb(cb)
        else:
            # get the msg_seq -- if applicable
            seq = self.get('header', {}).get('sequence_number', None)
            # clean_cbs is a threading.local, so the registry is
            # per-thread
            if seq is not None and seq not in clean_cbs.__dict__:
                clean_cbs.__dict__[seq] = []
            # attach the callback
            # NOTE(review): if seq is None and no list was ever
            # created under the None key, this raises KeyError --
            # confirm callers always provide a sequence number
            clean_cbs.__dict__[seq].append(cb)
''' if isinstance(attrs, basestring): self._strip_one(attrs) else: for name in attrs: self._strip_one(name) return self def __ops(self, rvalue, op0, op1): if rvalue is None: return None lvalue = self.getvalue() res = self.__class__() for key, _ in res.fields: del res[key] if 'header' in res: del res['header'] if 'value' in res: del res['value'] for key in lvalue: if key not in ('header', 'attrs', '__align'): if op0 == '__sub__': # operator -, complement if (key not in rvalue) or (lvalue[key] != rvalue[key]): res[key] = lvalue[key] elif op0 == '__and__': # operator &, intersection if (key in rvalue) and (lvalue[key] == rvalue[key]): res[key] = lvalue[key] if 'attrs' in lvalue: res['attrs'] = [] for attr in lvalue['attrs']: if isinstance(attr[1], nlmsg_base): print("recursion") diff = getattr(attr[1], op0)(rvalue.get_attr(attr[0])) if diff is not None: res['attrs'].append([attr[0], diff]) else: print("fail", type(attr[1])) if op0 == '__sub__': # operator -, complement if rvalue.get_attr(attr[0]) != attr[1]: res['attrs'].append(attr) elif op0 == '__and__': # operator &, intersection if rvalue.get_attr(attr[0]) == attr[1]: res['attrs'].append(attr) if 'attrs' in res and not res['attrs']: del res['attrs'] if not res: return None print(res) return res def __bool__(self): return len(self.keys()) > 0 def __sub__(self, rvalue): ''' Subjunction operation. ''' return self.__ops(rvalue, '__sub__', '__ne__') def __and__(self, rvalue): ''' Conjunction operation. ''' return self.__ops(rvalue, '__and__', '__eq__') def __ne__(self, rvalue): return not self.__eq__(rvalue) def __eq__(self, rvalue): ''' Having nla, we are able to use it in operations like:: if nla == 'some value': ... 
    def decode(self):
        '''
        Decode the message. The message should have the `buf`
        attribute initialized. e.g.::

            data = sock.recv(16384)
            msg = ifinfmsg(data)

        If you want to customize the decoding process, override
        the method, but don't forget to call parent's `decode()`::

            class CustomMessage(nlmsg):

                def decode(self):
                    nlmsg.decode(self)
                    ...  # do some custom data tuning
        '''
        offset = self.offset
        global cache_hdr
        global clean_cbs
        # Decode the header
        if self.header is not None:
            ##
            # ~~ self['header'][name] = struct.unpack_from(...)
            #
            # Instead of `struct.unpack()` all the NLA headers, it is
            # much cheaper to cache decoded values. The resulting dict
            # will be not much bigger than some hundreds of values.
            #
            # The code might look ugly, but line_profiler shows here
            # a notable performance gain.
            #
            # The chain is:
            # dict.get(key, None) or dict.set(unpack(key, ...)) or dict[key]
            #
            # If there is no such key in the dict, get() returns None, and
            # Python executes __setitem__(), which always returns None, and
            # then dict[key] is returned.
            #
            # If the key exists, the statement after the first `or` is not
            # executed.
            if self.is_nla:
                # NLA header: 2 bytes of length + 2 bytes of type
                key = tuple(self.data[offset : offset + 4])
                self['header'] = (
                    cache_hdr.get(key, None)
                    or (
                        cache_hdr.__setitem__(
                            key,
                            dict(
                                zip(
                                    ('length', 'type'),
                                    struct.unpack_from(
                                        'HH', self.data, offset
                                    ),
                                )
                            ),
                        )
                    )
                    or cache_hdr[key]
                )
                ##
                offset += 4
                self.length = self['header']['length']
            else:
                # full nlmsg header: unpack field by field
                for name, fmt in self.header:
                    self['header'][name] = struct.unpack_from(
                        fmt, self.data, offset
                    )[0]
                    offset += struct.calcsize(fmt)
                # update length from header
                # it can not be less than 4
                if 'header' in self:
                    self.length = max(self['header']['length'], 4)
        # handle the array case
        if self._nla_array:
            # decode each 4-byte-aligned cell as a separate instance
            self.setvalue([])
            while offset < self.offset + self.length:
                cell = type(self)(data=self.data, offset=offset, parent=self)
                cell._nla_array = False
                if cell.cell_header is not None:
                    cell.header = cell.cell_header
                cell.decode()
                self.value.append(cell)
                offset += (cell.length + 4 - 1) & ~(4 - 1)
        else:
            # fields + NLA chain decoding, see the decoder mixins
            self.ft_decode(offset)

        if clean_cbs.__dict__:
            self.unregister_clean_cb()
        self.decoded = True
# do some custom data tuning nlmsg.encode(self) ''' offset = self.offset diff = 0 # reserve space for the header if self.header is not None: hsize = struct.calcsize(''.join([x[1] for x in self.header])) self.data.extend([0] * hsize) offset += hsize # handle the array case if self._nla_array: header_type = 1 for value in self.getvalue(): cell = type(self)(data=self.data, offset=offset, parent=self) cell._nla_array = False if cell.cell_header is not None: cell.header = cell.cell_header cell.setvalue(value) # overwrite header type after calling setvalue cell['header']['type'] = self.header_type or ( header_type | self._nla_flags ) header_type += 1 cell.encode() offset += (cell.length + 4 - 1) & ~(4 - 1) elif self.getvalue() is not None: offset, diff = self.ft_encode(offset) # write NLA chain if self.nla_map: offset = self.encode_nlas(offset) # calculate the size and write it if 'header' in self and self.header is not None: self.length = self['header']['length'] = ( offset - self.offset - diff ) offset = self.offset for name, fmt in self.header: struct.pack_into( fmt, self.data, offset, self['header'].get(name, 0) ) offset += struct.calcsize(fmt) def setvalue(self, value): if isinstance(value, dict): self.update(value) if 'attrs' in value: self['attrs'] = [] for nla_tuple in value['attrs']: nlv = nlmsg_base() nlv.setvalue(nla_tuple[1]) self['attrs'].append([nla_tuple[0], nlv.getvalue()]) else: try: if value in self.value_map.values(): reverse_map = dict( [(x[1], x[0]) for x in self.value_map.items()] ) value = reverse_map.get(value, value) except TypeError: pass self['value'] = value self.value = value return self def get_encoded(self, attr, default=None): ''' Return the first encoded NLA by name ''' cells = [i[1] for i in self['attrs'] if i[0] == attr] if cells: return cells[0] def get(self, key, default=None): ''' Universal get() for a netlink message. 
''' if isinstance(key, str): key = (key,) ret = self.get_nested(*key) return ret if ret is not None else default def get_nested(self, *keys): ''' Return nested NLA or None ''' pointer = self for attr in keys: if isinstance(pointer, nlmsg_base): # descendant nodes: NLA or fields # nla = attr if pointer.prefix: nla = pointer.name2nla(attr) else: nla = attr.upper() # try to descend to NLA value = pointer.get_attr(nla) # try to descend to a field if value is None and attr in pointer: value = pointer[attr] # replace pointer pointer = value elif isinstance(pointer, dict): # descendant nodes: dict values # pointer = pointer.get(attr) else: # stop descending; search failed return return pointer def get_attr(self, attr, default=None): ''' Return the first NLA with that name or None ''' try: attrs = self.get_attrs(attr) except KeyError: return default if attrs: return attrs[0] else: return default def get_attrs(self, attr): ''' Return attrs by name or an empty list ''' return [i[1] for i in self['attrs'] if i[0] == attr] def nla(self, attr=None, default=NotInitialized): ''' ''' if default is NotInitialized: response = nlmsg_base() del response['value'] del response['attrs'] response.value = None chain = self.get('attrs', []) if attr is not None: chain = [i.nla for i in chain if i.name == attr] else: chain = [i.nla for i in chain] if chain: for link in chain: link.chain = chain response = chain[0] return response def __getitem__(self, key): if isinstance(key, int): return self.chain[key] if key == 'value' and key not in self: return NotInitialized return dict.__getitem__(self, key) def __delitem__(self, key): if key == 'value' and key not in self: return return dict.__delitem__(self, key) def __setstate__(self, state): return self.load(state) def __reduce__(self): return (type(self), (), self.dump()) def load(self, dump): ''' Load packet from a dict:: ipr = IPRoute() lo = ipr.link('dump', ifname='lo')[0] msg_type, msg_value = type(lo), lo.dump() ... 
    def getvalue(self):
        '''
        Atomic NLAs return their value in the 'value' field,
        not as a dictionary. Complex NLAs return whole dictionary.
        '''
        if (
            self._nla_array
            and len(self.value)
            and hasattr(self.value[0], 'getvalue')
        ):
            # array NLA: unwrap every cell recursively
            return [x.getvalue() for x in self.value]
        if self.value != NotInitialized:
            # value decoded by custom decoder
            return self.value
        if 'value' in self and self['value'] != NotInitialized:
            # raw value got by generic decoder; translate it through
            # value_map when a symbolic mapping is defined
            return self.value_map.get(self['value'], self['value'])
        # complex NLA / message: the object itself is the value
        return self
    def valid_nla(self, nla):
        # Return True if `nla` is a known NLA name for this message
        # type, i.e. present in the compiled name -> spec mapping
        # built by compile_nla(). The attribute is class-private
        # (name-mangled), hence the explicit self.__class__ access.
        return nla in self.__class__.__r_nla_map.keys()
if isinstance(msg_class, types.FunctionType): # if it is a function -- use it to get the class msg_class = msg_class(self, value=cell[1]) # encode NLA nla_instance = msg_class( data=self.data, offset=offset, parent=self, init=prime['init'], ) nla_instance._nla_flags |= prime['nla_flags'] if isinstance(cell, tuple) and len(cell) > 2: nla_instance._nla_flags |= cell[2] nla_instance._nla_array = prime['nla_array'] nla_instance.setvalue(cell[1]) # overwrite header type after calling setvalue nla_instance['header']['type'] = ( prime['type'] | nla_instance._nla_flags ) try: nla_instance.encode() except: raise else: nla_instance.decoded = True self['attrs'][i] = nla_slot(prime['name'], nla_instance) offset += (nla_instance.length + 4 - 1) & ~(4 - 1) return offset def decode_nlas(self, offset): ''' Decode the NLA chain. Should not be called manually, since it is called from `decode()` routine. ''' t_nla_map = self.__class__.__t_nla_map while offset - self.offset <= self.length - 4: nla_instance = None # pick the length and the type (length, base_msg_type) = struct.unpack_from( 'HH', self.data, offset ) # first two bits of msg_type are flags: msg_type = base_msg_type & ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER) # rewind to the beginning length = min(max(length, 4), (self.length - offset + self.offset)) # we have a mapping for this NLA if msg_type in t_nla_map: prime = t_nla_map[msg_type] # get the class msg_class = t_nla_map[msg_type]['class'] # is it a class or a function? 
if isinstance(msg_class, types.FunctionType): # if it is a function -- use it to get the class msg_class = msg_class(self, data=self.data, offset=offset) # decode NLA nla_instance = msg_class( data=self.data, offset=offset, parent=self, length=length, init=prime['init'], ) nla_instance._nla_array = prime['nla_array'] nla_instance._nla_flags = base_msg_type & ( NLA_F_NESTED | NLA_F_NET_BYTEORDER ) name = prime['name'] else: name = 'UNKNOWN' nla_instance = nla_base( data=self.data, offset=offset, length=length ) self['attrs'].append(nla_slot(name, nla_instance)) offset += (length + 4 - 1) & ~(4 - 1) ## # 8<--------------------------------------------------------------------- # # NLMSG fields codecs, mixin classes # class nlmsg_decoder_generic(object): def ft_decode(self, offset): global cache_fmt for name, fmt in self.fields: ## # ~~ size = struct.calcsize(efmt) # # The use of the cache gives here a tiny performance # improvement, but it is an improvement anyways # size = ( cache_fmt.get(fmt, None) or cache_fmt.__setitem__(fmt, struct.calcsize(fmt)) or cache_fmt[fmt] ) ## value = struct.unpack_from(fmt, self.data, offset) offset += size if len(value) == 1: self[name] = value[0] else: self[name] = value # read NLA chain if self.nla_map: offset = (offset + 4 - 1) & ~(4 - 1) try: self.decode_nlas(offset) except Exception as e: log.warning(traceback.format_exc()) raise NetlinkNLADecodeError(e) else: del self['attrs'] if self['value'] is NotInitialized: del self['value'] class nlmsg_decoder_string(object): def ft_decode(self, offset): (value,) = struct.unpack_from( '%is' % (self.length - 4), self.data, offset ) if self.zstring == 1: self['value'] = value.strip(b'\0') else: self['value'] = value class nlmsg_decoder_struct(object): def ft_decode(self, offset): names = [] fmt = '' for field in self.fields: names.append(field[0]) fmt += field[1] value = struct.unpack_from(fmt, self.data, offset) values = list(value) for name in names: if name[0] != '_': self[name] = 
values.pop(0) # read NLA chain if self.nla_map: offset = (offset + 4 - 1) & ~(4 - 1) try: self.decode_nlas(offset) except Exception as e: log.warning(traceback.format_exc()) raise NetlinkNLADecodeError(e) else: del self['attrs'] if self['value'] is NotInitialized: del self['value'] class nlmsg_encoder_generic(object): def ft_encode(self, offset): for name, fmt in self.fields: value = self[name] if fmt == 's': length = len(value or '') + self.zstring efmt = '%is' % (length) else: length = struct.calcsize(fmt) efmt = fmt self.data.extend([0] * length) # in python3 we should force it if sys.version[0] == '3': if isinstance(value, str): value = bytes(value, 'utf-8') elif isinstance(value, float): value = int(value) elif sys.version[0] == '2': if isinstance(value, unicode): value = value.encode('utf-8') try: if fmt[-1] == 'x': struct.pack_into(efmt, self.data, offset) elif type(value) in (list, tuple, set): struct.pack_into(efmt, self.data, offset, *value) else: struct.pack_into(efmt, self.data, offset, value) except struct.error: log.error(''.join(traceback.format_stack())) log.error(traceback.format_exc()) log.error("error pack: %s %s %s" % (efmt, value, type(value))) raise offset += length diff = ((offset + 4 - 1) & ~(4 - 1)) - offset offset += diff self.data.extend([0] * diff) return offset, diff # # 8<--------------------------------------------------------------------- ## class nla_slot(object): __slots__ = ("cell",) def __init__(self, name, value): self.cell = (name, value) def try_to_decode(self): try: cell = self.cell[1] if not cell.decoded: cell.decode() return True except Exception: log.warning("decoding %s" % (self.cell[0])) log.warning(traceback.format_exc()) return False def get_value(self): cell = self.cell[1] if self.try_to_decode(): return cell.getvalue() else: return cell.data[cell.offset : cell.offset + cell.length] def get_flags(self): if self.try_to_decode(): return self.cell[1]._nla_flags return None @property def name(self): return self.cell[0] 
@property def value(self): return self.get_value() @property def nla(self): self.try_to_decode() return self.cell[1] def __getitem__(self, key): if key == 1: return self.get_value() elif key == 0: return self.cell[0] elif isinstance(key, slice): s = list(self.cell.__getitem__(key)) if self.cell[1] in s: s[s.index(self.cell[1])] = self.get_value() return s else: raise IndexError(key) def __repr__(self): if self.get_flags(): return repr((self.cell[0], self.get_value(), self.get_flags())) return repr((self.cell[0], self.get_value())) ## # 8<--------------------------------------------------------------------- # # NLA base classes # class nla_header(object): __slots__ = () is_nla = True header = (('length', 'H'), ('type', 'H')) class nla_base( nla_header, nlmsg_base, nlmsg_encoder_generic, nlmsg_decoder_generic ): ''' Generic NLA base class. ''' __slots__ = () zstring = 0 class nla_base_string( nla_header, nlmsg_base, nlmsg_encoder_generic, nlmsg_decoder_string ): ''' NLA base class, string decoder. ''' __slots__ = () fields = [('value', 's')] zstring = 0 class nla_base_struct( nla_header, nlmsg_base, nlmsg_encoder_generic, nlmsg_decoder_struct ): ''' NLA base class, packed struct decoder. ''' __slots__ = () # # 8<--------------------------------------------------------------------- ## class nlmsg_atoms(object): ''' A collection of base NLA types ''' __slots__ = () class none(nla_base): ''' 'none' type is used to skip decoding of NLA. You can also use 'hex' type to dump NLA's content. 
''' __slots__ = () def decode(self): nla_base.decode(self) self.value = None class flag(nla_base): ''' 'flag' type is used to denote attrs that have no payload ''' __slots__ = () fields = [] def decode(self): nla_base.decode(self) self.value = True class uint8(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', 'B')] class uint16(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', 'H')] class uint32(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', 'I')] class uint64(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', 'Q')] class int8(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', 'b')] class int16(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', 'h')] class int32(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', 'i')] class int64(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', 'q')] class be8(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', '>B')] class be16(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', '>H')] class be32(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', '>I')] class be64(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', '>Q')] class sbe8(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', '>b')] class sbe16(nla_base): __slots__ = () sql_type = 'INTEGER' fields = [('value', '>h')] class sbe32(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', '>i')] class sbe64(nla_base): __slots__ = () sql_type = 'BIGINT' fields = [('value', '>q')] class ipXaddr(nla_base_string): __slots__ = () sql_type = 'TEXT' family = None def encode(self): self['value'] = inet_pton(self.family, self.value) nla_base_string.encode(self) def decode(self): nla_base_string.decode(self) self.value = inet_ntop(self.family, self['value']) class ip4addr(ipXaddr): ''' Explicit IPv4 address type class. 
''' __slots__ = () family = AF_INET class ip6addr(ipXaddr): ''' Explicit IPv6 address type class. ''' __slots__ = () family = AF_INET6 class ipaddr(nla_base_string): ''' This class is used to decode IP addresses according to the family. Socket library currently supports only two families, AF_INET and AF_INET6. We do not specify here the string size, it will be calculated in runtime. ''' __slots__ = () sql_type = 'TEXT' def ft_encode(self, offset): # use real provided family, not implicit if self.value.find(':') > -1: family = AF_INET6 else: family = AF_INET self['value'] = inet_pton(family, self.value) return nla_base_string.ft_encode(self, offset) def ft_decode(self, offset): nla_base_string.ft_decode(self, offset) # use real provided family, not implicit if self.length > 8: family = AF_INET6 else: family = AF_INET self.value = inet_ntop(family, self['value']) class target(nla_base_string): ''' A universal target class. The target type depends on the msg family: * AF_INET: IPv4 addr, string: "127.0.0.1" * AF_INET6: IPv6 addr, string: "::1" * AF_MPLS: MPLS labels, 0 .. k: [{"label": 0x20, "ttl": 16}, ...] 
''' __slots__ = () sql_type = 'TEXT' family = None own_parent = True def get_family(self): if self.family is not None: return self.family pointer = self while pointer.parent is not None: pointer = pointer.parent return pointer.get('family', AF_UNSPEC) def encode(self): family = self.get_family() if family in (AF_INET, AF_INET6): self['value'] = inet_pton(family, self.value) elif family == AF_MPLS: self['value'] = b'' if isinstance(self.value, (set, list, tuple)): labels = self.value else: if 'label' in self: labels = [ { 'label': self.get('label', 0), 'tc': self.get('tc', 0), 'bos': self.get('bos', 0), 'ttl': self.get('ttl', 0), } ] else: labels = [] for record in labels: label = ( (record.get('label', 0) << 12) | (record.get('tc', 0) << 9) | ((1 if record.get('bos') else 0) << 8) | record.get('ttl', 0) ) self['value'] += struct.pack('>I', label) else: raise TypeError('socket family not supported') nla_base_string.encode(self) def decode(self): nla_base_string.decode(self) family = self.get_family() if family in (AF_INET, AF_INET6): self.value = inet_ntop(family, self['value']) elif family == AF_MPLS: self.value = [] for i in range(len(self['value']) // 4): label = struct.unpack( '>I', self['value'][i * 4 : i * 4 + 4] )[0] record = { 'label': (label & 0xFFFFF000) >> 12, 'tc': (label & 0x00000E00) >> 9, 'bos': (label & 0x00000100) >> 8, 'ttl': label & 0x000000FF, } self.value.append(record) else: raise TypeError('socket family not supported') class mpls_target(target): __slots__ = () family = AF_MPLS class l2addr(nla_base): ''' Decode MAC address. ''' __slots__ = () sql_type = 'TEXT' fields = [('value', '=6s')] def encode(self): self['value'] = struct.pack( 'BBBBBB', *[int(i, 16) for i in self.value.split(':')] ) nla_base.encode(self) def decode(self): nla_base.decode(self) self.value = ':'.join( '%02x' % (i) for i in struct.unpack('BBBBBB', self['value']) ) class lladdr(nla_base_string): ''' Decode link layer address: a MAC, IPv4 or IPv6 address. 
This type depends on the link layer address length: * 6: MAC addr, string: "52:ff:ff:ff:ff:03" * 4: IPv4 addr, string: "127.0.0.1" * 16: IPv6 addr, string: "::1" * any other length: hex dump ''' __slots__ = () sql_type = 'TEXT' def encode(self): if ':' in self.value: if len(self.value) == 17 and '::' not in self.value: self['value'] = struct.pack( 'BBBBBB', *[int(i, 16) for i in self.value.split(':')] ) else: self['value'] = inet_pton(AF_INET6, self.value) elif '.' in self.value: self['value'] = inet_pton(AF_INET, self.value) else: raise TypeError('Unsupported value {}'.format(self.value)) nla_base_string.encode(self) def decode(self): nla_base_string.decode(self) if len(self['value']) == 6: self.value = ':'.join( '%02x' % (i) for i in struct.unpack('BBBBBB', self['value']) ) elif len(self['value']) == 4: self.value = inet_ntop(AF_INET, self['value']) elif len(self['value']) == 16: self.value = inet_ntop(AF_INET6, self['value']) elif len(self['value']) == 0: self.value = '' else: # unknown / invalid lladdr # extract data for the whole message offset = self.parent.offset length = self.parent.length data = self.parent.data[offset : offset + length] # report logging.warning( 'unknown or invalid lladdr size, please report to: ' 'https://github.com/svinota/pyroute2/issues/717 \n' 'packet data: %s', hexdump(data), ) # continue with hex dump as the value self.value = hexdump(self['value']) class hex(nla_base_string): ''' Represent NLA's content with header as hex string. 
''' __slots__ = () def decode(self): nla_base_string.decode(self) self.value = hexdump(self['value']) class array(nla_base_string): ''' Array of simple data type ''' __slots__ = ("_fmt",) own_parent = True @property def fmt(self): # try to get format from parent # work only with elementary types if getattr(self, "_fmt", None) is not None: return self._fmt try: fclass = getattr(self.parent, self._nla_init) self._fmt = fclass.fields[0][1] except Exception: self._fmt = self._nla_init return self._fmt def encode(self): fmt = '%s%i%s' % (self.fmt[:-1], len(self.value), self.fmt[-1:]) self['value'] = struct.pack(fmt, *self.value) nla_base_string.encode(self) def decode(self): nla_base_string.decode(self) data_length = len(self['value']) element_size = struct.calcsize(self.fmt) array_size = data_length // element_size trail = (data_length % element_size) or -data_length data = self['value'][:-trail] fmt = '%s%i%s' % (self.fmt[:-1], array_size, self.fmt[-1:]) self.value = struct.unpack(fmt, data) class cdata(nla_base_string): ''' Binary data ''' __slots__ = () class string(nla_base_string): ''' UTF-8 string. ''' __slots__ = () sql_type = 'TEXT' def encode(self): if isinstance(self['value'], str) and sys.version[0] == '3': self['value'] = bytes(self['value'], 'utf-8') nla_base_string.encode(self) def decode(self): nla_base_string.decode(self) self.value = self['value'] if sys.version_info[0] >= 3: try: self.value = self.value.decode('utf-8') except UnicodeDecodeError: pass # Failed to decode, keep undecoded value class asciiz(string): ''' Zero-terminated string. ''' __slots__ = () zstring = 1 # FIXME: support NLA_FLAG and NLA_MSECS as well. 
# # aliases to support standard kernel attributes: # binary = cdata # NLA_BINARY nul_string = asciiz # NLA_NUL_STRING ## # 8<--------------------------------------------------------------------- # # NLA base classes # class nla(nla_base, nlmsg_atoms): ''' Main NLA class ''' __slots__ = () def decode(self): nla_base.decode(self) del self['header'] class nla_string(nla_base_string, nlmsg_atoms): ''' NLA + string decoder ''' __slots__ = () def decode(self): nla_base_string.decode(self) del self['header'] class nla_struct(nla_base_struct, nlmsg_atoms): ''' NLA + packed struct decoder ''' __slots__ = () def decode(self): nla_base_struct.decode(self) del self['header'] # # 8<--------------------------------------------------------------------- ## class nlmsg( nlmsg_base, nlmsg_encoder_generic, nlmsg_decoder_generic, nlmsg_atoms ): ''' Main netlink message class ''' __slots__ = () header = ( ('length', 'I'), ('type', 'H'), ('flags', 'H'), ('sequence_number', 'I'), ('pid', 'I'), ) class nlmsgerr(nlmsg): ''' Extended ack error message ''' __slots__ = () fields = (('error', 'i'),) nla_map = ( ('NLMSGERR_ATTR_UNUSED', 'none'), ('NLMSGERR_ATTR_MSG', 'asciiz'), ('NLMSGERR_ATTR_OFFS', 'uint32'), ('NLMSGERR_ATTR_COOKIE', 'uint8'), ) class genlmsg(nlmsg): ''' Generic netlink message ''' __slots__ = () fields = (('cmd', 'B'), ('version', 'B'), ('reserved', 'H')) class ctrlmsg(genlmsg): ''' Netlink control message ''' __slots__ = () # FIXME: to be extended nla_map = ( ('CTRL_ATTR_UNSPEC', 'none'), ('CTRL_ATTR_FAMILY_ID', 'uint16'), ('CTRL_ATTR_FAMILY_NAME', 'asciiz'), ('CTRL_ATTR_VERSION', 'uint32'), ('CTRL_ATTR_HDRSIZE', 'uint32'), ('CTRL_ATTR_MAXATTR', 'uint32'), ('CTRL_ATTR_OPS', '*ops'), ('CTRL_ATTR_MCAST_GROUPS', '*mcast_groups'), ('CTRL_ATTR_POLICY', 'policy_nest'), ('CTRL_ATTR_OP_POLICY', 'command_nest'), ('CTRL_ATTR_OP', 'uint32'), ) class ops(nla): __slots__ = () nla_map = ( ('CTRL_ATTR_OP_UNSPEC', 'none'), ('CTRL_ATTR_OP_ID', 'uint32'), ('CTRL_ATTR_OP_FLAGS', 'uint32'), ) 
class mcast_groups(nla): __slots__ = () nla_map = ( ('CTRL_ATTR_MCAST_GRP_UNSPEC', 'none'), ('CTRL_ATTR_MCAST_GRP_NAME', 'asciiz'), ('CTRL_ATTR_MCAST_GRP_ID', 'uint32'), ) class policy_nest(nla): __slots__ = () nla_map = { 'decode': NlaMapAdapter( lambda x: NlaSpec('attribute_nest', x, f'POLICY({x})') ), 'encode': NlaMapAdapter( lambda x: NlaSpec('attribute_nest', int(x[7:-1]), x) ), } class attribute_nest(nla): __slots__ = () nla_map = { 'decode': NlaMapAdapter( lambda x: NlaSpec('nl_policy_type_attr', x, f'ATTR({x})') ), 'encode': NlaMapAdapter( lambda x: NlaSpec('nl_policy_type_attr', int(x[5:-1]), x) ), } class nl_policy_type_attr(nla): __slots__ = () nla_map = ( ('NL_POLICY_TYPE_ATTR_UNSPEC', 'none'), ('NL_POLICY_TYPE_ATTR_TYPE', 'uint32'), ('NL_POLICY_TYPE_ATTR_MIN_VALUE_S', 'int64'), ('NL_POLICY_TYPE_ATTR_MAX_VALUE_S', 'int64'), ('NL_POLICY_TYPE_ATTR_MIN_VALUE_U', 'int64'), ('NL_POLICY_TYPE_ATTR_MAX_VALUE_U', 'int64'), ('NL_POLICY_TYPE_ATTR_MIN_LENGTH', 'uint32'), ('NL_POLICY_TYPE_ATTR_MAX_LENGTH', 'uint32'), ('NL_POLICY_TYPE_ATTR_POLICY_IDX', 'uint32'), ('NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE', 'uint32'), ('NL_POLICY_TYPE_ATTR_BITFIELD32_MASK', 'uint32'), ('NL_POLICY_TYPE_ATTR_PAD', 'uint64'), ('NL_POLICY_TYPE_ATTR_MASK', 'uint64'), ) class command_nest(nla): __slots__ = () nla_map = { 'decode': NlaMapAdapter( lambda x: NlaSpec('command_nest_attrs', x, f'OP({x})') ), 'encode': NlaMapAdapter( lambda x: NlaSpec('command_nest_attrs', int(x[3:-1]), x) ), } class command_nest_attrs(nla): __slots__ = () nla_map = ( ('CTRL_ATTR_POLICY_UNSPEC', 'none'), ('CTRL_ATTR_POLICY_DO', 'uint32'), ('CTRL_ATTR_POLICY_DUMP', 'uint32'), ) pyroute2-0.7.11/pyroute2/netlink/buffer.py000066400000000000000000000036331455030217500204600ustar00rootroot00000000000000try: from multiprocessing import shared_memory except ImportError: shared_memory = None class Page: ''' Memory page. 
''' def __init__(self, view, offset): self.view = view self.offset = offset self.is_free = True def use(self): self.is_free = False def free(self): self.is_free = True def close(self): self.view.release() class Buffer: ''' Manage the buffer memory to receive raw netlink data. ''' def __init__(self, mode='internal', size=10485760, page_size=32768): self.mode = mode self.size = size self.page_size = page_size if self.mode == 'internal': self.mem = None self.buf = bytearray(self.size) elif self.mode == 'shared': if shared_memory is None: raise ModuleNotFoundError('shared memory buffer not supported') self.mem = shared_memory.SharedMemory(create=True, size=self.size) self.buf = self.mem.buf self.view = memoryview(self.buf) self.directory = {} for index in range(size // page_size): offset = index * page_size self.directory[index] = Page( self.view[offset : offset + self.page_size], offset ) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def get_free_page(self): for index, page in self.directory.items(): if page.is_free: page.use() return page raise MemoryError('no free memory pages available') def close(self): for page in self.directory.values(): page.close() self.view.release() if self.mode == 'shared': self.mem.close() self.mem.unlink() def __getitem__(self, key): return self.directory[key] pyroute2-0.7.11/pyroute2/netlink/connector/000077500000000000000000000000001455030217500206225ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/connector/__init__.py000066400000000000000000000006251455030217500227360ustar00rootroot00000000000000from pyroute2.netlink import NETLINK_CONNECTOR, nlmsg from pyroute2.netlink.nlsocket import NetlinkSocket class cn_msg(nlmsg): fields = ( ('idx', 'I'), ('val', 'I'), ('seq', 'I'), ('ack', 'I'), ('len', 'H'), ('flags', 'H'), ) class ConnectorSocket(NetlinkSocket): def __init__(self, fileno=None): super().__init__(NETLINK_CONNECTOR) 
pyroute2-0.7.11/pyroute2/netlink/connector/cn_proc.py000066400000000000000000000076361455030217500226330ustar00rootroot00000000000000from pyroute2.common import map_namespace from pyroute2.netlink import NLMSG_DONE from pyroute2.netlink.nlsocket import Marshal from . import ConnectorSocket, cn_msg CN_IDX_PROC = 0x1 PROC_EVENT_NONE = 0x0 PROC_EVENT_FORK = 0x1 PROC_EVENT_EXEC = 0x2 PROC_EVENT_UID = 0x4 PROC_EVENT_GID = 0x40 PROC_EVENT_SID = 0x80 PROC_EVENT_PTRACE = 0x100 PROC_EVENT_COMM = 0x200 PROC_EVENT_COREDUMP = 0x40000000 PROC_EVENT_EXIT = 0x80000000 (PROC_BY_NAMES, PROC_BY_IDS) = map_namespace('PROC_', globals()) CN_IDX_PROC = 0x1 CN_VAL_PROC = 0x1 PROC_CN_MCAST_LISTEN = 0x1 PROC_CN_MCAST_IGNORE = 0x2 class proc_event_base(cn_msg): fields = cn_msg.fields + ( ('what', 'I'), ('cpu', 'I'), ('timestamp_ns', 'Q'), ) def decode(self): super().decode() self['event'] = PROC_BY_IDS.get(self['what'], 'UNDEFINED') class proc_event_fork(proc_event_base): fields = proc_event_base.fields + ( ('parent_pid', 'I'), ('parent_tgid', 'I'), ('child_pid', 'I'), ('child_tgid', 'I'), ) class proc_event_exec(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ) class proc_event_uid(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ('ruid', 'I'), ('rgid', 'I'), ) class proc_event_gid(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ('euid', 'I'), ('egid', 'I'), ) class proc_event_sid(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ) class proc_event_ptrace(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ('tracer_pid', 'I'), ('tracer_tgid', 'I'), ) class proc_event_comm(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ('comm', '16s'), ) def decode(self): super().decode() self['comm'] = 
self['comm'].decode('utf-8').strip('\x00') class proc_event_coredump(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ('parent_pid', 'I'), ('parent_tgid', 'I'), ) class proc_event_exit(proc_event_base): fields = proc_event_base.fields + ( ('process_pid', 'I'), ('process_tgid', 'I'), ('exit_code', 'I'), ('exit_signal', 'I'), ('parent_pid', 'I'), ('parent_tgid', 'I'), ) class proc_event_control(cn_msg): fields = cn_msg.fields + (('action', 'I'),) class ProcEventMarshal(Marshal): key_format = 'I' key_offset = 36 error_type = -1 msg_map = { PROC_EVENT_NONE: proc_event_base, PROC_EVENT_FORK: proc_event_fork, PROC_EVENT_EXEC: proc_event_exec, PROC_EVENT_UID: proc_event_uid, PROC_EVENT_GID: proc_event_gid, PROC_EVENT_SID: proc_event_sid, PROC_EVENT_PTRACE: proc_event_ptrace, PROC_EVENT_COMM: proc_event_comm, PROC_EVENT_COREDUMP: proc_event_coredump, PROC_EVENT_EXIT: proc_event_exit, } class ProcEventSocket(ConnectorSocket): def __init__(self, fileno=None): super().__init__(fileno=fileno) self.marshal = ProcEventMarshal() def bind(self): return super().bind(groups=CN_IDX_PROC) def control(self, listen): msg = proc_event_control() msg['action'] = ( PROC_CN_MCAST_LISTEN if listen else PROC_CN_MCAST_IGNORE ) msg['idx'] = CN_IDX_PROC msg['val'] = CN_VAL_PROC msg['len'] = 4 # FIXME payload length calculation msg_type = NLMSG_DONE self.put(msg, msg_type, msg_flags=0, msg_seq=0) return tuple(self.get(msg_seq=-1)) pyroute2-0.7.11/pyroute2/netlink/devlink/000077500000000000000000000000001455030217500202645ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/devlink/__init__.py000066400000000000000000000554271455030217500224120ustar00rootroot00000000000000''' devlink module ============== ''' from pyroute2.common import map_namespace from pyroute2.netlink import genlmsg, nla from pyroute2.netlink.generic import GenericNetlinkSocket from pyroute2.netlink.nlsocket import Marshal # devlink commands DEVLINK_CMD_UNSPEC = 0 
DEVLINK_CMD_GET = 1 DEVLINK_CMD_SET = 2 DEVLINK_CMD_NEW = 3 DEVLINK_CMD_DEL = 4 DEVLINK_CMD_PORT_GET = 5 DEVLINK_CMD_PORT_SET = 6 DEVLINK_CMD_PORT_NEW = 7 DEVLINK_CMD_PORT_DEL = 8 DEVLINK_CMD_PORT_SPLIT = 9 DEVLINK_CMD_PORT_UNSPLIT = 10 DEVLINK_CMD_SB_GET = 11 DEVLINK_CMD_SB_SET = 12 DEVLINK_CMD_SB_NEW = 13 DEVLINK_CMD_SB_DEL = 14 DEVLINK_CMD_SB_POOL_GET = 15 DEVLINK_CMD_SB_POOL_SET = 16 DEVLINK_CMD_SB_POOL_NEW = 17 DEVLINK_CMD_SB_POOL_DEL = 18 DEVLINK_CMD_SB_PORT_POOL_GET = 19 DEVLINK_CMD_SB_PORT_POOL_SET = 20 DEVLINK_CMD_SB_PORT_POOL_NEW = 21 DEVLINK_CMD_SB_PORT_POOL_DEL = 22 DEVLINK_CMD_SB_TC_POOL_BIND_GET = 23 DEVLINK_CMD_SB_TC_POOL_BIND_SET = 24 DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 25 DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 26 DEVLINK_CMD_SB_OCC_SNAPSHOT = 27 DEVLINK_CMD_SB_OCC_MAX_CLEAR = 28 DEVLINK_CMD_ESWITCH_GET = 29 DEVLINK_CMD_ESWITCH_SET = 30 DEVLINK_CMD_DPIPE_TABLE_GET = 31 DEVLINK_CMD_DPIPE_ENTRIES_GET = 32 DEVLINK_CMD_DPIPE_HEADERS_GET = 33 DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 34 DEVLINK_CMD_RESOURCE_SET = 35 DEVLINK_CMD_RESOURCE_DUMP = 36 DEVLINK_CMD_RELOAD = 37 DEVLINK_CMD_PARAM_GET = 38 DEVLINK_CMD_PARAM_SET = 39 DEVLINK_CMD_PARAM_NEW = 40 DEVLINK_CMD_PARAM_DEL = 41 DEVLINK_CMD_REGION_GET = 42 DEVLINK_CMD_REGION_SET = 43 DEVLINK_CMD_REGION_NEW = 44 DEVLINK_CMD_REGION_DEL = 45 DEVLINK_CMD_REGION_READ = 46 DEVLINK_CMD_PORT_PARAM_GET = 47 DEVLINK_CMD_PORT_PARAM_SET = 48 DEVLINK_CMD_PORT_PARAM_NEW = 49 DEVLINK_CMD_PORT_PARAM_DEL = 50 DEVLINK_CMD_INFO_GET = 51 DEVLINK_CMD_HEALTH_REPORTER_GET = 52 DEVLINK_CMD_HEALTH_REPORTER_SET = 53 DEVLINK_CMD_HEALTH_REPORTER_RECOVER = 54 DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE = 55 DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET = 56 DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR = 57 DEVLINK_CMD_FLASH_UPDATE = 58 DEVLINK_CMD_FLASH_UPDATE_END = 59 DEVLINK_CMD_FLASH_UPDATE_STATUS = 60 DEVLINK_CMD_TRAP_GET = 61 DEVLINK_CMD_TRAP_SET = 62 DEVLINK_CMD_TRAP_NEW = 63 DEVLINK_CMD_TRAP_DEL = 64 DEVLINK_CMD_TRAP_GROUP_GET = 65 DEVLINK_CMD_TRAP_GROUP_SET = 
66 DEVLINK_CMD_TRAP_GROUP_NEW = 67 DEVLINK_CMD_TRAP_GROUP_DEL = 68 DEVLINK_CMD_TRAP_POLICER_GET = 69 DEVLINK_CMD_TRAP_POLICER_SET = 70 DEVLINK_CMD_TRAP_POLICER_NEW = 71 DEVLINK_CMD_TRAP_POLICER_DEL = 72 DEVLINK_CMD_MAX = DEVLINK_CMD_TRAP_POLICER_DEL (DEVLINK_NAMES, DEVLINK_VALUES) = map_namespace('DEVLINK_CMD_', globals()) # port type DEVLINK_PORT_TYPE_NOTSET = 0 DEVLINK_PORT_TYPE_AUTO = 1 DEVLINK_PORT_TYPE_ETH = 2 DEVLINK_PORT_TYPE_IB = 3 # threshold type DEVLINK_SB_POOL_TYPE_INGRESS = 0 DEVLINK_SB_POOL_TYPE_EGRESS = 1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 20 class devlinkcmd(genlmsg): prefix = 'DEVLINK_ATTR_' nla_map = ( ('DEVLINK_ATTR_UNSPEC', 'none'), ('DEVLINK_ATTR_BUS_NAME', 'asciiz'), ('DEVLINK_ATTR_DEV_NAME', 'asciiz'), ('DEVLINK_ATTR_PORT_INDEX', 'uint32'), ('DEVLINK_ATTR_PORT_TYPE', 'uint16'), ('DEVLINK_ATTR_PORT_DESIRED_TYPE', 'uint16'), ('DEVLINK_ATTR_PORT_NETDEV_IFINDEX', 'uint32'), ('DEVLINK_ATTR_PORT_NETDEV_NAME', 'asciiz'), ('DEVLINK_ATTR_PORT_IBDEV_NAME', 'asciiz'), ('DEVLINK_ATTR_PORT_SPLIT_COUNT', 'uint32'), ('DEVLINK_ATTR_PORT_SPLIT_GROUP', 'uint32'), ('DEVLINK_ATTR_SB_INDEX', 'uint32'), ('DEVLINK_ATTR_SB_SIZE', 'uint32'), ('DEVLINK_ATTR_SB_INGRESS_POOL_COUNT', 'uint16'), ('DEVLINK_ATTR_SB_EGRESS_POOL_COUNT', 'uint16'), ('DEVLINK_ATTR_SB_INGRESS_TC_COUNT', 'uint16'), ('DEVLINK_ATTR_SB_EGRESS_TC_COUNT', 'uint16'), ('DEVLINK_ATTR_SB_POOL_INDEX', 'uint16'), ('DEVLINK_ATTR_SB_POOL_TYPE', 'uint8'), ('DEVLINK_ATTR_SB_POOL_SIZE', 'uint32'), ('DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE', 'uint8'), ('DEVLINK_ATTR_SB_THRESHOLD', 'uint32'), ('DEVLINK_ATTR_SB_TC_INDEX', 'uint16'), ('DEVLINK_ATTR_SB_OCC_CUR', 'uint32'), ('DEVLINK_ATTR_SB_OCC_MAX', 'uint32'), ('DEVLINK_ATTR_ESWITCH_MODE', 'uint16'), ('DEVLINK_ATTR_ESWITCH_INLINE_MODE', 'uint8'), ('DEVLINK_ATTR_DPIPE_TABLES', 'devlink'), ('DEVLINK_ATTR_DPIPE_TABLE', 'devlink'), ('DEVLINK_ATTR_DPIPE_TABLE_NAME', 'asciiz'), ('DEVLINK_ATTR_DPIPE_TABLE_SIZE', 'uint64'), ('DEVLINK_ATTR_DPIPE_TABLE_MATCHES', 'devlink'), 
('DEVLINK_ATTR_DPIPE_TABLE_ACTIONS', 'devlink'), ('DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED', 'uint8'), ('DEVLINK_ATTR_DPIPE_ENTRIES', 'devlink'), ('DEVLINK_ATTR_DPIPE_ENTRY', 'devlink'), ('DEVLINK_ATTR_DPIPE_ENTRY_INDEX', 'uint64'), ('DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES', 'devlink'), ('DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES', 'devlink'), ('DEVLINK_ATTR_DPIPE_ENTRY_COUNTER', 'uint64'), ('DEVLINK_ATTR_DPIPE_MATCH', 'devlink'), ('DEVLINK_ATTR_DPIPE_MATCH_VALUE', 'devlink'), ('DEVLINK_ATTR_DPIPE_MATCH_TYPE', 'uint32'), ('DEVLINK_ATTR_DPIPE_ACTION', 'devlink'), ('DEVLINK_ATTR_DPIPE_ACTION_VALUE', 'devlink'), ('DEVLINK_ATTR_DPIPE_ACTION_TYPE', 'uint32'), ('DEVLINK_ATTR_DPIPE_VALUE', 'none'), ('DEVLINK_ATTR_DPIPE_VALUE_MASK', 'none'), ('DEVLINK_ATTR_DPIPE_VALUE_MAPPING', 'uint32'), ('DEVLINK_ATTR_DPIPE_HEADERS', 'devlink'), ('DEVLINK_ATTR_DPIPE_HEADER', 'devlink'), ('DEVLINK_ATTR_DPIPE_HEADER_NAME', 'asciiz'), ('DEVLINK_ATTR_DPIPE_HEADER_ID', 'uint32'), ('DEVLINK_ATTR_DPIPE_HEADER_FIELDS', 'devlink'), ('DEVLINK_ATTR_DPIPE_HEADER_GLOBAL', 'uint8'), ('DEVLINK_ATTR_DPIPE_HEADER_INDEX', 'uint32'), ('DEVLINK_ATTR_DPIPE_FIELD', 'devlink'), ('DEVLINK_ATTR_DPIPE_FIELD_NAME', 'asciiz'), ('DEVLINK_ATTR_DPIPE_FIELD_ID', 'uint32'), ('DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH', 'uint32'), ('DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE', 'uint32'), ('DEVLINK_ATTR_PAD', 'none'), ('DEVLINK_ATTR_ESWITCH_ENCAP_MODE', 'uint8'), ('DEVLINK_ATTR_RESOURCE_LIST', 'devlink'), ('DEVLINK_ATTR_RESOURCE', 'devlink'), ('DEVLINK_ATTR_RESOURCE_NAME', 'asciiz'), ('DEVLINK_ATTR_RESOURCE_ID', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_NEW', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_VALID', 'uint8'), ('DEVLINK_ATTR_RESOURCE_SIZE_MIN', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_MAX', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_GRAN', 'uint64'), ('DEVLINK_ATTR_RESOURCE_UNIT', 'uint8'), ('DEVLINK_ATTR_RESOURCE_OCC', 'uint64'), ('DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID', 'uint64'), 
('DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS', 'uint64'), ('DEVLINK_ATTR_PORT_FLAVOUR', 'uint16'), ('DEVLINK_ATTR_PORT_NUMBER', 'uint32'), ('DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER', 'uint32'), ('DEVLINK_ATTR_PARAM', 'devlink'), ('DEVLINK_ATTR_PARAM_NAME', 'asciiz'), ('DEVLINK_ATTR_PARAM_GENERIC', 'flag'), ('DEVLINK_ATTR_PARAM_TYPE', 'uint8'), ('DEVLINK_ATTR_PARAM_VALUES_LIST', 'devlink'), ('DEVLINK_ATTR_PARAM_VALUE', 'devlink'), ('DEVLINK_ATTR_PARAM_VALUE_DATA', 'none'), ('DEVLINK_ATTR_PARAM_VALUE_CMODE', 'uint8'), ('DEVLINK_ATTR_REGION_NAME', 'asciiz'), ('DEVLINK_ATTR_REGION_SIZE', 'uint64'), ('DEVLINK_ATTR_REGION_SNAPSHOTS', 'devlink'), ('DEVLINK_ATTR_REGION_SNAPSHOT', 'devlink'), ('DEVLINK_ATTR_REGION_SNAPSHOT_ID', 'uint32'), ('DEVLINK_ATTR_REGION_CHUNKS', 'devlink'), ('DEVLINK_ATTR_REGION_CHUNK', 'devlink'), ('DEVLINK_ATTR_REGION_CHUNK_DATA', 'binary'), ('DEVLINK_ATTR_REGION_CHUNK_ADDR', 'uint64'), ('DEVLINK_ATTR_REGION_CHUNK_LEN', 'uint64'), ('DEVLINK_ATTR_INFO_DRIVER_NAME', 'asciiz'), ('DEVLINK_ATTR_INFO_SERIAL_NUMBER', 'asciiz'), ('DEVLINK_ATTR_INFO_VERSION_FIXED', 'devlink'), ('DEVLINK_ATTR_INFO_VERSION_RUNNING', 'devlink'), ('DEVLINK_ATTR_INFO_VERSION_STORED', 'devlink'), ('DEVLINK_ATTR_INFO_VERSION_NAME', 'asciiz'), ('DEVLINK_ATTR_INFO_VERSION_VALUE', 'asciiz'), ('DEVLINK_ATTR_SB_POOL_CELL_SIZE', 'uint32'), ('DEVLINK_ATTR_FMSG', 'devlink'), ('DEVLINK_ATTR_FMSG_OBJ_NEST_START', 'flag'), ('DEVLINK_ATTR_FMSG_PAIR_NEST_START', 'flag'), ('DEVLINK_ATTR_FMSG_ARR_NEST_START', 'flag'), ('DEVLINK_ATTR_FMSG_NEST_END', 'flag'), ('DEVLINK_ATTR_FMSG_OBJ_NAME', 'asciiz'), ('DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE', 'uint8'), ('DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA', 'none'), ('DEVLINK_ATTR_HEALTH_REPORTER', 'devlink'), ('DEVLINK_ATTR_HEALTH_REPORTER_NAME', 'asciiz'), ('DEVLINK_ATTR_HEALTH_REPORTER_STATE', 'uint8'), ('DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT', 'uint64'), ('DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT', 'uint64'), ('DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS', 'uint64'), 
('DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD', 'uint64'), ('DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER', 'uint8'), ('DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME', 'asciiz'), ('DEVLINK_ATTR_FLASH_UPDATE_COMPONENT', 'asciiz'), ('DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG', 'asciiz'), ('DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE', 'uint64'), ('DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL', 'uint64'), ('DEVLINK_ATTR_PORT_PCI_PF_NUMBER', 'uint16'), ('DEVLINK_ATTR_PORT_PCI_VF_NUMBER', 'uint16'), ('DEVLINK_ATTR_STATS', 'devlink'), ('DEVLINK_ATTR_TRAP_NAME', 'asciiz'), ('DEVLINK_ATTR_TRAP_ACTION', 'uint8'), ('DEVLINK_ATTR_TRAP_TYPE', 'uint8'), ('DEVLINK_ATTR_TRAP_GENERIC', 'flag'), ('DEVLINK_ATTR_TRAP_METADATA', 'devlink'), ('DEVLINK_ATTR_TRAP_GROUP_NAME', 'asciiz'), ('DEVLINK_ATTR_RELOAD_FAILED', 'uint8'), ('DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS', 'uint64'), ('DEVLINK_ATTR_NETNS_FD', 'uint32'), ('DEVLINK_ATTR_NETNS_PID', 'uint32'), ('DEVLINK_ATTR_NETNS_ID', 'uint32'), ('DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP', 'uint8'), ('DEVLINK_ATTR_TRAP_POLICER_ID', 'uint32'), ('DEVLINK_ATTR_TRAP_POLICER_RATE', 'uint64'), ('DEVLINK_ATTR_TRAP_POLICER_BURST', 'uint64'), ('DEVLINK_ATTR_PORT_FUNCTION', 'devlink'), ('DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER', 'asciiz'), ('DEVLINK_ATTR_PORT_LANES', 'uint32'), ('DEVLINK_ATTR_PORT_SPLITTABLE', 'uint8'), ) class devlink(nla): prefix = 'DEVLINK_ATTR_' nla_map = ( ('DEVLINK_ATTR_UNSPEC', 'none'), ('DEVLINK_ATTR_BUS_NAME', 'asciiz'), ('DEVLINK_ATTR_DEV_NAME', 'asciiz'), ('DEVLINK_ATTR_PORT_INDEX', 'uint32'), ('DEVLINK_ATTR_PORT_TYPE', 'uint16'), ('DEVLINK_ATTR_PORT_DESIRED_TYPE', 'uint16'), ('DEVLINK_ATTR_PORT_NETDEV_IFINDEX', 'uint32'), ('DEVLINK_ATTR_PORT_NETDEV_NAME', 'asciiz'), ('DEVLINK_ATTR_PORT_IBDEV_NAME', 'asciiz'), ('DEVLINK_ATTR_PORT_SPLIT_COUNT', 'uint32'), ('DEVLINK_ATTR_PORT_SPLIT_GROUP', 'uint32'), ('DEVLINK_ATTR_SB_INDEX', 'uint32'), ('DEVLINK_ATTR_SB_SIZE', 'uint32'), ('DEVLINK_ATTR_SB_INGRESS_POOL_COUNT', 'uint16'), ('DEVLINK_ATTR_SB_EGRESS_POOL_COUNT', 
'uint16'), ('DEVLINK_ATTR_SB_INGRESS_TC_COUNT', 'uint16'), ('DEVLINK_ATTR_SB_EGRESS_TC_COUNT', 'uint16'), ('DEVLINK_ATTR_SB_POOL_INDEX', 'uint16'), ('DEVLINK_ATTR_SB_POOL_TYPE', 'uint8'), ('DEVLINK_ATTR_SB_POOL_SIZE', 'uint32'), ('DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE', 'uint8'), ('DEVLINK_ATTR_SB_THRESHOLD', 'uint32'), ('DEVLINK_ATTR_SB_TC_INDEX', 'uint16'), ('DEVLINK_ATTR_SB_OCC_CUR', 'uint32'), ('DEVLINK_ATTR_SB_OCC_MAX', 'uint32'), ('DEVLINK_ATTR_ESWITCH_MODE', 'uint16'), ('DEVLINK_ATTR_ESWITCH_INLINE_MODE', 'uint8'), ('DEVLINK_ATTR_DPIPE_TABLES', 'recursive'), ('DEVLINK_ATTR_DPIPE_TABLE', 'recursive'), ('DEVLINK_ATTR_DPIPE_TABLE_NAME', 'asciiz'), ('DEVLINK_ATTR_DPIPE_TABLE_SIZE', 'uint64'), ('DEVLINK_ATTR_DPIPE_TABLE_MATCHES', 'recursive'), ('DEVLINK_ATTR_DPIPE_TABLE_ACTIONS', 'recursive'), ('DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED', 'uint8'), ('DEVLINK_ATTR_DPIPE_ENTRIES', 'recursive'), ('DEVLINK_ATTR_DPIPE_ENTRY', 'recursive'), ('DEVLINK_ATTR_DPIPE_ENTRY_INDEX', 'uint64'), ('DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES', 'recursive'), ('DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES', 'recursive'), ('DEVLINK_ATTR_DPIPE_ENTRY_COUNTER', 'uint64'), ('DEVLINK_ATTR_DPIPE_MATCH', 'recursive'), ('DEVLINK_ATTR_DPIPE_MATCH_VALUE', 'recursive'), ('DEVLINK_ATTR_DPIPE_MATCH_TYPE', 'uint32'), ('DEVLINK_ATTR_DPIPE_ACTION', 'recursive'), ('DEVLINK_ATTR_DPIPE_ACTION_VALUE', 'recursive'), ('DEVLINK_ATTR_DPIPE_ACTION_TYPE', 'uint32'), ('DEVLINK_ATTR_DPIPE_VALUE', 'none'), ('DEVLINK_ATTR_DPIPE_VALUE_MASK', 'none'), ('DEVLINK_ATTR_DPIPE_VALUE_MAPPING', 'uint32'), ('DEVLINK_ATTR_DPIPE_HEADERS', 'recursive'), ('DEVLINK_ATTR_DPIPE_HEADER', 'recursive'), ('DEVLINK_ATTR_DPIPE_HEADER_NAME', 'asciiz'), ('DEVLINK_ATTR_DPIPE_HEADER_ID', 'uint32'), ('DEVLINK_ATTR_DPIPE_HEADER_FIELDS', 'recursive'), ('DEVLINK_ATTR_DPIPE_HEADER_GLOBAL', 'uint8'), ('DEVLINK_ATTR_DPIPE_HEADER_INDEX', 'uint32'), ('DEVLINK_ATTR_DPIPE_FIELD', 'recursive'), ('DEVLINK_ATTR_DPIPE_FIELD_NAME', 'asciiz'), 
('DEVLINK_ATTR_DPIPE_FIELD_ID', 'uint32'), ('DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH', 'uint32'), ('DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE', 'uint32'), ('DEVLINK_ATTR_PAD', 'none'), ('DEVLINK_ATTR_ESWITCH_ENCAP_MODE', 'uint8'), ('DEVLINK_ATTR_RESOURCE_LIST', 'recursive'), ('DEVLINK_ATTR_RESOURCE', 'recursive'), ('DEVLINK_ATTR_RESOURCE_NAME', 'asciiz'), ('DEVLINK_ATTR_RESOURCE_ID', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_NEW', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_VALID', 'uint8'), ('DEVLINK_ATTR_RESOURCE_SIZE_MIN', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_MAX', 'uint64'), ('DEVLINK_ATTR_RESOURCE_SIZE_GRAN', 'uint64'), ('DEVLINK_ATTR_RESOURCE_UNIT', 'uint8'), ('DEVLINK_ATTR_RESOURCE_OCC', 'uint64'), ('DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID', 'uint64'), ('DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS', 'uint64'), ('DEVLINK_ATTR_PORT_FLAVOUR', 'uint16'), ('DEVLINK_ATTR_PORT_NUMBER', 'uint32'), ('DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER', 'uint32'), ('DEVLINK_ATTR_PARAM', 'recursive'), ('DEVLINK_ATTR_PARAM_NAME', 'asciiz'), ('DEVLINK_ATTR_PARAM_GENERIC', 'flag'), ('DEVLINK_ATTR_PARAM_TYPE', 'uint8'), ('DEVLINK_ATTR_PARAM_VALUES_LIST', 'recursive'), ('DEVLINK_ATTR_PARAM_VALUE', 'recursive'), ('DEVLINK_ATTR_PARAM_VALUE_DATA', 'none'), ('DEVLINK_ATTR_PARAM_VALUE_CMODE', 'uint8'), ('DEVLINK_ATTR_REGION_NAME', 'asciiz'), ('DEVLINK_ATTR_REGION_SIZE', 'uint64'), ('DEVLINK_ATTR_REGION_SNAPSHOTS', 'recursive'), ('DEVLINK_ATTR_REGION_SNAPSHOT', 'recursive'), ('DEVLINK_ATTR_REGION_SNAPSHOT_ID', 'uint32'), ('DEVLINK_ATTR_REGION_CHUNKS', 'recursive'), ('DEVLINK_ATTR_REGION_CHUNK', 'recursive'), ('DEVLINK_ATTR_REGION_CHUNK_DATA', 'binary'), ('DEVLINK_ATTR_REGION_CHUNK_ADDR', 'uint64'), ('DEVLINK_ATTR_REGION_CHUNK_LEN', 'uint64'), ('DEVLINK_ATTR_INFO_DRIVER_NAME', 'asciiz'), ('DEVLINK_ATTR_INFO_SERIAL_NUMBER', 'asciiz'), ('DEVLINK_ATTR_INFO_VERSION_FIXED', 'recursive'), ('DEVLINK_ATTR_INFO_VERSION_RUNNING', 'recursive'), ('DEVLINK_ATTR_INFO_VERSION_STORED', 
'recursive'), ('DEVLINK_ATTR_INFO_VERSION_NAME', 'asciiz'), ('DEVLINK_ATTR_INFO_VERSION_VALUE', 'asciiz'), ('DEVLINK_ATTR_SB_POOL_CELL_SIZE', 'uint32'), ('DEVLINK_ATTR_FMSG', 'recursive'), ('DEVLINK_ATTR_FMSG_OBJ_NEST_START', 'flag'), ('DEVLINK_ATTR_FMSG_PAIR_NEST_START', 'flag'), ('DEVLINK_ATTR_FMSG_ARR_NEST_START', 'flag'), ('DEVLINK_ATTR_FMSG_NEST_END', 'flag'), ('DEVLINK_ATTR_FMSG_OBJ_NAME', 'asciiz'), ('DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE', 'uint8'), ('DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA', 'none'), ('DEVLINK_ATTR_HEALTH_REPORTER', 'recursive'), ('DEVLINK_ATTR_HEALTH_REPORTER_NAME', 'asciiz'), ('DEVLINK_ATTR_HEALTH_REPORTER_STATE', 'uint8'), ('DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT', 'uint64'), ('DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT', 'uint64'), ('DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS', 'uint64'), ('DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD', 'uint64'), ('DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER', 'uint8'), ('DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME', 'asciiz'), ('DEVLINK_ATTR_FLASH_UPDATE_COMPONENT', 'asciiz'), ('DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG', 'asciiz'), ('DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE', 'uint64'), ('DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL', 'uint64'), ('DEVLINK_ATTR_PORT_PCI_PF_NUMBER', 'uint16'), ('DEVLINK_ATTR_PORT_PCI_VF_NUMBER', 'uint16'), ('DEVLINK_ATTR_STATS', 'recursive'), ('DEVLINK_ATTR_TRAP_NAME', 'asciiz'), ('DEVLINK_ATTR_TRAP_ACTION', 'uint8'), ('DEVLINK_ATTR_TRAP_TYPE', 'uint8'), ('DEVLINK_ATTR_TRAP_GENERIC', 'flag'), ('DEVLINK_ATTR_TRAP_METADATA', 'recursive'), ('DEVLINK_ATTR_TRAP_GROUP_NAME', 'asciiz'), ('DEVLINK_ATTR_RELOAD_FAILED', 'uint8'), ('DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS', 'uint64'), ('DEVLINK_ATTR_NETNS_FD', 'uint32'), ('DEVLINK_ATTR_NETNS_PID', 'uint32'), ('DEVLINK_ATTR_NETNS_ID', 'uint32'), ('DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP', 'uint8'), ('DEVLINK_ATTR_TRAP_POLICER_ID', 'uint32'), ('DEVLINK_ATTR_TRAP_POLICER_RATE', 'uint64'), ('DEVLINK_ATTR_TRAP_POLICER_BURST', 'uint64'), ('DEVLINK_ATTR_PORT_FUNCTION', 
'recursive'), ('DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER', 'asciiz'), ('DEVLINK_ATTR_PORT_LANES', 'uint32'), ('DEVLINK_ATTR_PORT_SPLITTABLE', 'uint8'), ) class MarshalDevlink(Marshal): msg_map = { DEVLINK_CMD_UNSPEC: devlinkcmd, DEVLINK_CMD_GET: devlinkcmd, DEVLINK_CMD_SET: devlinkcmd, DEVLINK_CMD_NEW: devlinkcmd, DEVLINK_CMD_DEL: devlinkcmd, DEVLINK_CMD_PORT_GET: devlinkcmd, DEVLINK_CMD_PORT_SET: devlinkcmd, DEVLINK_CMD_PORT_NEW: devlinkcmd, DEVLINK_CMD_PORT_DEL: devlinkcmd, DEVLINK_CMD_PORT_SPLIT: devlinkcmd, DEVLINK_CMD_PORT_UNSPLIT: devlinkcmd, DEVLINK_CMD_SB_GET: devlinkcmd, DEVLINK_CMD_SB_SET: devlinkcmd, DEVLINK_CMD_SB_NEW: devlinkcmd, DEVLINK_CMD_SB_DEL: devlinkcmd, DEVLINK_CMD_SB_POOL_GET: devlinkcmd, DEVLINK_CMD_SB_POOL_SET: devlinkcmd, DEVLINK_CMD_SB_POOL_NEW: devlinkcmd, DEVLINK_CMD_SB_POOL_DEL: devlinkcmd, DEVLINK_CMD_SB_PORT_POOL_GET: devlinkcmd, DEVLINK_CMD_SB_PORT_POOL_SET: devlinkcmd, DEVLINK_CMD_SB_PORT_POOL_NEW: devlinkcmd, DEVLINK_CMD_SB_PORT_POOL_DEL: devlinkcmd, DEVLINK_CMD_SB_TC_POOL_BIND_GET: devlinkcmd, DEVLINK_CMD_SB_TC_POOL_BIND_SET: devlinkcmd, DEVLINK_CMD_SB_TC_POOL_BIND_NEW: devlinkcmd, DEVLINK_CMD_SB_TC_POOL_BIND_DEL: devlinkcmd, DEVLINK_CMD_SB_OCC_SNAPSHOT: devlinkcmd, DEVLINK_CMD_SB_OCC_MAX_CLEAR: devlinkcmd, DEVLINK_CMD_ESWITCH_GET: devlinkcmd, DEVLINK_CMD_ESWITCH_SET: devlinkcmd, DEVLINK_CMD_DPIPE_TABLE_GET: devlinkcmd, DEVLINK_CMD_DPIPE_ENTRIES_GET: devlinkcmd, DEVLINK_CMD_DPIPE_HEADERS_GET: devlinkcmd, DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET: devlinkcmd, DEVLINK_CMD_RESOURCE_SET: devlinkcmd, DEVLINK_CMD_RESOURCE_DUMP: devlinkcmd, DEVLINK_CMD_RELOAD: devlinkcmd, DEVLINK_CMD_PARAM_GET: devlinkcmd, DEVLINK_CMD_PARAM_SET: devlinkcmd, DEVLINK_CMD_PARAM_NEW: devlinkcmd, DEVLINK_CMD_PARAM_DEL: devlinkcmd, DEVLINK_CMD_REGION_GET: devlinkcmd, DEVLINK_CMD_REGION_SET: devlinkcmd, DEVLINK_CMD_REGION_NEW: devlinkcmd, DEVLINK_CMD_REGION_DEL: devlinkcmd, DEVLINK_CMD_REGION_READ: devlinkcmd, DEVLINK_CMD_PORT_PARAM_GET: devlinkcmd, 
DEVLINK_CMD_PORT_PARAM_SET: devlinkcmd, DEVLINK_CMD_PORT_PARAM_NEW: devlinkcmd, DEVLINK_CMD_PORT_PARAM_DEL: devlinkcmd, DEVLINK_CMD_INFO_GET: devlinkcmd, DEVLINK_CMD_HEALTH_REPORTER_GET: devlinkcmd, DEVLINK_CMD_HEALTH_REPORTER_SET: devlinkcmd, DEVLINK_CMD_HEALTH_REPORTER_RECOVER: devlinkcmd, DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE: devlinkcmd, DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET: devlinkcmd, DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR: devlinkcmd, DEVLINK_CMD_FLASH_UPDATE: devlinkcmd, DEVLINK_CMD_FLASH_UPDATE_END: devlinkcmd, DEVLINK_CMD_FLASH_UPDATE_STATUS: devlinkcmd, DEVLINK_CMD_TRAP_GET: devlinkcmd, DEVLINK_CMD_TRAP_SET: devlinkcmd, DEVLINK_CMD_TRAP_NEW: devlinkcmd, DEVLINK_CMD_TRAP_DEL: devlinkcmd, DEVLINK_CMD_TRAP_GROUP_GET: devlinkcmd, DEVLINK_CMD_TRAP_GROUP_SET: devlinkcmd, DEVLINK_CMD_TRAP_GROUP_NEW: devlinkcmd, DEVLINK_CMD_TRAP_GROUP_DEL: devlinkcmd, DEVLINK_CMD_TRAP_POLICER_GET: devlinkcmd, DEVLINK_CMD_TRAP_POLICER_SET: devlinkcmd, DEVLINK_CMD_TRAP_POLICER_NEW: devlinkcmd, DEVLINK_CMD_TRAP_POLICER_DEL: devlinkcmd, } def fix_message(self, msg): try: msg['event'] = DEVLINK_VALUES[msg['cmd']] except Exception: pass class DevlinkSocket(GenericNetlinkSocket): def __init__(self, *args, **kwargs): GenericNetlinkSocket.__init__(self, *args, **kwargs) self.marshal = MarshalDevlink() def bind(self, groups=0, **kwarg): GenericNetlinkSocket.bind( self, 'devlink', devlinkcmd, groups, None, **kwarg ) pyroute2-0.7.11/pyroute2/netlink/diag/000077500000000000000000000000001455030217500175345ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/diag/__init__.py000066400000000000000000000244721455030217500216560ustar00rootroot00000000000000from socket import AF_INET, AF_INET6, AF_UNIX, IPPROTO_TCP, inet_ntop from struct import pack from pyroute2.netlink import ( NETLINK_SOCK_DIAG, NLM_F_MATCH, NLM_F_REQUEST, NLM_F_ROOT, nla, nlmsg, ) from pyroute2.netlink.nlsocket import Marshal, NetlinkSocket SOCK_DIAG_BY_FAMILY = 20 SOCK_DESTROY = 21 # states SS_UNKNOWN = 0 
SS_ESTABLISHED = 1 SS_SYN_SENT = 2 SS_SYN_RECV = 3 SS_FIN_WAIT1 = 4 SS_FIN_WAIT2 = 5 SS_TIME_WAIT = 6 SS_CLOSE = 7 SS_CLOSE_WAIT = 8 SS_LAST_ACK = 9 SS_LISTEN = 10 SS_CLOSING = 11 SS_MAX = 12 SS_ALL = (1 << SS_MAX) - 1 SS_CONN = SS_ALL & ~( (1 << SS_LISTEN) | (1 << SS_CLOSE) | (1 << SS_TIME_WAIT) | (1 << SS_SYN_RECV) ) # multicast groups ids (for use with {add,drop}_membership) SKNLGRP_NONE = 0 SKNLGRP_INET_TCP_DESTROY = 1 SKNLGRP_INET_UDP_DESTROY = 2 SKNLGRP_INET6_TCP_DESTROY = 3 SKNLGRP_INET6_UDP_DESTROY = 4 class sock_diag_req(nlmsg): fields = (('sdiag_family', 'B'), ('sdiag_protocol', 'B')) UDIAG_SHOW_NAME = 0x01 UDIAG_SHOW_VFS = 0x02 UDIAG_SHOW_PEER = 0x04 UDIAG_SHOW_ICONS = 0x08 UDIAG_SHOW_RQLEN = 0x10 UDIAG_SHOW_MEMINFO = 0x20 class inet_addr_codec(nlmsg): def encode(self): # FIXME: add human-friendly API to specify IP addresses as str # (see also decode()) if self['idiag_src'] == 0: self['idiag_src'] = (0, 0, 0, 0) if self['idiag_dst'] == 0: self['idiag_dst'] = (0, 0, 0, 0) nlmsg.encode(self) def decode(self): nlmsg.decode(self) if self[self.ffname] == AF_INET: self['idiag_dst'] = inet_ntop( AF_INET, pack('>I', self['idiag_dst'][0]) ) self['idiag_src'] = inet_ntop( AF_INET, pack('>I', self['idiag_src'][0]) ) elif self[self.ffname] == AF_INET6: self['idiag_dst'] = inet_ntop( AF_INET6, pack('>IIII', *self['idiag_dst']) ) self['idiag_src'] = inet_ntop( AF_INET6, pack('>IIII', *self['idiag_src']) ) class inet_diag_req(inet_addr_codec): ffname = 'sdiag_family' fields = ( ('sdiag_family', 'B'), ('sdiag_protocol', 'B'), ('idiag_ext', 'B'), ('__pad', 'B'), ('idiag_states', 'I'), ('idiag_sport', '>H'), ('idiag_dport', '>H'), ('idiag_src', '>4I'), ('idiag_dst', '>4I'), ('idiag_if', 'I'), ('idiag_cookie', 'Q'), ) class inet_diag_msg(inet_addr_codec): ffname = 'idiag_family' fields = ( ('idiag_family', 'B'), ('idiag_state', 'B'), ('idiag_timer', 'B'), ('idiag_retrans', 'B'), ('idiag_sport', '>H'), ('idiag_dport', '>H'), ('idiag_src', '>4I'), ('idiag_dst', '>4I'), 
('idiag_if', 'I'), ('idiag_cookie', 'Q'), ('idiag_expires', 'I'), ('idiag_rqueue', 'I'), ('idiag_wqueue', 'I'), ('idiag_uid', 'I'), ('idiag_inode', 'I'), ) nla_map = ( ('INET_DIAG_NONE', 'none'), ('INET_DIAG_MEMINFO', 'inet_diag_meminfo'), # FIXME: must be protocol specific? ('INET_DIAG_INFO', 'tcp_info'), ('INET_DIAG_VEGASINFO', 'tcpvegas_info'), ('INET_DIAG_CONG', 'asciiz'), ('INET_DIAG_TOS', 'hex'), ('INET_DIAG_TCLASS', 'hex'), ('INET_DIAG_SKMEMINFO', 'hex'), ('INET_DIAG_SHUTDOWN', 'uint8'), ('INET_DIAG_DCTCPINFO', 'tcp_dctcp_info'), ('INET_DIAG_PROTOCOL', 'hex'), ('INET_DIAG_SKV6ONLY', 'uint8'), ('INET_DIAG_LOCALS', 'hex'), ('INET_DIAG_PEERS', 'hex'), ('INET_DIAG_PAD', 'hex'), ('INET_DIAG_MARK', 'hex'), ('INET_DIAG_BBRINFO', 'tcp_bbr_info'), ('INET_DIAG_CLASS_ID', 'uint32'), ('INET_DIAG_MD5SIG', 'hex'), ('INET_DIAG_ULP_INFO', 'hex'), ('INET_DIAG_SK_BPF_STORAGES', 'hex'), ('INET_DIAG_CGROUP_ID', 'uint64'), ) class inet_diag_meminfo(nla): fields = ( ('idiag_rmem', 'I'), ('idiag_wmem', 'I'), ('idiag_fmem', 'I'), ('idiag_tmem', 'I'), ) class tcpvegas_info(nla): fields = ( ('tcpv_enabled', 'I'), ('tcpv_rttcnt', 'I'), ('tcpv_rtt', 'I'), ('tcpv_minrtt', 'I'), ) class tcp_dctcp_info(nla): fields = ( ('dctcp_enabled', 'H'), ('dctcp_ce_state', 'H'), ('dctcp_alpha', 'I'), ('dctcp_ab_ecn', 'I'), ('dctcp_ab_tot', 'I'), ) class tcp_bbr_info(nla): fields = ( ('bbr_bw_lo', 'I'), ('bbr_bw_hi', 'I'), ('bbr_min_rtt', 'I'), ('bbr_pacing_gain', 'I'), ('bbr_cwnd_gain', 'I'), ) class tcp_info(nla): fields = ( ('tcpi_state', 'B'), ('tcpi_ca_state', 'B'), ('tcpi_retransmits', 'B'), ('tcpi_probes', 'B'), ('tcpi_backoff', 'B'), ('tcpi_options', 'B'), ('tcpi_snd_wscale', 'B'), # tcpi_rcv_wscale -- in decode() ('tcpi_delivery_rate_app_limited', 'B'), ('tcpi_rto', 'I'), ('tcpi_ato', 'I'), ('tcpi_snd_mss', 'I'), ('tcpi_rcv_mss', 'I'), ('tcpi_unacked', 'I'), ('tcpi_sacked', 'I'), ('tcpi_lost', 'I'), ('tcpi_retrans', 'I'), ('tcpi_fackets', 'I'), # Times ('tcpi_last_data_sent', 'I'), 
('tcpi_last_ack_sent', 'I'), ('tcpi_last_data_recv', 'I'), ('tcpi_last_ack_recv', 'I'), # Metrics ('tcpi_pmtu', 'I'), ('tcpi_rcv_ssthresh', 'I'), ('tcpi_rtt', 'I'), ('tcpi_rttvar', 'I'), ('tcpi_snd_ssthresh', 'I'), ('tcpi_snd_cwnd', 'I'), ('tcpi_advmss', 'I'), ('tcpi_reordering', 'I'), ('tcpi_rcv_rtt', 'I'), ('tcpi_rcv_space', 'I'), ('tcpi_total_retrans', 'I'), ('tcpi_pacing_rate', 'Q'), ('tcpi_max_pacing_rate', 'Q'), ('tcpi_bytes_acked', 'Q'), ('tcpi_bytes_received', 'Q'), ('tcpi_segs_out', 'I'), ('tcpi_segs_in', 'I'), ('tcpi_notsent_bytes', 'I'), ('tcpi_min_rtt', 'I'), ('tcpi_data_segs_in', 'I'), ('tcpi_data_segs_out', 'I'), ('tcpi_delivery_rate', 'Q'), ('tcpi_busy_time', 'Q'), ('tcpi_rwnd_limited', 'Q'), ('tcpi_sndbuf_limited', 'Q'), ('tcpi_delivered', 'I'), ('tcpi_delivered_ce', 'I'), ('tcpi_bytes_sent', 'Q'), ('tcpi_bytes_retrans', 'Q'), ('tcpi_dsack_dups', 'I'), ('tcpi_reord_seen', 'I'), ('tcpi_rcv_ooopack', 'I'), ('tcpi_snd_wnd', 'I'), ) def decode(self): # Fix tcpi_rcv_scale amd delivery_rate bit fields. 
# In the C: # # __u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4; # __u8 tcpi_delivery_rate_app_limited:1; # nla.decode(self) self['tcpi_rcv_wscale'] = self['tcpi_snd_wscale'] & 0xF self['tcpi_snd_wscale'] = self['tcpi_snd_wscale'] & 0xF0 >> 4 self['tcpi_delivery_rate_app_limited'] = ( self['tcpi_delivery_rate_app_limited'] & 0x80 >> 7 ) class unix_diag_req(nlmsg): fields = ( ('sdiag_family', 'B'), ('sdiag_protocol', 'B'), ('__pad', 'H'), ('udiag_states', 'I'), ('udiag_ino', 'I'), ('udiag_show', 'I'), ('udiag_cookie', 'Q'), ) class unix_diag_msg(nlmsg): fields = ( ('udiag_family', 'B'), ('udiag_type', 'B'), ('udiag_state', 'B'), ('__pad', 'B'), ('udiag_ino', 'I'), ('udiag_cookie', 'Q'), ) nla_map = ( ('UNIX_DIAG_NAME', 'asciiz'), ('UNIX_DIAG_VFS', 'unix_diag_vfs'), ('UNIX_DIAG_PEER', 'uint32'), ('UNIX_DIAG_ICONS', 'hex'), ('UNIX_DIAG_RQLEN', 'unix_diag_rqlen'), ('UNIX_DIAG_MEMINFO', 'hex'), ('UNIX_DIAG_SHUTDOWN', 'uint8'), ) class unix_diag_vfs(nla): fields = (('udiag_vfs_ino', 'I'), ('udiag_vfs_dev', 'I')) class unix_diag_rqlen(nla): fields = (('udiag_rqueue', 'I'), ('udiag_wqueue', 'I')) class MarshalDiag(Marshal): key_format = 'B' # The family goes after the nlmsg header, # IHHII = 4 + 2 + 2 + 4 + 4 = 16 bytes key_offset = 16 # Please notice that the SOCK_DIAG Marshal # uses not the nlmsg type, but sdiag_family # to choose the proper class msg_map = { AF_UNIX: unix_diag_msg, AF_INET: inet_diag_msg, AF_INET6: inet_diag_msg, } # error type NLMSG_ERROR == 2 == AF_INET, # it doesn't work for DiagSocket that way, # so disable the error messages for now error_type = -1 class DiagSocket(NetlinkSocket): ''' Usage:: from pyroute2 import DiagSocket with DiagSocket() as ds: ds.bind() sstats = ds.get_sock_stats() ''' def __init__(self, fileno=None): super(DiagSocket, self).__init__(NETLINK_SOCK_DIAG) self.marshal = MarshalDiag() def get_sock_stats( self, family=AF_UNIX, states=SS_ALL, protocol=IPPROTO_TCP, extensions=0, show=( UDIAG_SHOW_NAME | UDIAG_SHOW_VFS | UDIAG_SHOW_PEER 
| UDIAG_SHOW_ICONS ), ): ''' Get sockets statistics. ACHTUNG: the get_sock_stats() signature will be changed before the next release, this one is a WIP-code! ''' if family == AF_UNIX: req = unix_diag_req() req['udiag_states'] = states req['udiag_show'] = show elif family in (AF_INET, AF_INET6): req = inet_diag_req() req['idiag_states'] = states req['sdiag_protocol'] = protocol req['idiag_ext'] = extensions else: raise NotImplementedError() req['sdiag_family'] = family return tuple( self.nlm_request( req, SOCK_DIAG_BY_FAMILY, NLM_F_REQUEST | NLM_F_ROOT | NLM_F_MATCH, ) ) pyroute2-0.7.11/pyroute2/netlink/diag/ss2.py000077500000000000000000000447351455030217500206350ustar00rootroot00000000000000#!/usr/bin/env python # pyroute2 - ss2 # Copyright (C) 2018 Matthias Tafelmeier # # ss2 is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; If not, see . 
import argparse import json import os import re import socket from socket import AF_INET, AF_UNIX try: import psutil except ImportError: psutil = None from pyroute2.netlink.diag import ( SS_ALL, SS_CLOSE, SS_CLOSE_WAIT, SS_CLOSING, SS_CONN, SS_ESTABLISHED, SS_FIN_WAIT1, SS_FIN_WAIT2, SS_LAST_ACK, SS_LISTEN, SS_SYN_RECV, SS_SYN_SENT, SS_TIME_WAIT, UDIAG_SHOW_NAME, UDIAG_SHOW_PEER, UDIAG_SHOW_VFS, DiagSocket, ) try: from collections.abc import Callable, Mapping except ImportError: from collections import Callable, Mapping # UDIAG_SHOW_ICONS, # UDIAG_SHOW_RQLEN, # UDIAG_SHOW_MEMINFO RUN_AS_MODULE = False class UserCtxtMap(Mapping): _sk_inode_re = re.compile(r"socket:\[(?P\d+)\]") _proc_sk_fd_cast = "/proc/%d/fd/%d" _BUILD_RECURS_PATH = ["inode", "usr", "pid", "fd"] def _parse_inode(self, sconn): sk_path = self._proc_sk_fd_cast % (sconn.pid, sconn.fd) inode = None sk_inode_raw = os.readlink(sk_path) inode = self._sk_inode_re.search(sk_inode_raw).group("ino") if not inode: raise RuntimeError("Unexpected kernel sk inode outline") return inode def __recurs_enter( self, _sk_inode=None, _sk_fd=None, _usr=None, _pid=None, _ctxt=None, _recurs_path=[], ): step = _recurs_path.pop(0) if self._BUILD_RECURS_PATH[0] == step: if _sk_inode not in self._data.keys(): self._data[_sk_inode] = {} elif self._BUILD_RECURS_PATH[1] == step: if _usr not in self._data[_sk_inode].keys(): self._data[_sk_inode][_usr] = {} elif self._BUILD_RECURS_PATH[2] == step: if _pid not in self._data[_sk_inode][_usr].keys(): self._data[_sk_inode][_usr].__setitem__(_pid, _ctxt) elif self._BUILD_RECURS_PATH[3] == step: self._data[_sk_inode][_usr][_pid]["fds"].append(_sk_fd) # end recursion return else: raise RuntimeError("Unexpected step in recursion") # descend self.__recurs_enter( _sk_inode=_sk_inode, _sk_fd=_sk_fd, _usr=_usr, _pid=_pid, _ctxt=_ctxt, _recurs_path=_recurs_path, ) def _enter_item(self, usr, flow, ctxt): if not flow.pid: # corner case of eg anonnymous AddressFamily.AF_UNIX # sockets return 
sk_inode = int(self._parse_inode(flow)) sk_fd = flow.fd recurs_path = list(self._BUILD_RECURS_PATH) self.__recurs_enter( _sk_inode=sk_inode, _sk_fd=sk_fd, _usr=usr, _pid=flow.pid, _ctxt=ctxt, _recurs_path=recurs_path, ) def _build(self): for flow in psutil.net_connections(kind="all"): try: proc = psutil.Process(flow.pid) usr = proc.username() ctxt = { "cmd": proc.exe(), "full_cmd": proc.cmdline(), "fds": [], } self._enter_item(usr, flow, ctxt) except (FileNotFoundError, AttributeError, psutil.NoSuchProcess): # Handling edge case of race condition between build and parse # time. That's for very volatile, shortlived flows that can # exist during build but are gone once we want to parse the # inode. pass def __init__(self): self._data = {} self._build() def __getitem__(self, key): return self._data[key] def __len__(self): return len(self._data.keys()) def __delitem__(self, key): raise RuntimeError("Not implemented") def __iter__(self): raise RuntimeError("Not implemented") class Protocol(Callable): class Resolver: @staticmethod def getHost(ip): try: data = socket.gethostbyaddr(ip) host = str(data[0]) return host except Exception: # gracefully return None def __init__(self, sk_states, fmt="json"): self._states = sk_states fmter = "_fmt_%s" % fmt self._fmt = getattr(self, fmter, None) def __call__(self, nl_diag_sk, args, usr_ctxt): raise RuntimeError("not implemented") def _fmt_json(self, refined_stats): return json.dumps(refined_stats, indent=4) class UNIX(Protocol): def __init__(self, sk_states=SS_CONN, _fmt="json"): super(UNIX, self).__init__(sk_states, fmt=_fmt) def __call__(self, nl_diag_sk, args, usr_ctxt): sstats = nl_diag_sk.get_sock_stats( states=self._states, family=AF_UNIX, show=(UDIAG_SHOW_NAME | UDIAG_SHOW_VFS | UDIAG_SHOW_PEER), ) refined_stats = self._refine_diag_raw(sstats, usr_ctxt) return refined_stats def _refine_diag_raw(self, raw_stats, usr_ctxt): refined = {"UNIX": {"flows": []}} def vfs_cb(raw_val): out = {} out["inode"] = raw_val["udiag_vfs_ino"] 
out["dev"] = raw_val["udiag_vfs_dev"] return out k_idx = 0 val_idx = 1 cb_idx = 1 idiag_attr_refine_map = { "UNIX_DIAG_NAME": ("path_name", None), "UNIX_DIAG_VFS": ("vfs", vfs_cb), "UNIX_DIAG_PEER": ("peer_inode", None), "UNIX_DIAG_SHUTDOWN": ("shutdown", None), } for raw_flow in raw_stats: vessel = {} vessel["inode"] = raw_flow["udiag_ino"] for attr in raw_flow["attrs"]: attr_k = attr[k_idx] attr_val = attr[val_idx] k = idiag_attr_refine_map[attr_k][k_idx] cb = idiag_attr_refine_map[attr_k][cb_idx] if cb: attr_val = cb(attr_val) vessel[k] = attr_val refined["UNIX"]["flows"].append(vessel) if usr_ctxt: for flow in refined["UNIX"]["flows"]: try: sk_inode = flow["inode"] flow["usr_ctxt"] = usr_ctxt[sk_inode] except KeyError: # might define sentinel val pass return refined class TCP(Protocol): INET_DIAG_MEMINFO = 1 INET_DIAG_INFO = 2 INET_DIAG_VEGASINFO = 3 INET_DIAG_CONG = 4 def __init__(self, sk_states=SS_CONN, _fmt="json"): super(TCP, self).__init__(sk_states, fmt=_fmt) IDIAG_EXT_FLAGS = [ self.INET_DIAG_MEMINFO, self.INET_DIAG_INFO, self.INET_DIAG_VEGASINFO, self.INET_DIAG_CONG, ] self.ext_f = 0 for f in IDIAG_EXT_FLAGS: self.ext_f |= 1 << (f - 1) def __call__(self, nl_diag_sk, args, usr_ctxt): sstats = nl_diag_sk.get_sock_stats( states=self._states, family=AF_INET, extensions=self.ext_f ) refined_stats = self._refine_diag_raw(sstats, args.resolve, usr_ctxt) return refined_stats def _refine_diag_raw(self, raw_stats, do_resolve, usr_ctxt): refined = {"TCP": {"flows": []}} idiag_refine_map = { "src": "idiag_src", "dst": "idiag_dst", "src_port": "idiag_sport", "dst_port": "idiag_dport", "inode": "idiag_inode", "iface_idx": "idiag_if", "retrans": "idiag_retrans", } for raw_flow in raw_stats: vessel = {} for k1, k2 in idiag_refine_map.items(): vessel[k1] = raw_flow[k2] for ext_bundle in raw_flow["attrs"]: vessel = self._refine_extension(vessel, ext_bundle) refined["TCP"]["flows"].append(vessel) if usr_ctxt: for flow in refined["TCP"]["flows"]: try: sk_inode = 
flow["inode"] flow["usr_ctxt"] = usr_ctxt[sk_inode] except KeyError: # might define sentinel val pass if do_resolve: for flow in refined["TCP"]["flows"]: src_host = Protocol.Resolver.getHost(flow["src"]) if src_host: flow["src_host"] = src_host dst_host = Protocol.Resolver.getHost(flow["dst"]) if dst_host: flow["dst_host"] = dst_host return refined def _refine_extension(self, vessel, raw_ext): k, content = raw_ext ext_refine_map = { "meminfo": { "r": "idiag_rmem", "w": "idiag_wmem", "f": "idiag_fmem", "t": "idiag_tmem", } } if k == "INET_DIAG_MEMINFO": mem_k = "meminfo" vessel[mem_k] = {} for k1, k2 in ext_refine_map[mem_k].items(): vessel[mem_k][k1] = content[k2] elif k == "INET_DIAG_CONG": vessel["cong_algo"] = content elif k == "INET_DIAG_INFO": vessel = self._refine_tcp_info(vessel, content) elif k == "INET_DIAG_SHUTDOWN": pass return vessel # interim approach # tcpinfo call backs class InfoCbCore: # normalizer @staticmethod def rto_n_cb(key, value, **ctx): out = None if value != 3000000: out = value / 1000.0 return out @staticmethod def generic_1k_n_cb(key, value, **ctx): return value / 1000.0 # predicates @staticmethod def snd_thresh_p_cb(key, value, **ctx): if value < 0xFFFF: return value return None @staticmethod def rtt_p_cb(key, value, **ctx): tcp_info_raw = ctx["raw"] try: if ( tcp_info_raw["tcpv_enabled"] != 0 and tcp_info_raw["tcpv_rtt"] != 0x7FFFFFFF ): return tcp_info_raw["tcpv_rtt"] except KeyError: # ill practice, yet except quicker path pass return tcp_info_raw["tcpi_rtt"] / 1000.0 # converter @staticmethod def state_c_cb(key, value, **ctx): state_str_map = { SS_ESTABLISHED: "established", SS_SYN_SENT: "syn-sent", SS_SYN_RECV: "syn-recv", SS_FIN_WAIT1: "fin-wait-1", SS_FIN_WAIT2: "fin-wait-2", SS_TIME_WAIT: "time-wait", SS_CLOSE: "unconnected", SS_CLOSE_WAIT: "close-wait", SS_LAST_ACK: "last-ack", SS_LISTEN: "listening", SS_CLOSING: "closing", } return state_str_map[value] @staticmethod def opts_c_cb(key, value, **ctx): tcp_info_raw = ctx["raw"] # 
tcp_info opt flags TCPI_OPT_TIMESTAMPS = 1 TCPI_OPT_SACK = 2 TCPI_OPT_ECN = 8 out = [] opts = tcp_info_raw["tcpi_options"] if opts & TCPI_OPT_TIMESTAMPS: out.append("ts") if opts & TCPI_OPT_SACK: out.append("sack") if opts & TCPI_OPT_ECN: out.append("ecn") return out def _refine_tcp_info(self, vessel, tcp_info_raw): ti = TCP.InfoCbCore info_refine_tabl = { "tcpi_state": ("state", ti.state_c_cb), "tcpi_pmtu": ("pmtu", None), "tcpi_retrans": ("retrans", None), "tcpi_ato": ("ato", ti.generic_1k_n_cb), "tcpi_rto": ("rto", ti.rto_n_cb), # TODO consider wscale baking "tcpi_snd_wscale": ("snd_wscale", None), "tcpi_rcv_wscale": ("rcv_wscale", None), # TODO bps baking "tcpi_snd_mss": ("snd_mss", None), "tcpi_snd_cwnd": ("snd_cwnd", None), "tcpi_snd_ssthresh": ("snd_ssthresh", ti.snd_thresh_p_cb), # TODO consider rtt agglomeration - needs nesting "tcpi_rtt": ("rtt", ti.rtt_p_cb), "tcpi_rttvar": ("rttvar", ti.generic_1k_n_cb), "tcpi_rcv_rtt": ("rcv_rtt", ti.generic_1k_n_cb), "tcpi_rcv_space": ("rcv_space", None), "tcpi_options": ("opts", ti.opts_c_cb), # unclear, NB not in use by iproute2 ss latest "tcpi_last_data_sent": ("last_data_sent", None), "tcpi_rcv_ssthresh": ("rcv_ssthresh", None), "tcpi_rcv_ssthresh": ("rcv_ssthresh", None), "tcpi_segs_in": ("segs_in", None), "tcpi_segs_out": ("segs_out", None), "tcpi_data_segs_in": ("data_segs_in", None), "tcpi_data_segs_out": ("data_segs_out", None), "tcpi_lost": ("lost", None), "tcpi_notsent_bytes": ("notsent_bytes", None), "tcpi_rcv_mss": ("rcv_mss", None), "tcpi_pacing_rate": ("pacing_rate", None), "tcpi_retransmits": ("retransmits", None), "tcpi_min_rtt": ("min_rtt", None), "tcpi_rwnd_limited": ("rwnd_limited", None), "tcpi_max_pacing_rate": ("max_pacing_rate", None), "tcpi_probes": ("probes", None), "tcpi_reordering": ("reordering", None), "tcpi_last_data_recv": ("last_data_recv", None), "tcpi_bytes_received": ("bytes_received", None), "tcpi_fackets": ("fackets", None), "tcpi_last_ack_recv": ("last_ack_recv", None), 
"tcpi_last_ack_sent": ("last_ack_sent", None), "tcpi_unacked": ("unacked", None), "tcpi_sacked": ("sacked", None), "tcpi_bytes_acked": ("bytes_acked", None), "tcpi_delivery_rate_app_limited": ( "delivery_rate_app_limited", None, ), "tcpi_delivery_rate": ("delivery_rate", None), "tcpi_sndbuf_limited": ("sndbuf_limited", None), "tcpi_ca_state": ("ca_state", None), "tcpi_busy_time": ("busy_time", None), "tcpi_total_retrans": ("total_retrans", None), "tcpi_advmss": ("advmss", None), "tcpi_backoff": (None, None), "tcpv_enabled": (None, "skip"), "tcpv_rttcnt": (None, "skip"), "tcpv_rtt": (None, "skip"), "tcpv_minrtt": (None, "skip"), # BBR "bbr_bw_lo": ("bbr_bw_lo", None), "bbr_bw_hi": ("bbr_bw_hi", None), "bbr_min_rtt": ("bbr_min_rtt", None), "bbr_pacing_gain": ("bbr_pacing_gain", None), "bbr_cwnd_gain": ("bbr_cwnd_gain", None), # DCTCP "dctcp_enabled": ("dctcp_enabled", None), "dctcp_ce_state": ("dctcp_ce_state", None), "dctcp_alpha": ("dctcp_alpha", None), "dctcp_ab_ecn": ("dctcp_ab_ecn", None), "dctcp_ab_tot": ("dctcp_ab_tot", None), } k_idx = 0 cb_idx = 1 info_k = "tcp_info" vessel[info_k] = {} # BUG - pyroute2 diag - seems always last info instance from kernel if type(tcp_info_raw) != str: for k, v in tcp_info_raw.items(): if k not in info_refine_tabl: continue refined_k = info_refine_tabl[k][k_idx] cb = info_refine_tabl[k][cb_idx] refined_v = v if cb and cb == "skip": continue elif cb: ctx = {"raw": tcp_info_raw} refined_v = cb(k, v, **ctx) vessel[info_k][refined_k] = refined_v return vessel def prepare_args(): parser = argparse.ArgumentParser( description=""" ss2 - socket statistics depictor meant as a complete and convenient surrogate for iproute2/misc/ss2""" ) parser.add_argument( "-x", "--unix", help="Display Unix domain sockets.", action="store_true", ) parser.add_argument( "-t", "--tcp", help="Display TCP sockets.", action="store_true" ) parser.add_argument( "-l", "--listen", help="Display listening sockets.", action="store_true", ) parser.add_argument( 
"-a", "--all", help="Display all sockets.", action="store_true" ) parser.add_argument( "-p", "--process", help="show socket holding context", action="store_true", ) parser.add_argument( "-r", "--resolve", help="resolve host names in addition", action="store_true", ) args = parser.parse_args() return args def run(args=None): if psutil is None: raise RuntimeError("ss2 requires python-psutil >= 5.0 to run") if not args: args = prepare_args() _states = SS_CONN if args.listen: _states = 1 << SS_LISTEN if args.all: _states = SS_ALL protocols = [] if args.tcp: protocols.append(TCP(sk_states=_states)) if args.unix: protocols.append(UNIX(sk_states=_states)) if not protocols: raise RuntimeError("not implemented - ss2 in fledging mode") _user_ctxt_map = None if args.process: _user_ctxt_map = UserCtxtMap() result = list() with DiagSocket() as ds: ds.bind() for p in protocols: sub_statistics = p(ds, args, _user_ctxt_map) result.append(sub_statistics) if RUN_AS_MODULE: return result else: print(json.dumps(result, indent=4)) if __name__ == "__main__": run() pyroute2-0.7.11/pyroute2/netlink/event/000077500000000000000000000000001455030217500177515ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/event/__init__.py000066400000000000000000000013541455030217500220650ustar00rootroot00000000000000from pyroute2.config import kernel from pyroute2.netlink.generic import GenericNetlinkSocket class EventSocket(GenericNetlinkSocket): marshal_class = None genl_family = None def __init__(self, *args, **kwargs): GenericNetlinkSocket.__init__(self, *args, **kwargs) self.marshal = self.marshal_class() if kernel[0] <= 2: self.bind(groups=0xFFFFFF) else: self.bind() for group in self.mcast_groups: self.add_membership(group) def bind(self, groups=0, **kwarg): GenericNetlinkSocket.bind( self, self.genl_family, self.marshal_class.msg_map[0], groups, None, **kwarg ) 
class acpimsg(genlmsg):
    """ACPI event message delivered over the 'acpi_event' genl family."""

    nla_map = (
        ('ACPI_GENL_ATTR_UNSPEC', 'none'),
        ('ACPI_GENL_ATTR_EVENT', 'acpiev'),
    )

    class acpiev(nla):
        # fixed-size C struct acpi_genl_event: two NUL-padded strings
        # followed by two 32-bit integers
        fields = (
            ('device_class', '20s'),
            ('bus_id', '15s'),
            ('type', 'I'),
            ('data', 'I'),
        )

        def decode(self):
            nla.decode(self)
            # Trim the fixed-size strings at the first NUL byte.  Using
            # split() instead of find() fixes the edge case of a buffer
            # with no NUL terminator, where find() returns -1 and the
            # slice silently dropped the last byte.
            self['device_class'] = self['device_class'].split(b'\x00', 1)[0]
            self['bus_id'] = self['bus_id'].split(b'\x00', 1)[0]
class dquotmsg(genlmsg):
    """Disk-quota warning message emitted by the kernel via the
    VFS_DQUOT generic netlink family (QUOTA_NL_A_* attributes)."""

    # common attribute-name prefix, used by pyroute2 NLA name resolution
    prefix = 'QUOTA_NL_A_'
    nla_map = (
        ('QUOTA_NL_A_UNSPEC', 'none'),
        ('QUOTA_NL_A_QTYPE', 'uint32'),      # quota type
        ('QUOTA_NL_A_EXCESS_ID', 'uint64'),  # id over quota (e.g. uid)
        ('QUOTA_NL_A_WARNING', 'uint32'),    # warning code
        ('QUOTA_NL_A_DEV_MAJOR', 'uint32'),  # device major number
        ('QUOTA_NL_A_DEV_MINOR', 'uint32'),  # device minor number
        ('QUOTA_NL_A_CAUSED_ID', 'uint64'),  # id that caused the event
        ('QUOTA_NL_A_PAD', 'uint64'),        # alignment padding attr
    )
class ThermalGenlEvent(Enum):
    # Thermal generic netlink multicast event types; the numeric values
    # mirror the kernel's `enum thermal_genl_event` and are part of the
    # netlink ABI, so they must not be renumbered or reordered.
    THERMAL_GENL_EVENT_UNSPEC = 0
    # thermal zone (TZ) lifecycle
    THERMAL_GENL_EVENT_TZ_CREATE = 1
    THERMAL_GENL_EVENT_TZ_DELETE = 2
    THERMAL_GENL_EVENT_TZ_DISABLE = 3
    THERMAL_GENL_EVENT_TZ_ENABLE = 4
    # trip-point crossings and configuration changes
    THERMAL_GENL_EVENT_TZ_TRIP_UP = 5
    THERMAL_GENL_EVENT_TZ_TRIP_DOWN = 6
    THERMAL_GENL_EVENT_TZ_TRIP_CHANGE = 7
    THERMAL_GENL_EVENT_TZ_TRIP_ADD = 8
    THERMAL_GENL_EVENT_TZ_TRIP_DELETE = 9
    # cooling device (CDEV) events
    THERMAL_GENL_EVENT_CDEV_ADD = 10
    THERMAL_GENL_EVENT_CDEV_DELETE = 11
    THERMAL_GENL_EVENT_CDEV_STATE_UPDATE = 12
    # governor change and CPU capability notification
    THERMAL_GENL_EVENT_TZ_GOV_CHANGE = 13
    THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE = 14
class NetlinkError(Exception):
    """Base class for netlink-level errors.

    Carries the numeric errno-style ``code``; when no message is given,
    the system error string for that code is used.  ``extra_code`` may
    be set later to refine the error.
    """

    def __init__(self, code, msg=None):
        if not msg:
            msg = os.strerror(code)
        super().__init__(code, msg)
        self.code = code
        self.extra_code = 0
class SkipInode(Exception):
    """Control-flow signal: the current inode should be skipped."""

    def __init__(self, code=0, msg=None):
        self.code = code
        super().__init__(code, msg)
''' NetlinkSocket.bind(self, groups, pid, **kwarg) self.marshal.msg_map[GENL_ID_CTRL] = ctrlmsg msg = self.discovery(proto) self._prid = msg.get_attr('CTRL_ATTR_FAMILY_ID') self.mcast_groups = dict( [ ( x.get_attr('CTRL_ATTR_MCAST_GRP_NAME'), x.get_attr('CTRL_ATTR_MCAST_GRP_ID'), ) for x in msg.get_attr('CTRL_ATTR_MCAST_GROUPS', []) ] ) self.marshal.msg_map[self.prid] = msg_class def add_membership(self, group): self.setsockopt( SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, self.mcast_groups[group] ) def drop_membership(self, group): self.setsockopt( SOL_NETLINK, NETLINK_DROP_MEMBERSHIP, self.mcast_groups[group] ) def discovery(self, proto): ''' Resolve generic netlink protocol -- takes a string as the only parameter, return protocol description ''' msg = ctrlmsg() msg['cmd'] = CTRL_CMD_GETFAMILY msg['version'] = 1 msg['attrs'].append(['CTRL_ATTR_FAMILY_NAME', proto]) msg['header']['type'] = GENL_ID_CTRL msg['header']['flags'] = NLM_F_REQUEST msg['header']['pid'] = self.pid msg.encode() self.sendto(msg.data, (0, 0)) msg = self.get()[0] err = msg['header'].get('error', None) if err is not None: if hasattr(err, 'code') and err.code == errno.ENOENT: err.extra_code = errno.ENOTSUP logger = getattr(logging, self.module_err_level) logger('Generic netlink protocol %s not found' % proto) logger('Please check if the protocol module is loaded') if self.module_err_message is not None: logger(self.module_err_message) raise err return msg def policy(self, proto): ''' Extract policy information for a generic netlink protocol -- takes a string as the only parameter, return protocol policy ''' self.marshal.msg_map[GENL_ID_CTRL] = ctrlmsg msg = ctrlmsg() msg['cmd'] = CTRL_CMD_GETPOLICY msg['attrs'].append(['CTRL_ATTR_FAMILY_NAME', proto]) return self.nlm_request( msg, msg_type=GENL_ID_CTRL, msg_flags=NLM_F_REQUEST | NLM_F_DUMP | NLM_F_ACK, ) def get(self, *argv, **kwarg): return tuple(super().get(*argv, **kwarg)) def nlm_request(self, *argv, **kwarg): return tuple(super().nlm_request(*argv, 
class ethtoolbitset(nla):
    # ETHTOOL_A_BITSET_* nested attribute: a bitmap used by the ethtool
    # netlink API to carry sets of bits (link modes, features, WoL modes)
    # either as a raw value/mask pair or as a list of named bits.
    nla_flags = NLA_F_NESTED
    nla_map = (
        ("ETHTOOL_A_BITSET_UNSPEC", 'none'),
        ("ETHTOOL_A_BITSET_NOMASK", "flag"),      # no mask, list form only
        ("ETHTOOL_A_BITSET_SIZE", 'uint32'),      # number of significant bits
        ("ETHTOOL_A_BITSET_BITS", 'bitset_bits'), # nested list of named bits
        ("ETHTOOL_A_BITSET_VALUE", 'hex'),        # raw bitmap value
        ("ETHTOOL_A_BITSET_MASK", 'hex'),         # raw bitmap mask
    )

    class bitset_bits(nla):
        # container holding the individual bit entries
        nla_flags = NLA_F_NESTED
        nla_map = (
            ('ETHTOOL_A_BITSET_BIT_UNSPEC', 'none'),
            ('ETHTOOL_A_BITSET_BITS_BIT', 'bitset_bits_bit'),
        )

        class bitset_bits_bit(nla):
            # a single named bit: index, name, boolean value
            nla_flags = NLA_F_NESTED
            nla_map = (
                ('ETHTOOL_A_BITSET_BIT_UNSPEC', 'none'),
                ('ETHTOOL_A_BITSET_BIT_INDEX', 'uint32'),
                ('ETHTOOL_A_BITSET_BIT_NAME', 'asciiz'),
                ('ETHTOOL_A_BITSET_BIT_VALUE', 'flag'),
            )
class ethtool_linkinfo_msg(genlmsg):
    # ETHTOOL_MSG_LINKINFO_GET/_SET payload: physical port and
    # transceiver parameters of one device (selected via the header NLA).
    nla_map = (
        ('ETHTOOL_A_LINKINFO_UNSPEC', 'none'),
        ('ETHTOOL_A_LINKINFO_HEADER', 'ethtoolheader'),  # device selector
        ('ETHTOOL_A_LINKINFO_PORT', 'uint8'),            # physical port type
        ('ETHTOOL_A_LINKINFO_PHYADDR', 'uint8'),         # PHY address
        ('ETHTOOL_A_LINKINFO_TP_MDIX', 'uint8'),         # MDI(-X) status
        ('ETHTOOL_A_LINKINFO_TP_MDIX_CTR', 'uint8'),     # MDI(-X) control
        ('ETHTOOL_A_LINKINFO_TRANSCEIVER', 'uint8'),
    )

    # bind the shared nested header NLA type defined at module level
    ethtoolheader = ethtoolheader
ethtoolbitset = ethtoolbitset class ethtool_rings_msg(genlmsg): nla_map = ( ('ETHTOOL_A_RINGS_UNSPEC', 'none'), ('ETHTOOL_A_RINGS_HEADER', 'ethtoolheader'), ('ETHTOOL_A_RINGS_RX_MAX', 'uint32'), ('ETHTOOL_A_RINGS_RX_MINI_MAX', 'uint32'), ('ETHTOOL_A_RINGS_RX_JUMBO_MAX', 'uint32'), ('ETHTOOL_A_RINGS_TX_MAX', 'uint32'), ('ETHTOOL_A_RINGS_RX', 'uint32'), ('ETHTOOL_A_RINGS_RX_MINI', 'uint32'), ('ETHTOOL_A_RINGS_RX_JUMBO', 'uint32'), ('ETHTOOL_A_RINGS_TX', 'uint32'), ('ETHTOOL_A_RINGS_RX_BUF_LEN', 'uint32'), ('ETHTOOL_A_RINGS_TCP_DATA_SPLIT', 'uint8'), ('ETHTOOL_A_RINGS_CQE_SIZE', 'uint32'), ('ETHTOOL_A_RINGS_TX_PUSH', 'uint8'), ('ETHTOOL_A_RINGS_RX_PUSH', 'uint8'), ('ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN', 'uint32'), ('ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX', 'uint32'), ) ethtoolheader = ethtoolheader class NlEthtool(GenericNetlinkSocket): def _do_request(self, msg, msg_flags=NLM_F_REQUEST): return self.nlm_request(msg, msg_type=self.prid, msg_flags=msg_flags) def is_nlethtool_in_kernel(self): try: self.bind(ETHTOOL_GENL_NAME, ethtool_linkinfo_msg) except NetlinkError: return False return True def _get_dev_header(self, ifname=None, ifindex=None): if ifindex is not None: return {'attrs': [['ETHTOOL_A_HEADER_DEV_INDEX', ifindex]]} elif ifname is not None: return {'attrs': [['ETHTOOL_A_HEADER_DEV_NAME', ifname]]} else: raise ValueError("Need ifname or ifindex") def get_linkinfo(self, ifname=None, ifindex=None): msg = ethtool_linkinfo_msg() msg["cmd"] = ETHTOOL_MSG_LINKINFO_GET msg['version'] = ETHTOOL_GENL_VERSION msg["attrs"].append( ( 'ETHTOOL_A_LINKINFO_HEADER', self._get_dev_header(ifname, ifindex), ) ) self.bind(ETHTOOL_GENL_NAME, ethtool_linkinfo_msg) return self._do_request(msg) def get_linkmode(self, ifname=None, ifindex=None): msg = ethtool_linkmode_msg() msg["cmd"] = ETHTOOL_MSG_LINKMODES_GET msg['version'] = ETHTOOL_GENL_VERSION msg["attrs"].append( ( 'ETHTOOL_A_LINKMODES_HEADER', self._get_dev_header(ifname, ifindex), ) ) self.bind(ETHTOOL_GENL_NAME, 
ethtool_linkmode_msg) return self._do_request(msg) def get_stringset(self, ifname=None, ifindex=None): msg = ethtool_strset_msg() msg["cmd"] = ETHTOOL_MSG_STRSET_GET msg['version'] = ETHTOOL_GENL_VERSION msg["attrs"].append( ('ETHTOOL_A_STRSET_HEADER', self._get_dev_header(ifname, ifindex)) ) self.bind(ETHTOOL_GENL_NAME, ethtool_strset_msg) return self._do_request(msg) def get_linkstate(self, ifname=None, ifindex=None): msg = ethtool_linkstate_msg() msg["cmd"] = ETHTOOL_MSG_LINKSTATE_GET msg['version'] = ETHTOOL_GENL_VERSION msg["attrs"].append( ( 'ETHTOOL_A_LINKSTATE_HEADER', self._get_dev_header(ifname, ifindex), ) ) self.bind(ETHTOOL_GENL_NAME, ethtool_linkstate_msg) return self._do_request(msg) def get_wol(self, ifname=None, ifindex=None): msg = ethtool_wol_msg() msg["cmd"] = ETHTOOL_MSG_WOL_GET msg['version'] = ETHTOOL_GENL_VERSION msg["attrs"].append( ('ETHTOOL_A_WOL_HEADER', self._get_dev_header(ifname, ifindex)) ) self.bind(ETHTOOL_GENL_NAME, ethtool_wol_msg) return self._do_request(msg) def get_rings(self, ifname=None, ifindex=None): msg = ethtool_rings_msg() msg["cmd"] = ETHTOOL_MSG_RINGS_GET msg["version"] = ETHTOOL_GENL_VERSION msg["attrs"].append( ('ETHTOOL_A_RINGS_HEADER', self._get_dev_header(ifname, ifindex)) ) self.bind(ETHTOOL_GENL_NAME, ethtool_rings_msg) return self._do_request(msg) def set_rings(self, rings, ifname=None, ifindex=None): rings["cmd"] = ETHTOOL_MSG_RINGS_SET rings["version"] = ETHTOOL_GENL_VERSION rings["attrs"].append( ('ETHTOOL_A_RINGS_HEADER', self._get_dev_header(ifname, ifindex)) ) self.bind(ETHTOOL_GENL_NAME, ethtool_rings_msg) return self._do_request(rings, msg_flags=NLM_F_REQUEST | NLM_F_ACK) pyroute2-0.7.11/pyroute2/netlink/generic/l2tp.py000066400000000000000000000456621455030217500215140ustar00rootroot00000000000000from pyroute2.netlink import ( NLA_F_NESTED, NLM_F_ACK, NLM_F_DUMP, NLM_F_REQUEST, genlmsg, nla, ) from pyroute2.netlink.generic import GenericNetlinkSocket # Defines from uapi/linux/l2tp.h L2TP_GENL_NAME = 
"l2tp" L2TP_GENL_VERSION = 1 L2TP_CMD_NOOP = 0 L2TP_CMD_TUNNEL_CREATE = 1 L2TP_CMD_TUNNEL_DELETE = 2 L2TP_CMD_TUNNEL_MODIFY = 3 L2TP_CMD_TUNNEL_GET = 4 L2TP_CMD_SESSION_CREATE = 5 L2TP_CMD_SESSION_DELETE = 6 L2TP_CMD_SESSION_MODIFY = 7 L2TP_CMD_SESSION_GET = 8 # ATTR types defined for L2TP L2TP_ATTR_NONE = 0 L2TP_ATTR_PW_TYPE = 1 L2TP_ATTR_ENCAP_TYPE = 2 L2TP_ATTR_OFFSET = 3 L2TP_ATTR_DATA_SEQ = 4 L2TP_ATTR_L2SPEC_TYPE = 5 L2TP_ATTR_L2SPEC_LEN = 6 L2TP_ATTR_PROTO_VERSION = 7 L2TP_ATTR_IFNAME = 8 L2TP_ATTR_CONN_ID = 9 L2TP_ATTR_PEER_CONN_ID = 10 L2TP_ATTR_SESSION_ID = 11 L2TP_ATTR_PEER_SESSION_ID = 12 L2TP_ATTR_UDP_CSUM = 13 L2TP_ATTR_VLAN_ID = 14 L2TP_ATTR_COOKIE = 15 L2TP_ATTR_PEER_COOKIE = 16 L2TP_ATTR_DEBUG = 17 L2TP_ATTR_RECV_SEQ = 18 L2TP_ATTR_SEND_SEQ = 19 L2TP_ATTR_LNS_MODE = 20 L2TP_ATTR_USING_IPSEC = 21 L2TP_ATTR_RECV_TIMEOUT = 22 L2TP_ATTR_FD = 23 L2TP_ATTR_IP_SADDR = 24 L2TP_ATTR_IP_DADDR = 25 L2TP_ATTR_UDP_SPORT = 26 L2TP_ATTR_UDP_DPORT = 27 L2TP_ATTR_MTU = 28 L2TP_ATTR_MRU = 29 L2TP_ATTR_STATS = 30 L2TP_ATTR_IP6_SADDR = 31 L2TP_ATTR_IP6_DADDR = 32 L2TP_ATTR_UDP_ZERO_CSUM6_TX = 33 L2TP_ATTR_UDP_ZERO_CSUM6_RX = 34 L2TP_ATTR_PAD = 35 # Nested L2TP_ATTR_STATS L2TP_ATTR_STATS_NONE = 0 L2TP_ATTR_TX_PACKETS = 1 L2TP_ATTR_TX_BYTES = 2 L2TP_ATTR_TX_ERRORS = 3 L2TP_ATTR_RX_PACKETS = 4 L2TP_ATTR_RX_BYTES = 5 L2TP_ATTR_RX_SEQ_DISCARDS = 6 L2TP_ATTR_RX_OOS_PACKETS = 7 L2TP_ATTR_RX_ERRORS = 8 L2TP_ATTR_STATS_PAD = 9 L2TP_PWTYPE_NONE = 0x0000 L2TP_PWTYPE_ETH_VLAN = 0x0004 L2TP_PWTYPE_ETH = 0x0005 L2TP_PWTYPE_PPP = 0x0007 L2TP_PWTYPE_PPP_AC = 0x0008 L2TP_PWTYPE_IP = 0x000B L2TP_L2SPECTYPE_NONE = 0 L2TP_L2SPECTYPE_DEFAULT = 1 L2TP_ENCAPTYPE_UDP = 0 L2TP_ENCAPTYPE_IP = 1 class l2tpmsg(genlmsg): prefix = "L2TP_ATTR_" nla_map = ( ("L2TP_ATTR_NONE", "none"), ("L2TP_ATTR_PW_TYPE", "uint16"), ("L2TP_ATTR_ENCAP_TYPE", "uint16"), ("L2TP_ATTR_OFFSET", "uint16"), ("L2TP_ATTR_DATA_SEQ", "uint8"), ("L2TP_ATTR_L2SPEC_TYPE", "uint8"), ("L2TP_ATTR_L2SPEC_LEN", "uint8"), 
("L2TP_ATTR_PROTO_VERSION", "uint8"), ("L2TP_ATTR_IFNAME", "asciiz"), ("L2TP_ATTR_CONN_ID", "uint32"), ("L2TP_ATTR_PEER_CONN_ID", "uint32"), ("L2TP_ATTR_SESSION_ID", "uint32"), ("L2TP_ATTR_PEER_SESSION_ID", "uint32"), ("L2TP_ATTR_UDP_CSUM", "uint8"), ("L2TP_ATTR_VLAN_ID", "uint16"), ("L2TP_ATTR_COOKIE", "hex"), ("L2TP_ATTR_PEER_COOKIE", "hex"), ("L2TP_ATTR_DEBUG", "uint32"), ("L2TP_ATTR_RECV_SEQ", "uint8"), ("L2TP_ATTR_SEND_SEQ", "uint8"), ("L2TP_ATTR_LNS_MODE", "uint8"), ("L2TP_ATTR_USING_IPSEC", "uint8"), ("L2TP_ATTR_RECV_TIMEOUT", "uint64"), ("L2TP_ATTR_FD", "uint32"), ("L2TP_ATTR_IP_SADDR", "ip4addr"), ("L2TP_ATTR_IP_DADDR", "ip4addr"), ("L2TP_ATTR_UDP_SPORT", "uint16"), ("L2TP_ATTR_UDP_DPORT", "uint16"), ("L2TP_ATTR_MTU", "uint16"), ("L2TP_ATTR_MRU", "uint16"), ("L2TP_ATTR_STATS", "l2tp_stats"), ("L2TP_ATTR_IP6_SADDR", "ip6addr"), ("L2TP_ATTR_IP6_DADDR", "ip6addr"), ("L2TP_ATTR_UDP_ZERO_CSUM6_TX", "flag"), ("L2TP_ATTR_UDP_ZERO_CSUM6_RX", "flag"), ("L2TP_ATTR_PAD", "none"), ) class l2tp_stats(nla): nla_flags = NLA_F_NESTED nla_map = ( ("L2TP_ATTR_STATS_NONE", "none"), ("L2TP_ATTR_TX_PACKETS", "uint64"), ("L2TP_ATTR_TX_BYTES", "uint64"), ("L2TP_ATTR_TX_ERRORS", "uint64"), ("L2TP_ATTR_RX_PACKETS", "uint64"), ("L2TP_ATTR_RX_BYTES", "uint64"), ("L2TP_ATTR_RX_SEQ_DISCARDS", "uint64"), ("L2TP_ATTR_RX_OOS_PACKETS", "uint64"), ("L2TP_ATTR_RX_ERRORS", "uint64"), ("L2TP_ATTR_STATS_PAD", "none"), ) class L2tp(GenericNetlinkSocket): def __init__(self, *args, **kwargs): GenericNetlinkSocket.__init__(self, *args, **kwargs) self.bind(L2TP_GENL_NAME, l2tpmsg) def _do_request(self, msg, msg_flags=NLM_F_REQUEST | NLM_F_ACK): return self.nlm_request(msg, msg_type=self.prid, msg_flags=msg_flags) def _send_tunnel( self, cmd, tunnel_id, peer_tunnel_id=None, protocol=3, remote=None, local=None, fd=None, encap="udp", udp_sport=None, udp_dport=None, udp_csum=None, udp6_csum_rx=None, udp6_csum_tx=None, debug=None, ): """ Send L2TP tunnel create or modify commands :param cmd: Netlink 
command to use :param tunnel_id: local tunnel id :param peer_tunnel_id: remote tunnel id :param protocol: L2TP version :param remote: IP address of the remote peer :param local: IP address of the local interface :param fd: file descriptor of socket to use :param encap: encapsulation type of the tunnel (udp, ip) :param udp_sport: UDP source port to be used for the tunnel :param udp_dport: UDP destination port to be used for the tunnel :param udp_csum: control if IPv4 UDP checksums should be calculated and checked :param udp6_csum_rx: control if IPv6 UDP rx checksums should be calculated :param udp6_csum_tx: control if IPv6 UDP tx checksums should be calculated :param debug: enable or disable debugging using kernel printk for the tunnel :return: Netlink response """ msg = l2tpmsg() msg["cmd"] = cmd msg["version"] = L2TP_GENL_VERSION msg["attrs"].append(["L2TP_ATTR_CONN_ID", tunnel_id]) if cmd == L2TP_CMD_TUNNEL_CREATE: msg["attrs"].append(["L2TP_ATTR_PEER_CONN_ID", peer_tunnel_id]) msg["attrs"].append(["L2TP_ATTR_PROTO_VERSION", protocol]) if encap == "ip": msg["attrs"].append(["L2TP_ATTR_ENCAP_TYPE", L2TP_ENCAPTYPE_IP]) elif encap == "udp": msg["attrs"].append(["L2TP_ATTR_ENCAP_TYPE", L2TP_ENCAPTYPE_UDP]) if fd: msg["attrs"].append(["L2TP_ATTR_FD", fd]) else: local_ip_version = 4 if local: if local.find(":") > -1: local_ip_version = 6 if local_ip_version == 6: msg["attrs"].append(["L2TP_ATTR_IP6_SADDR", local]) else: msg["attrs"].append(["L2TP_ATTR_IP_SADDR", local]) remote_ip_version = 4 if remote: if remote.find(":") > -1: remote_ip_version = 6 if remote_ip_version == 6: msg["attrs"].append(["L2TP_ATTR_IP6_DADDR", remote]) else: msg["attrs"].append(["L2TP_ATTR_IP_DADDR", remote]) if local and remote: if remote_ip_version != local_ip_version: raise ValueError( "Local and remote peer address version mismatch" ) if encap == "udp" and cmd == L2TP_CMD_TUNNEL_CREATE: if udp_sport: msg["attrs"].append(["L2TP_ATTR_UDP_SPORT", udp_sport]) if udp_dport: 
msg["attrs"].append(["L2TP_ATTR_UDP_DPORT", udp_dport]) if udp_csum: msg["attrs"].append(["L2TP_ATTR_UDP_CSUM", True]) if udp6_csum_rx: msg["attrs"].append(["L2TP_ATTR_UDP_ZERO_CSUM6_TX", True]) if udp6_csum_tx: msg["attrs"].append(["L2TP_ATTR_UDP_ZERO_CSUM6_RX", True]) if debug is not None: msg["attrs"].append(["L2TP_ATTR_DEBUG", debug]) return self._do_request(msg) def create_tunnel( self, tunnel_id, peer_tunnel_id, protocol=3, remote=None, local=None, fd=None, encap="udp", udp_sport=None, udp_dport=None, udp_csum=None, udp6_csum_rx=None, udp6_csum_tx=None, debug=False, ): """ Create a new L2TP tunnel :param tunnel_id: local tunnel id :param peer_tunnel_id: remote tunnel id :param protocol: L2TP version :param remote: IP address of the remote peer :param local: IP address of the local interface :param fd: file descriptor of socket to use :param encap: encapsulation type of the tunnel (udp, ip) :param udp_sport: UDP source port to be used for the tunnel :param udp_dport: UDP destination port to be used for the tunnel :param udp_csum: control if IPv4 UDP checksums should be calculated and checked :param udp6_csum_rx: control if IPv6 UDP rx checksums should be calculated :param udp6_csum_tx: control if IPv6 UDP tx checksums should be calculated :param debug: enable or disable debugging using kernel printk for the tunnel :return: Netlink response """ if not remote: raise ValueError("remote endpoint missing") if not local: raise ValueError("local endpoint missing") if encap == "udp": if not udp_sport: raise ValueError( "udp_sport is required when UDP encapsulation is " "selected" ) if not udp_dport: raise ValueError( "udp_dport is required when UDP encapsulation is " "selected" ) return self._send_tunnel( cmd=L2TP_CMD_TUNNEL_CREATE, tunnel_id=tunnel_id, peer_tunnel_id=peer_tunnel_id, protocol=protocol, remote=remote, local=local, fd=fd, encap=encap, udp_sport=udp_sport, udp_dport=udp_dport, udp_csum=udp_csum, udp6_csum_rx=udp6_csum_rx, udp6_csum_tx=udp6_csum_tx, 
    def _send_session(
        self,
        cmd,
        tunnel_id,
        session_id,
        peer_session_id=None,
        ifname=None,
        l2spec_type=None,
        cookie=None,
        peer_cookie=None,
        debug=None,
        seq=None,
        lns_mode=None,
        recv_timeout=None,
        pwtype=L2TP_PWTYPE_ETH,
    ):
        """
        Send session create or modify commands

        :param cmd: Netlink command to use
        :param tunnel_id: local tunnel id
        :param session_id: local session id
        :param peer_session_id: remote session id
        :param ifname: interface name
        :param l2spec_type: layer2 specific header type of the session
        :param cookie: local cookie value to be assigned to the session
        :param peer_cookie: remote cookie value to be assigned to the
            session
        :param debug: enable or disable debugging using kernel printk
            for the session
        :param seq: controls sequence numbering to prevent or detect
            out of order packets
        :param lns_mode: LNS mode
        :param recv_timeout: Reorder timeout
        :param pwtype: pseudowire type
        :return: netlink response
        """
        msg = l2tpmsg()
        msg["cmd"] = cmd
        msg["version"] = L2TP_GENL_VERSION
        # tunnel and session ids are mandatory for both create and modify
        msg["attrs"].append(["L2TP_ATTR_CONN_ID", tunnel_id])
        msg["attrs"].append(["L2TP_ATTR_SESSION_ID", session_id])
        if cmd == L2TP_CMD_SESSION_CREATE:
            # the kernel requires the peer session id at create time
            if peer_session_id:
                msg["attrs"].append(
                    ["L2TP_ATTR_PEER_SESSION_ID", peer_session_id]
                )
            else:
                raise ValueError(
                    "peer_session_id required when creating a session"
                )
        if ifname:
            msg["attrs"].append(["L2TP_ATTR_IFNAME", ifname])
        if cmd == L2TP_CMD_SESSION_CREATE:
            # any value other than the literal "none" maps to the default
            # layer2-specific sublayer
            if l2spec_type == "none":
                l2spec_type_value = L2TP_L2SPECTYPE_NONE
            else:
                l2spec_type_value = L2TP_L2SPECTYPE_DEFAULT
            msg["attrs"].append(["L2TP_ATTR_L2SPEC_TYPE", l2spec_type_value])
        if cookie:
            # NOTE(review): the `- 2` assumes the hex string carries a
            # "0x"-style two-character prefix -- confirm the expected
            # cookie format against callers
            if len(cookie) - 2 not in (8, 16):
                raise ValueError("cookie must be either 8 or 16 hex digits")
            msg["attrs"].append(["L2TP_ATTR_COOKIE", cookie])
        if peer_cookie:
            # same prefix assumption as for the local cookie above
            if len(peer_cookie) - 2 not in (8, 16):
                raise ValueError(
                    "peer_cookie must be either 8 or 16 hex digits"
                )
            msg["attrs"].append(["L2TP_ATTR_PEER_COOKIE", peer_cookie])
        if debug is not None:
            msg["attrs"].append(["L2TP_ATTR_DEBUG", debug])
        # sequence numbering: "both", "recv" or "send"
        if seq == "both":
            msg["attrs"].append(["L2TP_ATTR_RECV_SEQ", True])
            msg["attrs"].append(["L2TP_ATTR_SEND_SEQ", True])
        elif seq == "recv":
            msg["attrs"].append(["L2TP_ATTR_RECV_SEQ", True])
        elif seq == "send":
            msg["attrs"].append(["L2TP_ATTR_SEND_SEQ", True])
        if lns_mode:
            msg["attrs"].append(["L2TP_ATTR_LNS_MODE", lns_mode])
        if recv_timeout is not None:
            msg["attrs"].append(["L2TP_ATTR_RECV_TIMEOUT", recv_timeout])
        if cmd == L2TP_CMD_SESSION_CREATE:
            msg["attrs"].append(["L2TP_ATTR_PW_TYPE", pwtype])
        return self._do_request(msg)
def dump_tunnels(self, tunnel_id=None):
    """
    Dump all L2TP tunnels.

    :param tunnel_id: ignored, kept only for backward compatibility
        with older callers -- the dump always returns every tunnel
    :return: netlink response
    """
    msg = l2tpmsg()
    msg["cmd"] = L2TP_CMD_TUNNEL_GET
    msg["version"] = L2TP_GENL_VERSION
    # no L2TP_ATTR_CONN_ID is attached: NLM_F_DUMP requests all tunnels
    return self._do_request(msg, msg_flags=NLM_F_REQUEST | NLM_F_DUMP)
def dump_sessions(self):
    """
    Dump all L2TP sessions.

    :return: netlink response
    """
    request = l2tpmsg()
    request["cmd"] = L2TP_CMD_SESSION_GET
    request["version"] = L2TP_GENL_VERSION
    flags = NLM_F_REQUEST | NLM_F_DUMP
    return self._do_request(request, msg_flags=flags)
def limits(self, cmd, **kwarg):
    '''
    Get or set MPTCP path manager limits.

    Usage::

        mptcp.limits('show')
        mptcp.limits('set', subflows=10)

    `add_addr_accepted` is accepted as an alias for `rcv_add_addrs`.

    :param cmd: 'show' or 'set'
    :return: netlink response
    '''
    flags_dump = NLM_F_REQUEST
    flags_base = NLM_F_REQUEST | NLM_F_ACK
    commands = {
        'show': (MPTCP_PM_CMD_GET_LIMITS, flags_dump),
        'set': (MPTCP_PM_CMD_SET_LIMITS, flags_base),
    }
    (command, flags) = commands.get(cmd, cmd)
    msg = mptcp_msg()
    msg['cmd'] = command
    msg['version'] = 1
    if cmd == 'set':
        # `<=` (subset-or-equal): supplying every supported key at once
        # is valid; the previous strict `<` wrongly rejected that case
        if not set(kwarg) <= set(
            ('subflows', 'rcv_add_addrs', 'add_addr_accepted')
        ):
            raise TypeError('invalid parameter')
        for key, value in kwarg.items():
            if key == 'add_addr_accepted':
                key = 'rcv_add_addrs'
            msg['attrs'].append((mptcp_msg.name2nla(key), value))
    return self.nlm_request(msg, msg_type=self.prid, msg_flags=flags)
as ndb: with ndb.interfaces.create(kind='wireguard', ifname=IFNAME) as link: link.add_ip('10.0.0.1/24') link.set(state='up') # Create WireGuard object wg = WireGuard() # Add a WireGuard configuration + first peer peer = {'public_key': 'TGFHcm9zc2VCaWNoZV9DJ2VzdExhUGx1c0JlbGxlPDM=', 'endpoint_addr': '8.8.8.8', 'endpoint_port': 8888, 'persistent_keepalive': 15, 'allowed_ips': ['10.0.0.0/24', '8.8.8.8/32']} wg.set(IFNAME, private_key='RCdhcHJlc0JpY2hlLEplU2VyYWlzTGFQbHVzQm9ubmU=', fwmark=0x1337, listen_port=2525, peer=peer) # Add second peer with preshared key peer = {'public_key': 'RCdBcHJlc0JpY2hlLFZpdmVMZXNQcm9iaW90aXF1ZXM=', 'preshared_key': 'Pz8/V2FudFRvVHJ5TXlBZXJvR3Jvc3NlQmljaGU/Pz8=', 'endpoint_addr': '8.8.8.8', 'endpoint_port': 9999, 'persistent_keepalive': 25, 'allowed_ips': ['::/0']} wg.set(IFNAME, peer=peer) # Delete second peer peer = {'public_key': 'RCdBcHJlc0JpY2hlLFZpdmVMZXNQcm9iaW90aXF1ZXM=', 'remove': True} wg.set(IFNAME, peer=peer) # Get information of the interface wg.info(IFNAME) # Get specific value from the interface wg.info(IFNAME)[0].get('WGDEVICE_A_PRIVATE_KEY') NOTES: * The `get()` method always returns iterable * Using `set()` method only requires an interface name * The `peer` structure is described as follow:: struct peer_s { public_key: # Base64 public key - required remove: # Boolean - optional preshared_key: # Base64 preshared key - optional endpoint_addr: # IPv4 or IPv6 endpoint - optional endpoint_port : # endpoint Port - required only if endpoint_addr persistent_keepalive: # time in seconds to send keep alive - optional allowed_ips: # list of CIDRs allowed - optional } ''' import errno import logging import struct from base64 import b64decode, b64encode from binascii import a2b_hex from socket import AF_INET, AF_INET6, inet_ntop, inet_pton from time import ctime from pyroute2.netlink import ( NLA_F_NESTED, NLM_F_ACK, NLM_F_DUMP, NLM_F_REQUEST, genlmsg, nla, ) from pyroute2.netlink.generic import GenericNetlinkSocket # Defines from 
uapi/wireguard.h WG_GENL_NAME = "wireguard" WG_GENL_VERSION = 1 WG_KEY_LEN = 32 # WireGuard Device commands WG_CMD_GET_DEVICE = 0 WG_CMD_SET_DEVICE = 1 # Wireguard Device attributes WGDEVICE_A_UNSPEC = 0 WGDEVICE_A_IFINDEX = 1 WGDEVICE_A_IFNAME = 2 WGDEVICE_A_PRIVATE_KEY = 3 WGDEVICE_A_PUBLIC_KEY = 4 WGDEVICE_A_FLAGS = 5 WGDEVICE_A_LISTEN_PORT = 6 WGDEVICE_A_FWMARK = 7 WGDEVICE_A_PEERS = 8 # WireGuard Device flags WGDEVICE_F_REPLACE_PEERS = 1 # WireGuard Allowed IP attributes WGALLOWEDIP_A_UNSPEC = 0 WGALLOWEDIP_A_FAMILY = 1 WGALLOWEDIP_A_IPADDR = 2 WGALLOWEDIP_A_CIDR_MASK = 3 # WireGuard Peer flags WGPEER_F_REMOVE_ME = 0 WGPEER_F_REPLACE_ALLOWEDIPS = 1 WGPEER_F_UPDATE_ONLY = 2 # Specific defines WG_MAX_PEERS = 1000 WG_MAX_ALLOWEDIPS = 1000 class wgmsg(genlmsg): prefix = 'WGDEVICE_A_' nla_map = ( ('WGDEVICE_A_UNSPEC', 'none'), ('WGDEVICE_A_IFINDEX', 'uint32'), ('WGDEVICE_A_IFNAME', 'asciiz'), ('WGDEVICE_A_PRIVATE_KEY', 'parse_wg_key'), ('WGDEVICE_A_PUBLIC_KEY', 'parse_wg_key'), ('WGDEVICE_A_FLAGS', 'uint32'), ('WGDEVICE_A_LISTEN_PORT', 'uint16'), ('WGDEVICE_A_FWMARK', 'uint32'), ('WGDEVICE_A_PEERS', '*wgdevice_peer'), ) class wgdevice_peer(nla): prefix = 'WGPEER_A_' nla_flags = NLA_F_NESTED nla_map = ( ('WGPEER_A_UNSPEC', 'none'), ('WGPEER_A_PUBLIC_KEY', 'parse_peer_key'), ('WGPEER_A_PRESHARED_KEY', 'parse_peer_key'), ('WGPEER_A_FLAGS', 'uint32'), ('WGPEER_A_ENDPOINT', 'parse_endpoint'), ('WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL', 'uint16'), ('WGPEER_A_LAST_HANDSHAKE_TIME', 'parse_handshake_time'), ('WGPEER_A_RX_BYTES', 'uint64'), ('WGPEER_A_TX_BYTES', 'uint64'), ('WGPEER_A_ALLOWEDIPS', '*wgpeer_allowedip'), ('WGPEER_A_PROTOCOL_VERSION', 'uint32'), ) class parse_peer_key(nla): fields = (('key', '32s'),) def decode(self): nla.decode(self) self['value'] = b64encode(self['key']) def encode(self): self['key'] = b64decode(self['value']) nla.encode(self) @staticmethod def parse_endpoint(nla, *argv, **kwarg): family = AF_INET if 'data' in kwarg: # decoding, fetch the 
famliy from the NLA data data = kwarg['data'] offset = kwarg['offset'] family = struct.unpack('H', data[offset + 4 : offset + 6])[0] elif kwarg['value']['addr'].find(':') > -1: # encoding, setup family from the addr format family = AF_INET6 if family == AF_INET: return nla.endpoint_ipv4 else: return nla.endpoint_ipv6 class endpoint_ipv4(nla): fields = ( ('family', 'H'), ('port', '>H'), ('addr', '4s'), ('__pad', '8x'), ) def decode(self): nla.decode(self) self['addr'] = inet_ntop(AF_INET, self['addr']) def encode(self): self['family'] = AF_INET self['addr'] = inet_pton(AF_INET, self['addr']) nla.encode(self) class endpoint_ipv6(nla): fields = ( ('family', 'H'), ('port', '>H'), ('flowinfo', '>I'), ('addr', '16s'), ('scope_id', '>I'), ) def decode(self): nla.decode(self) self['addr'] = inet_ntop(AF_INET6, self['addr']) def encode(self): self['family'] = AF_INET6 self['addr'] = inet_pton(AF_INET6, self['addr']) nla.encode(self) class parse_handshake_time(nla): fields = (('tv_sec', 'Q'), ('tv_nsec', 'Q')) def decode(self): nla.decode(self) self['latest handshake'] = ctime(self['tv_sec']) class wgpeer_allowedip(nla): prefix = 'WGALLOWEDIP_A_' nla_flags = NLA_F_NESTED nla_map = ( ('WGALLOWEDIP_A_UNSPEC', 'none'), ('WGALLOWEDIP_A_FAMILY', 'uint16'), ('WGALLOWEDIP_A_IPADDR', 'hex'), ('WGALLOWEDIP_A_CIDR_MASK', 'uint8'), ) def decode(self): nla.decode(self) family = self.get_attr('WGALLOWEDIP_A_FAMILY') if family is None: # Prevent when decode() is called without attrs because all # datas transfered to 'value' entry. # {'attrs': [], 'value': [{'attrs' ... 
return ipaddr = self.get_attr('WGALLOWEDIP_A_IPADDR') cidr = self.get_attr('WGALLOWEDIP_A_CIDR_MASK') self['addr'] = '{ipaddr}/{cidr}'.format( ipaddr=inet_ntop(family, a2b_hex(ipaddr.replace(':', ''))), cidr=cidr, ) class parse_wg_key(nla): fields = (('key', '32s'),) def decode(self): nla.decode(self) self['value'] = b64encode(self['key']) def encode(self): self['key'] = b64decode(self['value']) nla.encode(self) class WireGuard(GenericNetlinkSocket): def __init__(self, *args, **kwargs): GenericNetlinkSocket.__init__(self, *args, **kwargs) self.bind(WG_GENL_NAME, wgmsg) def info(self, interface): msg = wgmsg() msg['cmd'] = WG_CMD_GET_DEVICE msg['attrs'].append(['WGDEVICE_A_IFNAME', interface]) return self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_DUMP ) def set( self, interface, listen_port=None, fwmark=None, private_key=None, peer=None, ): msg = wgmsg() msg['attrs'].append(['WGDEVICE_A_IFNAME', interface]) if private_key is not None: self._wg_test_key(private_key) msg['attrs'].append(['WGDEVICE_A_PRIVATE_KEY', private_key]) if listen_port is not None: msg['attrs'].append(['WGDEVICE_A_LISTEN_PORT', listen_port]) if fwmark is not None: msg['attrs'].append(['WGDEVICE_A_FWMARK', fwmark]) if peer is not None: self._wg_set_peer(msg, peer) # Message attributes msg['cmd'] = WG_CMD_SET_DEVICE msg['version'] = WG_GENL_VERSION msg['header']['type'] = self.prid msg['header']['flags'] = NLM_F_REQUEST | NLM_F_ACK msg['header']['pid'] = self.pid msg.encode() self.sendto(msg.data, (0, 0)) msg = self.get()[0] err = msg['header'].get('error', None) if err is not None: if hasattr(err, 'code') and err.code == errno.ENOENT: logging.error( 'Generic netlink protocol %s not found' % self.prid ) logging.error('Please check if the protocol module is loaded') raise err return msg def _wg_test_key(self, key): try: if len(b64decode(key)) != WG_KEY_LEN: raise ValueError('Invalid WireGuard key length') except TypeError: raise ValueError('Failed to decode Base64 key') 
def _wg_set_peer(self, msg, peer): attrs = [] wg_peer = [{'attrs': attrs}] if 'public_key' not in peer: raise ValueError('Peer Public key required') # Check public key validity public_key = peer['public_key'] self._wg_test_key(public_key) attrs.append(['WGPEER_A_PUBLIC_KEY', public_key]) # If peer removal is set to True if 'remove' in peer and peer['remove']: attrs.append(['WGPEER_A_FLAGS', WGDEVICE_F_REPLACE_PEERS]) msg['attrs'].append(['WGDEVICE_A_PEERS', wg_peer]) return # Set Endpoint if 'endpoint_addr' in peer and 'endpoint_port' in peer: attrs.append( [ 'WGPEER_A_ENDPOINT', { 'addr': peer['endpoint_addr'], 'port': peer['endpoint_port'], }, ] ) # Set Preshared key if 'preshared_key' in peer: pkey = peer['preshared_key'] self._wg_test_key(pkey) attrs.append(['WGPEER_A_PRESHARED_KEY', pkey]) # Set Persistent Keepalive time if 'persistent_keepalive' in peer: keepalive = peer['persistent_keepalive'] attrs.append(['WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL', keepalive]) # Set Peer flags attrs.append(['WGPEER_A_FLAGS', WGPEER_F_UPDATE_ONLY]) # Set allowed IPs if 'allowed_ips' in peer: allowed_ips = self._wg_build_allowedips(peer['allowed_ips']) attrs.append(['WGPEER_A_ALLOWEDIPS', allowed_ips]) msg['attrs'].append(['WGDEVICE_A_PEERS', wg_peer]) def _wg_build_allowedips(self, allowed_ips): ret = [] for index, ip in enumerate(allowed_ips): allowed_ip = [] ret.append({'attrs': allowed_ip}) if ip.find("/") == -1: raise ValueError('No CIDR set in allowed ip #{}'.format(index)) addr, mask = ip.split('/') family = AF_INET if addr.find(":") == -1 else AF_INET6 allowed_ip.append(['WGALLOWEDIP_A_FAMILY', family]) allowed_ip.append( ['WGALLOWEDIP_A_IPADDR', inet_pton(family, addr)] ) allowed_ip.append(['WGALLOWEDIP_A_CIDR_MASK', int(mask)]) return ret 
pyroute2-0.7.11/pyroute2/netlink/ipq/000077500000000000000000000000001455030217500174215ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/ipq/__init__.py000066400000000000000000000063631455030217500215420ustar00rootroot00000000000000''' IPQ -- userspace firewall ========================= Netlink family for dealing with `QUEUE` iptables target. All the packets routed to the target `QUEUE` should be handled by a userspace program and the program should response with a verdict. E.g., the verdict can be `NF_DROP` and in that case the packet will be silently dropped, or `NF_ACCEPT`, and the packet will be pass the rule. ''' from pyroute2.netlink import NLM_F_REQUEST, nlmsg from pyroute2.netlink.nlsocket import Marshal, NetlinkSocket # constants IFNAMSIZ = 16 IPQ_MAX_PAYLOAD = 0x800 # IPQ messages IPQM_BASE = 0x10 IPQM_MODE = IPQM_BASE + 1 IPQM_VERDICT = IPQM_BASE + 2 IPQM_PACKET = IPQM_BASE + 3 # IPQ modes IPQ_COPY_NONE = 0 IPQ_COPY_META = 1 IPQ_COPY_PACKET = 2 # verdict types NF_DROP = 0 NF_ACCEPT = 1 NF_STOLEN = 2 NF_QUEUE = 3 NF_REPEAT = 4 NF_STOP = 5 class ipq_base_msg(nlmsg): def decode(self): nlmsg.decode(self) self['payload'] = self.buf.read(self['data_len']) def encode(self): init = self.buf.tell() nlmsg.encode(self) if 'payload' in self: self.buf.write(self['payload']) self.update_length(init) class ipq_packet_msg(ipq_base_msg): fields = ( ('packet_id', 'L'), ('mark', 'L'), ('timestamp_sec', 'l'), ('timestamp_usec', 'l'), ('hook', 'I'), ('indev_name', '%is' % IFNAMSIZ), ('outdev_name', '%is' % IFNAMSIZ), ('hw_protocol', '>H'), ('hw_type', 'H'), ('hw_addrlen', 'B'), ('hw_addr', '6B'), ('__pad', '9x'), ('data_len', 'I'), ('__pad', '4x'), ) class ipq_mode_msg(nlmsg): pack = 'struct' fields = ( ('value', 'B'), ('__pad', '7x'), ('range', 'I'), ('__pad', '12x'), ) class ipq_verdict_msg(ipq_base_msg): pack = 'struct' fields = ( ('value', 'I'), ('__pad', '4x'), ('id', 'L'), ('data_len', 'I'), ('__pad', '4x'), ) class MarshalIPQ(Marshal): msg_map = { 
def verdict(self, seq, v):
    '''
    Issue a verdict `v` for a packet `seq`.

    :param seq: packet id taken from the corresponding ipq_packet_msg
    :param v: verdict code, e.g. NF_ACCEPT or NF_DROP
    '''
    msg = ipq_verdict_msg()
    msg['value'] = v
    msg['id'] = seq
    msg['data_len'] = 0
    msg['header']['type'] = IPQM_VERDICT
    msg['header']['flags'] = NLM_F_REQUEST
    msg.encode()
    # send the encoded bytes via msg.data, exactly as bind() does;
    # going through msg.buf.getvalue() was inconsistent with the rest
    # of this class and relies on an internal buffer attribute
    self.sendto(msg.data, (0, 0))
''' from pyroute2.netlink import nlmsg NFNL_SUBSYS_NONE = 0 NFNL_SUBSYS_CTNETLINK = 1 NFNL_SUBSYS_CTNETLINK_EXP = 2 NFNL_SUBSYS_QUEUE = 3 NFNL_SUBSYS_ULOG = 4 NFNL_SUBSYS_OSF = 5 NFNL_SUBSYS_IPSET = 6 NFNL_SUBSYS_ACCT = 7 NFNL_SUBSYS_CTNETLINK_TIMEOUT = 8 NFNL_SUBSYS_CTHELPER = 9 NFNL_SUBSYS_NFTABLES = 10 NFNL_SUBSYS_NFT_COMPAT = 11 NFNL_SUBSYS_COUNT = 12 # multicast group ids (for use with {add,drop}_membership) NFNLGRP_NONE = 0 NFNLGRP_CONNTRACK_NEW = 1 NFNLGRP_CONNTRACK_UPDATE = 2 NFNLGRP_CONNTRACK_DESTROY = 3 NFNLGRP_CONNTRACK_EXP_NEW = 4 NFNLGRP_CONNTRACK_EXP_UPDATE = 5 NFNLGRP_CONNTRACK_EXP_DESTROY = 6 NFNLGRP_NFTABLES = 7 NFNLGRP_ACCT_QUOTA = 8 NFNLGRP_NFTRACE = 9 class nfgen_msg(nlmsg): fields = (('nfgen_family', 'B'), ('version', 'B'), ('res_id', '!H')) pyroute2-0.7.11/pyroute2/netlink/nfnetlink/ipset.py000066400000000000000000000157511455030217500223270ustar00rootroot00000000000000from pyroute2.netlink import NLA_F_NESTED, NLA_F_NET_BYTEORDER, nla from pyroute2.netlink.nfnetlink import NFNL_SUBSYS_IPSET, nfgen_msg IPSET_MAXNAMELEN = 32 IPSET_DEFAULT_MAXELEM = 65536 IPSET_CMD_NONE = 0 IPSET_CMD_PROTOCOL = 1 # Return protocol version IPSET_CMD_CREATE = 2 # Create a new (empty) set IPSET_CMD_DESTROY = 3 # Destroy a (empty) set IPSET_CMD_FLUSH = 4 # Remove all elements from a set IPSET_CMD_RENAME = 5 # Rename a set IPSET_CMD_SWAP = 6 # Swap two sets IPSET_CMD_LIST = 7 # List sets IPSET_CMD_SAVE = 8 # Save sets IPSET_CMD_ADD = 9 # Add an element to a set IPSET_CMD_DEL = 10 # Delete an element from a set IPSET_CMD_TEST = 11 # Test an element in a set IPSET_CMD_HEADER = 12 # Get set header data only IPSET_CMD_TYPE = 13 # 13: Get set type IPSET_CMD_GET_BYNAME = 14 # 14: Get set index by name IPSET_CMD_GET_BYINDEX = 15 # 15: Get set index by index # flags at command level (IPSET_ATTR_FLAGS) IPSET_FLAG_LIST_SETNAME = 1 << 1 IPSET_FLAG_LIST_HEADER = 1 << 2 IPSET_FLAG_SKIP_COUNTER_UPDATE = 1 << 3 IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE = 1 << 4 IPSET_FLAG_MATCH_COUNTERS = 
1 << 5 IPSET_FLAG_RETURN_NOMATCH = 1 << 7 # flags at cadt attribute (IPSET_ATTR_CADT_FLAGS) IPSET_FLAG_PHYSDEV = 1 << 1 IPSET_FLAG_NOMATCH = 1 << 2 IPSET_FLAG_WITH_COUNTERS = 1 << 3 IPSET_FLAG_WITH_COMMENT = 1 << 4 IPSET_FLAG_WITH_FORCEADD = 1 << 5 IPSET_FLAG_WITH_SKBINFO = 1 << 6 IPSET_FLAG_IFACE_WILDCARD = 1 << 7 IPSET_ERR_PROTOCOL = 4097 IPSET_ERR_FIND_TYPE = 4098 IPSET_ERR_MAX_SETS = 4099 IPSET_ERR_BUSY = 4100 IPSET_ERR_EXIST_SETNAME2 = 4101 IPSET_ERR_TYPE_MISMATCH = 4102 IPSET_ERR_EXIST = 4103 IPSET_ERR_INVALID_CIDR = 4104 IPSET_ERR_INVALID_NETMASK = 4105 IPSET_ERR_INVALID_FAMILY = 4106 IPSET_ERR_TIMEOUT = 4107 IPSET_ERR_REFERENCED = 4108 IPSET_ERR_IPADDR_IPV4 = 4109 IPSET_ERR_IPADDR_IPV6 = 4110 IPSET_ERR_COUNTER = 4111 IPSET_ERR_COMMENT = 4112 IPSET_ERR_INVALID_MARKMASK = 4113 IPSET_ERR_SKBINFO = 4114 IPSET_ERR_TYPE_SPECIFIC = 4352 class ipset_base(nla): class ipset_ip(nla): nla_flags = NLA_F_NESTED nla_map = ( ('IPSET_ATTR_UNSPEC', 'none'), ('IPSET_ATTR_IPADDR_IPV4', 'ip4addr', NLA_F_NET_BYTEORDER), ('IPSET_ATTR_IPADDR_IPV6', 'ip6addr', NLA_F_NET_BYTEORDER), ) class ipset_msg(nfgen_msg): ''' Since the support just begins to be developed, many attrs are still in `hex` format -- just to dump the content. ''' nla_map = ( ('IPSET_ATTR_UNSPEC', 'none'), ('IPSET_ATTR_PROTOCOL', 'uint8'), ('IPSET_ATTR_SETNAME', 'asciiz'), ('IPSET_ATTR_TYPENAME', 'asciiz'), ('IPSET_ATTR_REVISION', 'uint8'), ('IPSET_ATTR_FAMILY', 'uint8'), ('IPSET_ATTR_FLAGS', 'be32'), ('IPSET_ATTR_DATA', 'get_data_type'), ('IPSET_ATTR_ADT', 'attr_adt'), ('IPSET_ATTR_LINENO', 'hex'), ('IPSET_ATTR_PROTOCOL_MIN', 'uint8'), ('IPSET_ATTR_INDEX', 'be16'), ) @staticmethod def get_data_type(self, *args, **kwargs): # create and list commands have specific attributes. 
See linux_ip_set.h # for more information (and/or lib/PROTOCOL of ipset sources) cmd = self['header']['type'] & ~(NFNL_SUBSYS_IPSET << 8) if cmd == IPSET_CMD_CREATE or cmd == IPSET_CMD_LIST: return self.cadt_data return self.ipset_generic.adt_data class ipset_generic(ipset_base): class adt_data(ipset_base): nla_flags = NLA_F_NESTED nla_map = ( (0, 'IPSET_ATTR_UNSPEC', 'none'), (1, 'IPSET_ATTR_IP', 'ipset_ip'), (1, 'IPSET_ATTR_IP_FROM', 'ipset_ip'), (2, 'IPSET_ATTR_IP_TO', 'ipset_ip'), (3, 'IPSET_ATTR_CIDR', 'be8', NLA_F_NET_BYTEORDER), (4, 'IPSET_ATTR_PORT', 'be16', NLA_F_NET_BYTEORDER), (4, 'IPSET_ATTR_PORT_FROM', 'be16', NLA_F_NET_BYTEORDER), (5, 'IPSET_ATTR_PORT_TO', 'be16', NLA_F_NET_BYTEORDER), (6, 'IPSET_ATTR_TIMEOUT', 'be32', NLA_F_NET_BYTEORDER), (7, 'IPSET_ATTR_PROTO', 'be8', NLA_F_NET_BYTEORDER), (8, 'IPSET_ATTR_CADT_FLAGS', 'be32', NLA_F_NET_BYTEORDER), (9, 'IPSET_ATTR_CADT_LINENO', 'be32'), (10, 'IPSET_ATTR_MARK', 'be32', NLA_F_NET_BYTEORDER), (11, 'IPSET_ATTR_MARKMASK', 'be32', NLA_F_NET_BYTEORDER), (17, 'IPSET_ATTR_ETHER', 'l2addr'), (18, 'IPSET_ATTR_NAME', 'asciiz'), (19, 'IPSET_ATTR_NAMEREF', 'be32'), (20, 'IPSET_ATTR_IP2', 'ipset_ip'), (21, 'IPSET_ATTR_CIDR2', 'be8', NLA_F_NET_BYTEORDER), (22, 'IPSET_ATTR_IP2_TO', 'ipset_ip'), (23, 'IPSET_ATTR_IFACE', 'asciiz'), (24, 'IPSET_ATTR_BYTES', 'be64', NLA_F_NET_BYTEORDER), (25, 'IPSET_ATTR_PACKETS', 'be64', NLA_F_NET_BYTEORDER), (26, 'IPSET_ATTR_COMMENT', 'asciiz'), (27, 'IPSET_ATTR_SKBMARK', 'skbmark'), (28, 'IPSET_ATTR_SKBPRIO', 'skbprio'), (29, 'IPSET_ATTR_SKBQUEUE', 'be16', NLA_F_NET_BYTEORDER), ) class skbmark(nla): nla_flags = NLA_F_NET_BYTEORDER fields = [('value', '>II')] class skbprio(nla): nla_flags = NLA_F_NET_BYTEORDER fields = [('value', '>HH')] class cadt_data(ipset_base): nla_flags = NLA_F_NESTED nla_map = ( (0, 'IPSET_ATTR_UNSPEC', 'none'), (1, 'IPSET_ATTR_IP', 'ipset_ip'), (1, 'IPSET_ATTR_IP_FROM', 'ipset_ip'), (2, 'IPSET_ATTR_IP_TO', 'ipset_ip'), (3, 'IPSET_ATTR_CIDR', 'be8', 
NLA_F_NET_BYTEORDER), (4, 'IPSET_ATTR_PORT', 'be16', NLA_F_NET_BYTEORDER), (4, 'IPSET_ATTR_PORT_FROM', 'be16', NLA_F_NET_BYTEORDER), (5, 'IPSET_ATTR_PORT_TO', 'be16', NLA_F_NET_BYTEORDER), (6, 'IPSET_ATTR_TIMEOUT', 'be32', NLA_F_NET_BYTEORDER), (7, 'IPSET_ATTR_PROTO', 'be8', NLA_F_NET_BYTEORDER), (8, 'IPSET_ATTR_CADT_FLAGS', 'be32', NLA_F_NET_BYTEORDER), (9, 'IPSET_ATTR_CADT_LINENO', 'be32'), (10, 'IPSET_ATTR_MARK', 'be32', NLA_F_NET_BYTEORDER), (11, 'IPSET_ATTR_MARKMASK', 'be32', NLA_F_NET_BYTEORDER), (17, 'IPSET_ATTR_INITVAL', 'be32', NLA_F_NET_BYTEORDER), (18, 'IPSET_ATTR_HASHSIZE', 'be32', NLA_F_NET_BYTEORDER), (19, 'IPSET_ATTR_MAXELEM', 'be32', NLA_F_NET_BYTEORDER), (20, 'IPSET_ATTR_NETMASK', 'hex'), (21, 'IPSET_ATTR_BUCKETSIZE', 'uint8'), (22, 'IPSET_ATTR_RESIZE', 'hex'), (23, 'IPSET_ATTR_SIZE', 'be32', NLA_F_NET_BYTEORDER), (24, 'IPSET_ATTR_ELEMENTS', 'be32', NLA_F_NET_BYTEORDER), (25, 'IPSET_ATTR_REFERENCES', 'be32', NLA_F_NET_BYTEORDER), (26, 'IPSET_ATTR_MEMSIZE', 'be32', NLA_F_NET_BYTEORDER), ) class attr_adt(ipset_generic): nla_flags = NLA_F_NESTED nla_map = ((7, 'IPSET_ATTR_DATA', 'adt_data'),) pyroute2-0.7.11/pyroute2/netlink/nfnetlink/nfctsocket.py000066400000000000000000000660731455030217500233510ustar00rootroot00000000000000""" NFCTSocket -- low level connection tracking API See also: pyroute2.conntrack """ import socket from pyroute2.netlink import ( NETLINK_NETFILTER, NLA_F_NESTED, NLM_F_ACK, NLM_F_CREATE, NLM_F_DUMP, NLM_F_EXCL, NLM_F_REQUEST, NLMSG_ERROR, nla, ) from pyroute2.netlink.nfnetlink import NFNL_SUBSYS_CTNETLINK, nfgen_msg from pyroute2.netlink.nlsocket import NetlinkSocket IPCTNL_MSG_CT_NEW = 0 IPCTNL_MSG_CT_GET = 1 IPCTNL_MSG_CT_DELETE = 2 IPCTNL_MSG_CT_GET_CTRZERO = 3 IPCTNL_MSG_CT_GET_STATS_CPU = 4 IPCTNL_MSG_CT_GET_STATS = 5 IPCTNL_MSG_CT_GET_DYING = 6 IPCTNL_MSG_CT_GET_UNCONFIRMED = 7 IPCTNL_MSG_MAX = 8 try: IP_PROTOCOLS = { num: name[8:] for name, num in vars(socket).items() if name.startswith("IPPROTO") } except (IOError, 
OSError): IP_PROTOCOLS = {} # Window scaling is advertised by the sender IP_CT_TCP_FLAG_WINDOW_SCALE = 0x01 # SACK is permitted by the sender IP_CT_TCP_FLAG_SACK_PERM = 0x02 # This sender sent FIN first IP_CT_TCP_FLAG_CLOSE_INIT = 0x04 # Be liberal in window checking IP_CT_TCP_FLAG_BE_LIBERAL = 0x08 # Has unacknowledged data IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED = 0x10 # The field td_maxack has been set IP_CT_TCP_FLAG_MAXACK_SET = 0x20 # From linux/include/net/tcp_states.h TCPF_ESTABLISHED = 1 << 1 TCPF_SYN_SENT = 1 << 2 TCPF_SYN_RECV = 1 << 3 TCPF_FIN_WAIT1 = 1 << 4 TCPF_FIN_WAIT2 = 1 << 5 TCPF_TIME_WAIT = 1 << 6 TCPF_CLOSE = 1 << 7 TCPF_CLOSE_WAIT = 1 << 8 TCPF_LAST_ACK = 1 << 9 TCPF_LISTEN = 1 << 10 TCPF_CLOSING = 1 << 11 TCPF_NEW_SYN_RECV = 1 << 12 TCPF_TO_NAME = { TCPF_ESTABLISHED: 'ESTABLISHED', TCPF_SYN_SENT: 'SYN_SENT', TCPF_SYN_RECV: 'SYN_RECV', TCPF_FIN_WAIT1: 'FIN_WAIT1', TCPF_FIN_WAIT2: 'FIN_WAIT2', TCPF_TIME_WAIT: 'TIME_WAIT', TCPF_CLOSE: 'CLOSE', TCPF_CLOSE_WAIT: 'CLOSE_WAIT', TCPF_LAST_ACK: 'LAST_ACK', TCPF_LISTEN: 'LISTEN', TCPF_CLOSING: 'CLOSING', TCPF_NEW_SYN_RECV: 'NEW_SYN_RECV', } # From include/uapi/linux/netfilter/nf_conntrack_common.h IPS_EXPECTED = 1 << 0 IPS_SEEN_REPLY = 1 << 1 IPS_ASSURED = 1 << 2 IPS_CONFIRMED = 1 << 3 IPS_SRC_NAT = 1 << 4 IPS_DST_NAT = 1 << 5 IPS_NAT_MASK = IPS_DST_NAT | IPS_SRC_NAT IPS_SEQ_ADJUST = 1 << 6 IPS_SRC_NAT_DONE = 1 << 7 IPS_DST_NAT_DONE = 1 << 8 IPS_NAT_DONE_MASK = IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE IPS_DYING = 1 << 9 IPS_FIXED_TIMEOUT = 1 << 10 IPS_TEMPLATE = 1 << 11 IPS_UNTRACKED = 1 << 12 IPS_HELPER = 1 << 13 IPS_OFFLOAD = 1 << 14 IPS_UNCHANGEABLE_MASK = ( IPS_NAT_DONE_MASK | IPS_NAT_MASK | IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD ) IPSBIT_TO_NAME = { IPS_EXPECTED: 'EXPECTED', IPS_SEEN_REPLY: 'SEEN_REPLY', IPS_ASSURED: 'ASSURED', IPS_CONFIRMED: 'CONFIRMED', IPS_SRC_NAT: 'SRC_NAT', IPS_DST_NAT: 'DST_NAT', IPS_SEQ_ADJUST: 'SEQ_ADJUST', IPS_SRC_NAT_DONE: 
'SRC_NAT_DONE', IPS_DST_NAT_DONE: 'DST_NAT_DONE', IPS_DYING: 'DYING', IPS_FIXED_TIMEOUT: 'FIXED_TIMEOUT', IPS_TEMPLATE: 'TEMPLATE', IPS_UNTRACKED: 'UNTRACKED', IPS_HELPER: 'HELPER', IPS_OFFLOAD: 'OFFLOAD', } # From include/uapi/linux/netfilter/nf_conntrack_tcp.h IP_CT_TCP_FLAG_WINDOW_SCALE = 0x01 IP_CT_TCP_FLAG_SACK_PERM = 0x02 IP_CT_TCP_FLAG_CLOSE_INIT = 0x04 IP_CT_TCP_FLAG_BE_LIBERAL = 0x08 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED = 0x10 IP_CT_TCP_FLAG_MAXACK_SET = 0x20 IP_CT_EXP_CHALLENGE_ACK = 0x40 IP_CT_TCP_SIMULTANEOUS_OPEN = 0x80 IP_CT_TCP_FLAG_TO_NAME = { IP_CT_TCP_FLAG_WINDOW_SCALE: 'WINDOW_SCALE', IP_CT_TCP_FLAG_SACK_PERM: 'SACK_PERM', IP_CT_TCP_FLAG_CLOSE_INIT: 'CLOSE_INIT', IP_CT_TCP_FLAG_BE_LIBERAL: 'BE_LIBERAL', IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED: 'DATA_UNACKNOWLEDGED', IP_CT_TCP_FLAG_MAXACK_SET: 'MAXACK_SET', IP_CT_EXP_CHALLENGE_ACK: 'CHALLENGE_ACK', IP_CT_TCP_SIMULTANEOUS_OPEN: 'SIMULTANEOUS_OPEN', } # From linux/include/uapi/linux/netfilter/nf_conntrack_tcp.h TCP_CONNTRACK_SYN_SENT = 1 TCP_CONNTRACK_SYN_RECV = 2 TCP_CONNTRACK_ESTABLISHED = 3 TCP_CONNTRACK_FIN_WAIT = 4 TCP_CONNTRACK_CLOSE_WAIT = 5 TCP_CONNTRACK_LAST_ACK = 6 TCP_CONNTRACK_TIME_WAIT = 7 TCP_CONNTRACK_CLOSE = 8 TCP_CONNTRACK_LISTEN = 9 TCP_CONNTRACK_MAX = 10 TCP_CONNTRACK_IGNORE = 11 TCP_CONNTRACK_RETRANS = 12 TCP_CONNTRACK_UNACK = 13 TCP_CONNTRACK_TIMEOUT_MAX = 14 TCP_CONNTRACK_TO_NAME = { TCP_CONNTRACK_SYN_SENT: "SYN_SENT", TCP_CONNTRACK_SYN_RECV: "SYN_RECV", TCP_CONNTRACK_ESTABLISHED: "ESTABLISHED", TCP_CONNTRACK_FIN_WAIT: "FIN_WAIT", TCP_CONNTRACK_CLOSE_WAIT: "CLOSE_WAIT", TCP_CONNTRACK_LAST_ACK: "LAST_ACK", TCP_CONNTRACK_TIME_WAIT: "TIME_WAIT", TCP_CONNTRACK_CLOSE: "CLOSE", TCP_CONNTRACK_LISTEN: "LISTEN", TCP_CONNTRACK_MAX: "MAX", TCP_CONNTRACK_IGNORE: "IGNORE", TCP_CONNTRACK_RETRANS: "RETRANS", TCP_CONNTRACK_UNACK: "UNACK", TCP_CONNTRACK_TIMEOUT_MAX: "TIMEOUT_MAX", } def terminate_single_msg(msg): return msg def terminate_error_msg(msg): return msg['header']['type'] == NLMSG_ERROR 
class nfct_stats(nfgen_msg):
    # CTA_STATS_GLOBAL_* attributes: global conntrack table statistics.
    nla_map = (
        ('CTA_STATS_GLOBAL_UNSPEC', 'none'),
        ('CTA_STATS_GLOBAL_ENTRIES', 'be32'),
        ('CTA_STATS_GLOBAL_MAX_ENTRIES', 'be32'),
    )


class nfct_stats_cpu(nfgen_msg):
    # CTA_STATS_* attributes: per-CPU conntrack statistics counters.
    nla_map = (
        ('CTA_STATS_UNSPEC', 'none'),
        ('CTA_STATS_SEARCHED', 'be32'),
        ('CTA_STATS_FOUND', 'be32'),
        ('CTA_STATS_NEW', 'be32'),
        ('CTA_STATS_INVALID', 'be32'),
        ('CTA_STATS_IGNORE', 'be32'),
        ('CTA_STATS_DELETE', 'be32'),
        ('CTA_STATS_DELETE_LIST', 'be32'),
        ('CTA_STATS_INSERT', 'be32'),
        ('CTA_STATS_INSERT_FAILED', 'be32'),
        ('CTA_STATS_DROP', 'be32'),
        ('CTA_STATS_EARLY_DROP', 'be32'),
        ('CTA_STATS_ERROR', 'be32'),
        ('CTA_STATS_SEARCH_RESTART', 'be32'),
    )


class nfct_msg(nfgen_msg):
    """
    Conntrack entry message (ctnetlink), with the nested NLA classes
    describing the CTA_* attribute tree.
    """

    prefix = 'CTA_'
    nla_map = (
        ('CTA_UNSPEC', 'none'),
        ('CTA_TUPLE_ORIG', 'cta_tuple'),
        ('CTA_TUPLE_REPLY', 'cta_tuple'),
        ('CTA_STATUS', 'be32'),
        ('CTA_PROTOINFO', 'cta_protoinfo'),
        ('CTA_HELP', 'asciiz'),
        ('CTA_NAT_SRC', 'cta_nat'),
        ('CTA_TIMEOUT', 'be32'),
        ('CTA_MARK', 'be32'),
        ('CTA_COUNTERS_ORIG', 'cta_counters'),
        ('CTA_COUNTERS_REPLY', 'cta_counters'),
        ('CTA_USE', 'be32'),
        ('CTA_ID', 'be32'),
        ('CTA_NAT_DST', 'cta_nat'),
        ('CTA_TUPLE_MASTER', 'cta_tuple'),
        ('CTA_SEQ_ADJ_ORIG', 'cta_nat_seq_adj'),
        ('CTA_SEQ_ADJ_REPLY', 'cta_nat_seq_adj'),
        ('CTA_SECMARK', 'be32'),
        ('CTA_ZONE', 'be16'),
        ('CTA_SECCTX', 'cta_secctx'),
        ('CTA_TIMESTAMP', 'cta_timestamp'),
        ('CTA_MARK_MASK', 'be32'),
        ('CTA_LABELS', 'cta_labels'),
        ('CTA_LABELS_MASK', 'cta_labels'),
        ('CTA_SYNPROXY', 'cta_synproxy'),
        ('CTA_FILTER', 'cta_filter'),
    )

    @classmethod
    def create_from(cls, **kwargs):
        # Build a message from keyword arguments; NFCTAttr values are
        # expanded via their attrs() method, None values are skipped.
        self = cls()
        for key, value in kwargs.items():
            if isinstance(value, NFCTAttr):
                value = {'attrs': value.attrs()}
            if value is not None:
                self['attrs'].append([self.name2nla(key), value])
        return self

    class cta_tuple(nla):
        nla_map = (
            ('CTA_TUPLE_UNSPEC', 'none'),
            ('CTA_TUPLE_IP', 'cta_ip'),
            ('CTA_TUPLE_PROTO', 'cta_proto'),
        )

        class cta_ip(nla):
            nla_map = (
                ('CTA_IP_UNSPEC', 'none'),
                ('CTA_IP_V4_SRC', 'ip4addr'),
                ('CTA_IP_V4_DST', 'ip4addr'),
                ('CTA_IP_V6_SRC', 'ip6addr'),
                ('CTA_IP_V6_DST', 'ip6addr'),
            )

        class cta_proto(nla):
            nla_map = (
                ('CTA_PROTO_UNSPEC', 'none'),
                ('CTA_PROTO_NUM', 'uint8'),
                ('CTA_PROTO_SRC_PORT', 'be16'),
                ('CTA_PROTO_DST_PORT', 'be16'),
                ('CTA_PROTO_ICMP_ID', 'be16'),
                ('CTA_PROTO_ICMP_TYPE', 'uint8'),
                ('CTA_PROTO_ICMP_CODE', 'uint8'),
                ('CTA_PROTO_ICMPV6_ID', 'be16'),
                ('CTA_PROTO_ICMPV6_TYPE', 'uint8'),
                ('CTA_PROTO_ICMPV6_CODE', 'uint8'),
            )

    class cta_protoinfo(nla):
        nla_map = (
            ('CTA_PROTOINFO_UNSPEC', 'none'),
            ('CTA_PROTOINFO_TCP', 'cta_protoinfo_tcp'),
            ('CTA_PROTOINFO_DCCP', 'cta_protoinfo_dccp'),
            ('CTA_PROTOINFO_SCTP', 'cta_protoinfo_sctp'),
        )

        class cta_protoinfo_tcp(nla):
            nla_map = (
                ('CTA_PROTOINFO_TCP_UNSPEC', 'none'),
                ('CTA_PROTOINFO_TCP_STATE', 'uint8'),
                ('CTA_PROTOINFO_TCP_WSCALE_ORIGINAL', 'uint8'),
                ('CTA_PROTOINFO_TCP_WSCALE_REPLY', 'uint8'),
                ('CTA_PROTOINFO_TCP_FLAGS_ORIGINAL', 'cta_tcp_flags'),
                ('CTA_PROTOINFO_TCP_FLAGS_REPLY', 'cta_tcp_flags'),
            )

            class cta_tcp_flags(nla):
                # Two bytes on the wire: flags value and flags mask.
                fields = [('value', 'BB')]

        class cta_protoinfo_dccp(nla):
            nla_map = (
                ('CTA_PROTOINFO_DCCP_UNSPEC', 'none'),
                ('CTA_PROTOINFO_DCCP_STATE', 'uint8'),
                ('CTA_PROTOINFO_DCCP_ROLE', 'uint8'),
                ('CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ', 'be64'),
            )

        class cta_protoinfo_sctp(nla):
            nla_map = (
                ('CTA_PROTOINFO_SCTP_UNSPEC', 'none'),
                ('CTA_PROTOINFO_SCTP_STATE', 'uint8'),
                ('CTA_PROTOINFO_SCTP_VTAG_ORIGINAL', 'be32'),
                ('CTA_PROTOINFO_SCTP_VTAG_REPLY', 'be32'),
            )

    class cta_nat(nla):
        nla_map = (
            ('CTA_NAT_UNSPEC', 'none'),
            ('CTA_NAT_V4_MINIP', 'ip4addr'),
            ('CTA_NAT_V4_MAXIP', 'ip4addr'),
            ('CTA_NAT_PROTO', 'cta_protonat'),
            ('CTA_NAT_V6_MINIP', 'ip6addr'),
            ('CTA_NAT_V6_MAXIP', 'ip6addr'),
        )

        class cta_protonat(nla):
            nla_map = (
                ('CTA_PROTONAT_UNSPEC', 'none'),
                ('CTA_PROTONAT_PORT_MIN', 'be16'),
                ('CTA_PROTONAT_PORT_MAX', 'be16'),
            )

    class cta_nat_seq_adj(nla):
        nla_map = (
            ('CTA_NAT_SEQ_UNSPEC', 'none'),
            ('CTA_NAT_SEQ_CORRECTION_POS', 'be32'),
            ('CTA_NAT_SEQ_OFFSET_BEFORE', 'be32'),
            ('CTA_NAT_SEQ_OFFSET_AFTER', 'be32'),
        )

    class cta_counters(nla):
        nla_map = (
            ('CTA_COUNTERS_UNSPEC', 'none'),
            ('CTA_COUNTERS_PACKETS', 'be64'),
            ('CTA_COUNTERS_BYTES', 'be64'),
            ('CTA_COUNTERS32_PACKETS', 'be32'),
            ('CTA_COUNTERS32_BYTES', 'be32'),
        )

    class cta_secctx(nla):
        nla_map = (
            ('CTA_SECCTX_UNSPEC', 'none'),
            ('CTA_SECCTX_NAME', 'asciiz'),
        )

    class cta_timestamp(nla):
        nla_map = (
            ('CTA_TIMESTAMP_UNSPEC', 'none'),
            ('CTA_TIMESTAMP_START', 'be64'),
            ('CTA_TIMESTAMP_STOP', 'be64'),
        )

    class cta_filter(nla):
        nla_flags = NLA_F_NESTED
        nla_map = (
            ('CTA_FILTER_UNSPEC', 'none'),
            ('CTA_FILTER_ORIG_FLAGS', 'uint32'),
            ('CTA_FILTER_REPLY_FLAGS', 'uint32'),
        )

    class cta_labels(nla):
        # 128-bit label bitmap carried as two 64-bit words; the Python
        # side uses a single (possibly > 64-bit) integer.
        fields = [('value', 'QQ')]

        def encode(self):
            # Split a plain int into (low 64 bits, high 64 bits).
            if not isinstance(self['value'], tuple):
                self['value'] = (
                    self['value'] & 0xFFFFFFFFFFFFFFFF,
                    self['value'] >> 64,
                )
            nla.encode(self)

        def decode(self):
            # Recombine the two 64-bit words into one integer.
            nla.decode(self)
            if isinstance(self['value'], tuple):
                self['value'] = (self['value'][0] & 0xFFFFFFFFFFFFFFFF) | (
                    self['value'][1] << 64
                )

    class cta_synproxy(nla):
        nla_map = (
            ('CTA_SYNPROXY_UNSPEC', 'none'),
            ('CTA_SYNPROXY_ISN', 'be32'),
            ('CTA_SYNPROXY_ITS', 'be32'),
            ('CTA_SYNPROXY_TSOFF', 'be32'),
        )


# CTA_FILTER_* flag bits selecting which tuple attributes the kernel
# should use for filtering a dump.
FILTER_FLAG_CTA_IP_SRC = 1 << 0
FILTER_FLAG_CTA_IP_DST = 1 << 1
FILTER_FLAG_CTA_TUPLE_ZONE = 1 << 2
FILTER_FLAG_CTA_PROTO_NUM = 1 << 3
FILTER_FLAG_CTA_PROTO_SRC_PORT = 1 << 4
FILTER_FLAG_CTA_PROTO_DST_PORT = 1 << 5
FILTER_FLAG_CTA_PROTO_ICMP_TYPE = 1 << 6
FILTER_FLAG_CTA_PROTO_ICMP_CODE = 1 << 7
FILTER_FLAG_CTA_PROTO_ICMP_ID = 1 << 8
FILTER_FLAG_CTA_PROTO_ICMPV6_TYPE = 1 << 9
FILTER_FLAG_CTA_PROTO_ICMPV6_CODE = 1 << 10
FILTER_FLAG_CTA_PROTO_ICMPV6_ID = 1 << 11

FILTER_FLAG_ALL_CTA_PROTO = (
    FILTER_FLAG_CTA_PROTO_SRC_PORT
    | FILTER_FLAG_CTA_PROTO_DST_PORT
    | FILTER_FLAG_CTA_PROTO_ICMP_TYPE
    | FILTER_FLAG_CTA_PROTO_ICMP_CODE
    | FILTER_FLAG_CTA_PROTO_ICMP_ID
    | FILTER_FLAG_CTA_PROTO_ICMPV6_TYPE
    | FILTER_FLAG_CTA_PROTO_ICMPV6_CODE
    | FILTER_FLAG_CTA_PROTO_ICMPV6_ID
)
FILTER_FLAG_ALL = 0xFFFFFFFF


class NFCTAttr(object):
    # Base class for python-side conntrack attribute containers; attrs()
    # returns the list of NLA [name, value] pairs to encode.
    def attrs(self):
        return []


class NFCTAttrTuple(NFCTAttr):
    """
    A conntrack tuple (addresses, protocol, ports / ICMP fields) used
    both to build CTA_TUPLE_* attributes and to match decoded messages.
    """

    # NOTE(review): 'icmp_code' and 'flags' are assigned on instances but
    # are not listed in __slots__; this only works because the NFCTAttr
    # base class has no __slots__, so instances still get a __dict__ —
    # confirm whether the omission is intentional.
    __slots__ = (
        'saddr',
        'daddr',
        'proto',
        'sport',
        'dport',
        'icmp_id',
        'icmp_type',
        'family',
        '_attr_ip',
        '_attr_icmp',
    )

    def __init__(
        self,
        family=socket.AF_INET,
        saddr=None,
        daddr=None,
        proto=None,
        sport=None,
        dport=None,
        icmp_id=None,
        icmp_type=None,
        icmp_code=None,
    ):
        self.saddr = saddr
        self.daddr = daddr
        self.proto = proto
        self.sport = sport
        self.dport = dport
        self.icmp_id = icmp_id
        self.icmp_type = icmp_type
        self.icmp_code = icmp_code
        self.family = family
        # Select the attribute-name prefixes matching the address family;
        # raises KeyError for families other than AF_INET/AF_INET6.
        self._attr_ip, self._attr_icmp = {
            socket.AF_INET: ['CTA_IP_V4', 'CTA_PROTO_ICMP'],
            socket.AF_INET6: ['CTA_IP_V6', 'CTA_PROTO_ICMPV6'],
        }[self.family]

    def proto_name(self):
        # Protocol number -> name, or None if unknown.
        return IP_PROTOCOLS.get(self.proto, None)

    def reverse(self):
        # Return the same tuple with source and destination swapped
        # (addresses and ports; ICMP fields are kept as-is).
        return NFCTAttrTuple(
            family=self.family,
            saddr=self.daddr,
            daddr=self.saddr,
            proto=self.proto,
            sport=self.dport,
            dport=self.sport,
            icmp_id=self.icmp_id,
            icmp_type=self.icmp_type,
            icmp_code=self.icmp_code,
        )

    def attrs(self):
        # Build the CTA_TUPLE_* attribute list from the non-None fields;
        # as a side effect, self.flags accumulates the FILTER_FLAG_* bits
        # for the fields actually present (used by NFCTSocket.dump()).
        cta_ip = []
        cta_proto = []
        cta_tuple = []
        self.flags = 0
        if self.saddr is not None:
            cta_ip.append([self._attr_ip + '_SRC', self.saddr])
            self.flags |= FILTER_FLAG_CTA_IP_SRC
        if self.daddr is not None:
            cta_ip.append([self._attr_ip + '_DST', self.daddr])
            self.flags |= FILTER_FLAG_CTA_IP_DST
        if self.proto is not None:
            cta_proto.append(['CTA_PROTO_NUM', self.proto])
            self.flags |= FILTER_FLAG_CTA_PROTO_NUM
        if self.sport is not None:
            cta_proto.append(['CTA_PROTO_SRC_PORT', self.sport])
            self.flags |= FILTER_FLAG_CTA_PROTO_SRC_PORT
        if self.dport is not None:
            cta_proto.append(['CTA_PROTO_DST_PORT', self.dport])
            self.flags |= FILTER_FLAG_CTA_PROTO_DST_PORT
        if self.icmp_id is not None:
            cta_proto.append([self._attr_icmp + '_ID', self.icmp_id])
        if self.icmp_type is not None:
            cta_proto.append([self._attr_icmp + '_TYPE', self.icmp_type])
        if self.icmp_code is not None:
            cta_proto.append([self._attr_icmp + '_CODE', self.icmp_code])
        if cta_ip:
            cta_tuple.append(['CTA_TUPLE_IP', {'attrs': cta_ip}])
        if cta_proto:
            cta_tuple.append(['CTA_TUPLE_PROTO', {'attrs': cta_proto}])
        return cta_tuple

    @classmethod
    def from_netlink(cls, family, ndmsg):
        # Build an NFCTAttrTuple from a decoded CTA_TUPLE_* NLA tree.
        cta_ip = ndmsg.get_attr('CTA_TUPLE_IP')
        cta_proto = ndmsg.get_attr('CTA_TUPLE_PROTO')
        kwargs = {'family': family}
        if family == socket.AF_INET:
            kwargs['saddr'] = cta_ip.get_attr('CTA_IP_V4_SRC')
            kwargs['daddr'] = cta_ip.get_attr('CTA_IP_V4_DST')
        elif family == socket.AF_INET6:
            kwargs['saddr'] = cta_ip.get_attr('CTA_IP_V6_SRC')
            kwargs['daddr'] = cta_ip.get_attr('CTA_IP_V6_DST')
        else:
            raise NotImplementedError(family)
        proto = cta_proto.get_attr('CTA_PROTO_NUM')
        kwargs['proto'] = proto
        if proto == socket.IPPROTO_ICMP:
            kwargs['icmp_id'] = cta_proto.get_attr('CTA_PROTO_ICMP_ID')
            kwargs['icmp_type'] = cta_proto.get_attr('CTA_PROTO_ICMP_TYPE')
            kwargs['icmp_code'] = cta_proto.get_attr('CTA_PROTO_ICMP_CODE')
        elif proto == socket.IPPROTO_ICMPV6:
            kwargs['icmp_id'] = cta_proto.get_attr('CTA_PROTO_ICMPV6_ID')
            kwargs['icmp_type'] = cta_proto.get_attr('CTA_PROTO_ICMPV6_TYPE')
            kwargs['icmp_code'] = cta_proto.get_attr('CTA_PROTO_ICMPV6_CODE')
        elif proto in (socket.IPPROTO_TCP, socket.IPPROTO_UDP):
            kwargs['sport'] = cta_proto.get_attr('CTA_PROTO_SRC_PORT')
            kwargs['dport'] = cta_proto.get_attr('CTA_PROTO_DST_PORT')
        return cls(**kwargs)

    def is_attr_match(self, other, attrname):
        # A local attribute set to None acts as a wildcard.
        l_attr = getattr(self, attrname)
        if l_attr is not None:
            r_attr = getattr(other, attrname)
            if l_attr != r_attr:
                return False
        return True

    def nla_eq(self, family, ndmsg):
        # Compare this tuple against a decoded CTA_TUPLE_* NLA tree;
        # None fields are wildcards, like in is_attr_match().
        if self.family != family:
            return False

        test_attr = []
        cta_ip = ndmsg.get_attr('CTA_TUPLE_IP')
        if family == socket.AF_INET:
            test_attr.append((self.saddr, cta_ip, 'CTA_IP_V4_SRC'))
            test_attr.append((self.daddr, cta_ip, 'CTA_IP_V4_DST'))
        elif family == socket.AF_INET6:
            test_attr.append((self.saddr, cta_ip, 'CTA_IP_V6_SRC'))
            test_attr.append((self.daddr, cta_ip, 'CTA_IP_V6_DST'))
        else:
            raise NotImplementedError(family)

        if self.proto is not None:
            cta_proto = ndmsg.get_attr('CTA_TUPLE_PROTO')
            if self.proto != cta_proto.get_attr('CTA_PROTO_NUM'):
                return False

            if self.proto == socket.IPPROTO_ICMP:
                (
                    test_attr.append(
                        (self.icmp_id, cta_proto, 'CTA_PROTO_ICMP_ID')
                    )
                )
                (
                    test_attr.append(
                        (self.icmp_type, cta_proto, 'CTA_PROTO_ICMP_TYPE')
                    )
                )
                (
                    test_attr.append(
                        (self.icmp_code, cta_proto, 'CTA_PROTO_ICMP_CODE')
                    )
                )
            elif self.proto == socket.IPPROTO_ICMPV6:
                (
                    test_attr.append(
                        (self.icmp_id, cta_proto, 'CTA_PROTO_ICMPV6_ID')
                    )
                )
                (
                    test_attr.append(
                        (self.icmp_type, cta_proto, 'CTA_PROTO_ICMPV6_TYPE')
                    )
                )
                (
                    test_attr.append(
                        (self.icmp_code, cta_proto, 'CTA_PROTO_ICMPV6_CODE')
                    )
                )
            elif self.proto in (socket.IPPROTO_TCP, socket.IPPROTO_UDP):
                (
                    test_attr.append(
                        (self.sport, cta_proto, 'CTA_PROTO_SRC_PORT')
                    )
                )
                (
                    test_attr.append(
                        (self.dport, cta_proto, 'CTA_PROTO_DST_PORT')
                    )
                )

        for val, ndmsg, attrname in test_attr:
            if val is not None and val != ndmsg.get_attr(attrname):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        # Wildcard-aware equality: None fields on self match anything.
        if not isinstance(other, self.__class__):
            raise NotImplementedError()
        if self.family != other.family:
            return False
        for attrname in ('saddr', 'daddr'):
            if not self.is_attr_match(other, attrname):
                return False
        if self.proto is not None:
            if self.proto != other.proto:
                return False
            if self.proto in (socket.IPPROTO_UDP, socket.IPPROTO_TCP):
                for attrname in ('sport', 'dport'):
                    if not self.is_attr_match(other, attrname):
                        return False
            elif self.proto in (socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6):
                for attrname in ('icmp_id', 'icmp_type', 'icmp_code'):
                    if not self.is_attr_match(other, attrname):
                        return False
        return True

    def __repr__(self):
        proto_name = self.proto_name()
        if proto_name is None:
            proto_name = 'UNKNOWN'
        if self.family == socket.AF_INET:
            r = 'IPv4('
        elif self.family == socket.AF_INET6:
            r = 'IPv6('
        else:
            # NOTE(review): 'Unkown' is a typo in the output string
            # ('Unknown'); left as-is here, fix would change repr output.
            r = 'Unkown[family={}]('.format(self.family)
        r += 'saddr={}, daddr={}, '.format(self.saddr, self.daddr)
        r += '{}('.format(proto_name)
        if self.proto in (socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6):
            r += 'id={}, type={}, code={}'.format(
                self.icmp_id, self.icmp_type, self.icmp_code
            )
        elif self.proto in (socket.IPPROTO_TCP, socket.IPPROTO_UDP):
            r += 'sport={}, dport={}'.format(self.sport, self.dport)
        return r + '))'


class NFCTSocket(NetlinkSocket):
    """
    Low level conntrack (ctnetlink) socket: dump, query, create, update
    and delete conntrack entries, plus statistics.
    """

    # Map subsystem-qualified message types to their parser classes.
    policy = {
        k | (NFNL_SUBSYS_CTNETLINK << 8): v
        for k, v in {
            IPCTNL_MSG_CT_NEW: nfct_msg,
            IPCTNL_MSG_CT_GET: nfct_msg,
            IPCTNL_MSG_CT_DELETE: nfct_msg,
            IPCTNL_MSG_CT_GET_CTRZERO: nfct_msg,
            IPCTNL_MSG_CT_GET_STATS_CPU: nfct_stats_cpu,
            IPCTNL_MSG_CT_GET_STATS: nfct_stats,
            IPCTNL_MSG_CT_GET_DYING: nfct_msg,
            IPCTNL_MSG_CT_GET_UNCONFIRMED: nfct_msg,
        }.items()
    }

    def __init__(self, nfgen_family=socket.AF_INET, **kwargs):
        super(NFCTSocket, self).__init__(family=NETLINK_NETFILTER, **kwargs)
        self.register_policy(self.policy)
        self._nfgen_family = nfgen_family

    def request(self, msg, msg_type, **kwargs):
        # Stamp the family and subsystem id, run the request, and return
        # all received messages as a tuple.
        msg['nfgen_family'] = self._nfgen_family
        msg_type |= NFNL_SUBSYS_CTNETLINK << 8
        return tuple(self.nlm_request(msg, msg_type, **kwargs))

    def dump(
        self,
        mark=None,
        mark_mask=0xFFFFFFFF,
        tuple_orig=None,
        tuple_reply=None,
    ):
        """Dump conntrack entries

        Several kernel side filtering are supported:
        * mark and mark_mask, for almost all kernel
        * tuple_orig and tuple_reply, since kernel 5.8 and newer.
          Warning: tuple_reply has a bug in kernel, fixed only recently.

        tuple_orig and tuple_reply are type NFCTAttrTuple.
        You can give only some attribute for filtering.

        Example::

            # Get only connections from 192.168.1.1
            filter = NFCTAttrTuple(saddr='192.168.1.1')
            ct.dump_entries(tuple_orig=filter)

            # Get HTTPS connections
            filter = NFCTAttrTuple(proto=socket.IPPROTO_TCP, dport=443)
            ct.dump_entries(tuple_orig=filter)

        Note that NFCTAttrTuple attributes are working like
        one AND operator.

        Example::

            # Get connections from 192.168.1.1 AND on port 443
            TCP = socket.IPPROTO_TCP
            filter = NFCTAttrTuple(saddr='192.168.1.1', proto=TCP,
                                   dport=443)
            ct.dump_entries(tuple_orig=filter)
        """
        if tuple_orig is not None:
            tuple_orig.attrs()  # for creating flags
            cta_filter = {
                'attrs': [['CTA_FILTER_ORIG_FLAGS', tuple_orig.flags]]
            }
            msg = nfct_msg.create_from(
                tuple_orig=tuple_orig, cta_filter=cta_filter
            )
        elif tuple_reply is not None:
            tuple_reply.attrs()
            cta_filter = {
                'attrs': [['CTA_FILTER_REPLY_FLAGS', tuple_reply.flags]]
            }
            msg = nfct_msg.create_from(
                tuple_reply=tuple_reply, cta_filter=cta_filter
            )
        elif mark:
            msg = nfct_msg.create_from(mark=mark, mark_mask=mark_mask)
        else:
            msg = nfct_msg.create_from()

        return self.request(
            msg, IPCTNL_MSG_CT_GET, msg_flags=NLM_F_REQUEST | NLM_F_DUMP
        )

    def stat(self):
        # Per-CPU conntrack statistics (one message per CPU).
        return self.request(
            nfct_msg(),
            IPCTNL_MSG_CT_GET_STATS_CPU,
            msg_flags=NLM_F_REQUEST | NLM_F_DUMP,
        )

    def count(self):
        # Global conntrack table counters.
        # NOTE(review): identical to conntrack_max_size() below — verify
        # whether one of them was meant to use a different message type.
        return self.request(
            nfct_msg(),
            IPCTNL_MSG_CT_GET_STATS,
            msg_flags=NLM_F_REQUEST | NLM_F_DUMP,
            terminate=terminate_single_msg,
        )

    def flush(self, mark=None, mark_mask=None):
        # Delete all entries, optionally restricted by mark/mark_mask.
        msg = nfct_msg.create_from(mark=mark, mark_mask=mark_mask)
        return self.request(
            msg,
            IPCTNL_MSG_CT_DELETE,
            msg_flags=NLM_F_REQUEST | NLM_F_ACK,
            terminate=terminate_error_msg,
        )

    def conntrack_max_size(self):
        return self.request(
            nfct_msg(),
            IPCTNL_MSG_CT_GET_STATS,
            msg_flags=NLM_F_REQUEST | NLM_F_DUMP,
            terminate=terminate_single_msg,
        )

    def entry(self, cmd, **kwargs):
        """
        Get or change a conntrack entry.

        Examples::

            # add an entry
            ct.entry('add', timeout=30,
                     tuple_orig=NFCTAttrTuple(
                         saddr='192.168.122.1', daddr='192.168.122.67',
                         proto=6, sport=34857, dport=5599),
                     tuple_reply=NFCTAttrTuple(
                         saddr='192.168.122.67', daddr='192.168.122.1',
                         proto=6, sport=5599, dport=34857))

            # set mark=5 on the matching entry
            ct.entry('set', mark=5,
                     tuple_orig=NFCTAttrTuple(
                         saddr='192.168.122.1', daddr='192.168.122.67',
                         proto=6, sport=34857, dport=5599))

            # get an entry
            ct.entry('get',
                     tuple_orig=NFCTAttrTuple(
                         saddr='192.168.122.1', daddr='192.168.122.67',
                         proto=6, sport=34857, dport=5599))

            # delete an entry
            ct.entry('del',
                     tuple_orig=NFCTAttrTuple(
                         saddr='192.168.122.1', daddr='192.168.122.67',
                         proto=6, sport=34857, dport=5599))
        """
        msg_type, msg_flags = {
            'add': [IPCTNL_MSG_CT_NEW, NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE],
            'set': [IPCTNL_MSG_CT_NEW, NLM_F_ACK],
            'get': [IPCTNL_MSG_CT_GET, NLM_F_ACK],
            'del': [IPCTNL_MSG_CT_DELETE, NLM_F_ACK],
        }[cmd]

        if msg_type == IPCTNL_MSG_CT_DELETE and not (
            'tuple_orig' in kwargs or 'tuple_reply' in kwargs
        ):
            raise ValueError('Deletion requires a tuple at least')

        return self.request(
            nfct_msg.create_from(**kwargs),
            msg_type,
            msg_flags=NLM_F_REQUEST | msg_flags,
            terminate=terminate_error_msg,
        )
pyroute2-0.7.11/pyroute2/netlink/nfnetlink/nftsocket.py000066400000000000000000001241631455030217500232010ustar00rootroot00000000000000"""
NFTSocket -- low level nftables API

See also: pyroute2.nftables
"""
import struct
import threading

from pyroute2.netlink import (
    NETLINK_NETFILTER,
    NLM_F_ACK,
    NLM_F_APPEND,
    NLM_F_CREATE,
    NLM_F_DUMP,
    NLM_F_EXCL,
    NLM_F_REPLACE,
    NLM_F_REQUEST,
    nla,
    nla_base_string,
    nlmsg_atoms,
)
from pyroute2.netlink.nfnetlink import NFNL_SUBSYS_NFTABLES, nfgen_msg
from pyroute2.netlink.nlsocket import NetlinkSocket

# nf_tables message types, from include/uapi/linux/netfilter/nf_tables.h
NFT_MSG_NEWTABLE = 0
NFT_MSG_GETTABLE = 1
NFT_MSG_DELTABLE = 2
NFT_MSG_NEWCHAIN = 3
NFT_MSG_GETCHAIN = 4
NFT_MSG_DELCHAIN = 5
NFT_MSG_NEWRULE = 6
NFT_MSG_GETRULE = 7
NFT_MSG_DELRULE = 8
NFT_MSG_NEWSET = 9
NFT_MSG_GETSET = 10
NFT_MSG_DELSET = 11
NFT_MSG_NEWSETELEM = 12
NFT_MSG_GETSETELEM = 13
NFT_MSG_DELSETELEM = 14
NFT_MSG_NEWGEN = 15
NFT_MSG_GETGEN = 16
NFT_MSG_TRACE = 17
NFT_MSG_NEWOBJ = 18
NFT_MSG_GETOBJ = 19
NFT_MSG_DELOBJ = 20
NFT_MSG_GETOBJ_RESET = 21
NFT_MSG_NEWFLOWTABLE = 22
NFT_MSG_GETFLOWTABLE = 23
NFT_MSG_DELFLOWTABLE = 24

# from nftables/include/datatype.h
DATA_TYPE_INVALID = 0
DATA_TYPE_VERDICT = 1
DATA_TYPE_NFPROTO = 2
DATA_TYPE_BITMASK = 3
DATA_TYPE_INTEGER = 4
DATA_TYPE_STRING = 5
DATA_TYPE_LLADDR = 6
DATA_TYPE_IPADDR = 7
DATA_TYPE_IP6ADDR = 8
DATA_TYPE_ETHERADDR = 9
DATA_TYPE_ETHERTYPE = 10
DATA_TYPE_ARPOP = 11
DATA_TYPE_INET_PROTOCOL = 12
DATA_TYPE_INET_SERVICE = 13
DATA_TYPE_ICMP_TYPE = 14
DATA_TYPE_TCP_FLAG = 15
DATA_TYPE_DCCP_PKTTYPE = 16
DATA_TYPE_MH_TYPE = 17
DATA_TYPE_TIME = 18
DATA_TYPE_MARK = 19
DATA_TYPE_IFINDEX = 20
DATA_TYPE_ARPHRD = 21
DATA_TYPE_REALM = 22
DATA_TYPE_CLASSID = 23
DATA_TYPE_UID = 24
DATA_TYPE_GID = 25
DATA_TYPE_CT_STATE = 26
DATA_TYPE_CT_DIR = 27
DATA_TYPE_CT_STATUS = 28
DATA_TYPE_ICMP6_TYPE = 29
DATA_TYPE_CT_LABEL = 30
DATA_TYPE_PKTTYPE = 31
DATA_TYPE_ICMP_CODE = 32
DATA_TYPE_ICMPV6_CODE = 33
DATA_TYPE_ICMPX_CODE = 34
DATA_TYPE_DEVGROUP = 35
DATA_TYPE_DSCP = 36
DATA_TYPE_ECN = 37
DATA_TYPE_FIB_ADDR = 38
DATA_TYPE_BOOLEAN = 39
DATA_TYPE_CT_EVENTBIT = 40
DATA_TYPE_IFNAME = 41
DATA_TYPE_IGMP_TYPE = 42
DATA_TYPE_TIME_DATE = 43
DATA_TYPE_TIME_HOUR = 44
DATA_TYPE_TIME_DAY = 45
DATA_TYPE_CGROUPV2 = 46

# from include/uapi/linux/netfilter.h
NFPROTO_INET = 1
NFPROTO_IPV4 = 2
NFPROTO_ARP = 3
NFPROTO_NETDEV = 5
NFPROTO_BRIDGE = 7
NFPROTO_IPV6 = 10


class nftnl_udata(nla_base_string):
    # Userdata blob: a sequence of type/len/value records.
    # TLV structures:
    # nftnl_udata
    #  <-------- HEADER --------> <------ PAYLOAD ------>
    # +------------+-------------+- - - - - - - - - - - -+
    # |    type    |     len     |         value         |
    # |  (1 byte)  |   (1 byte)  |                       |
    # +--------------------------+- - - - - - - - - - - -+
    #  <-- sizeof(nftnl_udata) -> <-- nftnl_udata->len -->
    __slots__ = ()

    @classmethod
    def udata_decode(cls, data):
        # Parse the TLV stream into [(type_name, value), ...];
        # returns None on any malformed record (caller keeps raw bytes).
        offset = 0
        result = []
        while offset + 2 < len(data):
            utype = data[offset]
            ulen = data[offset + 1]
            offset += 2
            if offset + ulen > len(data):
                return None  # bad decode
            try:
                type_name = cls.udata_types[utype]
            except IndexError:
                return None  # bad decode
            value = data[offset : offset + ulen]
            if type_name.endswith("_COMMENT") and value[-1] == 0:
                value = value[:-1]  # remove \x00 c *str
            result.append((type_name, value))
            offset += ulen
        return result

    @classmethod
    def udata_encode(cls, values):
        # Inverse of udata_decode(): serialize [(type_name, value), ...]
        # back into the TLV byte stream; *_COMMENT values get the
        # C-string NUL terminator appended.
        value = b""
        for type_name, udata in values:
            if isinstance(udata, str):
                udata = udata.encode()
            if type_name.endswith("_COMMENT") and udata[-1] != 0:
                udata = udata + b"\x00"
            utype = cls.udata_types.index(type_name)
            value += struct.pack("BB", utype, len(udata)) + udata
        return value

    def decode(self):
        nla_base_string.decode(self)
        value = self.udata_decode(self['value'])
        if value is not None:
            self.value = value

    def encode(self):
        if not isinstance(self.value, (bytes, str)):
            self['value'] = self.udata_encode(self.value)
        nla_base_string.encode(self)


class nft_map_uint8(nla):
    # Base for enum-valued NLAs: decode maps the raw number through
    # the subclass `ops` dict (unknown values decode to None).
    ops = {}
    fields = [('value', 'B')]

    def decode(self):
        nla.decode(self)
        self.value = self.ops.get(self['value'])


class nft_map_be32(nft_map_uint8):
    fields = [('value', '>I')]


class nft_map_be32_signed(nft_map_uint8):
    fields = [('value', '>i')]


class nft_flags_be32(nla):
    # Base for bitmask-valued NLAs: `ops` is an ordered tuple of flag
    # names, bit i <-> ops[i]; value decodes to a frozenset of names.
    fields = [('value', '>I')]
    ops = None

    def decode(self):
        nla.decode(self)
        self.value = frozenset(
            o for i, o in enumerate(self.ops) if self['value'] & 1 << i
        )

    def encode(self):
        value = 0
        for i, name in enumerate(self.ops):
            if name in self.value:
                value |= 1 << i
        self["value"] = value
        nla.encode(self)


class nft_flags_be16(nla):
    # 16-bit variant of nft_flags_be32 (decode only).
    fields = [('value', '>H')]
    ops = None

    def decode(self):
        nla.decode(self)
        self.value = frozenset(
            o for i, o in enumerate(self.ops) if self['value'] & 1 << i
        )


class nft_device(nla):
    # Mixin providing the NFTA_DEVICE_* nested attribute parser.
    class device_attributes(nla):
        nla_map = (
            ('NFTA_DEVICE_UNSPEC', 'none'),
            ('NFTA_DEVICE_NAME', 'asciiz'),
        )


class nft_gen_msg(nfgen_msg):
    nla_map = (
        ('NFTA_GEN_UNSPEC', 'none'),
        ('NFTA_GEN_ID', 'be32'),
        ('NFTA_GEN_PROC_PID', 'be32'),
        ('NFTA_GEN_PROC_NAME', 'asciiz'),
    )


class nft_chain_msg(nfgen_msg):
    prefix = 'NFTA_CHAIN_'
    nla_map = (
        ('NFTA_CHAIN_UNSPEC', 'none'),
        ('NFTA_CHAIN_TABLE', 'asciiz'),
        ('NFTA_CHAIN_HANDLE', 'be64'),
        ('NFTA_CHAIN_NAME', 'asciiz'),
        ('NFTA_CHAIN_HOOK', 'hook'),
        ('NFTA_CHAIN_POLICY', 'be32'),
        ('NFTA_CHAIN_USE', 'be32'),
        ('NFTA_CHAIN_TYPE', 'asciiz'),
        ('NFTA_CHAIN_COUNTERS', 'counters'),
        ('NFTA_CHAIN_PAD', 'hex'),
        ('NFTA_CHAIN_FLAGS', 'flags'),
        ('NFTA_CHAIN_ID', 'be32'),
        ('NFTA_CHAIN_USERDATA', 'hex'),
    )

    class counters(nla):
        nla_map = (
            ('NFTA_COUNTER_UNSPEC', 'none'),
            ('NFTA_COUNTER_BYTES', 'be64'),
            ('NFTA_COUNTER_PACKETS', 'be64'),
        )

    class hook(nft_device):
        nla_map = (
            ('NFTA_HOOK_UNSPEC', 'none'),
            ('NFTA_HOOK_HOOKNUM', 'be32'),
            ('NFTA_HOOK_PRIORITY', 'sbe32'),
            ('NFTA_HOOK_DEV', 'asciiz'),
            ('NFTA_HOOK_DEVS', 'device_attributes'),
        )

    class flags(nft_flags_be32):
        ops = ('NFT_CHAIN_HW_OFFLOAD', 'NFT_CHAIN_BINDING')


class nat_flags(nla):
    # Mixin providing the NF_NAT_RANGE_* flags parser for NAT exprs.
    class nat_range(nft_flags_be32):
        ops = (
            'NF_NAT_RANGE_MAP_IPS',
            'NF_NAT_RANGE_PROTO_SPECIFIED',
            'NF_NAT_RANGE_PROTO_RANDOM',
            'NF_NAT_RANGE_PERSISTENT',
            'NF_NAT_RANGE_PROTO_RANDOM_FULLY',
            'NF_NAT_RANGE_PROTO_OFFSET',
            'NF_NAT_RANGE_NETMAP',
        )


class nft_regs(nla):
    # Mixin providing the NFT_REG* register-number parser.
    class regs(nft_map_be32):
        ops = {
            0x00: 'NFT_REG_VERDICT',
            0x01: 'NFT_REG_1',
            0x02: 'NFT_REG_2',
            0x03: 'NFT_REG_3',
            0x04: 'NFT_REG_4',
            0x08: 'NFT_REG32_00',
            0x09: 'NFT_REG32_01',
            0x0A: 'NFT_REG32_02',
            0x0B: 'NFT_REG32_03',
            0x0C: 'NFT_REG32_04',
            0x0D: 'NFT_REG32_05',
            0x0E: 'NFT_REG32_06',
            0x0F: 'NFT_REG32_07',
            0x10: 'NFT_REG32_08',
            0x11: 'NFT_REG32_09',
            0x12: 'NFT_REG32_10',
            0x13: 'NFT_REG32_11',
            0x14: 'NFT_REG32_12',
            0x15: 'NFT_REG32_13',
            0x16: 'NFT_REG32_14',
            0x17: 'NFT_REG32_15',
        }


class nft_data(nla):
    # Mixin providing the NFTA_DATA_* (immediate data or verdict) parser.
    class nfta_data(nla):
        nla_map = (
            ('NFTA_DATA_UNSPEC', 'none'),
            ('NFTA_DATA_VALUE', 'cdata'),
            ('NFTA_DATA_VERDICT', 'verdict'),
        )

        class verdict(nla):
            nla_map = (
                ('NFTA_VERDICT_UNSPEC', 'none'),
                ('NFTA_VERDICT_CODE', 'verdict_code'),
                ('NFTA_VERDICT_CHAIN', 'asciiz'),
            )

            class verdict_code(nft_map_be32_signed):
                ops = {
                    0: 'NF_DROP',
                    1: 'NF_ACCEPT',
                    2: 'NF_STOLEN',
                    3: 'NF_QUEUE',
                    4: 'NF_REPEAT',
                    5: 'NF_STOP',
                    -1: 'NFT_CONTINUE',
                    -2: 'NFT_BREAK',
                    -3: 'NFT_JUMP',
                    -4: 'NFT_GOTO',
                    -5: 'NFT_RETURN',
                }


# nft_expr struct, used for rules and set
class nft_contains_expr:
    class nft_expr(nla):
        header_type = 1
        nla_map = (
            ('NFTA_EXPR_UNSPEC', 'none'),
            ('NFTA_EXPR_NAME', 'asciiz'),
            ('NFTA_EXPR_DATA', 'expr'),
        )

        class nft_bitwise(nft_data, nft_regs):
            nla_map = (
                ('NFTA_BITWISE_UNSPEC', 'none'),
                ('NFTA_BITWISE_SREG', 'regs'),
                ('NFTA_BITWISE_DREG', 'regs'),
                ('NFTA_BITWISE_LEN', 'be32'),
                ('NFTA_BITWISE_MASK', 'nfta_data'),
                ('NFTA_BITWISE_XOR', 'nfta_data'),
                ('NFTA_BITWISE_OP', 'bitwise_op'),
                ('NFTA_BITWISE_DATA', 'nfta_data'),
            )

            class bitwise_op(nft_map_be32):
                ops = {
                    0: 'NFT_BITWISE_BOOL',
                    1: 'NFT_BITWISE_LSHIFT',
                    2: 'NFT_BITWISE_RSHIFT',
                }

        class nft_byteorder(nft_regs):
            nla_map = (
                ('NFTA_BYTEORDER_UNSPEC', 'none'),
                ('NFTA_BYTEORDER_SREG', 'regs'),
                ('NFTA_BYTEORDER_DREG', 'regs'),
                ('NFTA_BYTEORDER_OP', 'ops'),
                ('NFTA_BYTEORDER_LEN', 'be32'),
                ('NFTA_BYTEORDER_SIZE', 'be32'),
            )

            class ops(nft_map_be32):
                ops = {0: 'NFT_BYTEORDER_NTOH', 1: 'NFT_BYTEORDER_HTON'}

        class nft_cmp(nft_data, nft_regs):
            nla_map = (
                ('NFTA_CMP_UNSPEC', 'none'),
                ('NFTA_CMP_SREG', 'regs'),
                ('NFTA_CMP_OP', 'ops'),
                ('NFTA_CMP_DATA', 'nfta_data'),
            )

            class ops(nft_map_be32):
                ops = {
                    0: 'NFT_CMP_EQ',
                    1: 'NFT_CMP_NEQ',
                    2: 'NFT_CMP_LT',
                    3: 'NFT_CMP_LTE',
                    4: 'NFT_CMP_GT',
                    5: 'NFT_CMP_GTE',
                }

        class nft_match(nla):
            # xtables compat match expression.
            nla_map = (
                ('NFTA_MATCH_UNSPEC', 'none'),
                ('NFTA_MATCH_NAME', 'asciiz'),
                ('NFTA_MATCH_REV', 'be32'),
                ('NFTA_MATCH_INFO', 'hex'),
                ('NFTA_MATCH_PROTOCOL', 'hex'),
                ('NFTA_MATCH_FLAGS', 'hex'),
            )

        class nft_target(nla):
            # xtables compat target expression.
            nla_map = (
                ('NFTA_TARGET_UNSPEC', 'none'),
                ('NFTA_TARGET_NAME', 'asciiz'),
                ('NFTA_TARGET_REV', 'be32'),
                ('NFTA_TARGET_INFO', 'hex'),
                ('NFTA_TARGET_PROTOCOL', 'hex'),
                ('NFTA_TARGET_FLAGS', 'hex'),
            )

        class nft_connlimit(nla):
            nla_map = (
                ('NFTA_CONNLIMIT_UNSPEC', 'none'),
                ('NFTA_CONNLIMIT_COUNT', 'be32'),
                ('NFTA_CONNLIMIT_FLAGS', 'connlimit_flags'),
            )

            class connlimit_flags(nft_flags_be32):
                ops = ('NFT_LIMIT_F_INV',)

        class nft_counter(nla):
            nla_map = (
                ('NFTA_COUNTER_UNSPEC', 'none'),
                ('NFTA_COUNTER_BYTES', 'be64'),
                ('NFTA_COUNTER_PACKETS', 'be64'),
            )

        class nft_ct(nft_regs):
            nla_map = (
                ('NFTA_CT_UNSPEC', 'none'),
                ('NFTA_CT_DREG', 'regs'),
                ('NFTA_CT_KEY', 'keys'),
                ('NFTA_CT_DIRECTION', 'uint8'),
                ('NFTA_CT_SREG', 'regs'),
            )

            class keys(nft_map_be32):
                ops = {
                    0x00: 'NFT_CT_STATE',
                    0x01: 'NFT_CT_DIRECTION',
                    0x02: 'NFT_CT_STATUS',
                    0x03: 'NFT_CT_MARK',
                    0x04: 'NFT_CT_SECMARK',
                    0x05: 'NFT_CT_EXPIRATION',
                    0x06: 'NFT_CT_HELPER',
                    0x07: 'NFT_CT_L3PROTOCOL',
                    0x08: 'NFT_CT_SRC',
                    0x09: 'NFT_CT_DST',
                    0x0A: 'NFT_CT_PROTOCOL',
                    0x0B: 'NFT_CT_PROTO_SRC',
                    0x0C: 'NFT_CT_PROTO_DST',
                    0x0D: 'NFT_CT_LABELS',
                    0x0E: 'NFT_CT_PKTS',
                    0x0F: 'NFT_CT_BYTES',
                    0x10: 'NFT_CT_AVGPKT',
                    0x11: 'NFT_CT_ZONE',
                    0x12: 'NFT_CT_EVENTMASK',
                    0x13: 'NFT_CT_SRC_IP',
                    0x14: 'NFT_CT_DST_IP',
                    0x15: 'NFT_CT_SRC_IP6',
                    0x16: 'NFT_CT_DST_IP6',
                    0x17: 'NFT_CT_ID',
                }

        class nft_dup(nft_regs):
            nla_map = (
                ('NFTA_DUP_UNSPEC', 'none'),
                ('NFTA_DUP_SREG_ADDR', 'regs'),
                ('NFTA_DUP_SREG_DEV', 'regs'),
            )

        class nft_exthdr(nft_regs):
            nla_map = (
                ('NFTA_EXTHDR_UNSPEC', 'none'),
                ('NFTA_EXTHDR_DREG', 'regs'),
                ('NFTA_EXTHDR_TYPE', 'uint8'),
                ('NFTA_EXTHDR_OFFSET', 'be32'),
                ('NFTA_EXTHDR_LEN', 'be32'),
                ('NFTA_EXTHDR_FLAGS', 'exthdr_flags'),
                ('NFTA_EXTHDR_OP', 'exthdr_op'),
                ('NFTA_EXTHDR_SREG', 'regs'),
            )

            class exthdr_flags(nft_flags_be32):
                ops = ('NFT_EXTHDR_F_PRESENT',)

            class exthdr_op(nft_map_be32):
                ops = {
                    0: 'NFT_EXTHDR_OP_IPV6',
                    1: 'NFT_EXTHDR_OP_TCPOPT',
                    2: 'NFT_EXTHDR_OP_IPV4',
                }

        class nft_fib(nft_regs):
            nla_map = (
                ('NFTA_FIB_UNSPEC', 'none'),
                ('NFTA_FIB_DREG', 'regs'),
                ('NFTA_FIB_RESULT', 'fib_result'),
                ('NFTA_FIB_FLAGS', 'fib_flags'),
            )

            # NOTE(review): fib_result decodes as a bitmask while
            # fib_flags decodes as an enum; the kernel uses
            # NFTA_FIB_RESULT as an enum and NFTA_FIB_FLAGS as a
            # bitmask — verify the base classes are not swapped.
            class fib_result(nft_flags_be32):
                ops = (
                    'NFT_FIB_RESULT_UNSPEC',
                    'NFT_FIB_RESULT_OIF',
                    'NFT_FIB_RESULT_OIFNAME',
                    'NFT_FIB_RESULT_ADDRTYPE',
                )

            class fib_flags(nft_map_be32):
                ops = {
                    0: 'NFTA_FIB_F_SADDR',
                    1: 'NFTA_FIB_F_DADDR',
                    2: 'NFTA_FIB_F_MARK',
                    3: 'NFTA_FIB_F_IIF',
                    4: 'NFTA_FIB_F_OIF',
                    5: 'NFTA_FIB_F_PRESENT',
                }

        class nft_fwd(nft_regs):
            nla_map = (
                ('NFTA_FWD_UNSPEC', 'none'),
                ('NFTA_FWD_SREG_DEV', 'regs'),
                ('NFTA_FWD_SREG_ADDR', 'regs'),
                ('NFTA_FWD_NFPROTO', 'u32'),
            )

        class nft_hash(nft_regs):
            nla_map = (
                ('NFTA_HASH_UNSPEC', 'none'),
                ('NFTA_HASH_SREG', 'regs'),
                ('NFTA_HASH_DREG', 'regs'),
                ('NFTA_HASH_LEN', 'be32'),
                ('NFTA_HASH_MODULUS', 'be32'),
                ('NFTA_HASH_SEED', 'be32'),
                ('NFTA_HASH_OFFSET', 'be32'),
                ('NFTA_HASH_TYPE', 'hash_type'),
                ('NFTA_HASH_SET_NAME', 'asciiz'),
                ('NFTA_HASH_SET_ID', 'be32'),
            )

            class hash_type(nft_map_be32):
                ops = {0: 'NFT_HASH_JENKINS', 1: 'NFT_HASH_SYM'}

        class nft_immediate(nft_data, nft_regs):
            nla_map = (
                ('NFTA_IMMEDIATE_UNSPEC', 'none'),
                ('NFTA_IMMEDIATE_DREG', 'regs'),
                ('NFTA_IMMEDIATE_DATA', 'nfta_data'),
            )

        class nft_limit(nla):
            nla_map = (
                ('NFTA_LIMIT_UNSPEC', 'none'),
                ('NFTA_LIMIT_RATE', 'be64'),
                ('NFTA_LIMIT_UNIT', 'be64'),
                ('NFTA_LIMIT_BURST', 'be32'),
                ('NFTA_LIMIT_TYPE', 'types'),
                ('NFTA_LIMIT_FLAGS', 'be32'),
            )  # make flags type

            class types(nft_map_be32):
                ops = {0: 'NFT_LIMIT_PKTS', 1: 'NFT_LIMIT_PKT_BYTES'}

        class nft_log(nla):
            nla_map = (
                ('NFTA_LOG_UNSPEC', 'none'),
                ('NFTA_LOG_GROUP', 'be32'),
                ('NFTA_LOG_PREFIX', 'asciiz'),
                ('NFTA_LOG_SNAPLEN', 'be32'),
                ('NFTA_LOG_QTHRESHOLD', 'be32'),
                ('NFTA_LOG_LEVEL', 'log_level'),
                ('NFTA_LOG_FLAGS', 'log_flags'),
            )

            class log_level(nft_map_be32):
                ops = {
                    0: 'NFT_LOGLEVEL_EMERG',
                    1: 'NFT_LOGLEVEL_ALERT',
                    2: 'NFT_LOGLEVEL_CRIT',
                    3: 'NFT_LOGLEVEL_ERR',
                    4: 'NFT_LOGLEVEL_WARNING',
                    5: 'NFT_LOGLEVEL_NOTICE',
                    6: 'NFT_LOGLEVEL_INFO',
                    7: 'NFT_LOGLEVEL_DEBUG',
                    8: 'NFT_LOGLEVEL_AUDIT',
                }

            class log_flags(nft_flags_be32):
                ops = (
                    'NF_LOG_TCPSEQ',
                    'NF_LOG_TCPOPT',
                    'NF_LOG_IPOPT',
                    'NF_LOG_UID',
                    'NF_LOG_NFLOG',
                    'NF_LOG_MACDECODE',
                )

        class nft_lookup(nft_regs):
            nla_map = (
                ('NFTA_LOOKUP_UNSPEC', 'none'),
                ('NFTA_LOOKUP_SET', 'asciiz'),
                ('NFTA_LOOKUP_SREG', 'regs'),
                ('NFTA_LOOKUP_DREG', 'regs'),
                ('NFTA_LOOKUP_SET_ID', 'be32'),
                ('NFTA_LOOKUP_FLAGS', 'lookup_flags'),
            )

            class lookup_flags(nft_flags_be32):
                ops = ('NFT_LOOKUP_F_INV',)

        class nft_masq(nft_regs, nat_flags):
            nla_map = (
                ('NFTA_MASQ_UNSPEC', 'none'),
                ('NFTA_MASQ_FLAGS', 'nat_range'),
                ('NFTA_MASQ_REG_PROTO_MIN', 'regs'),
                ('NFTA_MASQ_REG_PROTO_MAX', 'regs'),
            )

        class nft_meta(nft_regs):
            nla_map = (
                ('NFTA_META_UNSPEC', 'none'),
                ('NFTA_META_DREG', 'regs'),
                ('NFTA_META_KEY', 'meta_key'),
                ('NFTA_META_SREG', 'regs'),
            )

            class meta_key(nft_map_be32):
                ops = {
                    0: 'NFT_META_LEN',
                    1: 'NFT_META_PROTOCOL',
                    2: 'NFT_META_PRIORITY',
                    3: 'NFT_META_MARK',
                    4: 'NFT_META_IIF',
                    5: 'NFT_META_OIF',
                    6: 'NFT_META_IIFNAME',
                    7: 'NFT_META_OIFNAME',
                    8: 'NFT_META_IIFTYPE',
                    9: 'NFT_META_OIFTYPE',
                    10: 'NFT_META_SKUID',
                    11: 'NFT_META_SKGID',
                    12: 'NFT_META_NFTRACE',
                    13: 'NFT_META_RTCLASSID',
                    14: 'NFT_META_SECMARK',
                    15: 'NFT_META_NFPROTO',
                    16: 'NFT_META_L4PROTO',
                    17: 'NFT_META_BRI_IIFNAME',
                    18: 'NFT_META_BRI_OIFNAME',
                    19: 'NFT_META_PKTTYPE',
                    20: 'NFT_META_CPU',
                    21: 'NFT_META_IIFGROUP',
                    22: 'NFT_META_OIFGROUP',
                    23: 'NFT_META_CGROUP',
                    24: 'NFT_META_PRANDOM',
                    25: 'NFT_META_SECPATH',
                    26: 'NFT_META_IIFKIND',
                    27: 'NFT_META_OIFKIND',
                    28: 'NFT_META_BRI_IIFPVID',
                    29: 'NFT_META_BRI_IIFVPROTO',
                    30: 'NFT_META_TIME_NS',
                    31: 'NFT_META_TIME_DAY',
                    32: 'NFT_META_TIME_HOUR',
                    33: 'NFT_META_SDIF',
                    34: 'NFT_META_SDIFNAME',
                }

        class nft_nat(nft_regs, nat_flags):
            nla_map = (
                ('NFTA_NAT_UNSPEC', 'none'),
                ('NFTA_NAT_TYPE', 'types'),
                ('NFTA_NAT_FAMILY', 'be32'),
                ('NFTA_NAT_REG_ADDR_MIN', 'regs'),
                ('NFTA_NAT_REG_ADDR_MAX', 'regs'),
                ('NFTA_NAT_REG_PROTO_MIN', 'regs'),
                ('NFTA_NAT_REG_PROTO_MAX', 'regs'),
                ('NFTA_NAT_FLAGS', 'nat_range'),
            )

            class types(nft_map_be32):
                ops = {0: 'NFT_NAT_SNAT', 1: 'NFT_NAT_DNAT'}

        class nft_numgen(nft_regs):
            nla_map = (
                ('NFTA_NG_UNSPEC', 'none'),
                ('NFTA_NG_DREG', 'regs'),
                ('NFTA_NG_MODULUS', 'be32'),
                ('NFTA_NG_TYPE', 'types'),
                ('NFTA_NG_OFFSET', 'be32'),
                ('NFTA_NG_SET_NAME', 'asciiz'),
                ('NFTA_NG_SET_ID', 'be32'),
            )

            class types(nft_map_be32):
                ops = {0: 'NFT_NG_INCREMENTAL', 1: 'NFT_NG_RANDOM'}

        class nft_objref(nft_regs):
            nla_map = (
                ('NFTA_OBJREF_UNSPEC', 'none'),
                ('NFTA_OBJREF_IMM_TYPE', 'regs'),
                ('NFTA_OBJREF_IMM_NAME', 'asciiz'),
                ('NFTA_OBJREF_SET_SREG', 'regs'),
                ('NFTA_OBJREF_SET_NAME', 'asciiz'),
                ('NFTA_OBJREF_SET_ID', 'be32'),
            )

        class nft_offload(nla):
            nla_map = (
                ('NFTA_FLOW_UNSPEC', 'none'),
                ('NFTA_FLOW_TABLE_NAME', 'asciiz'),
            )

        class nft_osf(nft_regs):
            nla_map = (
                ('NFTA_OSF_UNSPEC', 'none'),
                ('NFTA_OSF_DREG', 'regs'),
                ('NFTA_OSF_TTL', 'uint8'),
                ('NFTA_OSF_FLAGS', 'osf_flags'),
            )

            class osf_flags(nft_flags_be32):
                ops = ('NFT_OSF_F_VERSION',)

        class nft_payload(nft_regs):
            nla_map = (
                ('NFTA_PAYLOAD_UNSPEC', 'none'),
                ('NFTA_PAYLOAD_DREG', 'regs'),
                ('NFTA_PAYLOAD_BASE', 'base_type'),
                ('NFTA_PAYLOAD_OFFSET', 'be32'),
                ('NFTA_PAYLOAD_LEN', 'be32'),
                ('NFTA_PAYLOAD_SREG', 'regs'),
                ('NFTA_PAYLOAD_CSUM_TYPE', 'csum_type'),
                ('NFTA_PAYLOAD_CSUM_OFFSET', 'be32'),
                ('NFTA_PAYLOAD_CSUM_FLAGS', 'csum_flags'),
            )

            class base_type(nft_map_be32):
                ops = {
                    0: 'NFT_PAYLOAD_LL_HEADER',
                    1: 'NFT_PAYLOAD_NETWORK_HEADER',
                    2: 'NFT_PAYLOAD_TRANSPORT_HEADER',
                }

            class csum_type(nft_map_be32):
                ops = {
                    0: 'NFT_PAYLOAD_CSUM_NONE',
                    1: 'NFT_PAYLOAD_CSUM_INET',  # RFC 791
                    2: 'NFT_PAYLOAD_CSUM_SCTP',
                }  # RFC 3309

            class csum_flags(nft_flags_be32):
                ops = ('NFT_PAYLOAD_L4CSUM_PSEUDOHDR',)

        class nft_queue(nft_regs):
            nla_map = (
                ('NFTA_QUEUE_UNSPEC', 'none'),
                ('NFTA_QUEUE_NUM', 'be16'),
                ('NFTA_QUEUE_TOTAL', 'be16'),
                ('NFTA_QUEUE_FLAGS', 'queue_flags'),
                ('NFTA_QUEUE_SREG_QNUM', 'regs'),
            )

            class queue_flags(nft_flags_be16):
                ops = ('NFT_QUEUE_FLAG_BYPASS', 'NFT_QUEUE_FLAG_CPU_FANOUT')

        class nft_quota(nla):
            nla_map = (
                ('NFTA_QUOTA_UNSPEC', 'none'),
                ('NFTA_QUOTA_BYTES', 'be16'),
                ('NFTA_QUOTA_FLAGS', 'quota_flags'),
                ('NFTA_QUOTA_PAD', 'hex'),
                ('NFTA_QUOTA_CONSUMED', 'be64'),
            )

            class quota_flags(nft_flags_be32):
                ops = ('NFT_QUOTA_F_INV', 'NFT_QUOTA_F_DEPLETED')

        class nft_range(nft_regs, nft_data):
            nla_map = (
                ('NFTA_RANGE_UNSPEC', 'none'),
                ('NFTA_RANGE_SREG', 'regs'),
                ('NFTA_RANGE_OP', 'range_op'),
                ('NFTA_RANGE_FROM_DATA', 'nfta_data'),
                ('NFTA_RANGE_TO_DATA', 'nfta_data'),
            )

            class range_op(nft_map_be32):
                ops = {0: 'NFT_RANGE_EQ', 1: 'NFT_RANGE_NEQ'}

        class nft_redir(nft_regs, nat_flags):
            nla_map = (
                ('NFTA_REDIR_UNSPEC', 'none'),
                ('NFTA_REDIR_REG_PROTO_MIN', 'regs'),
                ('NFTA_REDIR_REG_PROTO_MAX', 'regs'),
                ('NFTA_REDIR_FLAGS', 'nat_range'),
            )

        class nft_reject(nla):
            nla_map = (
                ('NFTA_REJECT_UNSPEC', 'none'),
                ('NFTA_REJECT_TYPE', 'types'),
                ('NFTA_REJECT_ICMP_CODE', 'codes'),
            )

            class types(nft_map_be32):
                ops = {
                    0: 'NFT_REJECT_ICMP_UNREACH',
                    1: 'NFT_REJECT_TCP_RST',
                    2: 'NFT_REJECT_ICMPX_UNREACH',
                }

            class codes(nft_map_uint8):
                ops = {
                    0: 'NFT_REJECT_ICMPX_NO_ROUTE',
                    1: 'NFT_REJECT_ICMPX_PORT_UNREACH',
                    2: 'NFT_REJECT_ICMPX_HOST_UNREACH',
                    3: 'NFT_REJECT_ICMPX_ADMIN_PROHIBITED',
                }

        class nft_rt(nft_regs):
            nla_map = (
                ('NFTA_RT_UNSPEC', 'none'),
                ('NFTA_RT_DREG', 'regs'),
                ('NFTA_RT_KEY', 'rt_keys'),
            )

            class rt_keys(nft_map_be32):
                ops = {
                    0: 'NFT_RT_CLASSID',
                    1: 'NFT_RT_NEXTHOP4',
                    2: 'NFT_RT_NEXTHOP6',
                    3: 'NFT_RT_TCPMSS',
                    4: 'NFT_RT_XFRM',
                }

        class nft_secmark(nla):
            nla_map = (
                ('NFTA_SECMARK_UNSPEC', 'none'),
                ('NFTA_SECMARK_CTX', 'asciiz'),
            )

        class nft_socket(nft_regs):
            nla_map = (
                ('NFTA_SOCKET_UNSPEC', 'none'),
                ('NFTA_SOCKET_KEY', 'socket_keys'),
                ('NFTA_SOCKET_DREG', 'regs'),
            )

            class socket_keys(nft_map_be32):
                ops = {
                    0: 'NFT_SOCKET_TRANSPARENT',
                    1: 'NFT_SOCKET_MARK',
                    2: 'NFT_SOCKET_WILDCARD',
                }

        class nft_synproxy(nla):
            nla_map = (
                ('NFTA_SYNPROXY_UNSPEC', 'none'),
                ('NFTA_SYNPROXY_MSS', 'u16'),
                ('NFTA_SYNPROXY_WSCALE', 'uint8'),
                ('NFTA_SYNPROXY_FLAGS', 'synproxy_flags'),
            )

            class synproxy_flags(nft_flags_be32):
                ops = (
                    'NF_SYNPROXY_OPT_MSS',
                    'NF_SYNPROXY_OPT_WSCALE',
                    'NF_SYNPROXY_OPT_SACK_PERM',
                    'NF_SYNPROXY_OPT_TIMESTAMP',
                    'NF_SYNPROXY_OPT_ECN',
                )

        class nft_tproxy(nft_regs):
            nla_map = (
                ('NFTA_TPROXY_UNSPEC', 'none'),
                ('NFTA_TPROXY_FAMILY', 'regs'),
                ('NFTA_TPROXY_REG_ADDR', 'regs'),
                ('NFTA_TPROXY_REG_PORT', 'regs'),
            )

        class nft_dynset(nft_regs):
            # rule_expr is patched in by the expr() resolver below so
            # that NFTA_DYNSET_EXPR can recursively parse expressions.
            rule_expr = None
            nla_map = (
                ('NFTA_DYNSET_UNSPEC', 'none'),
                ('NFTA_DYNSET_SET_NAME', 'asciiz'),
                ('NFTA_DYNSET_SET_ID', 'be32'),
                ('NFTA_DYNSET_OP', 'dynset_op'),
                ('NFTA_DYNSET_SREG_KEY', 'regs'),
                ('NFTA_DYNSET_SREG_DATA', 'regs'),
                ('NFTA_DYNSET_TIMEOUT', 'be64'),
                ('NFTA_DYNSET_EXPR', 'rule_expr'),
                ('NFTA_DYNSET_PAD', 'hex'),
                ('NFTA_DYNSET_FLAGS', 'dynset_flags'),
            )

            class dynset_flags(nft_flags_be32):
                ops = ('NFT_DYNSET_F_INV',)

            class dynset_op(nft_map_be32):
                ops = {
                    0: 'NFT_DYNSET_OP_ADD',
                    1: 'NFT_DYNSET_OP_UPDATE',
                    2: 'NFT_DYNSET_OP_DELETE',
                }

        class nft_xfrm(nft_regs):
            nla_map = (
                ('NFTA_XFRM_UNSPEC', 'none'),
                ('NFTA_XFRM_DREG', 'regs'),
                ('NFTA_XFRM_KEY', 'xfrm_key'),
                ('NFTA_XFRM_DIR', 'uint8'),
                ('NFTA_XFRM_SPNUM', 'be32'),
            )

            class xfrm_key(nft_map_be32):
                ops = {
                    0: 'NFT_XFRM_KEY_UNSPEC',
                    1: 'NFT_XFRM_KEY_DADDR_IP4',
                    2: 'NFT_XFRM_KEY_DADDR_IP6',
                    3: 'NFT_XFRM_KEY_SADDR_IP4',
                    4: 'NFT_XFRM_KEY_SADDR_IP6',
                    5: 'NFT_XFRM_KEY_REQID',
                    6: 'NFT_XFRM_KEY_SPI',
                }

        @staticmethod
        def expr(self, *argv, **kwarg):
            # Resolve NFTA_EXPR_DATA to the nested nft_<name> class
            # selected by NFTA_EXPR_NAME; fall back to hex dump for
            # unknown expression names.
            data_type = self.get_attr('NFTA_EXPR_NAME')
            expr = getattr(self, 'nft_%s' % data_type, self.hex)
            if hasattr(expr, 'rule_expr'):
                expr.rule_expr = self.__class__
            return expr


class nft_rule_msg(nfgen_msg, nft_contains_expr):
    prefix = 'NFTA_RULE_'
    nla_map = (
        ('NFTA_RULE_UNSPEC', 'none'),
        ('NFTA_RULE_TABLE', 'asciiz'),
        ('NFTA_RULE_CHAIN', 'asciiz'),
        ('NFTA_RULE_HANDLE', 'be64'),
        ('NFTA_RULE_EXPRESSIONS', '*nft_expr'),
        ('NFTA_RULE_COMPAT', 'hex'),
        ('NFTA_RULE_POSITION', 'be64'),
        ('NFTA_RULE_USERDATA', 'hex'),
        ('NFTA_RULE_PAD', 'hex'),
        ('NFTA_RULE_ID', 'be32'),
        ('NFTA_RULE_POSITION_ID', 'be32'),
        ('NFTA_RULE_CHAIN_ID', 'be32'),
    )


class nft_set_msg(nfgen_msg, nft_contains_expr):
    prefix = 'NFTA_SET_'
    nla_map = (
        ('NFTA_SET_UNSPEC', 'none'),
        ('NFTA_SET_TABLE', 'asciiz'),
        ('NFTA_SET_NAME', 'asciiz'),
        ('NFTA_SET_FLAGS',
'set_flags'), ('NFTA_SET_KEY_TYPE', 'be32'), ('NFTA_SET_KEY_LEN', 'be32'), ('NFTA_SET_DATA_TYPE', 'be32'), ('NFTA_SET_DATA_LEN', 'be32'), ('NFTA_SET_POLICY', 'set_policy'), ('NFTA_SET_DESC', 'set_desc'), ('NFTA_SET_ID', 'be32'), ('NFTA_SET_TIMEOUT', 'be64'), ('NFTA_SET_GC_INTERVAL', 'be32'), ('NFTA_SET_USERDATA', 'set_udata'), ('NFTA_SET_PAD', 'hex'), ('NFTA_SET_OBJ_TYPE', 'be32'), ('NFTA_SET_HANDLE', 'be64'), ('NFTA_SET_EXPR', 'nft_expr'), ('NFTA_SET_EXPRESSIONS', '*nft_expr'), ) class set_udata(nftnl_udata): udata_types = ( "NFTNL_UDATA_SET_KEYBYTEORDER", "NFTNL_UDATA_SET_DATABYTEORDER", "NFTNL_UDATA_SET_MERGE_ELEMENTS", "NFTNL_UDATA_SET_KEY_TYPEOF", "NFTNL_UDATA_SET_DATA_TYPEOF", "NFTNL_UDATA_SET_EXPR", "NFTNL_UDATA_SET_DATA_INTERVAL", "NFTNL_UDATA_SET_COMMENT", ) class set_flags(nft_flags_be32): ops = ( 'NFT_SET_ANONYMOUS', 'NFT_SET_CONSTANT', 'NFT_SET_INTERVAL', 'NFT_SET_MAP', 'NFT_SET_TIMEOUT', 'NFT_SET_EVAL', 'NFT_SET_OBJECT', 'NFT_SET_CONCAT', ) class set_policy(nft_map_be32): ops = {0: 'NFT_SET_POL_PERFORMANCE', 1: 'NFT_SET_POL_MEMORY'} class set_desc(nla): nla_map = ( ('NFTA_SET_DESC_UNSPEC', 'none'), ('NFTA_SET_DESC_SIZE', 'be32'), ('NFTA_SET_DESC_CONCAT', '*list_elem'), ) class list_elem(nla): nla_map = ( ('NFTA_LIST_UNSPEC', 'none'), ('NFTA_LIST_ELEM', '*set_field_attribute'), ) class set_field_attribute(nla): nla_map = ( ('NFTA_SET_FIELD_UNSPEC', 'none'), ('NFTA_SET_FIELD_LEN', 'be32'), ) class nft_table_msg(nfgen_msg, nft_contains_expr): prefix = 'NFTA_TABLE_' nla_map = ( ('NFTA_TABLE_UNSPEC', 'none'), ('NFTA_TABLE_NAME', 'asciiz'), ('NFTA_TABLE_FLAGS', 'be32'), ('NFTA_TABLE_USE', 'be32'), ('NFTA_TABLE_HANDLE', 'be64'), ('NFTA_TABLE_PAD', 'hex'), ('NFTA_TABLE_USERDATA', 'hex'), ) class nft_set_elem_list_msg(nfgen_msg): prefix = 'NFTA_SET_ELEM_LIST_' nla_map = ( ('NFTA_SET_ELEM_LIST_UNSPEC', 'none'), ('NFTA_SET_ELEM_LIST_TABLE', 'asciiz'), ('NFTA_SET_ELEM_LIST_SET', 'asciiz'), ('NFTA_SET_ELEM_LIST_ELEMENTS', '*set_elem'), ('NFTA_SET_ELEM_LIST_SET_ID', 
'be32'), ) class set_elem(nla, nft_contains_expr): nla_map = ( ('NFTA_SET_ELEM_UNSPEC', 'none'), ('NFTA_SET_ELEM_KEY', 'data_attributes'), ('NFTA_SET_ELEM_DATA', 'data_attributes'), ('NFTA_SET_ELEM_FLAGS', 'set_elem_flags'), ('NFTA_SET_ELEM_TIMEOUT', 'be64'), ('NFTA_SET_ELEM_EXPIRATION', 'be64'), ('NFTA_SET_ELEM_USERDATA', 'set_elem_udata'), ('NFTA_SET_ELEM_EXPR', 'nft_expr'), ('NFTA_SET_ELEM_PAD', 'hex'), ('NFTA_SET_ELEM_OBJREF', 'asciiz'), ('NFTA_SET_ELEM_KEY_END', 'data_attributes'), ('NFTA_SET_ELEM_EXPRESSIONS', '*nft_expr'), ) class set_elem_udata(nftnl_udata): udata_types = ( "NFTNL_UDATA_SET_ELEM_COMMENT", "NFTNL_UDATA_SET_ELEM_FLAGS", ) class set_elem_flags(nft_flags_be32): ops = {1: 'NFT_SET_ELEM_INTERVAL_END'} class data_attributes(nla): nla_map = ( ('NFTA_DATA_UNSPEC', 'none'), ('NFTA_DATA_VALUE', 'binary'), ('NFTA_DATA_VERDICT', 'verdict_attributes'), ) class verdict_attributes(nla): nla_map = ( ('NFTA_VERDICT_UNSPEC', 'none'), ('NFTA_VERDICT_CODE', 'verdict_code'), ('NFTA_VERDICT_CHAIN', 'asciiz'), ('NFTA_VERDICT_CHAIN_ID', 'be32'), ) class verdict_code(nft_map_be32_signed): ops = { 0: 'NF_DROP', 1: 'NF_ACCEPT', 2: 'NF_STOLEN', 3: 'NF_QUEUE', 4: 'NF_REPEAT', 5: 'NF_STOP', -1: 'NFT_CONTINUE', -2: 'NFT_BREAK', -3: 'NFT_JUMP', -4: 'NFT_GOTO', -5: 'NFT_RETURN', } class nft_flowtable_msg(nfgen_msg): prefix = 'NFTA_FLOWTABLE_' nla_map = ( ('NFTA_FLOWTABLE_UNSPEC', 'none'), ('NFTA_FLOWTABLE_TABLE', 'asciiz'), ('NFTA_FLOWTABLE_NAME', 'asciiz'), ('NFTA_FLOWTABLE_HOOK', 'flowtable_hook'), ('NFTA_FLOWTABLE_USE', 'be32'), ('NFTA_FLOWTABLE_HANDLE', 'be64'), ('NFTA_FLOWTABLE_PAD', 'hex'), ('NFTA_FLOWTABLE_FLAGS', 'nft_flowtable_flags'), ) class nft_flowtable_flags(nft_flags_be32): ops = ('NFT_FLOWTABLE_HW_OFFLOAD', 'NFT_FLOWTABLE_COUNTER') class flowtable_hook(nft_device): nla_map = ( ('NFTA_FLOWTABLE_HOOK_UNSPEC', 'none'), ('NFTA_FLOWTABLE_HOOK_NUM', 'be32'), ('NFTA_FLOWTABLE_HOOK_PRIORITY', 'be32'), ('NFTA_FLOWTABLE_HOOK_DEVS', 'device_attributes'), ) class 
NFTSocket(NetlinkSocket): ''' NFNetlink socket (family=NETLINK_NETFILTER). Implements API to the nftables functionality. ''' policy = { NFT_MSG_NEWTABLE: nft_table_msg, NFT_MSG_GETTABLE: nft_table_msg, NFT_MSG_DELTABLE: nft_table_msg, NFT_MSG_NEWCHAIN: nft_chain_msg, NFT_MSG_GETCHAIN: nft_chain_msg, NFT_MSG_DELCHAIN: nft_chain_msg, NFT_MSG_NEWRULE: nft_rule_msg, NFT_MSG_GETRULE: nft_rule_msg, NFT_MSG_DELRULE: nft_rule_msg, NFT_MSG_NEWSET: nft_set_msg, NFT_MSG_GETSET: nft_set_msg, NFT_MSG_DELSET: nft_set_msg, NFT_MSG_NEWGEN: nft_gen_msg, NFT_MSG_GETGEN: nft_gen_msg, NFT_MSG_NEWSETELEM: nft_set_elem_list_msg, NFT_MSG_GETSETELEM: nft_set_elem_list_msg, NFT_MSG_DELSETELEM: nft_set_elem_list_msg, NFT_MSG_NEWFLOWTABLE: nft_flowtable_msg, NFT_MSG_GETFLOWTABLE: nft_flowtable_msg, NFT_MSG_DELFLOWTABLE: nft_flowtable_msg, } def __init__(self, version=1, attr_revision=0, nfgen_family=2): super(NFTSocket, self).__init__(family=NETLINK_NETFILTER) policy = dict( [ (x | (NFNL_SUBSYS_NFTABLES << 8), y) for (x, y) in self.policy.items() ] ) self.register_policy(policy) self._proto_version = version self._attr_revision = attr_revision self._nfgen_family = nfgen_family self._ts = threading.local() self._write_lock = threading.RLock() def begin(self): with self._write_lock: if hasattr(self._ts, 'data'): # transaction is already started return False self._ts.data = b'' self._ts.seqnum = ( self.addr_pool.alloc(), # begin self.addr_pool.alloc(), # tx self.addr_pool.alloc(), ) # commit msg = nfgen_msg() msg['res_id'] = NFNL_SUBSYS_NFTABLES msg['header']['type'] = 0x10 msg['header']['flags'] = NLM_F_REQUEST msg['header']['sequence_number'] = self._ts.seqnum[0] msg.encode() self._ts.data += msg.data return True def commit(self): with self._write_lock: msg = nfgen_msg() msg['res_id'] = NFNL_SUBSYS_NFTABLES msg['header']['type'] = 0x11 msg['header']['flags'] = NLM_F_REQUEST msg['header']['sequence_number'] = self._ts.seqnum[2] msg.encode() self._ts.data += msg.data self.sendto(self._ts.data, 
(0, 0)) for seqnum in self._ts.seqnum: self.addr_pool.free(seqnum, ban=10) del self._ts.data def request_get( self, msg, msg_type, msg_flags=NLM_F_REQUEST | NLM_F_DUMP, terminate=None, ): ''' Read-only requests do not require transactions. Just run the request and get an answer. ''' msg['nfgen_family'] = self._nfgen_family return tuple( self.nlm_request( msg, msg_type | (NFNL_SUBSYS_NFTABLES << 8), msg_flags, terminate=terminate, ) ) def request_put(self, msg, msg_type, msg_flags=NLM_F_REQUEST): ''' Read-write requests. ''' one_shot = self.begin() msg['header']['type'] = (NFNL_SUBSYS_NFTABLES << 8) | msg_type msg['header']['flags'] = msg_flags msg['header']['sequence_number'] = self._ts.seqnum[1] msg['nfgen_family'] = self._nfgen_family msg.encode() self._ts.data += msg.data if one_shot: self.commit() def _command(self, msg_class, commands, cmd, kwarg): flags = kwarg.pop('flags', NLM_F_ACK) cmd_name = cmd cmd_flags = { 'add': NLM_F_CREATE | NLM_F_APPEND, 'create': NLM_F_CREATE | NLM_F_APPEND | NLM_F_EXCL, 'insert': NLM_F_CREATE, 'replace': NLM_F_REPLACE, } flags |= cmd_flags.get(cmd, 0) flags |= NLM_F_REQUEST cmd = commands[cmd] msg = msg_class() msg['attrs'] = [] # # a trick to pass keyword arguments as On rderedDict instance: # # ordered_args = OrderedDict() # ordered_args['arg1'] = value1 # ordered_args['arg2'] = value2 # ... 
# nft.rule('add', kwarg=ordered_args) # if 'kwarg' in kwarg: kwarg = kwarg['kwarg'] # for key, value in kwarg.items(): nla = msg_class.name2nla(key) msg['attrs'].append([nla, value]) msg['header']['type'] = (NFNL_SUBSYS_NFTABLES << 8) | cmd msg['header']['flags'] = flags | NLM_F_REQUEST msg['nfgen_family'] = self._nfgen_family if cmd_name != 'get': trans_start = nfgen_msg() trans_start['res_id'] = NFNL_SUBSYS_NFTABLES trans_start['header']['type'] = 0x10 trans_start['header']['flags'] = NLM_F_REQUEST trans_end = nfgen_msg() trans_end['res_id'] = NFNL_SUBSYS_NFTABLES trans_end['header']['type'] = 0x11 trans_end['header']['flags'] = NLM_F_REQUEST messages = [trans_start, msg, trans_end] self.nlm_request_batch(messages, noraise=(flags & NLM_F_ACK) == 0) # Only throw an error when the request fails. For now, # do not return anything. else: return self.request_get(msg, msg['header']['type'], flags)[0] # call nft describe "data_type" for more informations DATA_TYPE_NAME_TO_INFO = { "verdict": (DATA_TYPE_VERDICT, 4, nft_data.nfta_data.verdict.verdict_code), "nf_proto": (DATA_TYPE_NFPROTO, 1, nlmsg_atoms.uint8), "bitmask": (DATA_TYPE_BITMASK, 4, nlmsg_atoms.uint32), "integer": (DATA_TYPE_INTEGER, 4, nlmsg_atoms.int32), "string": (DATA_TYPE_STRING, 0, nlmsg_atoms.asciiz), "lladdr": (DATA_TYPE_LLADDR, 0, nlmsg_atoms.lladdr), "ipv4_addr": (DATA_TYPE_IPADDR, 4, nlmsg_atoms.ip4addr), "ipv6_addr": (DATA_TYPE_IP6ADDR, 16, nlmsg_atoms.ip6addr), "ether_addr": (DATA_TYPE_ETHERADDR, 6, nlmsg_atoms.l2addr), "ether_type": (DATA_TYPE_ETHERADDR, 2, nlmsg_atoms.uint16), "inet_proto": (DATA_TYPE_INET_PROTOCOL, 1, nlmsg_atoms.uint8), } DATA_TYPE_ID_TO_NAME = { value[0]: key for key, value in DATA_TYPE_NAME_TO_INFO.items() } pyroute2-0.7.11/pyroute2/netlink/nl80211/000077500000000000000000000000001455030217500176355ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/nl80211/__init__.py000066400000000000000000001705031455030217500217540ustar00rootroot00000000000000''' NL80211 module 
============== TODO ''' import datetime import struct from pyroute2.common import map_namespace from pyroute2.netlink import genlmsg, nla, nla_base from pyroute2.netlink.generic import GenericNetlinkSocket from pyroute2.netlink.nlsocket import Marshal # Define from uapi/linux/nl80211.h NL80211_GENL_NAME = "nl80211" # nl80211 commands NL80211_CMD_UNSPEC = 0 NL80211_CMD_GET_WIPHY = 1 NL80211_CMD_SET_WIPHY = 2 NL80211_CMD_NEW_WIPHY = 3 NL80211_CMD_DEL_WIPHY = 4 NL80211_CMD_GET_INTERFACE = 5 NL80211_CMD_SET_INTERFACE = 6 NL80211_CMD_NEW_INTERFACE = 7 NL80211_CMD_DEL_INTERFACE = 8 NL80211_CMD_GET_KEY = 9 NL80211_CMD_SET_KEY = 10 NL80211_CMD_NEW_KEY = 11 NL80211_CMD_DEL_KEY = 12 NL80211_CMD_GET_BEACON = 13 NL80211_CMD_SET_BEACON = 14 NL80211_CMD_START_AP = 15 NL80211_CMD_NEW_BEACON = NL80211_CMD_START_AP NL80211_CMD_STOP_AP = 16 NL80211_CMD_DEL_BEACON = NL80211_CMD_STOP_AP NL80211_CMD_GET_STATION = 17 NL80211_CMD_SET_STATION = 18 NL80211_CMD_NEW_STATION = 19 NL80211_CMD_DEL_STATION = 20 NL80211_CMD_GET_MPATH = 21 NL80211_CMD_SET_MPATH = 22 NL80211_CMD_NEW_MPATH = 23 NL80211_CMD_DEL_MPATH = 24 NL80211_CMD_SET_BSS = 25 NL80211_CMD_SET_REG = 26 NL80211_CMD_REQ_SET_REG = 27 NL80211_CMD_GET_MESH_CONFIG = 28 NL80211_CMD_SET_MESH_CONFIG = 29 NL80211_CMD_SET_MGMT_EXTRA_IE = 30 NL80211_CMD_GET_REG = 31 NL80211_CMD_GET_SCAN = 32 NL80211_CMD_TRIGGER_SCAN = 33 NL80211_CMD_NEW_SCAN_RESULTS = 34 NL80211_CMD_SCAN_ABORTED = 35 NL80211_CMD_REG_CHANGE = 36 NL80211_CMD_AUTHENTICATE = 37 NL80211_CMD_ASSOCIATE = 38 NL80211_CMD_DEAUTHENTICATE = 39 NL80211_CMD_DISASSOCIATE = 40 NL80211_CMD_MICHAEL_MIC_FAILURE = 41 NL80211_CMD_REG_BEACON_HINT = 42 NL80211_CMD_JOIN_IBSS = 43 NL80211_CMD_LEAVE_IBSS = 44 NL80211_CMD_TESTMODE = 45 NL80211_CMD_CONNECT = 46 NL80211_CMD_ROAM = 47 NL80211_CMD_DISCONNECT = 48 NL80211_CMD_SET_WIPHY_NETNS = 49 NL80211_CMD_GET_SURVEY = 50 NL80211_CMD_NEW_SURVEY_RESULTS = 51 NL80211_CMD_SET_PMKSA = 52 NL80211_CMD_DEL_PMKSA = 53 NL80211_CMD_FLUSH_PMKSA = 54 
NL80211_CMD_REMAIN_ON_CHANNEL = 55 NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 56 NL80211_CMD_SET_TX_BITRATE_MASK = 57 NL80211_CMD_REGISTER_FRAME = 58 NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME NL80211_CMD_FRAME = 59 NL80211_CMD_ACTION = NL80211_CMD_FRAME NL80211_CMD_FRAME_TX_STATUS = 60 NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS NL80211_CMD_SET_POWER_SAVE = 61 NL80211_CMD_GET_POWER_SAVE = 62 NL80211_CMD_SET_CQM = 63 NL80211_CMD_NOTIFY_CQM = 64 NL80211_CMD_SET_CHANNEL = 65 NL80211_CMD_SET_WDS_PEER = 66 NL80211_CMD_FRAME_WAIT_CANCEL = 67 NL80211_CMD_JOIN_MESH = 68 NL80211_CMD_LEAVE_MESH = 69 NL80211_CMD_UNPROT_DEAUTHENTICATE = 70 NL80211_CMD_UNPROT_DISASSOCIATE = 71 NL80211_CMD_NEW_PEER_CANDIDATE = 72 NL80211_CMD_GET_WOWLAN = 73 NL80211_CMD_SET_WOWLAN = 74 NL80211_CMD_START_SCHED_SCAN = 75 NL80211_CMD_STOP_SCHED_SCAN = 76 NL80211_CMD_SCHED_SCAN_RESULTS = 77 NL80211_CMD_SCHED_SCAN_STOPPED = 78 NL80211_CMD_SET_REKEY_OFFLOAD = 79 NL80211_CMD_PMKSA_CANDIDATE = 80 NL80211_CMD_TDLS_OPER = 81 NL80211_CMD_TDLS_MGMT = 82 NL80211_CMD_UNEXPECTED_FRAME = 83 NL80211_CMD_PROBE_CLIENT = 84 NL80211_CMD_REGISTER_BEACONS = 85 NL80211_CMD_UNEXPECTED_4ADDR_FRAME = 86 NL80211_CMD_SET_NOACK_MAP = 87 NL80211_CMD_CH_SWITCH_NOTIFY = 88 NL80211_CMD_START_P2P_DEVICE = 89 NL80211_CMD_STOP_P2P_DEVICE = 90 NL80211_CMD_CONN_FAILED = 91 NL80211_CMD_SET_MCAST_RATE = 92 NL80211_CMD_SET_MAC_ACL = 93 NL80211_CMD_RADAR_DETECT = 94 NL80211_CMD_GET_PROTOCOL_FEATURES = 95 NL80211_CMD_UPDATE_FT_IES = 96 NL80211_CMD_FT_EVENT = 97 NL80211_CMD_CRIT_PROTOCOL_START = 98 NL80211_CMD_CRIT_PROTOCOL_STOP = 99 NL80211_CMD_GET_COALESCE = 100 NL80211_CMD_SET_COALESCE = 101 NL80211_CMD_CHANNEL_SWITCH = 102 NL80211_CMD_VENDOR = 103 NL80211_CMD_SET_QOS_MAP = 104 NL80211_CMD_ADD_TX_TS = 105 NL80211_CMD_DEL_TX_TS = 106 NL80211_CMD_GET_MPP = 107 NL80211_CMD_JOIN_OCB = 108 NL80211_CMD_LEAVE_OCB = 109 NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 110 NL80211_CMD_TDLS_CHANNEL_SWITCH = 111 
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH = 112 NL80211_CMD_WIPHY_REG_CHANGE = 113 NL80211_CMD_MAX = NL80211_CMD_WIPHY_REG_CHANGE (NL80211_NAMES, NL80211_VALUES) = map_namespace('NL80211_CMD_', globals()) NL80211_BSS_ELEMENTS_SSID = 0 NL80211_BSS_ELEMENTS_SUPPORTED_RATES = 1 NL80211_BSS_ELEMENTS_CHANNEL = 3 NL80211_BSS_ELEMENTS_TIM = 5 NL80211_BSS_ELEMENTS_RSN = 48 NL80211_BSS_ELEMENTS_HT_OPERATION = 61 NL80211_BSS_ELEMENTS_EXTENDED_RATE = 50 NL80211_BSS_ELEMENTS_VHT_OPERATION = 192 NL80211_BSS_ELEMENTS_VENDOR = 221 BSS_HT_OPER_CHAN_WIDTH_20 = "20 Mhz" BSS_HT_OPER_CHAN_WIDTH_20_OR_40 = "20 or 40 MHz" BSS_VHT_OPER_CHAN_WIDTH_20_OR_40 = BSS_HT_OPER_CHAN_WIDTH_20_OR_40 BSS_VHT_OPER_CHAN_WIDTH_80 = "80 MHz" BSS_VHT_OPER_CHAN_WIDTH_80P80 = "80+80 MHz" BSS_VHT_OPER_CHAN_WIDTH_160 = "160 MHz" BSS_MEMBERSHIP_SELECTOR_HT_PHY = 127 BSS_MEMBERSHIP_SELECTOR_VHT_PHY = 126 # interface types NL80211_IFTYPE_UNSPECIFIED = 0 NL80211_IFTYPE_ADHOC = 1 NL80211_IFTYPE_STATION = 2 NL80211_IFTYPE_AP = 3 NL80211_IFTYPE_AP_VLAN = 4 NL80211_IFTYPE_WDS = 5 NL80211_IFTYPE_MONITOR = 6 NL80211_IFTYPE_MESH_POINT = 7 NL80211_IFTYPE_P2P_CLIENT = 8 NL80211_IFTYPE_P2P_GO = 9 NL80211_IFTYPE_P2P_DEVICE = 10 NL80211_IFTYPE_OCB = 11 (IFTYPE_NAMES, IFTYPE_VALUES) = map_namespace( 'NL80211_IFTYPE_', globals(), normalize=True ) # channel width NL80211_CHAN_WIDTH_20_NOHT = 0 # 20 MHz non-HT channel NL80211_CHAN_WIDTH_20 = 1 # 20 MHz HT channel NL80211_CHAN_WIDTH_40 = 2 # 40 MHz HT channel NL80211_CHAN_WIDTH_80 = 3 # 80 MHz channel NL80211_CHAN_WIDTH_80P80 = 4 # 80+80 MHz channel NL80211_CHAN_WIDTH_160 = 5 # 160 MHz channel NL80211_CHAN_WIDTH_5 = 6 # 5 MHz OFDM channel NL80211_CHAN_WIDTH_10 = 7 # 10 MHz OFDM channel (CHAN_WIDTH, WIDTH_VALUES) = map_namespace( 'NL80211_CHAN_WIDTH_', globals(), normalize=True ) # BSS "status" NL80211_BSS_STATUS_AUTHENTICATED = 0 # Authenticated with this BS NL80211_BSS_STATUS_ASSOCIATED = 1 # Associated with this BSS NL80211_BSS_STATUS_IBSS_JOINED = 2 # Joined to this IBSS 
(BSS_STATUS_NAMES, BSS_STATUS_VALUES) = map_namespace( 'NL80211_BSS_STATUS_', globals(), normalize=True ) # TX power adjustment NL80211_TX_POWER_AUTOMATIC = 0 # automatically determine transmit power NL80211_TX_POWER_LIMITED = 1 # limit TX power by the mBm parameter NL80211_TX_POWER_FIXED = 2 # fix TX power to the mBm parameter (TX_POWER_NAMES, TX_POWER_VALUES) = map_namespace( 'NL80211_TX_POWER_', globals(), normalize=True ) NL80211_SCAN_FLAG_LOW_PRIORITY = 1 << 0 NL80211_SCAN_FLAG_FLUSH = 1 << 1 NL80211_SCAN_FLAG_AP = 1 << 2 NL80211_SCAN_FLAG_RANDOM_ADDR = 1 << 3 NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME = 1 << 4 NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP = 1 << 5 NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE = 1 << 6 NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 1 << 7 (SCAN_FLAGS_NAMES, SCAN_FLAGS_VALUES) = map_namespace( 'NL80211_SCAN_FLAG_', globals() ) NL80211_STA_FLAG_AUTHORIZED = 1 NL80211_STA_FLAG_SHORT_PREAMBLE = 2 NL80211_STA_FLAG_WME = 3 NL80211_STA_FLAG_MFP = 4 NL80211_STA_FLAG_AUTHENTICATED = 5 NL80211_STA_FLAG_TDLS_PEER = 6 NL80211_STA_FLAG_ASSOCIATED = 7 (STA_FLAG_NAMES, STA_FLAG_VALUES) = map_namespace( 'NL80211_STA_FLAG_', globals() ) # Cipher suites WLAN_CIPHER_SUITE_USE_GROUP = 0x00FAC00 WLAN_CIPHER_SUITE_WEP40 = 0x00FAC01 WLAN_CIPHER_SUITE_TKIP = 0x00FAC02 WLAN_CIPHER_SUITE_RESERVED = 0x00FAC03 WLAN_CIPHER_SUITE_CCMP = 0x00FAC04 WLAN_CIPHER_SUITE_WEP104 = 0x00FAC05 WLAN_CIPHER_SUITE_AES_CMAC = 0x00FAC06 WLAN_CIPHER_SUITE_GCMP = 0x00FAC08 WLAN_CIPHER_SUITE_GCMP_256 = 0x00FAC09 WLAN_CIPHER_SUITE_CCMP_256 = 0x00FAC0A WLAN_CIPHER_SUITE_BIP_GMAC_128 = 0x00FAC0B WLAN_CIPHER_SUITE_BIP_GMAC_256 = 0x00FAC0C WLAN_CIPHER_SUITE_BIP_CMAC_256 = 0x00FAC0D (WLAN_CIPHER_SUITE_NAMES, WLAN_CIPHER_SUITE_VALUES) = map_namespace( 'WLAN_CIPHER_SUITE_', globals() ) class nl80211cmd(genlmsg): prefix = 'NL80211_ATTR_' nla_map = ( ('NL80211_ATTR_UNSPEC', 'none'), ('NL80211_ATTR_WIPHY', 'uint32'), ('NL80211_ATTR_WIPHY_NAME', 'asciiz'), ('NL80211_ATTR_IFINDEX', 
'uint32'), ('NL80211_ATTR_IFNAME', 'asciiz'), ('NL80211_ATTR_IFTYPE', 'uint32'), ('NL80211_ATTR_MAC', 'l2addr'), ('NL80211_ATTR_KEY_DATA', 'hex'), ('NL80211_ATTR_KEY_IDX', 'hex'), ('NL80211_ATTR_KEY_CIPHER', 'uint32'), ('NL80211_ATTR_KEY_SEQ', 'hex'), ('NL80211_ATTR_KEY_DEFAULT', 'hex'), ('NL80211_ATTR_BEACON_INTERVAL', 'hex'), ('NL80211_ATTR_DTIM_PERIOD', 'hex'), ('NL80211_ATTR_BEACON_HEAD', 'hex'), ('NL80211_ATTR_BEACON_TAIL', 'hex'), ('NL80211_ATTR_STA_AID', 'hex'), ('NL80211_ATTR_STA_FLAGS', 'hex'), ('NL80211_ATTR_STA_LISTEN_INTERVAL', 'hex'), ('NL80211_ATTR_STA_SUPPORTED_RATES', 'hex'), ('NL80211_ATTR_STA_VLAN', 'hex'), ('NL80211_ATTR_STA_INFO', 'STAInfo'), ('NL80211_ATTR_WIPHY_BANDS', '*band'), ('NL80211_ATTR_MNTR_FLAGS', 'hex'), ('NL80211_ATTR_MESH_ID', 'hex'), ('NL80211_ATTR_STA_PLINK_ACTION', 'hex'), ('NL80211_ATTR_MPATH_NEXT_HOP', 'hex'), ('NL80211_ATTR_MPATH_INFO', 'hex'), ('NL80211_ATTR_BSS_CTS_PROT', 'hex'), ('NL80211_ATTR_BSS_SHORT_PREAMBLE', 'hex'), ('NL80211_ATTR_BSS_SHORT_SLOT_TIME', 'hex'), ('NL80211_ATTR_HT_CAPABILITY', 'hex'), ('NL80211_ATTR_SUPPORTED_IFTYPES', 'supported_iftypes'), ('NL80211_ATTR_REG_ALPHA2', 'asciiz'), ('NL80211_ATTR_REG_RULES', '*reg_rule'), ('NL80211_ATTR_MESH_CONFIG', 'hex'), ('NL80211_ATTR_BSS_BASIC_RATES', 'hex'), ('NL80211_ATTR_WIPHY_TXQ_PARAMS', 'hex'), ('NL80211_ATTR_WIPHY_FREQ', 'uint32'), ('NL80211_ATTR_WIPHY_CHANNEL_TYPE', 'hex'), ('NL80211_ATTR_KEY_DEFAULT_MGMT', 'hex'), ('NL80211_ATTR_MGMT_SUBTYPE', 'hex'), ('NL80211_ATTR_IE', 'hex'), ('NL80211_ATTR_MAX_NUM_SCAN_SSIDS', 'uint8'), ('NL80211_ATTR_SCAN_FREQUENCIES', 'hex'), ('NL80211_ATTR_SCAN_SSIDS', '*string'), ('NL80211_ATTR_GENERATION', 'uint32'), ('NL80211_ATTR_BSS', 'bss'), ('NL80211_ATTR_REG_INITIATOR', 'hex'), ('NL80211_ATTR_REG_TYPE', 'hex'), ('NL80211_ATTR_SUPPORTED_COMMANDS', 'supported_commands'), ('NL80211_ATTR_FRAME', 'hex'), ('NL80211_ATTR_SSID', 'string'), ('NL80211_ATTR_AUTH_TYPE', 'uint32'), ('NL80211_ATTR_REASON_CODE', 'uint16'), 
('NL80211_ATTR_KEY_TYPE', 'hex'), ('NL80211_ATTR_MAX_SCAN_IE_LEN', 'uint16'), ('NL80211_ATTR_CIPHER_SUITES', 'cipher_suites'), ('NL80211_ATTR_FREQ_BEFORE', 'hex'), ('NL80211_ATTR_FREQ_AFTER', 'hex'), ('NL80211_ATTR_FREQ_FIXED', 'hex'), ('NL80211_ATTR_WIPHY_RETRY_SHORT', 'uint8'), ('NL80211_ATTR_WIPHY_RETRY_LONG', 'uint8'), ('NL80211_ATTR_WIPHY_FRAG_THRESHOLD', 'hex'), ('NL80211_ATTR_WIPHY_RTS_THRESHOLD', 'hex'), ('NL80211_ATTR_TIMED_OUT', 'hex'), ('NL80211_ATTR_USE_MFP', 'hex'), ('NL80211_ATTR_STA_FLAGS2', 'hex'), ('NL80211_ATTR_CONTROL_PORT', 'hex'), ('NL80211_ATTR_TESTDATA', 'hex'), ('NL80211_ATTR_PRIVACY', 'hex'), ('NL80211_ATTR_DISCONNECTED_BY_AP', 'hex'), ('NL80211_ATTR_STATUS_CODE', 'hex'), ('NL80211_ATTR_CIPHER_SUITES_PAIRWISE', 'hex'), ('NL80211_ATTR_CIPHER_SUITE_GROUP', 'hex'), ('NL80211_ATTR_WPA_VERSIONS', 'hex'), ('NL80211_ATTR_AKM_SUITES', 'hex'), ('NL80211_ATTR_REQ_IE', 'hex'), ('NL80211_ATTR_RESP_IE', 'hex'), ('NL80211_ATTR_PREV_BSSID', 'hex'), ('NL80211_ATTR_KEY', 'hex'), ('NL80211_ATTR_KEYS', 'hex'), ('NL80211_ATTR_PID', 'uint32'), ('NL80211_ATTR_4ADDR', 'hex'), ('NL80211_ATTR_SURVEY_INFO', 'survey_info'), ('NL80211_ATTR_PMKID', 'hex'), ('NL80211_ATTR_MAX_NUM_PMKIDS', 'uint8'), ('NL80211_ATTR_DURATION', 'hex'), ('NL80211_ATTR_COOKIE', 'hex'), ('NL80211_ATTR_WIPHY_COVERAGE_CLASS', 'uint8'), ('NL80211_ATTR_TX_RATES', 'hex'), ('NL80211_ATTR_FRAME_MATCH', 'hex'), ('NL80211_ATTR_ACK', 'hex'), ('NL80211_ATTR_PS_STATE', 'hex'), ('NL80211_ATTR_CQM', 'hex'), ('NL80211_ATTR_LOCAL_STATE_CHANGE', 'hex'), ('NL80211_ATTR_AP_ISOLATE', 'hex'), ('NL80211_ATTR_WIPHY_TX_POWER_SETTING', 'uint32'), ('NL80211_ATTR_WIPHY_TX_POWER_LEVEL', 'uint32'), ('NL80211_ATTR_TX_FRAME_TYPES', 'hex'), ('NL80211_ATTR_RX_FRAME_TYPES', 'hex'), ('NL80211_ATTR_FRAME_TYPE', 'hex'), ('NL80211_ATTR_CONTROL_PORT_ETHERTYPE', 'hex'), ('NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT', 'hex'), ('NL80211_ATTR_SUPPORT_IBSS_RSN', 'hex'), ('NL80211_ATTR_WIPHY_ANTENNA_TX', 'uint32'), 
('NL80211_ATTR_WIPHY_ANTENNA_RX', 'uint32'), ('NL80211_ATTR_MCAST_RATE', 'hex'), ('NL80211_ATTR_OFFCHANNEL_TX_OK', 'hex'), ('NL80211_ATTR_BSS_HT_OPMODE', 'hex'), ('NL80211_ATTR_KEY_DEFAULT_TYPES', 'hex'), ('NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION', 'hex'), ('NL80211_ATTR_MESH_SETUP', 'hex'), ('NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX', 'uint32'), ('NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX', 'uint32'), ('NL80211_ATTR_SUPPORT_MESH_AUTH', 'hex'), ('NL80211_ATTR_STA_PLINK_STATE', 'hex'), ('NL80211_ATTR_WOWLAN_TRIGGERS', 'hex'), ('NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED', 'hex'), ('NL80211_ATTR_SCHED_SCAN_INTERVAL', 'hex'), ('NL80211_ATTR_INTERFACE_COMBINATIONS', 'hex'), ('NL80211_ATTR_SOFTWARE_IFTYPES', 'hex'), ('NL80211_ATTR_REKEY_DATA', 'hex'), ('NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS', 'uint8'), ('NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN', 'uint16'), ('NL80211_ATTR_SCAN_SUPP_RATES', 'hex'), ('NL80211_ATTR_HIDDEN_SSID', 'hex'), ('NL80211_ATTR_IE_PROBE_RESP', 'hex'), ('NL80211_ATTR_IE_ASSOC_RESP', 'hex'), ('NL80211_ATTR_STA_WME', 'hex'), ('NL80211_ATTR_SUPPORT_AP_UAPSD', 'hex'), ('NL80211_ATTR_ROAM_SUPPORT', 'hex'), ('NL80211_ATTR_SCHED_SCAN_MATCH', 'hex'), ('NL80211_ATTR_MAX_MATCH_SETS', 'uint8'), ('NL80211_ATTR_PMKSA_CANDIDATE', 'hex'), ('NL80211_ATTR_TX_NO_CCK_RATE', 'hex'), ('NL80211_ATTR_TDLS_ACTION', 'hex'), ('NL80211_ATTR_TDLS_DIALOG_TOKEN', 'hex'), ('NL80211_ATTR_TDLS_OPERATION', 'hex'), ('NL80211_ATTR_TDLS_SUPPORT', 'hex'), ('NL80211_ATTR_TDLS_EXTERNAL_SETUP', 'hex'), ('NL80211_ATTR_DEVICE_AP_SME', 'hex'), ('NL80211_ATTR_DONT_WAIT_FOR_ACK', 'hex'), ('NL80211_ATTR_FEATURE_FLAGS', 'hex'), ('NL80211_ATTR_PROBE_RESP_OFFLOAD', 'hex'), ('NL80211_ATTR_PROBE_RESP', 'hex'), ('NL80211_ATTR_DFS_REGION', 'hex'), ('NL80211_ATTR_DISABLE_HT', 'hex'), ('NL80211_ATTR_HT_CAPABILITY_MASK', 'hex'), ('NL80211_ATTR_NOACK_MAP', 'hex'), ('NL80211_ATTR_INACTIVITY_TIMEOUT', 'hex'), ('NL80211_ATTR_RX_SIGNAL_DBM', 'hex'), ('NL80211_ATTR_BG_SCAN_PERIOD', 'hex'), ('NL80211_ATTR_WDEV', 'uint64'), 
('NL80211_ATTR_USER_REG_HINT_TYPE', 'hex'), ('NL80211_ATTR_CONN_FAILED_REASON', 'hex'), ('NL80211_ATTR_SAE_DATA', 'hex'), ('NL80211_ATTR_VHT_CAPABILITY', 'hex'), ('NL80211_ATTR_SCAN_FLAGS', 'uint32'), ('NL80211_ATTR_CHANNEL_WIDTH', 'uint32'), ('NL80211_ATTR_CENTER_FREQ1', 'uint32'), ('NL80211_ATTR_CENTER_FREQ2', 'uint32'), ('NL80211_ATTR_P2P_CTWINDOW', 'hex'), ('NL80211_ATTR_P2P_OPPPS', 'hex'), ('NL80211_ATTR_LOCAL_MESH_POWER_MODE', 'hex'), ('NL80211_ATTR_ACL_POLICY', 'hex'), ('NL80211_ATTR_MAC_ADDRS', 'hex'), ('NL80211_ATTR_MAC_ACL_MAX', 'hex'), ('NL80211_ATTR_RADAR_EVENT', 'hex'), ('NL80211_ATTR_EXT_CAPA', 'array(uint8)'), ('NL80211_ATTR_EXT_CAPA_MASK', 'array(uint8)'), ('NL80211_ATTR_STA_CAPABILITY', 'hex'), ('NL80211_ATTR_STA_EXT_CAPABILITY', 'hex'), ('NL80211_ATTR_PROTOCOL_FEATURES', 'hex'), ('NL80211_ATTR_SPLIT_WIPHY_DUMP', 'hex'), ('NL80211_ATTR_DISABLE_VHT', 'hex'), ('NL80211_ATTR_VHT_CAPABILITY_MASK', 'array(uint8)'), ('NL80211_ATTR_MDID', 'hex'), ('NL80211_ATTR_IE_RIC', 'hex'), ('NL80211_ATTR_CRIT_PROT_ID', 'hex'), ('NL80211_ATTR_MAX_CRIT_PROT_DURATION', 'hex'), ('NL80211_ATTR_PEER_AID', 'hex'), ('NL80211_ATTR_COALESCE_RULE', 'hex'), ('NL80211_ATTR_CH_SWITCH_COUNT', 'hex'), ('NL80211_ATTR_CH_SWITCH_BLOCK_TX', 'hex'), ('NL80211_ATTR_CSA_IES', 'hex'), ('NL80211_ATTR_CSA_C_OFF_BEACON', 'hex'), ('NL80211_ATTR_CSA_C_OFF_PRESP', 'hex'), ('NL80211_ATTR_RXMGMT_FLAGS', 'hex'), ('NL80211_ATTR_STA_SUPPORTED_CHANNELS', 'hex'), ('NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES', 'hex'), ('NL80211_ATTR_HANDLE_DFS', 'hex'), ('NL80211_ATTR_SUPPORT_5_MHZ', 'hex'), ('NL80211_ATTR_SUPPORT_10_MHZ', 'hex'), ('NL80211_ATTR_OPMODE_NOTIF', 'hex'), ('NL80211_ATTR_VENDOR_ID', 'hex'), ('NL80211_ATTR_VENDOR_SUBCMD', 'hex'), ('NL80211_ATTR_VENDOR_DATA', 'hex'), ('NL80211_ATTR_VENDOR_EVENTS', 'hex'), ('NL80211_ATTR_QOS_MAP', 'hex'), ('NL80211_ATTR_MAC_HINT', 'hex'), ('NL80211_ATTR_WIPHY_FREQ_HINT', 'hex'), ('NL80211_ATTR_MAX_AP_ASSOC_STA', 'hex'), ('NL80211_ATTR_TDLS_PEER_CAPABILITY', 'hex'), 
('NL80211_ATTR_SOCKET_OWNER', 'hex'), ('NL80211_ATTR_CSA_C_OFFSETS_TX', 'hex'), ('NL80211_ATTR_MAX_CSA_COUNTERS', 'hex'), ('NL80211_ATTR_TDLS_INITIATOR', 'hex'), ('NL80211_ATTR_USE_RRM', 'hex'), ('NL80211_ATTR_WIPHY_DYN_ACK', 'hex'), ('NL80211_ATTR_TSID', 'hex'), ('NL80211_ATTR_USER_PRIO', 'hex'), ('NL80211_ATTR_ADMITTED_TIME', 'hex'), ('NL80211_ATTR_SMPS_MODE', 'hex'), ('NL80211_ATTR_OPER_CLASS', 'hex'), ('NL80211_ATTR_MAC_MASK', 'hex'), ('NL80211_ATTR_WIPHY_SELF_MANAGED_REG', 'hex'), ('NL80211_ATTR_EXT_FEATURES', 'hex'), ('NL80211_ATTR_SURVEY_RADIO_STATS', 'hex'), ('NL80211_ATTR_NETNS_FD', 'uint32'), ('NL80211_ATTR_SCHED_SCAN_DELAY', 'hex'), ('NL80211_ATTR_REG_INDOOR', 'hex'), ('NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS', 'hex'), ('NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL', 'hex'), ('NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS', 'hex'), ('NL80211_ATTR_SCHED_SCAN_PLANS', 'hex'), ('NL80211_ATTR_PBSS', 'hex'), ('NL80211_ATTR_BSS_SELECT', 'hex'), ('NL80211_ATTR_STA_SUPPORT_P2P_PS', 'hex'), ('NL80211_ATTR_PAD', 'hex'), ('NL80211_ATTR_IFTYPE_EXT_CAPA', 'hex'), ('NL80211_ATTR_MU_MIMO_GROUP_DATA', 'hex'), ('NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR', 'hex'), ('NL80211_ATTR_SCAN_START_TIME_TSF', 'hex'), ('NL80211_ATTR_SCAN_START_TIME_TSF_BSSID', 'hex'), ('NL80211_ATTR_MEASUREMENT_DURATION', 'hex'), ('NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY', 'hex'), ('NL80211_ATTR_MESH_PEER_AID', 'hex'), ('NL80211_ATTR_NAN_MASTER_PREF', 'hex'), ('NL80211_ATTR_BANDS', 'hex'), ('NL80211_ATTR_NAN_FUNC', 'hex'), ('NL80211_ATTR_NAN_MATCH', 'hex'), ('NL80211_ATTR_FILS_KEK', 'hex'), ('NL80211_ATTR_FILS_NONCES', 'hex'), ('NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED', 'hex'), ('NL80211_ATTR_BSSID', 'hex'), ('NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI', 'hex'), ('NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST', 'hex'), ('NL80211_ATTR_TIMEOUT_REASON', 'hex'), ('NL80211_ATTR_FILS_ERP_USERNAME', 'hex'), ('NL80211_ATTR_FILS_ERP_REALM', 'hex'), ('NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM', 'hex'), ('NL80211_ATTR_FILS_ERP_RRK', 'hex'), 
('NL80211_ATTR_FILS_CACHE_ID', 'hex'), ('NL80211_ATTR_PMK', 'hex'), ('NL80211_ATTR_SCHED_SCAN_MULTI', 'hex'), ('NL80211_ATTR_SCHED_SCAN_MAX_REQS', 'hex'), ('NL80211_ATTR_WANT_1X_4WAY_HS', 'hex'), ('NL80211_ATTR_PMKR0_NAME', 'hex'), ('NL80211_ATTR_PORT_AUTHORIZED', 'hex'), ('NL80211_ATTR_EXTERNAL_AUTH_ACTION', 'hex'), ('NL80211_ATTR_EXTERNAL_AUTH_SUPPORT', 'hex'), ('NL80211_ATTR_NSS', 'hex'), ('NL80211_ATTR_ACK_SIGNAL', 'hex'), ('NL80211_ATTR_CONTROL_PORT_OVER_NL80211', 'hex'), ('NL80211_ATTR_TXQ_STATS', 'hex'), ('NL80211_ATTR_TXQ_LIMIT', 'hex'), ('NL80211_ATTR_TXQ_MEMORY_LIMIT', 'hex'), ('NL80211_ATTR_TXQ_QUANTUM', 'hex'), ('NL80211_ATTR_HE_CAPABILITY', 'hex'), ('NL80211_ATTR_FTM_RESPONDER', 'hex'), ('NL80211_ATTR_FTM_RESPONDER_STATS', 'hex'), ('NL80211_ATTR_TIMEOUT', 'hex'), ('NL80211_ATTR_PEER_MEASUREMENTS', 'hex'), ('NL80211_ATTR_AIRTIME_WEIGHT', 'hex'), ('NL80211_ATTR_STA_TX_POWER_SETTING', 'hex'), ('NL80211_ATTR_STA_TX_POWER', 'hex'), ('NL80211_ATTR_SAE_PASSWORD', 'hex'), ('NL80211_ATTR_TWT_RESPONDER', 'hex'), ('NL80211_ATTR_HE_OBSS_PD', 'hex'), ('NL80211_ATTR_WIPHY_EDMG_CHANNELS', 'hex'), ('NL80211_ATTR_WIPHY_EDMG_BW_CONFIG', 'hex'), ('NL80211_ATTR_VLAN_ID', 'hex'), ('NL80211_ATTR_HE_BSS_COLOR', 'hex'), ('NL80211_ATTR_IFTYPE_AKM_SUITES', 'hex'), ('NL80211_ATTR_TID_CONFIG', 'hex'), ('NL80211_ATTR_CONTROL_PORT_NO_PREAUTH', 'hex'), ('NL80211_ATTR_PMK_LIFETIME', 'hex'), ('NL80211_ATTR_PMK_REAUTH_THRESHOLD', 'hex'), ('NL80211_ATTR_RECEIVE_MULTICAST', 'hex'), ('NL80211_ATTR_WIPHY_FREQ_OFFSET', 'hex'), ('NL80211_ATTR_CENTER_FREQ1_OFFSET', 'hex'), ('NL80211_ATTR_SCAN_FREQ_KHZ', 'hex'), ('NL80211_ATTR_HE_6GHZ_CAPABILITY', 'hex'), ('NL80211_ATTR_FILS_DISCOVERY', 'hex'), ('NL80211_ATTR_UNSOL_BCAST_PROBE_RESP', 'hex'), ('NL80211_ATTR_S1G_CAPABILITY', 'hex'), ('NL80211_ATTR_S1G_CAPABILITY_MASK', 'hex'), ('NL80211_ATTR_SAE_PWE', 'hex'), ('NL80211_ATTR_RECONNECT_REQUESTED', 'hex'), ('NL80211_ATTR_SAR_SPEC', 'hex'), ('NL80211_ATTR_DISABLE_HE', 'hex'), ('NUM_NL80211_ATTR', 
'hex'), ) class survey_info(nla): prefix = 'NL80211_SURVEY_INFO_' nla_map = ( ('__NL80211_SURVEY_INFO_INVALID', 'none'), ('NL80211_SURVEY_INFO_FREQUENCY', 'uint32'), ('NL80211_SURVEY_INFO_NOISE', 'uint8'), ('NL80211_SURVEY_INFO_IN_USE', 'flag'), ('NL80211_SURVEY_INFO_TIME', 'uint64'), ('NL80211_SURVEY_INFO_TIME_BUSY', 'uint64'), ('NL80211_SURVEY_INFO_TIME_EXT_BUSY', 'uint64'), ('NL80211_SURVEY_INFO_TIME_RX', 'uint64'), ('NL80211_SURVEY_INFO_TIME_TX', 'uint64'), ('NL80211_SURVEY_INFO_TIME_SCAN', 'uint64'), ('NL80211_SURVEY_INFO_PAD', 'hex'), ('NL80211_SURVEY_INFO_TIME_BSS_RX', 'uint64'), ('NL80211_SURVEY_INFO_FREQUENCY_OFFSET', 'hex'), ) class band(nla): class bitrate(nla): prefix = 'NL80211_BITRATE_ATTR_' nla_map = ( ('__NL80211_BITRATE_ATTR_INVALID', 'hex'), ('NL80211_BITRATE_ATTR_RATE', 'uint32'), # 10x Mbps ('NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE', 'flag'), ) class frequency(nla): class wmm_rule(nla): prefix = 'NL80211_WMMR_' nla_map = ( ('__NL80211_WMMR_INVALID', 'hex'), ('NL80211_WMMR_CW_MIN', 'uint16'), ('NL80211_WMMR_CW_MAX', 'uint16'), ('NL80211_WMMR_AIFSN', 'uint8'), ('NL80211_WMMR_TXOP', 'uint16'), ) prefix = 'NL80211_FREQUENCY_ATTR_' nla_map = ( ('__NL80211_FREQUENCY_ATTR_INVALID', 'hex'), ('NL80211_FREQUENCY_ATTR_FREQ', 'uint32'), ('NL80211_FREQUENCY_ATTR_DISABLED', 'flag'), ('NL80211_FREQUENCY_ATTR_NO_IR', 'flag'), ('__NL80211_FREQUENCY_ATTR_NO_IBSS', 'flag'), ('NL80211_FREQUENCY_ATTR_RADAR', 'flag'), ('NL80211_FREQUENCY_ATTR_MAX_TX_POWER', 'uint32'), ('NL80211_FREQUENCY_ATTR_DFS_STATE', 'uint32'), ('NL80211_FREQUENCY_ATTR_DFS_TIME', 'uint32'), ('NL80211_FREQUENCY_ATTR_NO_HT40_MINUS', 'flag'), ('NL80211_FREQUENCY_ATTR_NO_HT40_PLUS', 'flag'), ('NL80211_FREQUENCY_ATTR_NO_80MHZ', 'flag'), ('NL80211_FREQUENCY_ATTR_NO_160MHZ', 'flag'), ('NL80211_FREQUENCY_ATTR_DFS_CAC_TIME', 'uint32'), ('NL80211_FREQUENCY_ATTR_INDOOR_ONLY', 'flag'), ('NL80211_FREQUENCY_ATTR_IR_CONCURRENT', 'flag'), ('NL80211_FREQUENCY_ATTR_NO_20MHZ', 'flag'), 
('NL80211_FREQUENCY_ATTR_NO_10MHZ', 'flag'), ('NL80211_FREQUENCY_ATTR_WMM', '*wmm_rule'), ('NL80211_FREQUENCY_ATTR_NO_HE', 'flag'), ('NL80211_FREQUENCY_ATTR_OFFSET', 'uint32'), ('NL80211_FREQUENCY_ATTR_1MHZ', 'flag'), ('NL80211_FREQUENCY_ATTR_2MHZ', 'flag'), ('NL80211_FREQUENCY_ATTR_4MHZ', 'flag'), ('NL80211_FREQUENCY_ATTR_8MHZ', 'flag'), ('NL80211_FREQUENCY_ATTR_16MHZ', 'flag'), ) class iftype_data(nla): class iftype(nla): prefix = 'NL80211_IFTYPE_' nla_map = ( ('NL80211_IFTYPE_UNSPECIFIED', 'flag'), ('NL80211_IFTYPE_ADHOC', 'flag'), ('NL80211_IFTYPE_STATION', 'flag'), ('NL80211_IFTYPE_AP', 'flag'), ('NL80211_IFTYPE_AP_VLAN', 'flag'), ('NL80211_IFTYPE_WDS', 'flag'), ('NL80211_IFTYPE_MONITOR', 'flag'), ('NL80211_IFTYPE_MESH_POINT', 'flag'), ('NL80211_IFTYPE_P2P_CLIENT', 'flag'), ('NL80211_IFTYPE_P2P_GO', 'flag'), ('NL80211_IFTYPE_P2P_DEVICE', 'flag'), ('NL80211_IFTYPE_OCB', 'flag'), ('NL80211_IFTYPE_NAN', 'flag'), ) class mcs_nss(nla): ''' HE Tx/Rx HE MCS NSS Support Field C structure:: struct ieee80211_he_mcs_nss_supp { __le16 rx_mcs_80; __le16 tx_mcs_80; __le16 rx_mcs_160; __le16 tx_mcs_160; __le16 rx_mcs_80p80; __le16 tx_mcs_80p80; } __packed; ''' fields = ( ('rx_mcs_80', ' len(data): # raise Exception(f"* bogus tail data ({count}):") return rsn_values data = data[2:] for _ in range(count): rsn_values["pairwise_cipher"].append( self._get_cipher_list(data) ) data = data[4:] if len(data) < 2: rsn_values["auth_suites"] = [defauth] count = data[0] | (data[1] << 8) if 2 + (count * 4) > len(data): # raise Exception(f"* bogus tail data ({count}):") return rsn_values data = data[2:] for _ in range(count): rsn_values["auth_suites"].append(self._get_auth_list(data)) data = data[4:] if len(data) >= 2: capabilities = [] capa = data[0] | (data[1] << 8) data = data[2:] if capa & 0x0001: capabilities.append("PreAuth") if capa & 0x0002: capabilities.append("NoPairwise") capabilities.append( [ "1-PTKSA-RC", "2-PTKSA-RC", "4-PTKSA-RC", "16-PTKSA-RC", ][(capa & 0x000C) >> 2] ) 
capabilities.append( [ "1-GTKSA-RC", "2-GTKSA-RC", "4-GTKSA-RC", "16-GTKSA-RC", ][(capa & 0x0030) >> 4] ) if capa & 0x0040: capabilities.append("MFP-required") if capa & 0x0080: capabilities.append("MFP-capable") if capa & 0x0200: capabilities.append("Peerkey-enabled") if capa & 0x0400: capabilities.append("SPP-AMSDU-capable") if capa & 0x0800: capabilities.append("SPP-AMSDU-required") if capa & 0x2000: capabilities.append("Extended-Key-ID") rsn_values["capabilities"] = capabilities if len(data) >= 2: pmkid_count = data[0] | (data[1] << 8) if len(data) < 2 + 16 * pmkid_count: # raise Exception("invalid") return rsn_values data = data[2:] for _ in range(pmkid_count): rsn_values["pmkid_ids"].append(data[:16]) data = data[16:] if len(data) >= 4: rsn_values[ "group_mgmt_cipher_suite" ] = self._get_cipher_list(data) data = data[4:] return rsn_values def binary_ht_operation(self, offset, length): data = self.data[offset : offset + length] ht_operation = {} ht_operation["PRIMARY_CHANNEL"] = data[0] ht_operation["SECONDARY_CHANNEL"] = data[1] & 0x3 try: ht_operation["CHANNEL_WIDTH"] = [ BSS_HT_OPER_CHAN_WIDTH_20, BSS_HT_OPER_CHAN_WIDTH_20_OR_40, ][(data[1] & 0x4) >> 2] except IndexError: ht_operation["CHANNEL_WIDTH"] = None try: ht_operation["HT_PROTECTION"] = [ "no", "nonmember", "20 MHz", "non-HT mixed", ][data[2] & 0x3] except IndexError: ht_operation["HT_PROTECTION"] = None ht_operation.update( { "RIFS": (data[1] & 0x8) >> 3, "NON_GF_PRESENT": (data[2] & 0x4) >> 2, "OBSS_NON_GF_PRESENT": (data[2] & 0x10) >> 4, "DUAL_BEACON": (data[4] & 0x40) >> 6, "DUAL_CTS_PROTECTION": (data[4] & 0x80) >> 7, "STBC_BEACON": data[5] & 0x1, "L_SIG_TXOP_PROT": (data[5] & 0x2) >> 1, "PCO_ACTIVE": (data[5] & 0x4) >> 2, "PCO_PHASE": (data[5] & 0x8) >> 3, } ) return ht_operation def binary_vht_operation(self, offset, length): data = self.data[offset : offset + length] vht_operation = { "CENTER_FREQ_SEG_1": data[1], "CENTER_FREQ_SEG_2": data[1], "VHT_BASIC_MCS_SET": (data[4], data[3]), } try: 
vht_operation["CHANNEL_WIDTH"] = [ BSS_VHT_OPER_CHAN_WIDTH_20_OR_40, BSS_VHT_OPER_CHAN_WIDTH_80, BSS_VHT_OPER_CHAN_WIDTH_80P80, BSS_VHT_OPER_CHAN_WIDTH_160, ][data[0]] except IndexError: vht_operation["CHANNEL_WIDTH"] = None return vht_operation def decode_nlas(self): return def decode(self): nla_base.decode(self) self.value = {} init = offset = self.offset + 4 while (offset - init) < (self.length - 4): (msg_type, length) = struct.unpack_from( 'BB', self.data, offset ) if msg_type == NL80211_BSS_ELEMENTS_SSID: (self.value["SSID"],) = struct.unpack_from( '%is' % length, self.data, offset + 2 ) if msg_type == NL80211_BSS_ELEMENTS_SUPPORTED_RATES: supported_rates = self.binary_rates(offset + 2, length) self.value["SUPPORTED_RATES"] = supported_rates if msg_type == NL80211_BSS_ELEMENTS_CHANNEL: (channel,) = struct.unpack_from( 'B', self.data, offset + 2 ) self.value["CHANNEL"] = channel if msg_type == NL80211_BSS_ELEMENTS_TIM: self.value["TRAFFIC INDICATION MAP"] = self.binary_tim( offset + 2 ) if msg_type == NL80211_BSS_ELEMENTS_RSN: self.value["RSN"] = self.binary_rsn( offset + 2, length, "CCMP", "IEEE 802.1X" ) if msg_type == NL80211_BSS_ELEMENTS_EXTENDED_RATE: extended_rates = self.binary_rates(offset + 2, length) self.value["EXTENDED_RATES"] = extended_rates if msg_type == NL80211_BSS_ELEMENTS_VENDOR: # There may be multiple vendor IEs, create a list if "VENDOR" not in self.value.keys(): self.value["VENDOR"] = [] (vendor_ie,) = struct.unpack_from( '%is' % length, self.data, offset + 2 ) self.value["VENDOR"].append(vendor_ie) if msg_type == NL80211_BSS_ELEMENTS_HT_OPERATION: self.value["HT_OPERATION"] = self.binary_ht_operation( offset + 2, length ) if msg_type == NL80211_BSS_ELEMENTS_VHT_OPERATION: self.value[ "VHT_OPERATION" ] = self.binary_vht_operation(offset + 2, length) offset += length + 2 class TSF(nla_base): """Timing Synchronization Function""" def decode(self): nla_base.decode(self) offset = self.offset + 4 self.value = {} (tsf,) = 
struct.unpack_from('Q', self.data, offset) self.value["VALUE"] = tsf # TSF is in microseconds self.value["TIME"] = datetime.timedelta(microseconds=tsf) class SignalMBM(nla_base): def decode(self): nla_base.decode(self) offset = self.offset + 4 self.value = {} (ss,) = struct.unpack_from('i', self.data, offset) self.value["VALUE"] = ss self.value["SIGNAL_STRENGTH"] = { "VALUE": ss / 100.0, "UNITS": "dBm", } class capability(nla_base): # iw scan.c WLAN_CAPABILITY_ESS = 1 << 0 WLAN_CAPABILITY_IBSS = 1 << 1 WLAN_CAPABILITY_CF_POLLABLE = 1 << 2 WLAN_CAPABILITY_CF_POLL_REQUEST = 1 << 3 WLAN_CAPABILITY_PRIVACY = 1 << 4 WLAN_CAPABILITY_SHORT_PREAMBLE = 1 << 5 WLAN_CAPABILITY_PBCC = 1 << 6 WLAN_CAPABILITY_CHANNEL_AGILITY = 1 << 7 WLAN_CAPABILITY_SPECTRUM_MGMT = 1 << 8 WLAN_CAPABILITY_QOS = 1 << 9 WLAN_CAPABILITY_SHORT_SLOT_TIME = 1 << 10 WLAN_CAPABILITY_APSD = 1 << 11 WLAN_CAPABILITY_RADIO_MEASURE = 1 << 12 WLAN_CAPABILITY_DSSS_OFDM = 1 << 13 WLAN_CAPABILITY_DEL_BACK = 1 << 14 WLAN_CAPABILITY_IMM_BACK = 1 << 15 # def decode_nlas(self): # return def decode(self): nla_base.decode(self) offset = self.offset + 4 self.value = {} (capa,) = struct.unpack_from('H', self.data, offset) self.value["VALUE"] = capa s = [] if capa & self.WLAN_CAPABILITY_ESS: s.append("ESS") if capa & self.WLAN_CAPABILITY_IBSS: s.append("IBSS") if capa & self.WLAN_CAPABILITY_CF_POLLABLE: s.append("CfPollable") if capa & self.WLAN_CAPABILITY_CF_POLL_REQUEST: s.append("CfPollReq") if capa & self.WLAN_CAPABILITY_PRIVACY: s.append("Privacy") if capa & self.WLAN_CAPABILITY_SHORT_PREAMBLE: s.append("ShortPreamble") if capa & self.WLAN_CAPABILITY_PBCC: s.append("PBCC") if capa & self.WLAN_CAPABILITY_CHANNEL_AGILITY: s.append("ChannelAgility") if capa & self.WLAN_CAPABILITY_SPECTRUM_MGMT: s.append("SpectrumMgmt") if capa & self.WLAN_CAPABILITY_QOS: s.append("QoS") if capa & self.WLAN_CAPABILITY_SHORT_SLOT_TIME: s.append("ShortSlotTime") if capa & self.WLAN_CAPABILITY_APSD: s.append("APSD") if capa & 
self.WLAN_CAPABILITY_RADIO_MEASURE: s.append("RadioMeasure") if capa & self.WLAN_CAPABILITY_DSSS_OFDM: s.append("DSSS-OFDM") if capa & self.WLAN_CAPABILITY_DEL_BACK: s.append("DelayedBACK") if capa & self.WLAN_CAPABILITY_IMM_BACK: s.append("ImmediateBACK") self.value['CAPABILITIES'] = " ".join(s) prefix = 'NL80211_BSS_' nla_map = ( ('__NL80211_BSS_INVALID', 'hex'), ('NL80211_BSS_BSSID', 'hex'), ('NL80211_BSS_FREQUENCY', 'uint32'), ('NL80211_BSS_TSF', 'TSF'), ('NL80211_BSS_BEACON_INTERVAL', 'uint16'), ('NL80211_BSS_CAPABILITY', 'capability'), ('NL80211_BSS_INFORMATION_ELEMENTS', 'elementsBinary'), ('NL80211_BSS_SIGNAL_MBM', 'SignalMBM'), ('NL80211_BSS_SIGNAL_UNSPEC', 'uint8'), ('NL80211_BSS_STATUS', 'uint32'), ('NL80211_BSS_SEEN_MS_AGO', 'uint32'), ('NL80211_BSS_BEACON_IES', 'elementsBinary'), ('NL80211_BSS_CHAN_WIDTH', 'uint32'), ('NL80211_BSS_BEACON_TSF', 'uint64'), ('NL80211_BSS_PRESP_DATA', 'hex'), ('NL80211_BSS_MAX', 'hex'), ) class reg_rule(nla): prefix = 'NL80211_ATTR_' nla_map = ( ('__NL80211_REG_RULE_ATTR_INVALID', 'hex'), ('NL80211_ATTR_REG_RULE_FLAGS', 'uint32'), ('NL80211_ATTR_FREQ_RANGE_START', 'uint32'), ('NL80211_ATTR_FREQ_RANGE_END', 'uint32'), ('NL80211_ATTR_FREQ_RANGE_MAX_BW', 'uint32'), ('NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN', 'uint32'), ('NL80211_ATTR_POWER_RULE_MAX_EIRP', 'uint32'), ('NL80211_ATTR_DFS_CAC_TIME', 'uint32'), ) class STAInfo(nla): class STAFlags(nla_base): ''' Decode the flags that may be set. 
See nl80211.h: struct nl80211_sta_flag_update, NL80211_STA_INFO_STA_FLAGS ''' def decode_nlas(self): return def decode(self): nla_base.decode(self) self.value = {} self.value["AUTHORIZED"] = False self.value["SHORT_PREAMBLE"] = False self.value["WME"] = False self.value["MFP"] = False self.value["AUTHENTICATED"] = False self.value["TDLS_PEER"] = False self.value["ASSOCIATED"] = False init = offset = self.offset + 4 while (offset - init) < (self.length - 4): (msg_type, length) = struct.unpack_from( 'BB', self.data, offset ) mask, set_ = struct.unpack_from( 'II', self.data, offset + 2 ) if mask & NL80211_STA_FLAG_AUTHORIZED: if set_ & NL80211_STA_FLAG_AUTHORIZED: self.value["AUTHORIZED"] = True if mask & NL80211_STA_FLAG_SHORT_PREAMBLE: if set_ & NL80211_STA_FLAG_SHORT_PREAMBLE: self.value["SHORT_PREAMBLE"] = True if mask & NL80211_STA_FLAG_WME: if set_ & NL80211_STA_FLAG_WME: self.value["WME"] = True if mask & NL80211_STA_FLAG_MFP: if set_ & NL80211_STA_FLAG_MFP: self.value["MFP"] = True if mask & NL80211_STA_FLAG_AUTHENTICATED: if set_ & NL80211_STA_FLAG_AUTHENTICATED: self.value["AUTHENTICATED"] = True if mask & NL80211_STA_FLAG_TDLS_PEER: if set_ & NL80211_STA_FLAG_TDLS_PEER: self.value["TDLS_PEER"] = True if mask & NL80211_STA_FLAG_ASSOCIATED: if set_ & NL80211_STA_FLAG_ASSOCIATED: self.value["ASSOCIATED"] = True offset += length + 2 prefix = 'NL80211_STA_INFO_' nla_map = ( ('__NL80211_STA_INFO_INVALID', 'hex'), ('NL80211_STA_INFO_INACTIVE_TIME', 'uint32'), ('NL80211_STA_INFO_RX_BYTES', 'uint32'), ('NL80211_STA_INFO_TX_BYTES', 'uint32'), ('NL80211_STA_INFO_LLID', 'uint16'), ('NL80211_STA_INFO_PLID', 'uint16'), ('NL80211_STA_INFO_PLINK_STATE', 'uint8'), ('NL80211_STA_INFO_SIGNAL', 'int8'), ('NL80211_STA_INFO_TX_BITRATE', 'hex'), ('NL80211_STA_INFO_RX_PACKETS', 'uint32'), ('NL80211_STA_INFO_TX_PACKETS', 'uint32'), ('NL80211_STA_INFO_TX_RETRIES', 'uint32'), ('NL80211_STA_INFO_TX_FAILED', 'uint32'), ('NL80211_STA_INFO_SIGNAL_AVG', 'int8'), 
('NL80211_STA_INFO_RX_BITRATE', 'hex'), ('NL80211_STA_INFO_BSS_PARAM', 'hex'), ('NL80211_STA_INFO_CONNECTED_TIME', 'uint32'), ('NL80211_STA_INFO_STA_FLAGS', 'STAFlags'), ('NL80211_STA_INFO_BEACON_LOSS', 'uint32'), ('NL80211_STA_INFO_T_OFFSET', 'int64'), ('NL80211_STA_INFO_LOCAL_PM', 'hex'), ('NL80211_STA_INFO_PEER_PM', 'hex'), ('NL80211_STA_INFO_NONPEER_PM', 'hex'), ('NL80211_STA_INFO_RX_BYTES64', 'uint64'), ('NL80211_STA_INFO_TX_BYTES64', 'uint64'), ('NL80211_STA_INFO_CHAIN_SIGNAL', 'string'), ('NL80211_STA_INFO_CHAIN_SIGNAL_AVG', 'string'), ('NL80211_STA_INFO_EXPECTED_THROUGHPUT', 'uint32'), ('NL80211_STA_INFO_RX_DROP_MISC', 'uint32'), ('NL80211_STA_INFO_BEACON_RX', 'uint64'), ('NL80211_STA_INFO_BEACON_SIGNAL_AVG', 'uint8'), ('NL80211_STA_INFO_TID_STATS', 'hex'), ('NL80211_STA_INFO_RX_DURATION', 'uint64'), ('NL80211_STA_INFO_PAD', 'hex'), ('NL80211_STA_INFO_MAX', 'hex'), ) class supported_commands(nla_base): ''' Supported commands format NLA structure header:: +++++++++++++++++++++++ | uint16_t | uint16_t | | length | NLA type | +++++++++++++++++++++++ followed by multiple command entries:: ++++++++++++++++++++++++++++++++++ | uint16_t | uint16_t | uint32_t | | type | index | cmd | ++++++++++++++++++++++++++++++++++ ''' def decode(self): nla_base.decode(self) self.value = [] # Skip the first four bytes: NLA length and NLA type length = self.length - 4 offset = self.offset + 4 while length > 0: (msg_type, index, cmd_index) = struct.unpack_from( 'HHI', self.data, offset ) length -= 8 offset += 8 # Lookup for command name or assign a default name name = NL80211_VALUES.get( cmd_index, 'NL80211_CMD_{}'.format(cmd_index) ) self.value.append(name) class cipher_suites(nla_base): ''' Cipher suites format NLA structure header:: +++++++++++++++++++++++ | uint16_t | uint16_t | | length | NLA type | +++++++++++++++++++++++ followed by multiple entries:: ++++++++++++ | uint32_t | | cipher | ++++++++++++ ''' def decode(self): nla_base.decode(self) self.value = [] # Skip the 
first four bytes: NLA length and NLA type length = self.length - 4 offset = self.offset + 4 while length > 0: (cipher,) = struct.unpack_from(' 0: (iflen, iftype) = struct.unpack_from('= 0: just a bufsize If `noraise` is true, error messages will be treated as any other message. ''' ctime = time.time() with self.lock[msg_seq]: if bufsize == -1: # get bufsize from the network data bufsize = struct.unpack("I", self.recv(4, MSG_PEEK))[0] elif bufsize == 0: # get bufsize from SO_RCVBUF bufsize = self.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2 tmsg = None enough = False backlog_acquired = False try: while not enough: # 8<----------------------------------------------------------- # # This stage changes the backlog, so use mutex to # prevent side changes self.backlog_lock.acquire() backlog_acquired = True ## # Stage 1. BEGIN # # 8<----------------------------------------------------------- # # Check backlog and return already collected # messages. # if msg_seq == -1 and any(self.backlog.values()): for seq, backlog in self.backlog.items(): if backlog: for msg in backlog: yield msg self.backlog[seq] = [] enough = True break elif msg_seq == 0 and self.backlog[0]: # Zero queue. # # Load the backlog, if there is valid # content in it for msg in self.backlog[0]: yield msg self.backlog[0] = [] # And just exit break elif msg_seq > 0 and len(self.backlog.get(msg_seq, [])): # Any other msg_seq. # # Collect messages up to the terminator. # Terminator conditions: # * NLMSG_ERROR != 0 # * NLMSG_DONE # * terminate() function (if defined) # * not NLM_F_MULTI # # Please note, that if terminator not occured, # more `recv()` rounds CAN be required. 
for msg in tuple(self.backlog[msg_seq]): # Drop the message from the backlog, if any self.backlog[msg_seq].remove(msg) # If there is an error, raise exception if ( msg['header']['error'] is not None and not noraise ): # reschedule all the remaining messages, # including errors and acks, into a # separate deque self.error_deque.extend(self.backlog[msg_seq]) # flush the backlog for this msg_seq del self.backlog[msg_seq] # The loop is done raise msg['header']['error'] # If it is the terminator message, say "enough" # and requeue all the rest into Zero queue if terminate is not None: tmsg = terminate(msg) if isinstance(tmsg, nlmsg): yield msg if (msg['header']['type'] == NLMSG_DONE) or tmsg: # The loop is done enough = True # If it is just a normal message, append it to # the response if not enough: # finish the loop on single messages if not msg['header']['flags'] & NLM_F_MULTI: enough = True yield msg # Enough is enough, requeue the rest and delete # our backlog if enough: self.backlog[0].extend(self.backlog[msg_seq]) del self.backlog[msg_seq] break # Next iteration self.backlog_lock.release() backlog_acquired = False else: # Stage 1. END # # 8<------------------------------------------------------- # # Stage 2. BEGIN # # 8<------------------------------------------------------- # # Receive the data from the socket and put the messages # into the backlog # self.backlog_lock.release() backlog_acquired = False ## # # Control the timeout. We should not be within the # function more than TIMEOUT seconds. All the locks # MUST be released here. 
# if (msg_seq != 0) and ( time.time() - ctime > self.get_timeout ): # requeue already received for that msg_seq self.backlog[0].extend(self.backlog[msg_seq]) del self.backlog[msg_seq] # throw an exception if self.get_timeout_exception: raise self.get_timeout_exception() else: return # if self.read_lock.acquire(False): try: self.change_master.clear() # If the socket is free to read from, occupy # it and wait for the data # # This is a time consuming process, so all the # locks, except the read lock must be released data = self.socket.recv(bufsize) # Parse data msgs = tuple( self.socket.marshal.parse( data, msg_seq, callback ) ) # Reset ctime -- timeout should be measured # for every turn separately ctime = time.time() # current = self.buffer_queue.qsize() delta = current - self.qsize delay = 0 if delta > 10: delay = min( 3, max(0.01, float(current) / 60000) ) message = ( "Packet burst: " "delta=%s qsize=%s delay=%s" % (delta, current, delay) ) if delay < 1: log.debug(message) else: log.warning(message) time.sleep(delay) self.qsize = current # We've got the data, lock the backlog again with self.backlog_lock: for msg in msgs: msg['header']['target'] = self.target msg['header']['stats'] = Stats( current, delta, delay ) seq = msg['header']['sequence_number'] if seq not in self.backlog: if ( msg['header']['type'] == NLMSG_ERROR ): # Drop orphaned NLMSG_ERROR # messages continue seq = 0 # 8<----------------------------------- # Callbacks section for cr in self.callbacks: try: if cr[0](msg): cr[1](msg, *cr[2]) except: # FIXME # # Usually such code formatting # means that the method should # be refactored to avoid such # indentation. # # Plz do something with it. 
# lw = log.warning lw("Callback fail: %s" % (cr)) lw(traceback.format_exc()) # 8<----------------------------------- self.backlog[seq].append(msg) # Now wake up other threads self.change_master.set() finally: # Finally, release the read lock: all data # processed self.read_lock.release() else: # If the socket is occupied and there is still no # data for us, wait for the next master change or # for a timeout self.change_master.wait(1) # 8<------------------------------------------------------- # # Stage 2. END # # 8<------------------------------------------------------- finally: if backlog_acquired: self.backlog_lock.release() class EngineThreadUnsafe(EngineBase): ''' Thread unsafe nlsocket base class. Does not implement any locks on message processing. Discards any message if the sequence number does not match. ''' def put( self, msg, msg_type, msg_flags=NLM_F_REQUEST, addr=(0, 0), msg_seq=0, msg_pid=None, ): if not isinstance(msg, nlmsg): msg_class = self.marshal.msg_map[msg_type] msg = msg_class(msg) if msg_pid is None: msg_pid = self.epid or os.getpid() msg['header']['type'] = msg_type msg['header']['flags'] = msg_flags msg['header']['sequence_number'] = msg_seq msg['header']['pid'] = msg_pid self.sendto_gate(msg, addr) def get( self, bufsize=DEFAULT_RCVBUF, msg_seq=0, terminate=None, callback=None, noraise=False, ): if bufsize == -1: # get bufsize from the network data bufsize = struct.unpack("I", self.recv(4, MSG_PEEK))[0] elif bufsize == 0: # get bufsize from SO_RCVBUF bufsize = self.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2 enough = False while not enough: data = self.recv(bufsize) *messages, last = tuple( self.marshal.parse(data, msg_seq, callback) ) for msg in messages: msg['header']['target'] = self.target msg['header']['stats'] = Stats(0, 0, 0) yield msg if last['header']['type'] == NLMSG_DONE: break if ( (msg_seq == 0) or (not last['header']['flags'] & NLM_F_MULTI) or (callable(terminate) and terminate(last)) ): enough = True yield last class 
NetlinkSocketBase: ''' Generic netlink socket. ''' input_from_buffer_queue = False def __init__( self, family=NETLINK_GENERIC, port=None, pid=None, fileno=None, sndbuf=1048576, rcvbuf=1048576, all_ns=False, async_qsize=None, nlm_generator=None, target='localhost', ext_ack=False, strict_check=False, groups=0, nlm_echo=False, ): # 8<----------------------------------------- self.config = { 'family': family, 'port': port, 'pid': pid, 'fileno': fileno, 'sndbuf': sndbuf, 'rcvbuf': rcvbuf, 'all_ns': all_ns, 'async_qsize': async_qsize, 'target': target, 'nlm_generator': nlm_generator, 'ext_ack': ext_ack, 'strict_check': strict_check, 'groups': groups, 'nlm_echo': nlm_echo, } # 8<----------------------------------------- self.addr_pool = AddrPool(minaddr=0x000000FF, maxaddr=0x0000FFFF) self.epid = None self.port = 0 self.fixed = True self.family = family self._fileno = fileno self._sndbuf = sndbuf self._rcvbuf = rcvbuf self._use_peek = True self.backlog = {0: []} self.error_deque = collections.deque(maxlen=1000) self.callbacks = [] # [(predicate, callback, args), ...] 
self.buffer_thread = None self.closed = False self.compiled = None self.uname = config.uname self.target = target self.groups = groups self.capabilities = { 'create_bridge': config.kernel > [3, 2, 0], 'create_bond': config.kernel > [3, 2, 0], 'create_dummy': True, 'provide_master': config.kernel[0] > 2, } self.backlog_lock = threading.Lock() self.sys_lock = threading.RLock() self.lock = LockFactory() self._sock = None self._ctrl_read, self._ctrl_write = os.pipe() if async_qsize is None: async_qsize = config.async_qsize self.async_qsize = async_qsize if nlm_generator is None: nlm_generator = config.nlm_generator self.nlm_generator = nlm_generator self.buffer_queue = Queue(maxsize=async_qsize) self.log = [] self.all_ns = all_ns self.ext_ack = ext_ack self.strict_check = strict_check if pid is None: self.pid = os.getpid() & 0x3FFFFF self.port = port self.fixed = self.port is not None elif pid == 0: self.pid = os.getpid() else: self.pid = pid # 8<----------------------------------------- self.marshal = Marshal() # 8<----------------------------------------- if not nlm_generator: def nlm_request(*argv, **kwarg): return tuple(self._genlm_request(*argv, **kwarg)) def get(*argv, **kwarg): return tuple(self._genlm_get(*argv, **kwarg)) self._genlm_request = self.nlm_request self._genlm_get = self.get self.nlm_request = nlm_request self.get = get def nlm_request_batch(*argv, **kwarg): return tuple(self._genlm_request_batch(*argv, **kwarg)) self._genlm_request_batch = self.nlm_request_batch self.nlm_request_batch = nlm_request_batch # Set defaults self.post_init() self.engine = EngineThreadSafe(self) def post_init(self): pass def clone(self): return type(self)(**self.config) def put( self, msg, msg_type, msg_flags=NLM_F_REQUEST, addr=(0, 0), msg_seq=0, msg_pid=None, ): return self.engine.put( msg, msg_type, msg_flags, addr, msg_seq, msg_pid ) def get( self, bufsize=DEFAULT_RCVBUF, msg_seq=0, terminate=None, callback=None, noraise=False, ): return self.engine.get(bufsize, 
msg_seq, terminate, callback, noraise) def close(self, code=errno.ECONNRESET): if code > 0 and self.input_from_buffer_queue: self.buffer_queue.put( struct.pack('IHHQIQQ', 28, 2, 0, 0, code, 0, 0) ) try: os.close(self._ctrl_write) os.close(self._ctrl_read) except OSError: # ignore the case when it is closed already pass def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def release(self): warnings.warn('deprecated, use close() instead', DeprecationWarning) self.close() def register_callback(self, callback, predicate=lambda x: True, args=None): ''' Register a callback to run on a message arrival. Callback is the function that will be called with the message as the first argument. Predicate is the optional callable object, that returns True or False. Upon True, the callback will be called. Upon False it will not. Args is a list or tuple of arguments. Simplest example, assume ipr is the IPRoute() instance:: # create a simplest callback that will print messages def cb(msg): print(msg) # register callback for any message: ipr.register_callback(cb) More complex example, with filtering:: # Set object's attribute after the message key def cb(msg, obj): obj.some_attr = msg["some key"] # Register the callback only for the loopback device, index 1: ipr.register_callback(cb, lambda x: x.get('index', None) == 1, (self, )) Please note: you do **not** need to register the default 0 queue to invoke callbacks on broadcast messages. Callbacks are iterated **before** messages get enqueued. ''' if args is None: args = [] self.callbacks.append((predicate, callback, args)) def unregister_callback(self, callback): ''' Remove the first reference to the function from the callback register ''' cb = tuple(self.callbacks) for cr in cb: if cr[1] == callback: self.callbacks.pop(cb.index(cr)) return def register_policy(self, policy, msg_class=None): ''' Register netlink encoding/decoding policy. 
Can be specified in two ways: `nlsocket.register_policy(MSG_ID, msg_class)` to register one particular rule, or `nlsocket.register_policy({MSG_ID1: msg_class})` to register several rules at once. E.g.:: policy = {RTM_NEWLINK: ifinfmsg, RTM_DELLINK: ifinfmsg, RTM_NEWADDR: ifaddrmsg, RTM_DELADDR: ifaddrmsg} nlsocket.register_policy(policy) One can call `register_policy()` as many times, as one want to -- it will just extend the current policy scheme, not replace it. ''' if isinstance(policy, int) and msg_class is not None: policy = {policy: msg_class} if not isinstance(policy, dict): raise TypeError('wrong policy type') for key in policy: self.marshal.msg_map[key] = policy[key] return self.marshal.msg_map def unregister_policy(self, policy): ''' Unregister policy. Policy can be: - int -- then it will just remove one policy - list or tuple of ints -- remove all given - dict -- remove policies by keys from dict In the last case the routine will ignore dict values, it is implemented so just to make it compatible with `get_policy_map()` return value. ''' if isinstance(policy, int): policy = [policy] elif isinstance(policy, dict): policy = list(policy) if not isinstance(policy, (tuple, list, set)): raise TypeError('wrong policy type') for key in policy: del self.marshal.msg_map[key] return self.marshal.msg_map def get_policy_map(self, policy=None): ''' Return policy for a given message type or for all message types. Policy parameter can be either int, or a list of ints. Always return dictionary. 
''' if policy is None: return self.marshal.msg_map if isinstance(policy, int): policy = [policy] if not isinstance(policy, (list, tuple, set)): raise TypeError('wrong policy type') ret = {} for key in policy: ret[key] = self.marshal.msg_map[key] return ret def _peek_bufsize(self, socket_descriptor): data = bytearray() try: bufsize, _ = socket_descriptor.recvfrom_into( data, 0, MSG_DONTWAIT | MSG_PEEK | MSG_TRUNC ) except BlockingIOError: self._use_peek = False bufsize = socket_descriptor.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2 return bufsize def sendto(self, *argv, **kwarg): return self._sendto(*argv, **kwarg) def recv(self, bufsize, flags=0): if self.input_from_buffer_queue: data_in = self.buffer_queue.get() if isinstance(data_in, Exception): raise data_in return data_in return self._sock.recv( self._peek_bufsize(self._sock) if self._use_peek else bufsize, flags, ) def recv_into(self, data, *argv, **kwarg): if self.input_from_buffer_queue: data_in = self.buffer_queue.get() if isinstance(data, Exception): raise data_in data[:] = data_in return len(data_in) return self._sock.recv_into(data, *argv, **kwarg) def buffer_thread_routine(self): poll = select.poll() poll.register(self._sock, select.POLLIN | select.POLLPRI) poll.register(self._ctrl_read, select.POLLIN | select.POLLPRI) sockfd = self._sock.fileno() while True: events = poll.poll() for fd, event in events: if fd == sockfd: try: data = bytearray(64000) self._sock.recv_into(data, 64000) self.buffer_queue.put_nowait(data) except Exception as e: self.buffer_queue.put(e) return else: return def compile(self): return CompileContext(self) def _send_batch(self, msgs, addr=(0, 0)): with self.backlog_lock: for msg in msgs: self.backlog[msg['header']['sequence_number']] = [] # We have locked the message locks in the caller already. 
data = bytearray() for msg in msgs: if not isinstance(msg, nlmsg): msg_class = self.marshal.msg_map[msg['header']['type']] msg = msg_class(msg) msg.reset() msg.encode() data += msg.data if self.compiled is not None: return self.compiled.append(data) self._sock.sendto(data, addr) def sendto_gate(self, msg, addr): msg.reset() msg.encode() if self.compiled is not None: return self.compiled.append(msg.data) return self._sock.sendto(msg.data, addr) def nlm_request_batch(self, msgs, noraise=False): """ This function is for messages which are expected to have side effects. Do not blindly retry in case of errors as this might duplicate them. """ expected_responses = [] acquired = 0 seqs = self.addr_pool.alloc_multi(len(msgs)) try: for seq in seqs: self.lock[seq].acquire() acquired += 1 for seq, msg in zip(seqs, msgs): msg['header']['sequence_number'] = seq if 'pid' not in msg['header']: msg['header']['pid'] = self.epid or os.getpid() if (msg['header']['flags'] & NLM_F_ACK) or ( msg['header']['flags'] & NLM_F_DUMP ): expected_responses.append(seq) self._send_batch(msgs) if self.compiled is not None: for data in self.compiled: yield data else: for seq in expected_responses: for msg in self.get(msg_seq=seq, noraise=noraise): if msg['header']['flags'] & NLM_F_DUMP_INTR: # Leave error handling to the caller raise NetlinkDumpInterrupted() yield msg finally: # Release locks in reverse order. for seq in seqs[acquired - 1 :: -1]: self.lock[seq].release() with self.backlog_lock: for seq in seqs: # Clear the backlog. We may have raised an error # causing the backlog to not be consumed entirely. 
if seq in self.backlog: del self.backlog[seq] self.addr_pool.free(seq, ban=0xFF) def nlm_request( self, msg, msg_type, msg_flags=NLM_F_REQUEST | NLM_F_DUMP, terminate=None, callback=None, parser=None, ): msg_seq = self.addr_pool.alloc() defer = None if callable(parser): self.marshal.seq_map[msg_seq] = parser with self.lock[msg_seq]: retry_count = 0 try: while True: try: self.put(msg, msg_type, msg_flags, msg_seq=msg_seq) if self.compiled is not None: for data in self.compiled: yield data else: for msg in self.get( msg_seq=msg_seq, terminate=terminate, callback=callback, ): # analyze the response for effects to be # deferred if ( defer is None and msg['header']['flags'] & NLM_F_DUMP_INTR ): defer = NetlinkDumpInterrupted() yield msg break except NetlinkError as e: if e.code != errno.EBUSY: raise if retry_count >= 30: raise log.warning('Error 16, retry {}.'.format(retry_count)) time.sleep(0.3) retry_count += 1 continue except Exception: raise finally: # Ban this msg_seq for 0xff rounds # # It's a long story. Modern kernels for RTM_SET.* # operations always return NLMSG_ERROR(0) == success, # even not setting NLM_F_MULTI flag on other response # messages and thus w/o any NLMSG_DONE. So, how to detect # the response end? One can not rely on NLMSG_ERROR on # old kernels, but we have to support them too. Ty, we # just ban msg_seq for several rounds, and NLMSG_ERROR, # being received, will become orphaned and just dropped. # # Hack, but true. 
self.addr_pool.free(msg_seq, ban=0xFF) if msg_seq in self.marshal.seq_map: self.marshal.seq_map.pop(msg_seq) if defer is not None: raise defer class BatchAddrPool: def alloc(self, *argv, **kwarg): return 0 def free(self, *argv, **kwarg): pass class BatchBacklogQueue(list): def append(self, *argv, **kwarg): pass def pop(self, *argv, **kwarg): pass class BatchBacklog(dict): def __getitem__(self, key): return BatchBacklogQueue() def __setitem__(self, key, value): pass def __delitem__(self, key): pass class BatchSocket(NetlinkSocketBase): def post_init(self): self.backlog = BatchBacklog() self.addr_pool = BatchAddrPool() self._sock = None self.reset() def reset(self): self.batch = bytearray() def nlm_request( self, msg, msg_type, msg_flags=NLM_F_REQUEST | NLM_F_DUMP, terminate=None, callback=None, ): msg_seq = self.addr_pool.alloc() msg_pid = self.epid or os.getpid() msg['header']['type'] = msg_type msg['header']['flags'] = msg_flags msg['header']['sequence_number'] = msg_seq msg['header']['pid'] = msg_pid msg.data = self.batch msg.offset = len(self.batch) msg.encode() return [] def get(self, *argv, **kwarg): pass class NetlinkSocket(NetlinkSocketBase): def post_init(self): # recreate the underlying socket with self.sys_lock: if self._sock is not None: self._sock.close() self._sock = config.SocketBase( AF_NETLINK, SOCK_DGRAM, self.family, self._fileno ) self.setsockopt(SOL_SOCKET, SO_SNDBUF, self._sndbuf) self.setsockopt(SOL_SOCKET, SO_RCVBUF, self._rcvbuf) if self.ext_ack: self.setsockopt(SOL_NETLINK, NETLINK_EXT_ACK, 1) if self.all_ns: self.setsockopt(SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, 1) if self.strict_check: self.setsockopt(SOL_NETLINK, NETLINK_GET_STRICT_CHK, 1) def __getattr__(self, attr): if attr in ( 'getsockname', 'getsockopt', 'makefile', 'setsockopt', 'setblocking', 'settimeout', 'gettimeout', 'shutdown', 'recvfrom', 'recvfrom_into', 'fileno', ): return getattr(self._sock, attr) elif attr in ('_sendto', '_recv', '_recv_into'): return getattr(self._sock, 
attr.lstrip("_")) raise AttributeError(attr) def bind(self, groups=0, pid=None, **kwarg): ''' Bind the socket to given multicast groups, using given pid. - If pid is None, use automatic port allocation - If pid == 0, use process' pid - If pid == , use the value instead of pid ''' if pid is not None: self.port = 0 self.fixed = True self.pid = pid or os.getpid() if 'async' in kwarg: # FIXME # raise deprecation error after 0.5.3 # log.warning( 'use "async_cache" instead of "async", ' '"async" is a keyword from Python 3.7' ) async_cache = kwarg.get('async_cache') or kwarg.get('async') self.groups = groups # if we have pre-defined port, use it strictly if self.fixed: self.epid = self.pid + (self.port << 22) self._sock.bind((self.epid, self.groups)) else: for port in range(1024): try: self.port = port self.epid = self.pid + (self.port << 22) self._sock.bind((self.epid, self.groups)) break except Exception: # create a new underlying socket -- on kernel 4 # one failed bind() makes the socket useless self.post_init() else: raise KeyError('no free address available') # all is OK till now, so start async recv, if we need if async_cache: self.buffer_thread = threading.Thread( name="Netlink async cache", target=self.buffer_thread_routine ) self.input_from_buffer_queue = True self.buffer_thread.daemon = True self.buffer_thread.start() def add_membership(self, group): self.setsockopt(SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, group) def drop_membership(self, group): self.setsockopt(SOL_NETLINK, NETLINK_DROP_MEMBERSHIP, group) def close(self, code=errno.ECONNRESET): ''' Correctly close the socket and free all resources. 
''' with self.sys_lock: if self.closed: return self.closed = True if self.buffer_thread: os.write(self._ctrl_write, b'exit') self.buffer_thread.join() super(NetlinkSocket, self).close(code=code) # Common shutdown procedure self._sock.close() class ChaoticNetlinkSocket(NetlinkSocket): success_rate = 1 def __init__(self, *argv, **kwarg): self.success_rate = kwarg.pop('success_rate', 0.7) super(ChaoticNetlinkSocket, self).__init__(*argv, **kwarg) def get(self, *argv, **kwarg): if random.random() > self.success_rate: raise ChaoticException() return super(ChaoticNetlinkSocket, self).get(*argv, **kwarg) pyroute2-0.7.11/pyroute2/netlink/proxy.py000066400000000000000000000044151455030217500203670ustar00rootroot00000000000000''' Netlink proxy engine ''' import errno import logging import struct import threading import traceback from pyroute2.netlink.exceptions import NetlinkError log = logging.getLogger(__name__) class NetlinkProxy(object): ''' Proxy schemes:: User -> NetlinkProxy -> Kernel | <---------+ User <- NetlinkProxy <- Kernel ''' def __init__(self, policy='forward', nl=None, lock=None): self.nl = nl self.lock = lock or threading.Lock() self.pmap = {} self.policy = policy def handle(self, msg): # # match the packet # ptype = msg['header']['type'] plugin = self.pmap.get(ptype, None) if plugin is not None: with self.lock: try: ret = plugin(msg, self.nl) if ret is None: # # The packet is terminated in the plugin, # return the NLMSG_ERR == 0 # # FIXME: optimize # newmsg = struct.pack('IHH', 40, 2, 0) newmsg += msg.data[8:16] newmsg += struct.pack('I', 0) # nlmsgerr struct alignment newmsg += b'\0' * 20 return {'verdict': self.policy, 'data': newmsg} else: return ret except Exception as e: log.error(''.join(traceback.format_stack())) log.error(traceback.format_exc()) # errmsg if isinstance(e, (OSError, IOError)): code = e.errno elif isinstance(e, NetlinkError): code = e.code else: code = errno.ECOMM newmsg = struct.pack('HH', 2, 0) newmsg += msg.data[8:16] newmsg += 
struct.pack('I', code) newmsg += msg.data newmsg = struct.pack('I', len(newmsg) + 4) + newmsg return {'verdict': 'error', 'data': newmsg} return None pyroute2-0.7.11/pyroute2/netlink/rtnl/000077500000000000000000000000001455030217500176075ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/rtnl/__init__.py000066400000000000000000000135441455030217500217270ustar00rootroot00000000000000''' RTNetlink: network setup ======================== RTNL is a netlink protocol, used to get and set information about different network objects -- addresses, routes, interfaces etc. RTNL protocol-specific data in messages depends on the object type. E.g., complete packet with the interface address information:: nlmsg header: + uint32 length + uint16 type + uint16 flags + uint32 sequence number + uint32 pid ifaddrmsg structure: + unsigned char ifa_family + unsigned char ifa_prefixlen + unsigned char ifa_flags + unsigned char ifa_scope + uint32 ifa_index [ optional NLA tree ] NLA for this kind of packets can be of type IFA_ADDRESS, IFA_LOCAL etc. -- please refer to the corresponding source. Other objects types require different structures, sometimes really complex. All these structures are described in sources. 
--------------------------- Module contents: ''' from pyroute2.common import map_namespace # RTnetlink multicast group flags (for use with bind()) RTMGRP_NONE = 0x0 RTMGRP_LINK = 0x1 RTMGRP_NOTIFY = 0x2 RTMGRP_NEIGH = 0x4 RTMGRP_TC = 0x8 RTMGRP_IPV4_IFADDR = 0x10 RTMGRP_IPV4_MROUTE = 0x20 RTMGRP_IPV4_ROUTE = 0x40 RTMGRP_IPV4_RULE = 0x80 RTMGRP_IPV6_IFADDR = 0x100 RTMGRP_IPV6_MROUTE = 0x200 RTMGRP_IPV6_ROUTE = 0x400 RTMGRP_IPV6_IFINFO = 0x800 RTMGRP_DECnet_IFADDR = 0x1000 RTMGRP_NOP2 = 0x2000 RTMGRP_DECnet_ROUTE = 0x4000 RTMGRP_DECnet_RULE = 0x8000 RTMGRP_NOP4 = 0x10000 RTMGRP_IPV6_PREFIX = 0x20000 RTMGRP_IPV6_RULE = 0x40000 RTMGRP_MPLS_ROUTE = 0x4000000 # multicast group ids (for use with {add,drop}_membership) RTNLGRP_NONE = 0 RTNLGRP_LINK = 1 RTNLGRP_NOTIFY = 2 RTNLGRP_NEIGH = 3 RTNLGRP_TC = 4 RTNLGRP_IPV4_IFADDR = 5 RTNLGRP_IPV4_MROUTE = 6 RTNLGRP_IPV4_ROUTE = 7 RTNLGRP_IPV4_RULE = 8 RTNLGRP_IPV6_IFADDR = 9 RTNLGRP_IPV6_MROUTE = 10 RTNLGRP_IPV6_ROUTE = 11 RTNLGRP_IPV6_IFINFO = 12 RTNLGRP_DECnet_IFADDR = 13 RTNLGRP_NOP2 = 14 RTNLGRP_DECnet_ROUTE = 15 RTNLGRP_DECnet_RULE = 16 RTNLGRP_NOP4 = 17 RTNLGRP_IPV6_PREFIX = 18 RTNLGRP_IPV6_RULE = 19 RTNLGRP_ND_USEROPT = 20 RTNLGRP_PHONET_IFADDR = 21 RTNLGRP_PHONET_ROUTE = 22 RTNLGRP_DCB = 23 RTNLGRP_IPV4_NETCONF = 24 RTNLGRP_IPV6_NETCONF = 25 RTNLGRP_MDB = 26 RTNLGRP_MPLS_ROUTE = 27 RTNLGRP_NSID = 28 RTNLGRP_MPLS_NETCONF = 29 RTNLGRP_IPV4_MROUTE_R = 30 RTNLGRP_IPV6_MROUTE_R = 31 # Types of messages # RTM_BASE = 16 RTM_NEWLINK = 16 RTM_DELLINK = 17 RTM_GETLINK = 18 RTM_SETLINK = 19 RTM_NEWADDR = 20 RTM_DELADDR = 21 RTM_GETADDR = 22 RTM_NEWROUTE = 24 RTM_DELROUTE = 25 RTM_GETROUTE = 26 RTM_NEWNEIGH = 28 RTM_DELNEIGH = 29 RTM_GETNEIGH = 30 RTM_NEWRULE = 32 RTM_DELRULE = 33 RTM_GETRULE = 34 RTM_NEWQDISC = 36 RTM_DELQDISC = 37 RTM_GETQDISC = 38 RTM_NEWTCLASS = 40 RTM_DELTCLASS = 41 RTM_GETTCLASS = 42 RTM_NEWTFILTER = 44 RTM_DELTFILTER = 45 RTM_GETTFILTER = 46 RTM_NEWACTION = 48 RTM_DELACTION = 49 RTM_GETACTION = 50 
RTM_NEWPREFIX = 52 RTM_GETMULTICAST = 58 RTM_GETANYCAST = 62 RTM_NEWNEIGHTBL = 64 RTM_GETNEIGHTBL = 66 RTM_SETNEIGHTBL = 67 RTM_NEWNDUSEROPT = 68 RTM_NEWADDRLABEL = 72 RTM_DELADDRLABEL = 73 RTM_GETADDRLABEL = 74 RTM_GETDCB = 78 RTM_SETDCB = 79 RTM_NEWNETCONF = 80 RTM_DELNETCONF = 81 RTM_GETNETCONF = 82 RTM_NEWMDB = 84 RTM_DELMDB = 85 RTM_GETMDB = 86 RTM_NEWNSID = 88 RTM_DELNSID = 89 RTM_GETNSID = 90 RTM_NEWSTATS = 92 RTM_GETSTATS = 94 RTM_NEWCACHEREPORT = 96 RTM_NEWLINKPROP = 108 RTM_DELLINKPROP = 109 RTM_GETLINKPROP = 110 # fake internal message types RTM_NEWNETNS = 500 RTM_DELNETNS = 501 RTM_GETNETNS = 502 (RTM_NAMES, RTM_VALUES) = map_namespace('RTM_', globals()) TC_H_INGRESS = 0xFFFFFFF1 TC_H_CLSACT = TC_H_INGRESS TC_H_ROOT = 0xFFFFFFFF RTMGRP_DEFAULTS = ( RTMGRP_IPV4_IFADDR | RTMGRP_IPV6_IFADDR | RTMGRP_IPV4_ROUTE | RTMGRP_IPV6_ROUTE | RTMGRP_IPV4_RULE | RTMGRP_IPV6_RULE | RTMGRP_NEIGH | RTMGRP_LINK | RTMGRP_TC | RTMGRP_MPLS_ROUTE ) encap_type = {'unspec': 0, 'mpls': 1, 0: 'unspec', 1: 'mpls'} rtypes = { 'RTN_UNSPEC': 0, 'RTN_UNICAST': 1, # Gateway or direct route 'RTN_LOCAL': 2, # Accept locally 'RTN_BROADCAST': 3, # Accept locally as broadcast # send as broadcast 'RTN_ANYCAST': 4, # Accept locally as broadcast, # but send as unicast 'RTN_MULTICAST': 5, # Multicast route 'RTN_BLACKHOLE': 6, # Drop 'RTN_UNREACHABLE': 7, # Destination is unreachable 'RTN_PROHIBIT': 8, # Administratively prohibited 'RTN_THROW': 9, # Not in this table 'RTN_NAT': 10, # Translate this address 'RTN_XRESOLVE': 11, } # Use external resolver # normalized rt_type = dict( [(x[0][4:].lower(), x[1]) for x in rtypes.items()] + [(x[1], x[0][4:].lower()) for x in rtypes.items()] ) rtprotos = { 'RTPROT_UNSPEC': 0, 'RTPROT_REDIRECT': 1, # Route installed by ICMP redirects; # not used by current IPv4 'RTPROT_KERNEL': 2, # Route installed by kernel 'RTPROT_BOOT': 3, # Route installed during boot 'RTPROT_STATIC': 4, # Route installed by administrator # Values of protocol >= RTPROT_STATIC are not # 
interpreted by kernel; # keep in sync with iproute2 ! 'RTPROT_GATED': 8, # gated 'RTPROT_RA': 9, # RDISC/ND router advertisements 'RTPROT_MRT': 10, # Merit MRT 'RTPROT_ZEBRA': 11, # Zebra 'RTPROT_BIRD': 12, # BIRD 'RTPROT_DNROUTED': 13, # DECnet routing daemon 'RTPROT_XORP': 14, # XORP 'RTPROT_NTK': 15, # Netsukuku 'RTPROT_DHCP': 16, } # DHCP client # normalized rt_proto = dict( [(x[0][7:].lower(), x[1]) for x in rtprotos.items()] + [(x[1], x[0][7:].lower()) for x in rtprotos.items()] ) rtscopes = { 'RT_SCOPE_UNIVERSE': 0, 'RT_SCOPE_SITE': 200, 'RT_SCOPE_LINK': 253, 'RT_SCOPE_HOST': 254, 'RT_SCOPE_NOWHERE': 255, } # normalized rt_scope = dict( [(x[0][9:].lower(), x[1]) for x in rtscopes.items()] + [(x[1], x[0][9:].lower()) for x in rtscopes.items()] ) pyroute2-0.7.11/pyroute2/netlink/rtnl/errmsg.py000066400000000000000000000002331455030217500214560ustar00rootroot00000000000000from pyroute2.netlink import nlmsg class errmsg(nlmsg): ''' Custom message type Error ersatz-message ''' fields = (('code', 'i'),) pyroute2-0.7.11/pyroute2/netlink/rtnl/fibmsg.py000066400000000000000000000045741455030217500214420ustar00rootroot00000000000000from pyroute2.common import map_namespace from pyroute2.netlink import nla, nlmsg FR_ACT_UNSPEC = 0 FR_ACT_TO_TBL = 1 FR_ACT_GOTO = 2 FR_ACT_NOP = 3 FR_ACT_BLACKHOLE = 6 FR_ACT_UNREACHABLE = 7 FR_ACT_PROHIBIT = 8 (FR_ACT_NAMES, FR_ACT_VALUES) = map_namespace('FR_ACT', globals()) class fibmsg(nlmsg): ''' IP rule message C structure:: struct fib_rule_hdr { __u8 family; __u8 dst_len; __u8 src_len; __u8 tos; __u8 table; __u8 res1; /* reserved */ __u8 res2; /* reserved */ __u8 action; __u32 flags; }; ''' prefix = 'FRA_' fields = ( ('family', 'B'), ('dst_len', 'B'), ('src_len', 'B'), ('tos', 'B'), ('table', 'B'), ('res1', 'B'), ('res2', 'B'), ('action', 'B'), ('flags', 'I'), ) # fibmsg NLA numbers are not sequential, so # give them here explicitly nla_map = ( (0, 'FRA_UNSPEC', 'none'), (1, 'FRA_DST', 'ipaddr'), (2, 'FRA_SRC', 'ipaddr'), (3, 
'FRA_IIFNAME', 'asciiz'), (4, 'FRA_GOTO', 'uint32'), (6, 'FRA_PRIORITY', 'uint32'), (10, 'FRA_FWMARK', 'uint32'), (11, 'FRA_FLOW', 'uint32'), (12, 'FRA_TUN_ID', 'be64'), (13, 'FRA_SUPPRESS_IFGROUP', 'uint32'), (14, 'FRA_SUPPRESS_PREFIXLEN', 'uint32'), (15, 'FRA_TABLE', 'uint32'), (16, 'FRA_FWMASK', 'uint32'), (17, 'FRA_OIFNAME', 'asciiz'), (18, 'FRA_PAD', 'hex'), (19, 'FRA_L3MDEV', 'uint8'), (20, 'FRA_UID_RANGE', 'uid_range'), (21, 'FRA_PROTOCOL', 'uint8'), (22, 'FRA_IP_PROTO', 'uint8'), (23, 'FRA_SPORT_RANGE', 'port_range'), (24, 'FRA_DPORT_RANGE', 'port_range'), ) class fra_range(nla): __slots__ = () sql_type = 'TEXT' def encode(self): self['start'], self['end'] = [ int(x) for x in self.value.split(':') ] nla.encode(self) def decode(self): nla.decode(self) self.value = '%s:%s' % (self['start'], self['end']) class uid_range(fra_range): fields = (('start', 'I'), ('end', 'I')) class port_range(fra_range): fields = (('start', 'H'), ('end', 'H')) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifaddrmsg.py000066400000000000000000000053571455030217500221330ustar00rootroot00000000000000import socket from pyroute2.common import map_namespace from pyroute2.netlink import nla, nlmsg # address attributes # # Important comment: # For IPv4, IFA_ADDRESS is a prefix address, not a local interface # address. It makes no difference for normal interfaces, but # for point-to-point ones IFA_ADDRESS means DESTINATION address, # and the local address is supplied in IFA_LOCAL attribute. 
# IFA_F_SECONDARY = 0x01 # IFA_F_TEMPORARY IFA_F_SECONDARY IFA_F_NODAD = 0x02 IFA_F_OPTIMISTIC = 0x04 IFA_F_DADFAILED = 0x08 IFA_F_HOMEADDRESS = 0x10 IFA_F_DEPRECATED = 0x20 IFA_F_TENTATIVE = 0x40 IFA_F_PERMANENT = 0x80 IFA_F_MANAGETEMPADDR = 0x100 IFA_F_NOPREFIXROUTE = 0x200 IFA_F_MCAUTOJOIN = 0x400 IFA_F_STABLE_PRIVACY = 0x800 (IFA_F_NAMES, IFA_F_VALUES) = map_namespace('IFA_F', globals()) # 8<---------------------------------------------- IFA_F_TEMPORARY = IFA_F_SECONDARY IFA_F_NAMES['IFA_F_TEMPORARY'] = IFA_F_TEMPORARY IFA_F_VALUES6 = IFA_F_VALUES IFA_F_VALUES6[IFA_F_TEMPORARY] = 'IFA_F_TEMPORARY' # 8<---------------------------------------------- class ifaddrmsg(nlmsg): ''' IP address information C structure:: struct ifaddrmsg { unsigned char ifa_family; /* Address type */ unsigned char ifa_prefixlen; /* Prefixlength of address */ unsigned char ifa_flags; /* Address flags */ unsigned char ifa_scope; /* Address scope */ int ifa_index; /* Interface index */ }; ''' prefix = 'IFA_' sql_constraints = {'IFA_LOCAL': "NOT NULL DEFAULT ''"} fields = ( ('family', 'B'), ('prefixlen', 'B'), ('flags', 'B'), ('scope', 'B'), ('index', 'I'), ) nla_map = ( ('IFA_UNSPEC', 'hex'), ('IFA_ADDRESS', 'ipaddr'), ('IFA_LOCAL', 'ipaddr'), ('IFA_LABEL', 'asciiz'), ('IFA_BROADCAST', 'ipaddr'), ('IFA_ANYCAST', 'ipaddr'), ('IFA_CACHEINFO', 'cacheinfo'), ('IFA_MULTICAST', 'ipaddr'), ('IFA_FLAGS', 'uint32'), ) class cacheinfo(nla): fields = ( ('ifa_preferred', 'I'), ('ifa_valid', 'I'), ('cstamp', 'I'), ('tstamp', 'I'), ) @staticmethod def flags2names(flags, family=socket.AF_INET): if family == socket.AF_INET6: ifa_f_values = IFA_F_VALUES6 else: ifa_f_values = IFA_F_VALUES ret = [] for f in ifa_f_values: if f & flags: ret.append(ifa_f_values[f]) return ret @staticmethod def names2flags(flags): ret = 0 for f in flags: if f[0] == '!': f = f[1:] else: ret |= IFA_F_NAMES[f] return ret 
pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/000077500000000000000000000000001455030217500214115ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/__init__.py000066400000000000000000001244161455030217500235320ustar00rootroot00000000000000import importlib import logging import os import pkgutil import struct import sys from socket import AF_INET, AF_INET6 from pyroute2 import config from pyroute2.common import basestring, map_namespace from pyroute2.config import AF_BRIDGE from pyroute2.netlink import NLA_F_NESTED, nla, nlmsg, nlmsg_atoms from pyroute2.netlink.rtnl.ifinfmsg.plugins import ( bond, can, geneve, gtp, ipoib, ipvlan, team, tun, tuntap, vlan, vrf, vti, vti6, vxlan, xfrm, ) from pyroute2.netlink.rtnl.iw_event import iw_event log = logging.getLogger(__name__) # it's simpler to double constants here, than to change all the # module layout; but it is a subject of the future refactoring RTM_NEWLINK = 16 RTM_DELLINK = 17 # ## # # tuntap flags # IFT_TUN = 0x0001 IFT_TAP = 0x0002 IFT_NO_PI = 0x1000 IFT_ONE_QUEUE = 0x2000 IFT_VNET_HDR = 0x4000 IFT_TUN_EXCL = 0x8000 IFT_MULTI_QUEUE = 0x0100 IFT_ATTACH_QUEUE = 0x0200 IFT_DETACH_QUEUE = 0x0400 # read-only IFT_PERSIST = 0x0800 IFT_NOFILTER = 0x1000 ## # # normal flags # IFF_UP = 0x1 # interface is up IFF_BROADCAST = 0x2 # broadcast address valid IFF_DEBUG = 0x4 # turn on debugging IFF_LOOPBACK = 0x8 # is a loopback net IFF_POINTOPOINT = 0x10 # interface is has p-p link IFF_NOTRAILERS = 0x20 # avoid use of trailers IFF_RUNNING = 0x40 # interface RFC2863 OPER_UP IFF_NOARP = 0x80 # no ARP protocol IFF_PROMISC = 0x100 # receive all packets IFF_ALLMULTI = 0x200 # receive all multicast packets IFF_MASTER = 0x400 # master of a load balancer IFF_SLAVE = 0x800 # slave of a load balancer IFF_MULTICAST = 0x1000 # Supports multicast IFF_PORTSEL = 0x2000 # can set media type IFF_AUTOMEDIA = 0x4000 # auto media select active IFF_DYNAMIC = 0x8000 # dialup device with changing addresses IFF_LOWER_UP = 
0x10000 # driver signals L1 up IFF_DORMANT = 0x20000 # driver signals dormant IFF_ECHO = 0x40000 # echo sent packets (IFF_NAMES, IFF_VALUES) = map_namespace('IFF', globals()) IFF_MASK = ( IFF_UP | IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI ) IFF_VOLATILE = ( IFF_LOOPBACK | IFF_POINTOPOINT | IFF_BROADCAST | IFF_ECHO | IFF_MASTER | IFF_SLAVE | IFF_RUNNING | IFF_LOWER_UP | IFF_DORMANT ) ## # # gre flags # GRE_ACK = 0x0080 GRE_REC = 0x0700 GRE_STRICT = 0x0800 GRE_SEQ = 0x1000 GRE_KEY = 0x2000 GRE_ROUTING = 0x4000 GRE_CSUM = 0x8000 (GRE_NAMES, GRE_VALUES) = map_namespace('GRE_', globals()) ## # # vlan filter flags # BRIDGE_VLAN_INFO_MASTER = 0x1 # operate on bridge device BRIDGE_VLAN_INFO_PVID = 0x2 # ingress untagged BRIDGE_VLAN_INFO_UNTAGGED = 0x4 # egress untagged BRIDGE_VLAN_INFO_RANGE_BEGIN = 0x8 # range start BRIDGE_VLAN_INFO_RANGE_END = 0x10 # range end BRIDGE_VLAN_INFO_BRENTRY = 0x20 # global bridge vlan entry (BRIDGE_VLAN_NAMES, BRIDGE_VLAN_VALUES) = map_namespace( 'BRIDGE_VLAN_INFO', globals() ) BRIDGE_VLAN_TUNNEL_UNSPEC = 0 BRIDGE_VLAN_TUNNEL_ID = 1 BRIDGE_VLAN_TUNNEL_VID = 2 BRIDGE_VLAN_TUNNEL_FLAGS = 3 BRIDGE_VLAN_TUNNEL_MAX = 4 BRIDGE_FLAGS_MASTER = 1 BRIDGE_FLAGS_SELF = 2 (BRIDGE_FLAGS_NAMES, BRIDGE_FLAGS_VALUES) = map_namespace( 'BRIDGE_FLAGS', globals() ) ## # # XDP flags # XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << 0 XDP_FLAGS_SKB_MODE = 1 << 1 XDP_FLAGS_DRV_MODE = 1 << 2 XDP_FLAGS_HW_MODE = 1 << 3 XDP_FLAGS_REPLACE = 1 << 4 XDP_FLAGS_MODES = XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE XDP_FLAGS_MASK = ( XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_MODES | XDP_FLAGS_REPLACE ) (XDP_FLAGS_NAMES, XDP_FLAGS_VALUES) = map_namespace('XDP_FLAGS', globals()) states = ( 'UNKNOWN', 'NOTPRESENT', 'DOWN', 'LOWERLAYERDOWN', 'TESTING', 'DORMANT', 'UP', ) state_by_name = {i[1]: i[0] for i in enumerate(states)} state_by_code = dict(enumerate(states)) stats_names = ( 'rx_packets', 'tx_packets', 'rx_bytes', 'tx_bytes', 'rx_errors', 
'tx_errors', 'rx_dropped', 'tx_dropped', 'multicast', 'collisions', 'rx_length_errors', 'rx_over_errors', 'rx_crc_errors', 'rx_frame_errors', 'rx_fifo_errors', 'rx_missed_errors', 'tx_aborted_errors', 'tx_carrier_errors', 'tx_fifo_errors', 'tx_heartbeat_errors', 'tx_window_errors', 'rx_compressed', 'tx_compressed', ) def load_plugins_by_path(path): plugins = {} files = set( [ x.split('.')[0] for x in filter( lambda x: x.endswith(('.py', '.pyc', '.pyo')), os.listdir(path) ) if not x.startswith('_') ] ) sys.path.append(path) for name in files: try: module = __import__(name, globals(), locals(), [], 0) register_kind = getattr(module, 'register_kind', name) plugins[register_kind] = getattr(module, register_kind) except: pass sys.path.pop() return plugins def load_plugins_by_pkg(pkg): plugins = {} plugin_modules = { name: name.split('.')[-1] for loader, name, ispkg in pkgutil.iter_modules( path=pkg.__path__, prefix=pkg.__name__ + '.' ) } # Hack to make it compatible with pyinstaller # plugin loading will work with and without pyinstaller # Inspired on: # https://github.com/webcomics/dosage/blob/master/dosagelib/loader.py # see: https://github.com/pyinstaller/pyinstaller/issues/1905 importers = map(pkgutil.get_importer, pkg.__path__) toc = set() for importer in importers: if hasattr(importer, 'toc'): toc |= importer.toc for element in toc: if element.startswith(pkg.__name__) and element != pkg.__name__: plugin_modules[element] = element.split('.')[-1] for mod_path, mod_name in plugin_modules.items(): if mod_name.startswith('_'): continue module = importlib.import_module(mod_path) register_kind = getattr(module, 'register_kind', mod_name) plugins[register_kind] = getattr(module, register_kind) return plugins data_plugins = {} for module in ( bond, can, geneve, gtp, ipvlan, team, tuntap, tun, vlan, vrf, vti, vti6, vxlan, xfrm, ipoib, ): name = module.__name__.split('.')[-1] data_plugins[name] = getattr(module, name) for pkg in config.data_plugins_pkgs: 
data_plugins.update(load_plugins_by_pkg(pkg)) for path in config.data_plugins_path: data_plugins.update(load_plugins_by_path(path)) class ifla_bridge_id(nla): fields = [('value', '=8s')] def encode(self): r_prio = struct.pack('H', self['prio']) r_addr = struct.pack( 'BBBBBB', *[int(i, 16) for i in self['addr'].split(':')] ) self['value'] = r_prio + r_addr nla.encode(self) def decode(self): nla.decode(self) r_prio = self['value'][:2] r_addr = self['value'][2:] self.value = { 'prio': struct.unpack('H', r_prio)[0], 'addr': ':'.join( '%02x' % (i) for i in struct.unpack('BBBBBB', r_addr) ), } class protinfo_bridge(nla): prefix = 'IFLA_BRPORT_' nla_map = ( ('IFLA_BRPORT_UNSPEC', 'none'), ('IFLA_BRPORT_STATE', 'uint8'), ('IFLA_BRPORT_PRIORITY', 'uint16'), ('IFLA_BRPORT_COST', 'uint32'), ('IFLA_BRPORT_MODE', 'uint8'), ('IFLA_BRPORT_GUARD', 'uint8'), ('IFLA_BRPORT_PROTECT', 'uint8'), ('IFLA_BRPORT_FAST_LEAVE', 'uint8'), ('IFLA_BRPORT_LEARNING', 'uint8'), ('IFLA_BRPORT_UNICAST_FLOOD', 'uint8'), ('IFLA_BRPORT_PROXYARP', 'uint8'), ('IFLA_BRPORT_LEARNING_SYNC', 'uint8'), ('IFLA_BRPORT_PROXYARP_WIFI', 'uint8'), ('IFLA_BRPORT_ROOT_ID', 'br_id'), ('IFLA_BRPORT_BRIDGE_ID', 'br_id'), ('IFLA_BRPORT_DESIGNATED_PORT', 'uint16'), ('IFLA_BRPORT_DESIGNATED_COST', 'uint16'), ('IFLA_BRPORT_ID', 'uint16'), ('IFLA_BRPORT_NO', 'uint16'), ('IFLA_BRPORT_TOPOLOGY_CHANGE_ACK', 'uint8'), ('IFLA_BRPORT_CONFIG_PENDING', 'uint8'), ('IFLA_BRPORT_MESSAGE_AGE_TIMER', 'uint64'), ('IFLA_BRPORT_FORWARD_DELAY_TIMER', 'uint64'), ('IFLA_BRPORT_HOLD_TIMER', 'uint64'), ('IFLA_BRPORT_FLUSH', 'flag'), ('IFLA_BRPORT_MULTICAST_ROUTER', 'uint8'), ('IFLA_BRPORT_PAD', 'uint64'), ('IFLA_BRPORT_MCAST_FLOOD', 'uint8'), ('IFLA_BRPORT_MCAST_TO_UCAST', 'uint8'), ('IFLA_BRPORT_VLAN_TUNNEL', 'uint8'), ('IFLA_BRPORT_BCAST_FLOOD', 'uint8'), ('IFLA_BRPORT_GROUP_FWD_MASK', 'uint16'), ('IFLA_BRPORT_NEIGH_SUPPRESS', 'uint8'), ('IFLA_BRPORT_ISOLATED', 'uint8'), ('IFLA_BRPORT_BACKUP_PORT', 'uint32'), ('IFLA_BRPORT_MRP_RING_OPEN', 
'uint8'), ('IFLA_BRPORT_MRP_IN_OPEN', 'uint8'), ) class br_id(ifla_bridge_id): pass class macvx_data(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_MACVLAN_UNSPEC', 'none'), ('IFLA_MACVLAN_MODE', 'mode'), ('IFLA_MACVLAN_FLAGS', 'flags'), ('IFLA_MACVLAN_MACADDR_MODE', 'macaddr_mode'), ('IFLA_MACVLAN_MACADDR', 'l2addr'), ('IFLA_MACVLAN_MACADDR_DATA', 'macaddr_data'), ('IFLA_MACVLAN_MACADDR_COUNT', 'uint32'), ) class mode(nlmsg_atoms.uint32): value_map = { 0: 'none', 1: 'private', 2: 'vepa', 4: 'bridge', 8: 'passthru', 16: 'source', } class flags(nlmsg_atoms.uint16): value_map = {0: 'none', 1: 'nopromisc'} class macaddr_mode(nlmsg_atoms.uint32): value_map = {0: 'add', 1: 'del', 2: 'flush', 3: 'set'} class macaddr_data(nla): nla_map = ((4, 'IFLA_MACVLAN_MACADDR', 'l2addr'),) class iptnl_data(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_IPIP_UNSPEC', 'none'), ('IFLA_IPIP_LINK', 'uint32'), ('IFLA_IPIP_LOCAL', 'ip4addr'), ('IFLA_IPIP_REMOTE', 'ip4addr'), ('IFLA_IPIP_TTL', 'uint8'), ('IFLA_IPIP_TOS', 'uint8'), ('IFLA_IPIP_ENCAP_LIMIT', 'uint8'), ('IFLA_IPIP_FLOWINFO', 'be32'), ('IFLA_IPIP_FLAGS', 'uint32'), ('IFLA_IPIP_PROTO', 'uint8'), ('IFLA_IPIP_PMTUDISC', 'uint8'), ('IFLA_IPIP_6RD_PREFIX', 'ip6addr'), ('IFLA_IPIP_6RD_RELAY_PREFIX', 'ip4addr'), ('IFLA_IPIP_6RD_PREFIXLEN', 'uint16'), ('IFLA_IPIP_6RD_RELAY_PREFIXLEN', 'uint16'), ('IFLA_IPIP_ENCAP_TYPE', 'uint16'), ('IFLA_IPIP_ENCAP_FLAGS', 'uint16'), ('IFLA_IPIP_ENCAP_SPORT', 'be16'), ('IFLA_IPIP_ENCAP_DPORT', 'be16'), ('IFLA_IPIP_COLLECT_METADATA', 'flag'), ('IFLA_IPIP_FWMARK', 'uint32'), ) class ifinfbase(object): ''' Network interface message. 
C structure:: struct ifinfomsg { unsigned char ifi_family; /* AF_UNSPEC */ unsigned short ifi_type; /* Device type */ int ifi_index; /* Interface index */ unsigned int ifi_flags; /* Device flags */ unsigned int ifi_change; /* change mask */ }; ''' prefix = 'IFLA_' # # Changed from PRIMARY KEY to NOT NULL to support multiple # targets in one table, so we can collect info from multiple # systems. # # To provide data integrity one should use foreign keys, # but when you create a foreign key using interfaces as # the parent table, create also a unique index on # the fields specified in the foreign key definition. # # E.g. # # CREATE TABLE interfaces (f_target TEXT NOT NULL, # f_index INTEGER NOT NULL, ...) # CREATE TABLE routes (f_target TEXT NOT NULL, # f_RTA_OIF INTEGER, ... # FOREIGN KEY (f_target, f_RTA_OIF) # REFERENCES interfaces(f_target, f_index)) # CREATE UNIQUE INDEX if_idx ON interfaces(f_target, f_index) # sql_constraints = {'index': 'NOT NULL'} sql_extra_fields = (('state', 'TEXT'),) fields = ( ('family', 'B'), ('__align', 'x'), ('ifi_type', 'H'), ('index', 'i'), ('flags', 'I'), ('change', 'I'), ) nla_map = ( ('IFLA_UNSPEC', 'none'), ('IFLA_ADDRESS', 'l2addr'), ('IFLA_BROADCAST', 'l2addr'), ('IFLA_IFNAME', 'asciiz'), ('IFLA_MTU', 'uint32'), ('IFLA_LINK', 'uint32'), ('IFLA_QDISC', 'asciiz'), ('IFLA_STATS', 'ifstats'), ('IFLA_COST', 'hex'), ('IFLA_PRIORITY', 'hex'), ('IFLA_MASTER', 'uint32'), ('IFLA_WIRELESS', 'wireless'), ('IFLA_PROTINFO', 'protinfo'), ('IFLA_TXQLEN', 'uint32'), ('IFLA_MAP', 'ifmap'), ('IFLA_WEIGHT', 'hex'), ('IFLA_OPERSTATE', 'state'), ('IFLA_LINKMODE', 'uint8'), ('IFLA_LINKINFO', 'ifinfo'), ('IFLA_NET_NS_PID', 'uint32'), ('IFLA_IFALIAS', 'asciiz'), ('IFLA_NUM_VF', 'uint32'), ('IFLA_VFINFO_LIST', 'vflist'), ('IFLA_STATS64', 'ifstats64'), ('IFLA_VF_PORTS', 'hex'), ('IFLA_PORT_SELF', 'hex'), ('IFLA_AF_SPEC', 'af_spec'), ('IFLA_GROUP', 'uint32'), ('IFLA_NET_NS_FD', 'netns_fd'), ('IFLA_EXT_MASK', 'uint32'), ('IFLA_PROMISCUITY', 'uint32'), 
('IFLA_NUM_TX_QUEUES', 'uint32'), ('IFLA_NUM_RX_QUEUES', 'uint32'), ('IFLA_CARRIER', 'uint8'), ('IFLA_PHYS_PORT_ID', 'hex'), ('IFLA_CARRIER_CHANGES', 'uint32'), ('IFLA_PHYS_SWITCH_ID', 'hex'), ('IFLA_LINK_NETNSID', 'int32'), ('IFLA_PHYS_PORT_NAME', 'asciiz'), ('IFLA_PROTO_DOWN', 'uint8'), ('IFLA_GSO_MAX_SEGS', 'uint32'), ('IFLA_GSO_MAX_SIZE', 'uint32'), ('IFLA_PAD', 'hex'), ('IFLA_XDP', 'xdp'), ('IFLA_EVENT', 'uint32'), ('IFLA_NEW_NETNSID', 'be32'), ('IFLA_IF_NETNSID', 'uint32'), ('IFLA_CARRIER_UP_COUNT', 'uint32'), ('IFLA_CARRIER_DOWN_COUNT', 'uint32'), ('IFLA_NEW_IFINDEX', 'uint32'), ('IFLA_MIN_MTU', 'uint32'), ('IFLA_MAX_MTU', 'uint32'), ('IFLA_PROP_LIST', 'proplist'), ('IFLA_ALT_IFNAME', 'asciiz'), ('IFLA_PERM_ADDRESS', 'hex'), ('IFLA_PROTO_DOWN_REASON', 'down_reason'), ('IFLA_PARENT_DEV_NAME', 'asciiz'), ('IFLA_PARENT_DEV_BUS_NAME', 'asciiz'), ('IFLA_GRO_MAX_SIZE', 'uint32'), ('IFLA_TSO_MAX_SIZE', 'uint32'), ('IFLA_TSO_MAX_SEGS', 'uint32'), ) @staticmethod def flags2names(flags, mask=0xFFFFFFFF): ret = [] for flag in IFF_VALUES: if (flag & mask & flags) == flag: ret.append(IFF_VALUES[flag]) return ret @staticmethod def names2flags(flags): ret = 0 mask = 0 for flag in flags: if flag[0] == '!': flag = flag[1:] else: ret |= IFF_NAMES[flag] mask |= IFF_NAMES[flag] return (ret, mask) def encode(self): # convert flags if isinstance(self['flags'], (set, tuple, list)): self['flags'], self['change'] = self.names2flags(self['flags']) return super(ifinfbase, self).encode() class down_reason(nla): nla_flags = NLA_F_NESTED prefix = 'IFLA_' nla_map = ( ('IFLA_PROTO_DOWN_REASON_UNSPEC', 'none'), ('IFLA_PROTO_DOWN_REASON_MASK', 'uint32'), ('IFLA_PROTO_DOWN_REASON_VALUE', 'uint32'), ) class netns_fd(nla): fields = [('value', 'I')] netns_run_dir = '/var/run/netns' netns_fd = None def encode(self): # # There are two ways to specify netns # # 1. provide fd to an open file # 2. provide a file name # # In the first case, the value is passed to the kernel # as is. 
In the second case, the object opens appropriate # file from `self.netns_run_dir` and closes it upon # `__del__(self)` if isinstance(self.value, int): self['value'] = self.value else: if isinstance(self.value, bytes): self.value = self.value.decode('utf-8') if '/' in self.value: netns_path = self.value else: netns_path = '%s/%s' % (self.netns_run_dir, self.value) self.netns_fd = os.open(netns_path, os.O_RDONLY) self['value'] = self.netns_fd self.register_clean_cb(self.close) nla.encode(self) def close(self): if self.netns_fd is not None: os.close(self.netns_fd) class xdp(nla): nla_flags = NLA_F_NESTED prefix = 'IFLA_' nla_map = ( ('IFLA_XDP_UNSPEC', 'none'), ('IFLA_XDP_FD', 'xdp_fd'), ('IFLA_XDP_ATTACHED', 'xdp_mode'), ('IFLA_XDP_FLAGS', 'xdp_flags'), ('IFLA_XDP_PROG_ID', 'uint32'), ('IFLA_XDP_DRV_PROG_ID', 'uint32'), ('IFLA_XDP_SKB_PROG_ID', 'uint32'), ('IFLA_XDP_HW_PROG_ID', 'uint32'), ('IFLA_XDP_EXPECTED_FD', 'xdp_fd'), ) class xdp_fd(nlmsg_atoms.int32): sql_type = None class xdp_flags(nla): fields = [('value', '>H')] sql_type = 'INTEGER' def encode(self): v = self.value for flag in XDP_FLAGS_VALUES: v &= ~flag if v != 0: log.warning('possibly incorrect XDP flags') nla.encode(self) class xdp_mode(nlmsg_atoms.uint8): value_map = { 0: None, 1: 'xdp', 2: 'xdpgeneric', 3: 'xdpoffload', 4: 'xdpmulti', } class proplist(nla): nla_flags = NLA_F_NESTED # Proplist has currently only IFLA_ALT_IFNAME, but start at same # index than IFLA_ALT_IFNAME in ifinfbase() nla_map = ((53, 'IFLA_ALT_IFNAME', 'asciiz'),) def altnames(self): return ( attr[1] for attr in self["attrs"] if attr[0] == "IFLA_ALT_IFNAME" ) class vflist(nla): nla_map = (('IFLA_VF_INFO_UNSPEC', 'none'), ('IFLA_VF_INFO', 'vfinfo')) class vfinfo(nla): prefix = 'IFLA_VF_' nla_map = ( ('IFLA_VF_UNSPEC', 'none'), ('IFLA_VF_MAC', 'vf_mac'), ('IFLA_VF_VLAN', 'vf_vlan'), ('IFLA_VF_TX_RATE', 'vf_tx_rate'), ('IFLA_VF_SPOOFCHK', 'vf_spoofchk'), ('IFLA_VF_LINK_STATE', 'vf_link_state'), ('IFLA_VF_RATE', 'vf_rate'), 
('IFLA_VF_RSS_QUERY_EN', 'vf_rss_query_en'), ('IFLA_VF_STATS', 'vf_stats'), ('IFLA_VF_TRUST', 'vf_trust'), ('IFLA_VF_IB_NODE_GUID', 'vf_ib_node_guid'), ('IFLA_VF_IB_PORT_GUID', 'vf_ib_port_guid'), ('IFLA_VF_VLAN_LIST', 'vf_vlist'), ) class vf_ib_node_guid(nla): fields = (('vf', 'I'), ('ib_node_guid', '32B')) def decode(self): nla.decode(self) self['ib_node_guid'] = ':'.join( ['%02x' % x for x in self['ib_node_guid'][4:12][::-1]] ) def encode(self): encoded_guid = self['ib_node_guid'].split(':')[::-1] self['ib_node_guid'] = ( [0] * 4 + [int(x, 16) for x in encoded_guid] + [0] * 20 ) nla.encode(self) class vf_ib_port_guid(nla): fields = (('vf', 'I'), ('ib_port_guid', '32B')) def decode(self): nla.decode(self) self['ib_port_guid'] = ':'.join( ['%02x' % x for x in self['ib_port_guid'][4:12][::-1]] ) def encode(self): encoded_guid = self['ib_port_guid'].split(':')[::-1] self['ib_port_guid'] = ( [0] * 4 + [int(x, 16) for x in encoded_guid] + [0] * 20 ) nla.encode(self) class vf_mac(nla): fields = (('vf', 'I'), ('mac', '32B')) def decode(self): nla.decode(self) self['mac'] = ':'.join( ['%02x' % x for x in self['mac'][:6]] ) def encode(self): self['mac'] = [ int(x, 16) for x in self['mac'].split(':') ] + [0] * 26 nla.encode(self) class vf_vlan(nla): fields = (('vf', 'I'), ('vlan', 'I'), ('qos', 'I')) class vf_tx_rate(nla): fields = (('vf', 'I'), ('tx_rate', 'I')) class vf_spoofchk(nla): fields = (('vf', 'I'), ('spoofchk', 'I')) class vf_link_state(nla): fields = (('vf', 'I'), ('link_state', 'I')) class vf_rate(nla): fields = ( ('vf', 'I'), ('min_tx_rate', 'I'), ('max_tx_rate', 'I'), ) class vf_rss_query_en(nla): fields = (('vf', 'I'), ('rss_query_en', 'I')) class vf_stats(nla): nla_map = ( ('IFLA_VF_STATS_RX_PACKETS', 'uint64'), ('IFLA_VF_STATS_TX_PACKETS', 'uint64'), ('IFLA_VF_STATS_RX_BYTES', 'uint64'), ('IFLA_VF_STATS_TX_BYTES', 'uint64'), ('IFLA_VF_STATS_BROADCAST', 'uint64'), ('IFLA_VF_STATS_MULTICAST', 'uint64'), ('IFLA_VF_STATS_PAD', 'uint64'), 
('IFLA_VF_STATS_RX_DROPPED', 'uint64'), ('IFLA_VF_STATS_TX_DROPPED', 'uint64'), ) class vf_trust(nla): fields = (('vf', 'I'), ('trust', 'I')) class vf_vlist(nla): nla_map = ( ('IFLA_VF_VLAN_INFO_UNSPEC', 'none'), ('IFLA_VF_VLAN_INFO', 'ivvi'), ) class ivvi(nla): fields = ( ('vf', 'I'), ('vlan', 'I'), ('qos', 'I'), ('proto', '>H'), ) class wireless(iw_event): pass class state(nla): fields = (('value', 'B'),) sql_type = 'TEXT' def encode(self): self['value'] = state_by_name[self.value] nla.encode(self) def decode(self): nla.decode(self) self.value = state_by_code[self['value']] class ifstats(nla): fields = [(i, 'I') for i in stats_names] class ifstats64(nla): fields = [(i, 'Q') for i in stats_names] class ifmap(nla): fields = ( ('mem_start', 'Q'), ('mem_end', 'Q'), ('base_addr', 'Q'), ('irq', 'H'), ('dma', 'B'), ('port', 'B'), ) @staticmethod def protinfo(self, *argv, **kwarg): proto_map = {AF_BRIDGE: protinfo_bridge} return proto_map.get(self['family'], self.hex) class ifinfo(nla): prefix = 'IFLA_INFO_' nla_map = ( ('IFLA_INFO_UNSPEC', 'none'), ('IFLA_INFO_KIND', 'asciiz'), ('IFLA_INFO_DATA', 'info_data'), ('IFLA_INFO_XSTATS', 'hex'), ('IFLA_INFO_SLAVE_KIND', 'asciiz'), ('IFLA_INFO_SLAVE_DATA', 'info_slave_data'), ) @staticmethod def info_slave_data(self, *argv, **kwarg): ''' Return IFLA_INFO_SLAVE_DATA type based on IFLA_INFO_SLAVE_KIND or IFLA_INFO_KIND. 
''' kind = self.get_attr('IFLA_INFO_SLAVE_KIND') if kind is None: kind = self.get_attr('IFLA_INFO_KIND') data_map = { 'bridge': self.bridge_slave_data, 'bridge_slave': self.bridge_slave_data, 'bond': self.bond_slave_data, } return data_map.get(kind, self.hex) class bridge_slave_data(protinfo_bridge): pass class bond_slave_data(nla): nla_map = ( ('IFLA_BOND_SLAVE_UNSPEC', 'none'), ('IFLA_BOND_SLAVE_STATE', 'uint8'), ('IFLA_BOND_SLAVE_MII_STATUS', 'uint8'), ('IFLA_BOND_SLAVE_LINK_FAILURE_COUNT', 'uint32'), ('IFLA_BOND_SLAVE_PERM_HWADDR', 'l2addr'), ('IFLA_BOND_SLAVE_QUEUE_ID', 'uint16'), ('IFLA_BOND_SLAVE_AD_AGGREGATOR_ID', 'uint16'), ) @staticmethod def info_data(self, *argv, **kwarg): ''' The function returns appropriate IFLA_INFO_DATA type according to IFLA_INFO_KIND info. Return 'hex' type for all unknown kind's and when the kind is not known. ''' kind = self.get_attr('IFLA_INFO_KIND') return self.data_map.get(kind, self.hex) class veth_data(nla): nla_map = ( ('VETH_INFO_UNSPEC', 'none'), ('VETH_INFO_PEER', 'info_peer'), ) @staticmethod def info_peer(self, *argv, **kwarg): return ifinfveth class ipip_data(iptnl_data): pass class sit_data(iptnl_data): nla_map = [ (x[0].replace('IPIP', 'SIT'), x[1]) for x in iptnl_data.nla_map ] class ip6tnl_data(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_IP6TNL_UNSPEC', 'none'), ('IFLA_IP6TNL_LINK', 'uint32'), ('IFLA_IP6TNL_LOCAL', 'ip6addr'), ('IFLA_IP6TNL_REMOTE', 'ip6addr'), ('IFLA_IP6TNL_TTL', 'uint8'), ('IFLA_IP6TNL_TOS', 'uint8'), ('IFLA_IP6TNL_ENCAP_LIMIT', 'uint8'), ('IFLA_IP6TNL_FLOWINFO', 'be32'), ('IFLA_IP6TNL_FLAGS', 'uint32'), ('IFLA_IP6TNL_PROTO', 'uint8'), ('IFLA_IP6TNL_PMTUDISC', 'uint8'), ('IFLA_IP6TNL_6RD_PREFIX', 'ip6addr'), ('IFLA_IP6TNL_6RD_RELAY_PREFIX', 'ip4addr'), ('IFLA_IP6TNL_6RD_PREFIXLEN', 'uint16'), ('IFLA_IP6TNL_6RD_RELAY_PREFIXLEN', 'uint16'), ('IFLA_IP6TNL_ENCAP_TYPE', 'uint16'), ('IFLA_IP6TNL_ENCAP_FLAGS', 'uint16'), ('IFLA_IP6TNL_ENCAP_SPORT', 'be16'), ('IFLA_IP6TNL_ENCAP_DPORT', 'be16'), 
('IFLA_IP6TNL_COLLECT_METADATA', 'flag'), ('IFLA_IP6TNL_FWMARK', 'uint32'), ) class gre_data(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_GRE_UNSPEC', 'none'), ('IFLA_GRE_LINK', 'uint32'), ('IFLA_GRE_IFLAGS', 'gre_flags'), ('IFLA_GRE_OFLAGS', 'gre_flags'), ('IFLA_GRE_IKEY', 'be32'), ('IFLA_GRE_OKEY', 'be32'), ('IFLA_GRE_LOCAL', 'ip4addr'), ('IFLA_GRE_REMOTE', 'ip4addr'), ('IFLA_GRE_TTL', 'uint8'), ('IFLA_GRE_TOS', 'uint8'), ('IFLA_GRE_PMTUDISC', 'uint8'), ('IFLA_GRE_ENCAP_LIMIT', 'uint8'), ('IFLA_GRE_FLOWINFO', 'be32'), ('IFLA_GRE_FLAGS', 'uint32'), ('IFLA_GRE_ENCAP_TYPE', 'uint16'), ('IFLA_GRE_ENCAP_FLAGS', 'uint16'), ('IFLA_GRE_ENCAP_SPORT', 'be16'), ('IFLA_GRE_ENCAP_DPORT', 'be16'), ('IFLA_GRE_COLLECT_METADATA', 'flag'), ('IFLA_GRE_IGNORE_DF', 'uint8'), ('IFLA_GRE_FWMARK', 'uint32'), ) class gre_flags(nla): fields = [('value', '>H')] sql_type = 'INTEGER' def encode(self): # # for details see: url = 'https://github.com/svinota/pyroute2/issues/531' v = self.value for flag in GRE_VALUES: v &= ~flag if v != 0: log.warning( 'possibly incorrect GRE flags, ' 'see %s' % url ) nla.encode(self) class ip6gre_data(nla): # Ostensibly the same as ip6gre_data except that local # and remote are ipv6 addrs. # As of Linux 4.8,IFLA_GRE_COLLECT_METADATA has not been # implemented for IPv6. # Linux uses the same enum names for v6 and v4 (in if_tunnel.h); # Here we name them IFLA_IP6GRE_xxx instead to avoid conflicts # with gre_data above. 
prefix = 'IFLA_' nla_map = ( ('IFLA_IP6GRE_UNSPEC', 'none'), ('IFLA_IP6GRE_LINK', 'uint32'), ('IFLA_IP6GRE_IFLAGS', 'uint16'), ('IFLA_IP6GRE_OFLAGS', 'uint16'), ('IFLA_IP6GRE_IKEY', 'be32'), ('IFLA_IP6GRE_OKEY', 'be32'), ('IFLA_IP6GRE_LOCAL', 'ip6addr'), ('IFLA_IP6GRE_REMOTE', 'ip6addr'), ('IFLA_IP6GRE_TTL', 'uint8'), ('IFLA_IP6GRE_TOS', 'uint8'), ('IFLA_IP6GRE_PMTUDISC', 'uint8'), ('IFLA_IP6GRE_ENCAP_LIMIT', 'uint8'), ('IFLA_IP6GRE_FLOWINFO', 'be32'), ('IFLA_IP6GRE_FLAGS', 'uint32'), ('IFLA_IP6GRE_ENCAP_TYPE', 'uint16'), ('IFLA_IP6GRE_ENCAP_FLAGS', 'uint16'), ('IFLA_IP6GRE_ENCAP_SPORT', 'be16'), ('IFLA_IP6GRE_ENCAP_DPORT', 'be16'), ) class macvlan_data(macvx_data): pass class macvtap_data(macvx_data): nla_map = [ (x[0].replace('MACVLAN', 'MACVTAP'), x[1]) for x in macvx_data.nla_map ] class bridge_data(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_BR_UNSPEC', 'none'), ('IFLA_BR_FORWARD_DELAY', 'uint32'), ('IFLA_BR_HELLO_TIME', 'uint32'), ('IFLA_BR_MAX_AGE', 'uint32'), ('IFLA_BR_AGEING_TIME', 'uint32'), ('IFLA_BR_STP_STATE', 'uint32'), ('IFLA_BR_PRIORITY', 'uint16'), ('IFLA_BR_VLAN_FILTERING', 'uint8'), ('IFLA_BR_VLAN_PROTOCOL', 'be16'), ('IFLA_BR_GROUP_FWD_MASK', 'uint16'), ('IFLA_BR_ROOT_ID', 'br_id'), ('IFLA_BR_BRIDGE_ID', 'br_id'), ('IFLA_BR_ROOT_PORT', 'uint16'), ('IFLA_BR_ROOT_PATH_COST', 'uint32'), ('IFLA_BR_TOPOLOGY_CHANGE', 'uint8'), ('IFLA_BR_TOPOLOGY_CHANGE_DETECTED', 'uint8'), ('IFLA_BR_HELLO_TIMER', 'uint64'), ('IFLA_BR_TCN_TIMER', 'uint64'), ('IFLA_BR_TOPOLOGY_CHANGE_TIMER', 'uint64'), ('IFLA_BR_GC_TIMER', 'uint64'), ('IFLA_BR_GROUP_ADDR', 'l2addr'), ('IFLA_BR_FDB_FLUSH', 'flag'), ('IFLA_BR_MCAST_ROUTER', 'uint8'), ('IFLA_BR_MCAST_SNOOPING', 'uint8'), ('IFLA_BR_MCAST_QUERY_USE_IFADDR', 'uint8'), ('IFLA_BR_MCAST_QUERIER', 'uint8'), ('IFLA_BR_MCAST_HASH_ELASTICITY', 'uint32'), ('IFLA_BR_MCAST_HASH_MAX', 'uint32'), ('IFLA_BR_MCAST_LAST_MEMBER_CNT', 'uint32'), ('IFLA_BR_MCAST_STARTUP_QUERY_CNT', 'uint32'), ('IFLA_BR_MCAST_LAST_MEMBER_INTVL', 'uint64'), 
('IFLA_BR_MCAST_MEMBERSHIP_INTVL', 'uint64'), ('IFLA_BR_MCAST_QUERIER_INTVL', 'uint64'), ('IFLA_BR_MCAST_QUERY_INTVL', 'uint64'), ('IFLA_BR_MCAST_QUERY_RESPONSE_INTVL', 'uint64'), ('IFLA_BR_MCAST_STARTUP_QUERY_INTVL', 'uint64'), ('IFLA_BR_NF_CALL_IPTABLES', 'uint8'), ('IFLA_BR_NF_CALL_IP6TABLES', 'uint8'), ('IFLA_BR_NF_CALL_ARPTABLES', 'uint8'), ('IFLA_BR_VLAN_DEFAULT_PVID', 'uint16'), ('IFLA_BR_PAD', 'uint64'), ('IFLA_BR_VLAN_STATS_ENABLED', 'uint8'), ('IFLA_BR_MCAST_STATS_ENABLED', 'uint8'), ('IFLA_BR_MCAST_IGMP_VERSION', 'uint8'), ('IFLA_BR_MCAST_MLD_VERSION', 'uint8'), ) class br_id(ifla_bridge_id): pass # IFLA_INFO_DATA plugin system prototype data_map = { 'macvlan': macvlan_data, 'macvtap': macvtap_data, 'ipip': ipip_data, 'sit': sit_data, 'ip6tnl': ip6tnl_data, 'gre': gre_data, 'gretap': gre_data, 'ip6gre': ip6gre_data, 'ip6gretap': ip6gre_data, 'veth': veth_data, 'bridge': bridge_data, 'bridge_slave': bridge_slave_data, } # expand supported interface types data_map.update(data_plugins) @classmethod def register_link_kind(cls, path=None, pkg=None, module=None): cls.data_map.update(data_plugins) if path is not None: cls.data_map.update(load_plugins_by_path(path)) elif pkg is not None: cls.data_map.update(load_plugins_by_pkg(pkg)) elif module is not None: for name, link_class in module.items(): cls.data_map[name] = link_class else: raise TypeError('path or pkg required') @classmethod def unregister_link_kind(cls, kind): return cls.data_map.pop(kind) @classmethod def list_link_kind(cls): return cls.data_map sql_extend = ((ifinfo, 'IFLA_LINKINFO'), (xdp, 'IFLA_XDP')) @staticmethod def af_spec(self, *argv, **kwarg): specs = { 0: self.af_spec_inet, AF_INET: self.af_spec_inet, AF_INET6: self.af_spec_inet, AF_BRIDGE: self.af_spec_bridge, } return specs.get(self['family'], self.hex) class af_spec_bridge(nla): prefix = 'IFLA_BRIDGE_' # Bug-Url: https://github.com/svinota/pyroute2/issues/284 # resolve conflict with link()/flags # IFLA_BRIDGE_FLAGS is for compatibility, 
in nla dicts # IFLA_BRIDGE_VLAN_FLAGS overrides it nla_map = ( (0, 'IFLA_BRIDGE_FLAGS', 'uint16'), (0, 'IFLA_BRIDGE_VLAN_FLAGS', 'vlan_flags'), (1, 'IFLA_BRIDGE_MODE', 'uint16'), (2, 'IFLA_BRIDGE_VLAN_INFO', 'vlan_info'), (3, 'IFLA_BRIDGE_VLAN_TUNNEL_INFO', 'vlan_tunnel_info'), ) class vlan_flags(nla): fields = [('value', 'H')] def encode(self): # convert flags if isinstance(self['value'], basestring): self['value'] = BRIDGE_FLAGS_NAMES[ 'BRIDGE_FLAGS_' + self['value'].upper() ] nla.encode(self) class vlan_info(nla): prefix = '' fields = (('flags', 'H'), ('vid', 'H')) @staticmethod def flags2names(flags): ret = [] for flag in BRIDGE_VLAN_VALUES: if (flag & flags) == flag: ret.append(BRIDGE_VLAN_VALUES[flag]) return ret @staticmethod def names2flags(flags): ret = 0 for flag in flags: ret |= BRIDGE_VLAN_NAMES[ 'BRIDGE_VLAN_INFO_' + flag.upper() ] return ret def encode(self): # convert flags if isinstance(self['flags'], (set, tuple, list)): self['flags'] = self.names2flags(self['flags']) return super(nla, self).encode() class vlan_tunnel_info(nla): prefix = 'IFLA_BRIDGE_VLAN_TUNNEL_' nla_map = ( ('IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC', 'none'), ('IFLA_BRIDGE_VLAN_TUNNEL_ID', 'uint32'), ('IFLA_BRIDGE_VLAN_TUNNEL_VID', 'uint16'), ('IFLA_BRIDGE_VLAN_TUNNEL_FLAGS', 'uint16'), ) class af_spec_inet(nla): nla_map = ( ('AF_UNSPEC', 'none'), ('AF_UNIX', 'hex'), ('AF_INET', 'inet'), ('AF_AX25', 'hex'), ('AF_IPX', 'hex'), ('AF_APPLETALK', 'hex'), ('AF_NETROM', 'hex'), ('AF_BRIDGE', 'hex'), ('AF_ATMPVC', 'hex'), ('AF_X25', 'hex'), ('AF_INET6', 'inet6'), ) class inet(nla): # ./include/linux/inetdevice.h: struct ipv4_devconf # ./include/uapi/linux/ip.h field_names = ( 'dummy', 'forwarding', 'mc_forwarding', 'proxy_arp', 'accept_redirects', 'secure_redirects', 'send_redirects', 'shared_media', 'rp_filter', 'accept_source_route', 'bootp_relay', 'log_martians', 'tag', 'arpfilter', 'medium_id', 'noxfrm', 'nopolicy', 'force_igmp_version', 'arp_announce', 'arp_ignore', 'promote_secondaries', 
'arp_accept', 'arp_notify', 'accept_local', 'src_vmark', 'proxy_arp_pvlan', 'route_localnet', 'igmpv2_unsolicited_report_interval', 'igmpv3_unsolicited_report_interval', ) fields = [(i, 'I') for i in field_names] class inet6(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_INET6_UNSPEC', 'none'), ('IFLA_INET6_FLAGS', 'uint32'), ('IFLA_INET6_CONF', 'ipv6_devconf'), ('IFLA_INET6_STATS', 'ipv6_stats'), ('IFLA_INET6_MCAST', 'hex'), ('IFLA_INET6_CACHEINFO', 'ipv6_cache_info'), ('IFLA_INET6_ICMP6STATS', 'icmp6_stats'), ('IFLA_INET6_TOKEN', 'ip6addr'), ('IFLA_INET6_ADDR_GEN_MODE', 'uint8'), ) class ipv6_devconf(nla): # ./include/uapi/linux/ipv6.h # DEVCONF_ field_names = ( 'forwarding', 'hop_limit', 'mtu', 'accept_ra', 'accept_redirects', 'autoconf', 'dad_transmits', 'router_solicitations', 'router_solicitation_interval', 'router_solicitation_delay', 'use_tempaddr', 'temp_valid_lft', 'temp_preferred_lft', 'regen_max_retry', 'max_desync_factor', 'max_addresses', 'force_mld_version', 'accept_ra_defrtr', 'accept_ra_pinfo', 'accept_ra_rtr_pref', 'router_probe_interval', 'accept_ra_rt_info_max_plen', 'proxy_ndp', 'optimistic_dad', 'accept_source_route', 'mc_forwarding', 'disable_ipv6', 'accept_dad', 'force_tllao', 'ndisc_notify', ) fields = [(i, 'I') for i in field_names] class ipv6_cache_info(nla): # ./include/uapi/linux/if_link.h: struct ifla_cacheinfo fields = ( ('max_reasm_len', 'I'), ('tstamp', 'I'), ('reachable_time', 'I'), ('retrans_time', 'I'), ) class ipv6_stats(nla): # ./include/uapi/linux/snmp.h field_names = ( 'num', 'inpkts', 'inoctets', 'indelivers', 'outforwdatagrams', 'outpkts', 'outoctets', 'inhdrerrors', 'intoobigerrors', 'innoroutes', 'inaddrerrors', 'inunknownprotos', 'intruncatedpkts', 'indiscards', 'outdiscards', 'outnoroutes', 'reasmtimeout', 'reasmreqds', 'reasmoks', 'reasmfails', 'fragoks', 'fragfails', 'fragcreates', 'inmcastpkts', 'outmcastpkts', 'inbcastpkts', 'outbcastpkts', 'inmcastoctets', 'outmcastoctets', 'inbcastoctets', 'outbcastoctets', 
'csumerrors', 'noectpkts', 'ect1pkts', 'ect0pkts', 'cepkts', ) fields = [(i, 'Q') for i in field_names] class icmp6_stats(nla): # ./include/uapi/linux/snmp.h field_names = ( 'num', 'inmsgs', 'inerrors', 'outmsgs', 'outerrors', 'csumerrors', ) fields = [(i, 'Q') for i in field_names] class ifinfmsg(ifinfbase, nlmsg): def decode(self): nlmsg.decode(self) if self['flags'] & 1: self['state'] = 'up' else: self['state'] = 'down' class ifinfveth(ifinfbase, nla): pass pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/compat.py000066400000000000000000000250611455030217500232520ustar00rootroot00000000000000import json import os import subprocess from pyroute2.common import map_enoent from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.netlink.rtnl.ifinfmsg.sync import sync from pyroute2.netlink.rtnl.ifinfmsg.tuntap import manage_tuntap from pyroute2.netlink.rtnl.marshal import MarshalRtnl # it's simpler to double constants here, than to change all the # module layout; but it is a subject of the future refactoring RTM_NEWLINK = 16 RTM_DELLINK = 17 # _BONDING_MASTERS = '/sys/class/net/bonding_masters' _BONDING_SLAVES = '/sys/class/net/%s/bonding/slaves' _BRIDGE_MASTER = '/sys/class/net/%s/brport/bridge/ifindex' _BONDING_MASTER = '/sys/class/net/%s/master/ifindex' IFNAMSIZ = 16 def compat_fix_attrs(msg, nl): kind = None ifname = msg.get_attr('IFLA_IFNAME') # fix master if not nl.capabilities['provide_master']: master = compat_get_master(ifname) if master is not None: msg['attrs'].append(['IFLA_MASTER', master]) # fix linkinfo & kind li = msg.get_attr('IFLA_LINKINFO') if li is not None: kind = li.get_attr('IFLA_INFO_KIND') if kind is None: kind = get_interface_type(ifname) li['attrs'].append(['IFLA_INFO_KIND', kind]) elif 'attrs' in msg: kind = get_interface_type(ifname) msg['attrs'].append( ['IFLA_LINKINFO', {'attrs': [['IFLA_INFO_KIND', kind]]}] ) else: return li = msg.get_attr('IFLA_LINKINFO') # fetch specific interface data if (kind in ('bridge', 'bond')) and [ x 
for x in li['attrs'] if x[0] == 'IFLA_INFO_DATA' ]: if kind == 'bridge': t = '/sys/class/net/%s/bridge/%s' ifdata = ifinfmsg.ifinfo.bridge_data elif kind == 'bond': t = '/sys/class/net/%s/bonding/%s' ifdata = ifinfmsg.ifinfo.bond_data commands = [] for cmd, _ in ifdata.nla_map: try: with open(t % (ifname, ifdata.nla2name(cmd)), 'r') as f: value = f.read() if cmd == 'IFLA_BOND_MODE': value = value.split()[1] commands.append([cmd, int(value)]) except: pass if commands: li['attrs'].append(['IFLA_INFO_DATA', {'attrs': commands}]) def proxy_linkinfo(data, nl): marshal = MarshalRtnl() inbox = marshal.parse(data) data = b'' for msg in inbox: if msg['event'] == 'NLMSG_ERROR': data += msg.data continue # Sysfs operations can require root permissions, # but the script can be run under a normal user # Bug-Url: https://github.com/svinota/pyroute2/issues/113 try: compat_fix_attrs(msg, nl) except OSError: # We can safely ignore here any OSError. # In the worst case, we just return what we have got # from the kernel via netlink pass msg.reset() msg.encode() data += msg.data return {'verdict': 'forward', 'data': data} def proxy_setlink(imsg, nl): def get_interface(index): msg = nl.get_links(index)[0] try: kind = msg.get_attr('IFLA_LINKINFO').get_attr('IFLA_INFO_KIND') except AttributeError: kind = 'unknown' return { 'ifname': msg.get_attr('IFLA_IFNAME'), 'master': msg.get_attr('IFLA_MASTER'), 'kind': kind, } msg = ifinfmsg(imsg.data) msg.decode() forward = True kind = None infodata = None ifname = ( msg.get_attr('IFLA_IFNAME') or get_interface(msg['index'])['ifname'] ) linkinfo = msg.get_attr('IFLA_LINKINFO') if linkinfo: kind = linkinfo.get_attr('IFLA_INFO_KIND') infodata = linkinfo.get_attr('IFLA_INFO_DATA') if kind in ('bond', 'bridge') and infodata is not None: code = 0 # if kind == 'bond': func = compat_set_bond elif kind == 'bridge': func = compat_set_bridge # for cmd, value in infodata.get('attrs', []): cmd = infodata.nla2name(cmd) code = func(ifname, cmd, value) or code # 
if code: err = OSError() err.errno = code raise err # is it a port setup? master = msg.get_attr('IFLA_MASTER') if master is not None: if master == 0: # port delete # 1. get the current master iface = get_interface(msg['index']) master = get_interface(iface['master']) cmd = 'del' else: # port add # 1. get the master master = get_interface(master) cmd = 'add' # 2. manage the port forward_map = { 'team': manage_team_port, 'bridge': compat_bridge_port, 'bond': compat_bond_port, } if master['kind'] in forward_map: func = forward_map[master['kind']] forward = func(cmd, master['ifname'], ifname, nl) if forward is not None: return {'verdict': 'forward', 'data': imsg.data} def proxy_dellink(imsg, nl): orig_msg = ifinfmsg(imsg.data) orig_msg.decode() # get full interface description msg = nl.get_links(orig_msg['index'])[0] msg['header']['type'] = orig_msg['header']['type'] # get the interface kind kind = None li = msg.get_attr('IFLA_LINKINFO') if li is not None: kind = li.get_attr('IFLA_INFO_KIND') # team interfaces can be stopped by a normal RTM_DELLINK if kind == 'bond' and not nl.capabilities['create_bond']: return compat_del_bond(msg) elif kind == 'bridge' and not nl.capabilities['create_bridge']: return compat_del_bridge(msg) return {'verdict': 'forward', 'data': imsg.data} def proxy_newlink(imsg, nl): msg = ifinfmsg(imsg.data) msg.decode() kind = None # get the interface kind linkinfo = msg.get_attr('IFLA_LINKINFO') if linkinfo is not None: kind = [x[1] for x in linkinfo['attrs'] if x[0] == 'IFLA_INFO_KIND'] if kind: kind = kind[0] if kind == 'tuntap': return manage_tuntap(msg) elif kind == 'team': return manage_team(msg) elif kind == 'bond' and not nl.capabilities['create_bond']: return compat_create_bond(msg) elif kind == 'bridge' and not nl.capabilities['create_bridge']: return compat_create_bridge(msg) return {'verdict': 'forward', 'data': imsg.data} @map_enoent @sync def manage_team(msg): if msg['header']['type'] != RTM_NEWLINK: raise ValueError('wrong command 
type') config = { 'device': msg.get_attr('IFLA_IFNAME'), 'runner': {'name': 'activebackup'}, 'link_watch': {'name': 'ethtool'}, } with open(os.devnull, 'w') as fnull: subprocess.check_call( ['teamd', '-d', '-n', '-c', json.dumps(config)], stdout=fnull, stderr=fnull, ) @map_enoent def manage_team_port(cmd, master, ifname, nl): with open(os.devnull, 'w') as fnull: subprocess.check_call( [ 'teamdctl', master, 'port', 'remove' if cmd == 'del' else 'add', ifname, ], stdout=fnull, stderr=fnull, ) @sync def compat_create_bridge(msg): name = msg.get_attr('IFLA_IFNAME') with open(os.devnull, 'w') as fnull: subprocess.check_call( ['brctl', 'addbr', name], stdout=fnull, stderr=fnull ) @sync def compat_create_bond(msg): name = msg.get_attr('IFLA_IFNAME') with open(_BONDING_MASTERS, 'w') as f: f.write('+%s' % (name)) def compat_set_bond(name, cmd, value): # FIXME: join with bridge # FIXME: use internal IO, not bash t = 'echo %s >/sys/class/net/%s/bonding/%s' with open(os.devnull, 'w') as fnull: return subprocess.call( ['bash', '-c', t % (value, name, cmd)], stdout=fnull, stderr=fnull ) def compat_set_bridge(name, cmd, value): t = 'echo %s >/sys/class/net/%s/bridge/%s' with open(os.devnull, 'w') as fnull: return subprocess.call( ['bash', '-c', t % (value, name, cmd)], stdout=fnull, stderr=fnull ) @sync def compat_del_bridge(msg): name = msg.get_attr('IFLA_IFNAME') with open(os.devnull, 'w') as fnull: subprocess.check_call(['ip', 'link', 'set', 'dev', name, 'down']) subprocess.check_call( ['brctl', 'delbr', name], stdout=fnull, stderr=fnull ) @sync def compat_del_bond(msg): name = msg.get_attr('IFLA_IFNAME') subprocess.check_call(['ip', 'link', 'set', 'dev', name, 'down']) with open(_BONDING_MASTERS, 'w') as f: f.write('-%s' % (name)) def compat_bridge_port(cmd, master, port, nl): if nl.capabilities['create_bridge']: return True with open(os.devnull, 'w') as fnull: subprocess.check_call( ['brctl', '%sif' % (cmd), master, port], stdout=fnull, stderr=fnull ) def 
compat_bond_port(cmd, master, port, nl): if nl.capabilities['create_bond']: return True remap = {'add': '+', 'del': '-'} cmd = remap[cmd] with open(_BONDING_SLAVES % (master), 'w') as f: f.write('%s%s' % (cmd, port)) def compat_get_master(name): f = None for i in (_BRIDGE_MASTER, _BONDING_MASTER): try: try: f = open(i % (name)) except UnicodeEncodeError: # a special case with python3 on Ubuntu 14 f = open(i % (name.encode('utf-8'))) break except IOError: pass if f is not None: master = int(f.read()) f.close() return master def get_interface_type(name): ''' Utility function to get interface type. Unfortunately, we can not rely on RTNL or even ioctl(). RHEL doesn't support interface type in RTNL and doesn't provide extended (private) interface flags via ioctl(). Args: * name (str): interface name Returns: * False -- sysfs info unavailable * None -- type not known * str -- interface type: - 'bond' - 'bridge' ''' # FIXME: support all interface types? Right now it is # not needed try: ifattrs = os.listdir('/sys/class/net/%s/' % (name)) except OSError as e: if e.errno == 2: return 'unknown' else: raise if 'bonding' in ifattrs: return 'bond' elif 'bridge' in ifattrs: return 'bridge' else: return 'unknown' pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/000077500000000000000000000000001455030217500230725ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/__init__.py000066400000000000000000000000001455030217500251710ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/bond.py000066400000000000000000000033231455030217500243670ustar00rootroot00000000000000from pyroute2.netlink import nla class bond(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_BOND_UNSPEC', 'none'), ('IFLA_BOND_MODE', 'uint8'), ('IFLA_BOND_ACTIVE_SLAVE', 'uint32'), ('IFLA_BOND_MIIMON', 'uint32'), ('IFLA_BOND_UPDELAY', 'uint32'), ('IFLA_BOND_DOWNDELAY', 'uint32'), ('IFLA_BOND_USE_CARRIER', 'uint8'), ('IFLA_BOND_ARP_INTERVAL', 'uint32'), 
('IFLA_BOND_ARP_IP_TARGET', '*ipaddr'), ('IFLA_BOND_ARP_VALIDATE', 'uint32'), ('IFLA_BOND_ARP_ALL_TARGETS', 'uint32'), ('IFLA_BOND_PRIMARY', 'uint32'), ('IFLA_BOND_PRIMARY_RESELECT', 'uint8'), ('IFLA_BOND_FAIL_OVER_MAC', 'uint8'), ('IFLA_BOND_XMIT_HASH_POLICY', 'uint8'), ('IFLA_BOND_RESEND_IGMP', 'uint32'), ('IFLA_BOND_NUM_PEER_NOTIF', 'uint8'), ('IFLA_BOND_ALL_SLAVES_ACTIVE', 'uint8'), ('IFLA_BOND_MIN_LINKS', 'uint32'), ('IFLA_BOND_LP_INTERVAL', 'uint32'), ('IFLA_BOND_PACKETS_PER_SLAVE', 'uint32'), ('IFLA_BOND_AD_LACP_RATE', 'uint8'), ('IFLA_BOND_AD_SELECT', 'uint8'), ('IFLA_BOND_AD_INFO', 'ad_info'), ('IFLA_BOND_AD_ACTOR_SYS_PRIO', 'uint16'), ('IFLA_BOND_AD_USER_PORT_KEY', 'uint16'), ('IFLA_BOND_AD_ACTOR_SYSTEM', 'hex'), ('IFLA_BOND_TLB_DYNAMIC_LB', 'uint8'), ) class ad_info(nla): nla_map = ( ('IFLA_BOND_AD_INFO_UNSPEC', 'none'), ('IFLA_BOND_AD_INFO_AGGREGATOR', 'uint16'), ('IFLA_BOND_AD_INFO_NUM_PORTS', 'uint16'), ('IFLA_BOND_AD_INFO_ACTOR_KEY', 'uint16'), ('IFLA_BOND_AD_INFO_PARTNER_KEY', 'uint16'), ('IFLA_BOND_AD_INFO_PARTNER_MAC', 'l2addr'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/can.py000066400000000000000000000116531455030217500242130ustar00rootroot00000000000000from pyroute2.netlink import nla, nlmsg_atoms CAN_CTRLMODE_NAMES = { 'CAN_CTRLMODE_LOOPBACK': 0x01, 'CAN_CTRLMODE_LISTENONLY': 0x02, 'CAN_CTRLMODE_3_SAMPLES': 0x04, 'CAN_CTRLMODE_ONE_SHOT': 0x08, 'CAN_CTRLMODE_BERR_REPORTING': 0x10, 'CAN_CTRLMODE_FD': 0x20, 'CAN_CTRLMODE_PRESUME_ACK': 0x40, 'CAN_CTRLMODE_FD_NON_ISO': 0x80, 'CAN_CTRLMODE_CC_LEN8_DLC': 0x100, 'CAN_CTRLMODE_TDC_AUTO': 0x200, 'CAN_CTRLMODE_TDC_MANUAL': 0x400, } CAN_CTRLMODE_VALUES = { 0x001: 'CAN_CTRLMODE_LOOPBACK', 0x002: 'CAN_CTRLMODE_LISTENONLY', 0x004: 'CAN_CTRLMODE_3_SAMPLES', 0x008: 'CAN_CTRLMODE_ONE_SHOT', 0x010: 'CAN_CTRLMODE_BERR_REPORTING', 0x020: 'CAN_CTRLMODE_FD', 0x040: 'CAN_CTRLMODE_PRESUME_ACK', 0x080: 'CAN_CTRLMODE_FD_NON_ISO', 0x100: 'CAN_CTRLMODE_CC_LEN8_DLC', 0x200: 'CAN_CTRLMODE_TDC_AUTO', 0x400: 
'CAN_CTRLMODE_TDC_MANUAL', } class can(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_CAN_UNSPEC', 'none'), ('IFLA_CAN_BITTIMING', 'can_bittiming'), ('IFLA_CAN_BITTIMING_CONST', 'can_bittiming_const'), # NOTE: # This is actually a struct of one member, but that doesn't parse: ('IFLA_CAN_CLOCK', 'uint32'), ('IFLA_CAN_STATE', 'can_state'), ('IFLA_CAN_CTRLMODE', 'can_ctrlmode'), ('IFLA_CAN_RESTART_MS', 'uint32'), ('IFLA_CAN_RESTART', 'flag'), ('IFLA_CAN_BERR_COUNTER', 'can_berr_counter'), ('IFLA_CAN_DATA_BITTIMING', 'can_bittiming'), ('IFLA_CAN_DATA_BITTIMING_CONST', 'can_bittiming_const'), ('IFLA_CAN_TERMINATION', 'uint16'), ('IFLA_CAN_TERMINATION_CONST', 'array(uint16)'), ('IFLA_CAN_BITRATE_CONST', 'array(uint32)'), ('IFLA_CAN_DATA_BITRATE_CONST', 'array(uint32)'), ('IFLA_CAN_BITRATE_MAX', 'uint32'), ('IFLA_CAN_TDC', 'can_tdc'), ('IFLA_CAN_CTRLMODE_EXT', 'can_ctrlmode_ext'), ) class can_bittiming(nla): fields = ( ('bitrate', 'I'), ('sample_point', 'I'), ('tq', 'I'), ('prop_seg', 'I'), ('phase_seg1', 'I'), ('phase_seg2', 'I'), ('sjw', 'I'), ('brp', 'I'), ) class can_bittiming_const(nla): fields = ( ('name', '=16s'), ('tseg1_min', 'I'), ('tseg1_max', 'I'), ('tseg2_min', 'I'), ('tseg2_max', 'I'), ('sjw_max', 'I'), ('brp_min', 'I'), ('brp_max', 'I'), ('brp_inc', 'I'), ) class can_state(nlmsg_atoms.uint32): value_map = { 0: 'ERROR_ACTIVE', 1: 'ERROR_WARNING', 2: 'ERROR_PASSIVE', 3: 'BUS_OFF', 4: 'STOPPED', 5: 'SLEEPING', 6: 'MAX', } class can_ctrlmode(nla): fields = (('mask', 'I'), ('flags', 'I')) def decode(self): super(nla, self).decode() flags = self["flags"] for value, mode in CAN_CTRLMODE_VALUES.items(): self[mode[len('CAN_CTRLMODE_') :].lower()] = ( "on" if flags & value else "off" ) del self["flags"] del self["mask"] def encode(self): mask = 0 flags = 0 for mode, value in CAN_CTRLMODE_NAMES.items(): m = mode[len('CAN_CTRLMODE_') :].lower() try: v = self[m] except KeyError: continue mask |= value if v == "on": flags |= value self['mask'] = mask self['flags'] = flags 
return super(nla, self).encode() class can_berr_counter(nla): fields = (('txerr', 'H'), ('rxerr', 'H')) class can_tdc(nla): prefix = "IFLA_" nla_map = ( ('IFLA_CAN_TDC_UNSPEC', 'none'), ('IFLA_CAN_TDC_TDCV_MIN', 'uint32'), ('IFLA_CAN_TDC_TDCV_MAX', 'uint32'), ('IFLA_CAN_TDC_TDCO_MIN', 'uint32'), ('IFLA_CAN_TDC_TDCO_MAX', 'uint32'), ('IFLA_CAN_TDC_TDCF_MIN', 'uint32'), ('IFLA_CAN_TDC_TDCF_MAX', 'uint32'), ('IFLA_CAN_TDC_TDCV', 'uint32'), ('IFLA_CAN_TDC_TDCO', 'uint32'), ('IFLA_CAN_TDC_TDCF', 'uint32'), ) class can_ctrlmode_ext(nla): prefix = "IFLA_" nla_map = ( ('IFLA_CAN_CTRLMODE_UNSPEC', 'none'), ('IFLA_CAN_CTRLMODE_SUPPORTED', 'can_ctrlmode_supported'), ) class can_ctrlmode_supported(nlmsg_atoms.uint32): def decode(self): super(nlmsg_atoms.uint32, self).decode() for value, mode in CAN_CTRLMODE_VALUES.items(): self[mode[len('CAN_CTRLMODE_') :].lower()] = ( 'yes' if value & self["value"] else 'no' ) del self["value"] pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/geneve.py000066400000000000000000000014101455030217500247110ustar00rootroot00000000000000from pyroute2.netlink import nla, nlmsg_atoms class geneve(nla): nla_map = ( ('IFLA_GENEVE_UNSPEC', 'none'), ('IFLA_GENEVE_ID', 'uint32'), ('IFLA_GENEVE_REMOTE', 'ip4addr'), ('IFLA_GENEVE_TTL', 'uint8'), ('IFLA_GENEVE_TOS', 'uint8'), ('IFLA_GENEVE_PORT', 'be16'), ('IFLA_GENEVE_COLLECT_METADATA', 'flag'), ('IFLA_GENEVE_REMOTE6', 'ip6addr'), ('IFLA_GENEVE_UDP_CSUM', 'uint8'), ('IFLA_GENEVE_UDP_ZERO_CSUM6_TX', 'uint8'), ('IFLA_GENEVE_UDP_ZERO_CSUM6_RX', 'uint8'), ('IFLA_GENEVE_LABEL', 'be32'), ('IFLA_GENEVE_TTL_INHERIT', 'uint8'), ('IFLA_GENEVE_DF', 'df'), ) class df(nlmsg_atoms.uint16): value_map = {0: 'unset', 1: 'set', 2: 'inherit'} pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/gtp.py000066400000000000000000000003431455030217500242360ustar00rootroot00000000000000from pyroute2.netlink import nla class gtp(nla): nla_map = ( ('IFLA_GTP_UNSPEC', 'none'), ('IFLA_GTP_FD0', 'uint32'), ('IFLA_GTP_FD1', 
'uint32'), ('IFLA_GTP_PDP_HASHSIZE', 'uint32'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/ipoib.py000077500000000000000000000005701455030217500245530ustar00rootroot00000000000000from pyroute2.netlink import nla, nlmsg_atoms class ipoib(nla): prefix = 'IFLA_IPOIB_' nla_map = ( ('IFLA_IPOIB_UNSPEC', 'none'), ('IFLA_IPOIB_PKEY', 'uint16'), ('IFLA_IPOIB_MODE', 'mode'), ('IFLA_IPOIB_UMCAST', 'uint16'), ) class mode(nlmsg_atoms.uint16): value_map = {0: 'datagram', 1: 'connected'} pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/ipvlan.py000066400000000000000000000004431455030217500247360ustar00rootroot00000000000000from pyroute2.netlink import nla class ipvlan(nla): prefix = 'IFLA_' nla_map = (('IFLA_IPVLAN_UNSPEC', 'none'), ('IFLA_IPVLAN_MODE', 'uint16')) modes = { 0: 'IPVLAN_MODE_L2', 1: 'IPVLAN_MODE_L3', 'IPVLAN_MODE_L2': 0, 'IPVLAN_MODE_L3': 1, } pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/team.py000066400000000000000000000002271455030217500243730ustar00rootroot00000000000000from pyroute2.netlink import nla class team(nla): prefix = 'IFLA_' nla_map = (('IFLA_TEAM_UNSPEC', 'none'), ('IFLA_TEAM_CONFIG', 'asciiz')) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/tun.py000066400000000000000000000007561455030217500242620ustar00rootroot00000000000000from pyroute2.netlink import nla class tun(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_TUN_UNSPEC', 'none'), ('IFLA_TUN_OWNER', 'uint32'), ('IFLA_TUN_GROUP', 'uint32'), ('IFLA_TUN_TYPE', 'uint8'), ('IFLA_TUN_PI', 'uint8'), ('IFLA_TUN_VNET_HDR', 'uint8'), ('IFLA_TUN_PERSIST', 'uint8'), ('IFLA_TUN_MULTI_QUEUE', 'uint8'), ('IFLA_TUN_NUM_QUEUES', 'uint32'), ('IFLA_TUN_NUM_DISABLED_QUEUES', 'uint32'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/tuntap.py000066400000000000000000000010711455030217500247560ustar00rootroot00000000000000from pyroute2.netlink import nla class tuntap(nla): ''' Fake data type ''' prefix = 'IFTUN_' nla_map = ( ('IFTUN_UNSPEC', 'none'), ('IFTUN_MODE', 
'asciiz'), ('IFTUN_UID', 'uint32'), ('IFTUN_GID', 'uint32'), ('IFTUN_IFR', 'flags'), ) class flags(nla): fields = ( ('no_pi', 'B'), ('one_queue', 'B'), ('vnet_hdr', 'B'), ('tun_excl', 'B'), ('multi_queue', 'B'), ('persist', 'B'), ('nofilter', 'B'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/vlan.py000066400000000000000000000013571455030217500244120ustar00rootroot00000000000000from pyroute2.netlink import nla flags = {'reorder_hdr': 0x1, 'gvrp': 0x2, 'loose_binding': 0x4, 'mvrp': 0x8} class vlan(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_VLAN_UNSPEC', 'none'), ('IFLA_VLAN_ID', 'uint16'), ('IFLA_VLAN_FLAGS', 'vlan_flags'), ('IFLA_VLAN_EGRESS_QOS', 'qos'), ('IFLA_VLAN_INGRESS_QOS', 'qos'), ('IFLA_VLAN_PROTOCOL', 'be16'), ) class vlan_flags(nla): fields = (('flags', 'I'), ('mask', 'I')) class qos(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_VLAN_QOS_UNSPEC', 'none'), ('IFLA_VLAN_QOS_MAPPING', 'qos_mapping'), ) class qos_mapping(nla): fields = (('from', 'I'), ('to', 'I')) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/vrf.py000066400000000000000000000002221455030217500242350ustar00rootroot00000000000000from pyroute2.netlink import nla class vrf(nla): prefix = 'IFLA_' nla_map = (('IFLA_VRF_UNSPEC', 'none'), ('IFLA_VRF_TABLE', 'uint32')) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/vti.py000066400000000000000000000004751455030217500242540ustar00rootroot00000000000000from pyroute2.netlink import nla class vti(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_VTI_UNSPEC', 'none'), ('IFLA_VTI_LINK', 'uint32'), ('IFLA_VTI_IKEY', 'be32'), ('IFLA_VTI_OKEY', 'be32'), ('IFLA_VTI_LOCAL', 'ip4addr'), ('IFLA_VTI_REMOTE', 'ip4addr'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/vti6.py000066400000000000000000000005451455030217500243400ustar00rootroot00000000000000from pyroute2.netlink import nla class vti6(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_VTI_UNSPEC', 'none'), ('IFLA_VTI_LINK', 'uint32'), ('IFLA_VTI_IKEY', 'be32'), ('IFLA_VTI_OKEY', 'be32'), 
('IFLA_VTI_LOCAL', 'ip6addr'), ('IFLA_VTI_REMOTE', 'ip6addr'), ('IFLA_VTI_FWMARK', 'uint32'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/vxlan.py000066400000000000000000000026111455030217500245740ustar00rootroot00000000000000from pyroute2.netlink import nla class vxlan(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_VXLAN_UNSPEC', 'none'), ('IFLA_VXLAN_ID', 'uint32'), ('IFLA_VXLAN_GROUP', 'ip4addr'), ('IFLA_VXLAN_LINK', 'uint32'), ('IFLA_VXLAN_LOCAL', 'ip4addr'), ('IFLA_VXLAN_TTL', 'uint8'), ('IFLA_VXLAN_TOS', 'uint8'), ('IFLA_VXLAN_LEARNING', 'uint8'), ('IFLA_VXLAN_AGEING', 'uint32'), ('IFLA_VXLAN_LIMIT', 'uint32'), ('IFLA_VXLAN_PORT_RANGE', 'port_range'), ('IFLA_VXLAN_PROXY', 'uint8'), ('IFLA_VXLAN_RSC', 'uint8'), ('IFLA_VXLAN_L2MISS', 'uint8'), ('IFLA_VXLAN_L3MISS', 'uint8'), ('IFLA_VXLAN_PORT', 'be16'), ('IFLA_VXLAN_GROUP6', 'ip6addr'), ('IFLA_VXLAN_LOCAL6', 'ip6addr'), ('IFLA_VXLAN_UDP_CSUM', 'uint8'), ('IFLA_VXLAN_UDP_ZERO_CSUM6_TX', 'uint8'), ('IFLA_VXLAN_UDP_ZERO_CSUM6_RX', 'uint8'), ('IFLA_VXLAN_REMCSUM_TX', 'uint8'), ('IFLA_VXLAN_REMCSUM_RX', 'uint8'), ('IFLA_VXLAN_GBP', 'flag'), ('IFLA_VXLAN_REMCSUM_NOPARTIAL', 'flag'), ('IFLA_VXLAN_COLLECT_METADATA', 'uint8'), ('IFLA_VXLAN_LABEL', 'uint32'), ('IFLA_VXLAN_GPE', 'flag'), ('IFLA_VXLAN_TTL_INHERIT', 'flag'), ('IFLA_VXLAN_DF', 'uint8'), ) class port_range(nla): fields = (('low', '>H'), ('high', '>H')) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/plugins/xfrm.py000066400000000000000000000002751455030217500244240ustar00rootroot00000000000000from pyroute2.netlink import nla class xfrm(nla): nla_map = ( ('IFLA_XFRM_UNSPEC', 'none'), ('IFLA_XFRM_LINK', 'uint32'), ('IFLA_XFRM_IF_ID', 'uint32'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/proxy.py000066400000000000000000000065441455030217500231550ustar00rootroot00000000000000import json import os import subprocess from pyroute2.common import map_enoent from pyroute2.netlink.rtnl.ifinfmsg import RTM_NEWLINK from pyroute2.netlink.rtnl.ifinfmsg.sync 
import sync from pyroute2.netlink.rtnl.ifinfmsg.tuntap import manage_tun, manage_tuntap _BONDING_MASTERS = '/sys/class/net/bonding_masters' _BONDING_SLAVES = '/sys/class/net/%s/bonding/slaves' _BRIDGE_MASTER = '/sys/class/net/%s/brport/bridge/ifindex' _BONDING_MASTER = '/sys/class/net/%s/master/ifindex' IFNAMSIZ = 16 def proxy_setlink(msg, nl): def get_interface(index): msg = nl.get_links(index)[0] try: kind = msg.get_attr('IFLA_LINKINFO').get_attr('IFLA_INFO_KIND') except AttributeError: kind = 'unknown' return { 'ifname': msg.get_attr('IFLA_IFNAME'), 'master': msg.get_attr('IFLA_MASTER'), 'kind': kind, } forward = True # is it a port setup? master = msg.get_attr('IFLA_MASTER') if master is not None: if master == 0: # port delete # 1. get the current master iface = get_interface(msg['index']) master = get_interface(iface['master']) cmd = 'del' else: # port add # 1. get the master master = get_interface(master) cmd = 'add' ifname = ( msg.get_attr('IFLA_IFNAME') or get_interface(msg['index'])['ifname'] ) # 2. 
manage the port forward_map = {'team': manage_team_port} if master['kind'] in forward_map: func = forward_map[master['kind']] forward = func(cmd, master['ifname'], ifname, nl) if forward is not None: return {'verdict': 'forward', 'data': msg.data} def proxy_newlink(msg, nl): kind = None # get the interface kind linkinfo = msg.get_attr('IFLA_LINKINFO') if linkinfo is not None: kind = [x[1] for x in linkinfo['attrs'] if x[0] == 'IFLA_INFO_KIND'] if kind: kind = kind[0] if kind == 'tuntap': return manage_tuntap(msg) elif kind == 'tun': return manage_tun(msg) elif kind == 'team': return manage_team(msg) return {'verdict': 'forward', 'data': msg.data} @map_enoent @sync def manage_team(msg): if msg['header']['type'] != RTM_NEWLINK: raise ValueError('wrong command type') try: linkinfo = msg.get_attr('IFLA_LINKINFO') infodata = linkinfo.get_attr('IFLA_INFO_DATA') config = infodata.get_attr('IFLA_TEAM_CONFIG') config = json.loads(config) except AttributeError: config = { 'runner': {'name': 'activebackup'}, 'link_watch': {'name': 'ethtool'}, } # fix device config['device'] = msg.get_attr('IFLA_IFNAME') with open(os.devnull, 'w') as fnull: subprocess.check_call( ['teamd', '-d', '-n', '-c', json.dumps(config)], stdout=fnull, stderr=fnull, ) @map_enoent def manage_team_port(cmd, master, ifname, nl): with open(os.devnull, 'w') as fnull: subprocess.check_call( [ 'teamdctl', master, 'port', 'remove' if cmd == 'del' else 'add', ifname, ], stdout=fnull, stderr=fnull, ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/sync.py000066400000000000000000000036661455030217500227520ustar00rootroot00000000000000import os import select import threading from pyroute2.netlink.rtnl import RTM_VALUES from pyroute2.netlink.rtnl.riprsocket import RawIPRSocket def sync(f): ''' A decorator to wrap up external utility calls. A decorated function receives a netlink message as a parameter, and then: 1. Starts a monitoring thread 2. Performs the external call 3. 
Waits for a netlink event specified by `msg` 4. Joins the monitoring thread If the wrapped function raises an exception, the monitoring thread will be forced to stop via the control channel pipe. The exception will be then forwarded. ''' def monitor(event, ifname, cmd): with RawIPRSocket() as ipr: poll = select.poll() poll.register(ipr, select.POLLIN | select.POLLPRI) poll.register(cmd, select.POLLIN | select.POLLPRI) ipr.bind() while True: events = poll.poll() for fd, event in events: if fd == ipr.fileno(): msgs = ipr.get() for msg in msgs: if ( msg.get('event') == event and msg.get_attr('IFLA_IFNAME') == ifname ): return else: return def decorated(msg): rcmd, cmd = os.pipe() t = threading.Thread( target=monitor, args=( RTM_VALUES[msg['header']['type']], msg.get_attr('IFLA_IFNAME'), rcmd, ), ) t.start() ret = None try: ret = f(msg) except Exception: raise finally: os.write(cmd, b'q') t.join() os.close(rcmd) os.close(cmd) return ret return decorated pyroute2-0.7.11/pyroute2/netlink/rtnl/ifinfmsg/tuntap.py000066400000000000000000000074431455030217500233060ustar00rootroot00000000000000import errno import os import struct from fcntl import ioctl from pyroute2 import config from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.rtnl.ifinfmsg import ( IFT_MULTI_QUEUE, IFT_NO_PI, IFT_ONE_QUEUE, IFT_TAP, IFT_TUN, IFT_VNET_HDR, RTM_NEWLINK, ) from pyroute2.netlink.rtnl.ifinfmsg.sync import sync IFNAMSIZ = 16 TUNDEV = '/dev/net/tun' PLATFORMS = ( 'i386', 'i686', 'x86_64', 'armv6l', 'armv7l', 's390x', 'aarch64', 'loongarch64', ) if config.machine in PLATFORMS: TUNSETIFF = 0x400454CA TUNSETPERSIST = 0x400454CB TUNSETOWNER = 0x400454CC TUNSETGROUP = 0x400454CE elif config.machine in ('ppc64', 'mips'): TUNSETIFF = 0x800454CA TUNSETPERSIST = 0x800454CB TUNSETOWNER = 0x800454CC TUNSETGROUP = 0x800454CE else: TUNSETIFF = None @sync def manage_tun(msg): if TUNSETIFF is None: raise NetlinkError(errno.EOPNOTSUPP, 'Arch not supported') if msg['header']['type'] != 
RTM_NEWLINK: raise NetlinkError(errno.EOPNOTSUPP, 'Unsupported event') ifru_flags = 0 linkinfo = msg.get_attr('IFLA_LINKINFO') infodata = linkinfo.get_attr('IFLA_INFO_DATA') if infodata.get_attr('IFLA_TUN_TYPE') == 1: ifru_flags |= IFT_TUN elif infodata.get_attr('IFLA_TUN_TYPE') == 2: ifru_flags |= IFT_TAP else: raise ValueError('invalid mode') if not infodata.get_attr('IFLA_TUN_PI'): ifru_flags |= IFT_NO_PI if infodata.get_attr('IFLA_TUN_VNET_HDR'): ifru_flags |= IFT_VNET_HDR if infodata.get_attr('IFLA_TUN_MULTI_QUEUE'): ifru_flags |= IFT_MULTI_QUEUE ifr = msg.get_attr('IFLA_IFNAME') if len(ifr) > IFNAMSIZ: raise ValueError('ifname too long') ifr += (IFNAMSIZ - len(ifr)) * '\0' ifr = ifr.encode('ascii') ifr += struct.pack('H', ifru_flags) user = infodata.get_attr('IFLA_TUN_OWNER') group = infodata.get_attr('IFLA_TUN_GROUP') # fd = os.open(TUNDEV, os.O_RDWR) try: ioctl(fd, TUNSETIFF, ifr) if user is not None: ioctl(fd, TUNSETOWNER, user) if group is not None: ioctl(fd, TUNSETGROUP, group) ioctl(fd, TUNSETPERSIST, 1) except Exception: raise finally: os.close(fd) @sync def manage_tuntap(msg): if TUNSETIFF is None: raise NetlinkError(errno.EOPNOTSUPP, 'Arch not supported') if msg['header']['type'] != RTM_NEWLINK: raise NetlinkError(errno.EOPNOTSUPP, 'Unsupported event') ifru_flags = 0 linkinfo = msg.get_attr('IFLA_LINKINFO') infodata = linkinfo.get_attr('IFLA_INFO_DATA') flags = infodata.get_attr('IFTUN_IFR', None) if infodata.get_attr('IFTUN_MODE') == 'tun': ifru_flags |= IFT_TUN elif infodata.get_attr('IFTUN_MODE') == 'tap': ifru_flags |= IFT_TAP else: raise ValueError('invalid mode') if flags is not None: if flags['no_pi']: ifru_flags |= IFT_NO_PI if flags['one_queue']: ifru_flags |= IFT_ONE_QUEUE if flags['vnet_hdr']: ifru_flags |= IFT_VNET_HDR if flags['multi_queue']: ifru_flags |= IFT_MULTI_QUEUE ifr = msg.get_attr('IFLA_IFNAME') if len(ifr) > IFNAMSIZ: raise ValueError('ifname too long') ifr += (IFNAMSIZ - len(ifr)) * '\0' ifr = ifr.encode('ascii') ifr += 
struct.pack('H', ifru_flags) user = infodata.get_attr('IFTUN_UID') group = infodata.get_attr('IFTUN_GID') # fd = os.open(TUNDEV, os.O_RDWR) try: ioctl(fd, TUNSETIFF, ifr) if user is not None: ioctl(fd, TUNSETOWNER, user) if group is not None: ioctl(fd, TUNSETGROUP, group) ioctl(fd, TUNSETPERSIST, 1) except Exception: raise finally: os.close(fd) pyroute2-0.7.11/pyroute2/netlink/rtnl/ifstatsmsg.py000066400000000000000000000050271455030217500223510ustar00rootroot00000000000000from pyroute2.netlink import nla, nlmsg from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg class ifstatsmsg(nlmsg): fields = ( ('family', 'B'), ('pad1', 'B'), ('pad2', 'H'), ('ifindex', 'I'), ('filter_mask', 'I'), ) nla_map = ( ('IFLA_STATS_UNSPEC', 'none'), ('IFLA_STATS_LINK_64', 'ifstats64'), ('IFLA_STATS_LINK_XSTATS', 'ifxstats'), ('IFLA_STATS_LINK_XSTATS_SLAVE', 'ifxstats'), ('IFLA_STATS_LINK_OFFLOAD_XSTATS', 'hex'), ('IFLA_STATS_AF_SPEC', 'hex'), ) class ifstats64(ifinfmsg.ifstats64): pass class ifxstats(nla): nla_map = ( ('LINK_XSTATS_TYPE_UNSPEC', 'none'), ('LINK_XSTATS_TYPE_BRIDGE', 'bridge'), ('LINK_XSTATS_TYPE_BOND', 'hex'), ) class bridge(nla): nla_map = ( ('BRIDGE_XSTATS_UNSPEC', 'none'), ('BRIDGE_XSTATS_VLAN', 'vlan'), ('BRIDGE_XSTATS_MCAST', 'mcast'), ('BRIDGE_XSTATS_PAD', 'hex'), ('BRIDGE_XSTATS_STP', 'stp'), ) class vlan(nla): fields = ( ('rx_bytes', 'Q'), ('rx_packets', 'Q'), ('tx_bytes', 'Q'), ('tx_packets', 'Q'), ('vid', 'H'), ('flags', 'H'), ('pad2', 'I'), ) class mcast(nla): fields = ( ('igmp_v1queries', 'QQ'), ('igmp_v2queries', 'QQ'), ('igmp_v3queries', 'QQ'), ('igmp_leaves', 'QQ'), ('igmp_v1reports', 'QQ'), ('igmp_v2reports', 'QQ'), ('igmp_v3reports', 'QQ'), ('igmp_parse_errors', 'Q'), ('mld_v1queries', 'QQ'), ('mld_v2queries', 'QQ'), ('mld_leaves', 'QQ'), ('mld_v1reports', 'QQ'), ('mld_v2reports', 'QQ'), ('mld_parse_errors', 'Q'), ('mcast_bytes', 'QQ'), ('mcast_packets', 'QQ'), ) class stp(nla): fields = ( ('transition_blk', 'Q'), ('transition_fwd', 'Q'), ('rx_bpdu', 
'Q'), ('tx_bpdu', 'Q'), ('rx_tcn', 'Q'), ('tx_tcn', 'Q'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/iprsocket.py000066400000000000000000000132521455030217500221670ustar00rootroot00000000000000import errno import sys import types from pyroute2.common import DEFAULT_RCVBUF, AddrPool, Namespace from pyroute2.netlink import NETLINK_ROUTE, rtnl from pyroute2.netlink.nlsocket import ( BatchSocket, ChaoticNetlinkSocket, NetlinkSocket, ) from pyroute2.netlink.proxy import NetlinkProxy from pyroute2.netlink.rtnl.marshal import MarshalRtnl if sys.platform.startswith('linux'): from pyroute2.netlink.rtnl.ifinfmsg.proxy import ( proxy_newlink, proxy_setlink, ) class IPRSocketBase(object): def __init__(self, *argv, **kwarg): if 'family' in kwarg: kwarg.pop('family') super(IPRSocketBase, self).__init__(NETLINK_ROUTE, *argv[1:], **kwarg) self.marshal = MarshalRtnl() if self.groups == 0: self.groups = rtnl.RTMGRP_DEFAULTS self._s_channel = None if sys.platform.startswith('linux'): send_ns = Namespace( self, {'addr_pool': AddrPool(0x10000, 0x1FFFF), 'monitor': False}, ) self._sproxy = NetlinkProxy(policy='return', nl=send_ns) self._sproxy.pmap = { rtnl.RTM_NEWLINK: proxy_newlink, rtnl.RTM_SETLINK: proxy_setlink, } def bind(self, groups=None, **kwarg): super(IPRSocketBase, self).bind( groups if groups is not None else self.groups, **kwarg ) def sendto_gate(self, msg, addr): msg.reset() msg.encode() if self.compiled is not None: return self.compiled.append(msg.data) ret = self._sproxy.handle(msg) if ret is not None: if ret['verdict'] == 'forward': return self._sendto(ret['data'], addr) elif ret['verdict'] in ('return', 'error'): if self._s_channel is not None: return self._s_channel.send(ret['data']) else: msgs = self.marshal.parse(ret['data']) for msg in msgs: seq = msg['header']['sequence_number'] if seq in self.backlog: self.backlog[seq].append(msg) else: self.backlog[seq] = [msg] return len(ret['data']) else: ValueError('Incorrect verdict') return self._sendto(msg.data, addr) class 
IPBatchSocket(IPRSocketBase, BatchSocket): pass class ChaoticIPRSocket(IPRSocketBase, ChaoticNetlinkSocket): pass class IPRSocket(IPRSocketBase, NetlinkSocket): ''' The simplest class, that connects together the netlink parser and a generic Python socket implementation. Provides method get() to receive the next message from netlink socket and parse it. It is just simple socket-like class, it implements no buffering or like that. It spawns no additional threads, leaving this up to developers. Please note, that netlink is an asynchronous protocol with non-guaranteed delivery. You should be fast enough to get all the messages in time. If the message flow rate is higher than the speed you parse them with, exceeding messages will be dropped. *Usage* Threadless RT netlink monitoring with blocking I/O calls: >>> from pyroute2 import IPRSocket >>> from pprint import pprint >>> s = IPRSocket() >>> s.bind() >>> pprint(s.get()) [{'attrs': [('RTA_TABLE', 254), ('RTA_DST', '2a00:1450:4009:808::1002'), ('RTA_GATEWAY', 'fe80:52:0:2282::1fe'), ('RTA_OIF', 2), ('RTA_PRIORITY', 0), ('RTA_CACHEINFO', {'rta_clntref': 0, 'rta_error': 0, 'rta_expires': 0, 'rta_id': 0, 'rta_lastuse': 5926, 'rta_ts': 0, 'rta_tsage': 0, 'rta_used': 1})], 'dst_len': 128, 'event': 'RTM_DELROUTE', 'family': 10, 'flags': 512, 'header': {'error': None, 'flags': 0, 'length': 128, 'pid': 0, 'sequence_number': 0, 'type': 25}, 'proto': 9, 'scope': 0, 'src_len': 0, 'table': 254, 'tos': 0, 'type': 1}] >>> ''' _brd_socket = None def bind(self, *argv, **kwarg): if kwarg.pop('clone_socket', False): self._brd_socket = self.clone() def get( self, bufsize=DEFAULT_RCVBUF, msg_seq=0, terminate=None, callback=None, ): if msg_seq == 0: return self._brd_socket.get( bufsize, msg_seq, terminate, callback ) else: return super(IPRSocket, self).get( bufsize, msg_seq, terminate, callback ) def close(self, code=errno.ECONNRESET): with self.sys_lock: self._brd_socket.close() return super(IPRSocket, self).close(code=code) self.get = 
types.MethodType(get, self) self.close = types.MethodType(close, self) kwarg['recursive'] = True return self._brd_socket.bind(*argv, **kwarg) else: return super(IPRSocket, self).bind(*argv, **kwarg) pyroute2-0.7.11/pyroute2/netlink/rtnl/iw_event.py000066400000000000000000000062541455030217500220100ustar00rootroot00000000000000from pyroute2.netlink import nla class iw_event(nla): nla_map = ( (0xB00, 'SIOCSIWCOMMIT', 'hex'), (0xB01, 'SIOCGIWNAME', 'hex'), # Basic operations (0xB02, 'SIOCSIWNWID', 'hex'), (0xB03, 'SIOCGIWNWID', 'hex'), (0xB04, 'SIOCSIWFREQ', 'hex'), (0xB05, 'SIOCGIWFREQ', 'hex'), (0xB06, 'SIOCSIWMODE', 'hex'), (0xB07, 'SIOCGIWMODE', 'hex'), (0xB08, 'SIOCSIWSENS', 'hex'), (0xB09, 'SIOCGIWSENS', 'hex'), # Informative stuff (0xB0A, 'SIOCSIWRANGE', 'hex'), (0xB0B, 'SIOCGIWRANGE', 'hex'), (0xB0C, 'SIOCSIWPRIV', 'hex'), (0xB0D, 'SIOCGIWPRIV', 'hex'), (0xB0E, 'SIOCSIWSTATS', 'hex'), (0xB0F, 'SIOCGIWSTATS', 'hex'), # Spy support (statistics per MAC address - # used for Mobile IP support) (0xB10, 'SIOCSIWSPY', 'hex'), (0xB11, 'SIOCGIWSPY', 'hex'), (0xB12, 'SIOCSIWTHRSPY', 'hex'), (0xB13, 'SIOCGIWTHRSPY', 'hex'), # Access Point manipulation (0xB14, 'SIOCSIWAP', 'hex'), (0xB15, 'SIOCGIWAP', 'hex'), (0xB17, 'SIOCGIWAPLIST', 'hex'), (0xB18, 'SIOCSIWSCAN', 'hex'), (0xB19, 'SIOCGIWSCAN', 'hex'), # 802.11 specific support (0xB1A, 'SIOCSIWESSID', 'hex'), (0xB1B, 'SIOCGIWESSID', 'hex'), (0xB1C, 'SIOCSIWNICKN', 'hex'), (0xB1D, 'SIOCGIWNICKN', 'hex'), # Other parameters useful in 802.11 and # some other devices (0xB20, 'SIOCSIWRATE', 'hex'), (0xB21, 'SIOCGIWRATE', 'hex'), (0xB22, 'SIOCSIWRTS', 'hex'), (0xB23, 'SIOCGIWRTS', 'hex'), (0xB24, 'SIOCSIWFRAG', 'hex'), (0xB25, 'SIOCGIWFRAG', 'hex'), (0xB26, 'SIOCSIWTXPOW', 'hex'), (0xB27, 'SIOCGIWTXPOW', 'hex'), (0xB28, 'SIOCSIWRETRY', 'hex'), (0xB29, 'SIOCGIWRETRY', 'hex'), # Encoding stuff (scrambling, hardware security, WEP...) 
(0xB2A, 'SIOCSIWENCODE', 'hex'), (0xB2B, 'SIOCGIWENCODE', 'hex'), # Power saving stuff (power management, unicast # and multicast) (0xB2C, 'SIOCSIWPOWER', 'hex'), (0xB2D, 'SIOCGIWPOWER', 'hex'), # WPA : Generic IEEE 802.11 information element # (e.g., for WPA/RSN/WMM). (0xB30, 'SIOCSIWGENIE', 'hex'), (0xB31, 'SIOCGIWGENIE', 'hex'), # WPA : IEEE 802.11 MLME requests (0xB16, 'SIOCSIWMLME', 'hex'), # WPA : Authentication mode parameters (0xB32, 'SIOCSIWAUTH', 'hex'), (0xB33, 'SIOCGIWAUTH', 'hex'), # WPA : Extended version of encoding configuration (0xB34, 'SIOCSIWENCODEEXT', 'hex'), (0xB35, 'SIOCGIWENCODEEXT', 'hex'), # WPA2 : PMKSA cache management (0xB36, 'SIOCSIWPMKSA', 'hex'), # Events s.str. (0xC00, 'IWEVTXDROP', 'hex'), (0xC01, 'IWEVQUAL', 'hex'), (0xC02, 'IWEVCUSTOM', 'hex'), (0xC03, 'IWEVREGISTERED', 'hex'), (0xC04, 'IWEVEXPIRED', 'hex'), (0xC05, 'IWEVGENIE', 'hex'), (0xC06, 'IWEVMICHAELMICFAILURE', 'hex'), (0xC07, 'IWEVASSOCREQIE', 'hex'), (0xC08, 'IWEVASSOCRESPIE', 'hex'), (0xC09, 'IWEVPMKIDCAND', 'hex'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/marshal.py000066400000000000000000000037421455030217500216160ustar00rootroot00000000000000from pyroute2.netlink import rtnl from pyroute2.netlink.nlsocket import Marshal from pyroute2.netlink.rtnl.fibmsg import fibmsg from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.netlink.rtnl.ifstatsmsg import ifstatsmsg from pyroute2.netlink.rtnl.ndmsg import ndmsg from pyroute2.netlink.rtnl.ndtmsg import ndtmsg from pyroute2.netlink.rtnl.nsidmsg import nsidmsg from pyroute2.netlink.rtnl.rtmsg import rtmsg from pyroute2.netlink.rtnl.tcmsg import tcmsg class MarshalRtnl(Marshal): msg_map = { rtnl.RTM_NEWLINK: ifinfmsg, rtnl.RTM_DELLINK: ifinfmsg, rtnl.RTM_GETLINK: ifinfmsg, rtnl.RTM_SETLINK: ifinfmsg, rtnl.RTM_NEWADDR: ifaddrmsg, rtnl.RTM_DELADDR: ifaddrmsg, rtnl.RTM_GETADDR: ifaddrmsg, rtnl.RTM_NEWROUTE: rtmsg, rtnl.RTM_DELROUTE: rtmsg, rtnl.RTM_GETROUTE: 
rtmsg, rtnl.RTM_NEWRULE: fibmsg, rtnl.RTM_DELRULE: fibmsg, rtnl.RTM_GETRULE: fibmsg, rtnl.RTM_NEWNEIGH: ndmsg, rtnl.RTM_DELNEIGH: ndmsg, rtnl.RTM_GETNEIGH: ndmsg, rtnl.RTM_NEWQDISC: tcmsg, rtnl.RTM_DELQDISC: tcmsg, rtnl.RTM_GETQDISC: tcmsg, rtnl.RTM_NEWTCLASS: tcmsg, rtnl.RTM_DELTCLASS: tcmsg, rtnl.RTM_GETTCLASS: tcmsg, rtnl.RTM_NEWTFILTER: tcmsg, rtnl.RTM_DELTFILTER: tcmsg, rtnl.RTM_GETTFILTER: tcmsg, rtnl.RTM_NEWNEIGHTBL: ndtmsg, rtnl.RTM_GETNEIGHTBL: ndtmsg, rtnl.RTM_SETNEIGHTBL: ndtmsg, rtnl.RTM_NEWNSID: nsidmsg, rtnl.RTM_DELNSID: nsidmsg, rtnl.RTM_GETNSID: nsidmsg, rtnl.RTM_NEWSTATS: ifstatsmsg, rtnl.RTM_GETSTATS: ifstatsmsg, rtnl.RTM_NEWLINKPROP: ifinfmsg, rtnl.RTM_DELLINKPROP: ifinfmsg, } def fix_message(self, msg): # FIXME: pls do something with it try: msg['event'] = rtnl.RTM_VALUES[msg['header']['type']] except: pass pyroute2-0.7.11/pyroute2/netlink/rtnl/ndmsg.py000066400000000000000000000050571455030217500213000ustar00rootroot00000000000000from pyroute2.common import map_namespace from pyroute2.netlink import nla, nlmsg # neighbor cache entry flags NTF_USE = 0x01 NTF_SELF = 0x02 NTF_MASTER = 0x04 NTF_PROXY = 0x08 NTF_EXT_LEARNED = 0x10 NTF_ROUTER = 0x80 # neighbor cache entry states NUD_INCOMPLETE = 0x01 NUD_REACHABLE = 0x02 NUD_STALE = 0x04 NUD_DELAY = 0x08 NUD_PROBE = 0x10 NUD_FAILED = 0x20 # dummy states NUD_NOARP = 0x40 NUD_PERMANENT = 0x80 NUD_NONE = 0x00 (NTF_NAMES, NTF_VALUES) = map_namespace('NTF_', globals()) (NUD_NAMES, NUD_VALUES) = map_namespace('NUD_', globals()) flags = dict([(x[0][4:].lower(), x[1]) for x in NTF_NAMES.items()]) states = dict([(x[0][4:].lower(), x[1]) for x in NUD_NAMES.items()]) def states_a2n(s): # parse state string ss = s.split(',') ret = 0 for state in ss: state = state.upper() if not state.startswith('NUD_'): state = 'NUD_' + state ret |= NUD_NAMES[state] return ret class ndmsg(nlmsg): ''' ARP cache update message C structure:: struct ndmsg { unsigned char ndm_family; int ndm_ifindex; /* Interface index */ __u16 
ndm_state; /* State */ __u8 ndm_flags; /* Flags */ __u8 ndm_type; }; Cache info structure:: struct nda_cacheinfo { __u32 ndm_confirmed; __u32 ndm_used; __u32 ndm_updated; __u32 ndm_refcnt; }; ''' __slots__ = () prefix = 'NDA_' fields = ( ('family', 'B'), ('__pad', '3x'), ('ifindex', 'i'), ('state', 'H'), ('flags', 'B'), ('ndm_type', 'B'), ) # Please note, that nla_map creates implicit # enumeration. In this case it will be: # # NDA_UNSPEC = 0 # NDA_DST = 1 # NDA_LLADDR = 2 # NDA_CACHEINFO = 3 # NDA_PROBES = 4 # ... # nla_map = ( ('NDA_UNSPEC', 'none'), ('NDA_DST', 'ipaddr'), ('NDA_LLADDR', 'lladdr'), ('NDA_CACHEINFO', 'cacheinfo'), ('NDA_PROBES', 'uint32'), ('NDA_VLAN', 'uint16'), ('NDA_PORT', 'be16'), ('NDA_VNI', 'uint32'), ('NDA_IFINDEX', 'uint32'), ('NDA_MASTER', 'uint32'), ('NDA_LINK_NETNSID', 'uint32'), ('NDA_SRC_VNI', 'uint32'), ) class cacheinfo(nla): __slots__ = () fields = ( ('ndm_confirmed', 'I'), ('ndm_used', 'I'), ('ndm_updated', 'I'), ('ndm_refcnt', 'I'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/ndtmsg.py000066400000000000000000000042141455030217500214560ustar00rootroot00000000000000from pyroute2.netlink import nla, nlmsg class ndtmsg(nlmsg): ''' Neighbour table message ''' __slots__ = () fields = (('family', 'B'), ('__pad', '3x')) nla_map = ( ('NDTA_UNSPEC', 'none'), ('NDTA_NAME', 'asciiz'), ('NDTA_THRESH1', 'uint32'), ('NDTA_THRESH2', 'uint32'), ('NDTA_THRESH3', 'uint32'), ('NDTA_CONFIG', 'config'), ('NDTA_PARMS', 'parms'), ('NDTA_STATS', 'stats'), ('NDTA_GC_INTERVAL', 'uint64'), ) class config(nla): __slots__ = () fields = ( ('key_len', 'H'), ('entry_size', 'H'), ('entries', 'I'), ('last_flush', 'I'), # delta to now in msecs ('last_rand', 'I'), # delta to now in msecs ('hash_rnd', 'I'), ('hash_mask', 'I'), ('hash_chain_gc', 'I'), ('proxy_qlen', 'I'), ) class stats(nla): __slots__ = () fields = ( ('allocs', 'Q'), ('destroys', 'Q'), ('hash_grows', 'Q'), ('res_failed', 'Q'), ('lookups', 'Q'), ('hits', 'Q'), ('rcv_probes_mcast', 'Q'), 
('rcv_probes_ucast', 'Q'), ('periodic_gc_runs', 'Q'), ('forced_gc_runs', 'Q'), ) class parms(nla): __slots__ = () nla_map = ( ('NDTPA_UNSPEC', 'none'), ('NDTPA_IFINDEX', 'uint32'), ('NDTPA_REFCNT', 'uint32'), ('NDTPA_REACHABLE_TIME', 'uint64'), ('NDTPA_BASE_REACHABLE_TIME', 'uint64'), ('NDTPA_RETRANS_TIME', 'uint64'), ('NDTPA_GC_STALETIME', 'uint64'), ('NDTPA_DELAY_PROBE_TIME', 'uint64'), ('NDTPA_QUEUE_LEN', 'uint32'), ('NDTPA_APP_PROBES', 'uint32'), ('NDTPA_UCAST_PROBES', 'uint32'), ('NDTPA_MCAST_PROBES', 'uint32'), ('NDTPA_ANYCAST_DELAY', 'uint64'), ('NDTPA_PROXY_DELAY', 'uint64'), ('NDTPA_PROXY_QLEN', 'uint32'), ('NDTPA_LOCKTIME', 'uint64'), ('NDTPA_QUEUE_LENBYTES', 'uint32'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/nsidmsg.py000066400000000000000000000005011455030217500216210ustar00rootroot00000000000000from pyroute2.netlink.rtnl.rtgenmsg import rtgenmsg class nsidmsg(rtgenmsg): nla_map = ( ('NETNSA_NONE', 'none'), ('NETNSA_NSID', 'uint32'), ('NETNSA_PID', 'uint32'), ('NETNSA_FD', 'uint32'), ('NETNSA_TARGET_NSID', 'uint32'), ('NETNSA_CURRENT_NSID', 'uint32'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/nsinfmsg.py000066400000000000000000000010121455030217500217770ustar00rootroot00000000000000from pyroute2.netlink import nlmsg, nlmsg_atoms class nsinfmsg(nlmsg): ''' Fake message type to represent network namespace information. This is a prototype, the NLA layout is subject to change without notification. 
''' __slots__ = () prefix = 'NSINFO_' fields = (('inode', 'I'), ('netnsid', 'I')) nla_map = ( ('NSINFO_UNSPEC', 'none'), ('NSINFO_PATH', 'string'), ('NSINFO_PEER', 'peer'), ) class peer(nlmsg_atoms.string): sql_type = None pyroute2-0.7.11/pyroute2/netlink/rtnl/p2pmsg.py000066400000000000000000000005561455030217500213770ustar00rootroot00000000000000from pyroute2.netlink import nlmsg class p2pmsg(nlmsg): ''' Fake message type to represent peer to peer connections, be it GRE or PPP ''' __slots__ = () prefix = 'P2P_' fields = (('index', 'I'), ('family', 'I')) nla_map = ( ('P2P_UNSPEC', 'none'), ('P2P_LOCAL', 'target'), ('P2P_REMOTE', 'target'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/riprsocket.py000066400000000000000000000010131455030217500223410ustar00rootroot00000000000000from pyroute2.netlink import NETLINK_ROUTE, rtnl from pyroute2.netlink.nlsocket import NetlinkSocket from pyroute2.netlink.rtnl.marshal import MarshalRtnl class RawIPRSocketBase(object): def __init__(self, fileno=None): super(RawIPRSocketBase, self).__init__(NETLINK_ROUTE, fileno=fileno) self.marshal = MarshalRtnl() def bind(self, groups=rtnl.RTMGRP_DEFAULTS, **kwarg): super(RawIPRSocketBase, self).bind(groups, **kwarg) class RawIPRSocket(RawIPRSocketBase, NetlinkSocket): pass pyroute2-0.7.11/pyroute2/netlink/rtnl/rtgenmsg.py000066400000000000000000000001621455030217500220060ustar00rootroot00000000000000from pyroute2.netlink import nlmsg class rtgenmsg(nlmsg): fields = (('rtgen_family', 'B'), ('__pad', '3x')) pyroute2-0.7.11/pyroute2/netlink/rtnl/rtmsg.py000066400000000000000000000627361455030217500213330ustar00rootroot00000000000000import struct from socket import AF_INET, AF_INET6, AF_UNSPEC, inet_ntop, inet_pton from pyroute2.common import AF_MPLS, hexdump, map_namespace from pyroute2.netlink import nla, nla_string, nlmsg RTNH_F_DEAD = 1 RTNH_F_PERVASIVE = 2 RTNH_F_ONLINK = 4 RTNH_F_OFFLOAD = 8 RTNH_F_LINKDOWN = 16 (RTNH_F_NAMES, RTNH_F_VALUES) = map_namespace('RTNH_F_', globals()) 
RT_SCOPE_UNIVERSE = 0 RT_SCOPE_SITE = 200 RT_SCOPE_LINK = 253 RT_SCOPE_HOST = 254 RT_SCOPE_NOWHERE = 255 (RT_SCOPE_NAMES, RT_SCOPE_VALUES) = map_namespace('RT_SCOPE_', globals()) LWTUNNEL_ENCAP_NONE = 0 LWTUNNEL_ENCAP_MPLS = 1 LWTUNNEL_ENCAP_IP = 2 LWTUNNEL_ENCAP_ILA = 3 LWTUNNEL_ENCAP_IP6 = 4 LWTUNNEL_ENCAP_SEG6 = 5 LWTUNNEL_ENCAP_BPF = 6 LWTUNNEL_ENCAP_SEG6_LOCAL = 7 IP6_RT_PRIO_USER = 1024 class nlflags(object): def encode(self): if isinstance(self['flags'], str): self['flags'] = self['flags'].split(',') if isinstance(self['flags'], (set, tuple, list)): self['flags'] = self.names2flags(self['flags']) if isinstance(self.get('scope'), str): self['scope'] = self.name2scope(self['scope']) return super(nlflags, self).encode() @staticmethod def scope2name(scope): return RT_SCOPE_VALUES[scope].lower()[9:] @staticmethod def name2scope(scope): return RT_SCOPE_NAMES['RT_SCOPE_' + scope.upper()] @staticmethod def flags2names(flags): ret = [] for flag in RTNH_F_VALUES: if (flag & flags) == flag: ret.append(RTNH_F_VALUES[flag].lower()[7:]) return ret @staticmethod def names2flags(flags): ret = 0 if isinstance(flags, str): flags = [flags] for flag in flags: ret |= RTNH_F_NAMES['RTNH_F_' + flag.upper()] return ret class rtmsg_base(nlflags): ''' Route message C structure:: struct rtmsg { unsigned char rtm_family; /* Address family of route */ unsigned char rtm_dst_len; /* Length of destination */ unsigned char rtm_src_len; /* Length of source */ unsigned char rtm_tos; /* TOS filter */ unsigned char rtm_table; /* Routing table ID */ unsigned char rtm_protocol; /* Routing protocol; see below */ unsigned char rtm_scope; /* See below */ unsigned char rtm_type; /* See below */ unsigned int rtm_flags; }; ''' __slots__ = () prefix = 'RTA_' sql_constraints = { 'RTA_TABLE': 'NOT NULL DEFAULT 0', 'RTA_DST': "NOT NULL DEFAULT ''", 'RTA_OIF': 'NOT NULL DEFAULT 0', 'RTA_PRIORITY': 'NOT NULL DEFAULT 0', 'RTA_VIA': "NOT NULL DEFAULT ''", 'RTA_NEWDST': "NOT NULL DEFAULT ''", } fields = ( 
('family', 'B'), ('dst_len', 'B'), ('src_len', 'B'), ('tos', 'B'), ('table', 'B'), ('proto', 'B'), ('scope', 'B'), ('type', 'B'), ('flags', 'I'), ) nla_map = ( ('RTA_UNSPEC', 'none'), ('RTA_DST', 'target'), ('RTA_SRC', 'target'), ('RTA_IIF', 'uint32'), ('RTA_OIF', 'uint32'), ('RTA_GATEWAY', 'target'), ('RTA_PRIORITY', 'uint32'), ('RTA_PREFSRC', 'target'), ('RTA_METRICS', 'metrics'), ('RTA_MULTIPATH', '*get_nh'), ('RTA_PROTOINFO', 'uint32'), ('RTA_FLOW', 'uint32'), ('RTA_CACHEINFO', 'cacheinfo'), ('RTA_SESSION', 'hex'), ('RTA_MP_ALGO', 'hex'), ('RTA_TABLE', 'uint32'), ('RTA_MARK', 'uint32'), ('RTA_MFC_STATS', 'rta_mfc_stats'), ('RTA_VIA', 'rtvia'), ('RTA_NEWDST', 'target'), ('RTA_PREF', 'uint8'), ('RTA_ENCAP_TYPE', 'uint16'), ('RTA_ENCAP', 'encap_info'), ('RTA_EXPIRES', 'hex'), ) @staticmethod def encap_info(self, *argv, **kwarg): encap_type = None # Check, if RTA_ENCAP_TYPE is decoded already # for name, value in self['attrs']: if name == 'RTA_ENCAP_TYPE': encap_type = value break else: # No RTA_ENCAP_TYPE met, so iterate all the chain. # Ugly, but to do otherwise would be too complicated. 
# data = kwarg['data'] offset = kwarg['offset'] while offset < len(data): # Shift offset to the next NLA # NLA header: # # uint16 length # uint16 type # try: offset += struct.unpack('H', data[offset : offset + 2])[0] # 21 == RTA_ENCAP_TYPE # FIXME: should not be hardcoded if ( struct.unpack('H', data[offset + 2 : offset + 4])[0] == 21 ): encap_type = struct.unpack( 'H', data[offset + 4 : offset + 6] )[0] break except: # in the case of any decoding error return self.hex break # return specific classes # return self.encaps.get(encap_type, self.hex) class mpls_encap_info(nla): prefix = 'MPLS_IPTUNNEL_' __slots__ = () nla_map = ( ('MPLS_IPTUNNEL_UNSPEC', 'none'), ('MPLS_IPTUNNEL_DST', 'mpls_target'), ('MPLS_IPTUNNEL_TTL', 'uint8'), ) class seg6_encap_info(nla): __slots__ = () nla_map = ( ('SEG6_IPTUNNEL_UNSPEC', 'none'), ('SEG6_IPTUNNEL_SRH', 'ipv6_sr_hdr'), ) class ipv6_sr_hdr(nla): __slots__ = () fields = ( ('encapmode', 'I'), ('nexthdr', 'B'), ('hdrlen', 'B'), ('type', 'B'), ('segments_left', 'B'), ('first_segment', 'B'), ('flags', 'B'), ('reserved', 'H'), ('segs', 's'), # Potentially several type-length-value ('tlvs', 's'), ) # Corresponding values for seg6 encap modes SEG6_IPTUN_MODE_INLINE = 0 SEG6_IPTUN_MODE_ENCAP = 1 # Mapping string to nla value encapmodes = { "inline": SEG6_IPTUN_MODE_INLINE, "encap": SEG6_IPTUN_MODE_ENCAP, } # Reverse mapping: mapping nla value to string r_encapmodes = {v: k for k, v in encapmodes.items()} # Nla value for seg6 type SEG6_TYPE = 4 # Flag value for hmac SR6_FLAG1_HMAC = 1 << 3 # Tlv value for hmac SR6_TLV_HMAC = 5 # Utility function to get the family from the msg def get_family(self): pointer = self while pointer.parent is not None: pointer = pointer.parent return pointer.get('family', AF_UNSPEC) def encode(self): # Retrieve the family family = self.get_family() # Seg6 can be applied only to IPv6 and IPv4 if family == AF_INET6 or family == AF_INET: # Get mode mode = self['mode'] # Get segs segs = self['segs'] # Get hmac hmac = 
self.get('hmac', None) # With "inline" mode there is not # encap into an outer IPv6 header if mode == "inline": # Add :: to segs segs.insert(0, "::") # Add mode to value self['encapmode'] = self.encapmodes.get( mode, self.SEG6_IPTUN_MODE_ENCAP ) # Calculate srlen srhlen = 8 + 16 * len(segs) # If we are using hmac we have a tlv as trailer data if hmac: # Since we can use sha1 or sha256 srhlen += 40 # Calculate and set hdrlen self['hdrlen'] = (srhlen >> 3) - 1 # Add seg6 type self['type'] = self.SEG6_TYPE # Add segments left self['segments_left'] = len(segs) - 1 # Add fitst segment self['first_segment'] = len(segs) - 1 # If hmac is used we have to set the flags if hmac: # Add SR6_FLAG1_HMAC self['flags'] |= self.SR6_FLAG1_HMAC # Init segs self['segs'] = b'' # Iterate over segments for seg in segs: # Convert to network byte order and add to value self['segs'] += inet_pton(AF_INET6, seg) # Initialize tlvs self['tlvs'] = b'' # If hmac is used we have to properly init tlvs if hmac: # Put type self['tlvs'] += struct.pack('B', self.SR6_TLV_HMAC) # Put length -> 40-2 self['tlvs'] += struct.pack('B', 38) # Put reserved self['tlvs'] += struct.pack('H', 0) # Put hmac key self['tlvs'] += struct.pack('>I', hmac) # Put hmac self['tlvs'] += struct.pack('QQQQ', 0, 0, 0, 0) else: raise TypeError( 'Family %s not supported for seg6 tunnel' % family ) # Finally encode as nla nla.encode(self) # Utility function to verify if hmac is present def has_hmac(self): # Useful during the decoding return self['flags'] & self.SR6_FLAG1_HMAC def decode(self): # Decode the data nla.decode(self) # Extract the encap mode self['mode'] = self.r_encapmodes.get( self['encapmode'], "encap" ) # Calculate offset of the segs offset = self.offset + 16 # Point the addresses addresses = self.data[offset:] # Extract the number of segs n_segs = self['segments_left'] + 1 # Init segs segs = [] # Move 128 bit in each step for i in range(n_segs): # Save the segment segs.append( inet_ntop(AF_INET6, addresses[i * 16 : i 
* 16 + 16]) ) # Save segs self['segs'] = segs # Init tlvs self['tlvs'] = '' # If hmac is used if self.has_hmac(): # Point to the start of hmac hmac = addresses[n_segs * 16 : n_segs * 16 + 40] # Save tlvs section self['tlvs'] = hexdump(hmac) # Show also the hmac key self['hmac'] = hexdump(hmac[4:8]) class bpf_encap_info(nla): __slots__ = () nla_map = ( ('LWT_BPF_UNSPEC', 'none'), ('LWT_BPF_IN', 'bpf_obj'), ('LWT_BPF_OUT', 'bpf_obj'), ('LWT_BPF_XMIT', 'bpf_obj'), ('LWT_BPF_XMIT_HEADROOM', 'uint32'), ) class bpf_obj(nla): __slots__ = () nla_map = ( ('LWT_BPF_PROG_UNSPEC', 'none'), ('LWT_BPF_PROG_FD', 'uint32'), ('LWT_BPF_PROG_NAME', 'asciiz'), ) class seg6local_encap_info(nla): __slots__ = () nla_map = ( ('SEG6_LOCAL_UNSPEC', 'none'), ('SEG6_LOCAL_ACTION', 'action'), ('SEG6_LOCAL_SRH', 'ipv6_sr_hdr'), ('SEG6_LOCAL_TABLE', 'table'), ('SEG6_LOCAL_NH4', 'nh4'), ('SEG6_LOCAL_NH6', 'nh6'), ('SEG6_LOCAL_IIF', 'iif'), ('SEG6_LOCAL_OIF', 'oif'), ('SEG6_LOCAL_BPF', 'bpf_obj'), ('SEG6_LOCAL_VRFTABLE', 'vrf_table'), ) class bpf_obj(nla): __slots__ = () nla_map = ( ('LWT_BPF_PROG_UNSPEC', 'none'), ('LWT_BPF_PROG_FD', 'uint32'), ('LWT_BPF_PROG_NAME', 'asciiz'), ) class ipv6_sr_hdr(nla): __slots__ = () fields = ( ('nexthdr', 'B'), ('hdrlen', 'B'), ('type', 'B'), ('segments_left', 'B'), ('first_segment', 'B'), ('flags', 'B'), ('reserved', 'H'), ('segs', 's'), # Potentially several type-length-value ('tlvs', 's'), ) # Corresponding values for seg6 encap modes SEG6_IPTUN_MODE_INLINE = 0 SEG6_IPTUN_MODE_ENCAP = 1 # Mapping string to nla value encapmodes = { "inline": SEG6_IPTUN_MODE_INLINE, "encap": SEG6_IPTUN_MODE_ENCAP, } # Reverse mapping: mapping nla value to string r_encapmodes = {v: k for k, v in encapmodes.items()} # Nla value for seg6 type SEG6_TYPE = 4 # Flag value for hmac SR6_FLAG1_HMAC = 1 << 3 # Tlv value for hmac SR6_TLV_HMAC = 5 # Utility function to get the family from the msg def get_family(self): pointer = self while pointer.parent is not None: pointer = 
pointer.parent return pointer.get('family', AF_UNSPEC) def encode(self): # Retrieve the family family = self.get_family() # Seg6 can be applied only to IPv6 if family == AF_INET6: # Get mode mode = self['mode'] # Get segs segs = self['segs'] # Get hmac hmac = self.get('hmac', None) # With "inline" mode there is not # encap into an outer IPv6 header if mode == "inline": # Add :: to segs segs.insert(0, "::") # Add mode to value self['encapmode'] = self.encapmodes.get( mode, self.SEG6_IPTUN_MODE_ENCAP ) # Calculate srlen srhlen = 8 + 16 * len(segs) # If we are using hmac we have a tlv as trailer data if hmac: # Since we can use sha1 or sha256 srhlen += 40 # Calculate and set hdrlen self['hdrlen'] = (srhlen >> 3) - 1 # Add seg6 type self['type'] = self.SEG6_TYPE # Add segments left self['segments_left'] = len(segs) - 1 # Add fitst segment self['first_segment'] = len(segs) - 1 # If hmac is used we have to set the flags if hmac: # Add SR6_FLAG1_HMAC self['flags'] |= self.SR6_FLAG1_HMAC # Init segs self['segs'] = b'' # Iterate over segments for seg in segs: # Convert to network byte order and add to value self['segs'] += inet_pton(family, seg) # Initialize tlvs self['tlvs'] = b'' # If hmac is used we have to properly init tlvs if hmac: # Put type self['tlvs'] += struct.pack('B', self.SR6_TLV_HMAC) # Put length -> 40-2 self['tlvs'] += struct.pack('B', 38) # Put reserved self['tlvs'] += struct.pack('H', 0) # Put hmac key self['tlvs'] += struct.pack('>I', hmac) # Put hmac self['tlvs'] += struct.pack('QQQQ', 0, 0, 0, 0) else: raise TypeError( 'Family %s not supported for seg6 tunnel' % family ) # Finally encode as nla nla.encode(self) # Utility function to verify if hmac is present def has_hmac(self): # Useful during the decoding return self['flags'] & self.SR6_FLAG1_HMAC def decode(self): # Decode the data nla.decode(self) # Extract the encap mode self['mode'] = self.r_encapmodes.get( self['encapmode'], "encap" ) # Calculate offset of the segs offset = self.offset + 16 # 
Point the addresses addresses = self.data[offset:] # Extract the number of segs n_segs = self['segments_left'] + 1 # Init segs segs = [] # Move 128 bit in each step for i in range(n_segs): # Save the segment segs.append( inet_ntop(AF_INET6, addresses[i * 16 : i * 16 + 16]) ) # Save segs self['segs'] = segs # Init tlvs self['tlvs'] = '' # If hmac is used if self.has_hmac(): # Point to the start of hmac hmac = addresses[n_segs * 16 : n_segs * 16 + 40] # Save tlvs section self['tlvs'] = hexdump(hmac) # Show also the hmac key self['hmac'] = hexdump(hmac[4:8]) class table(nla): __slots__ = () # Table ID fields = (('value', 'I'),) class action(nla): __slots__ = () # Action fields = (('value', 'I'),) SEG6_LOCAL_ACTION_UNSPEC = 0 SEG6_LOCAL_ACTION_END = 1 SEG6_LOCAL_ACTION_END_X = 2 SEG6_LOCAL_ACTION_END_T = 3 SEG6_LOCAL_ACTION_END_DX2 = 4 SEG6_LOCAL_ACTION_END_DX6 = 5 SEG6_LOCAL_ACTION_END_DX4 = 6 SEG6_LOCAL_ACTION_END_DT6 = 7 SEG6_LOCAL_ACTION_END_DT4 = 8 SEG6_LOCAL_ACTION_END_B6 = 9 SEG6_LOCAL_ACTION_END_B6_ENCAP = 10 SEG6_LOCAL_ACTION_END_BM = 11 SEG6_LOCAL_ACTION_END_S = 12 SEG6_LOCAL_ACTION_END_AS = 13 SEG6_LOCAL_ACTION_END_AM = 14 SEG6_LOCAL_ACTION_END_BPF = 15 SEG6_LOCAL_ACTION_END_DT46 = 16 actions = { 'End': SEG6_LOCAL_ACTION_END, 'End.X': SEG6_LOCAL_ACTION_END_X, 'End.T': SEG6_LOCAL_ACTION_END_T, 'End.DX2': SEG6_LOCAL_ACTION_END_DX2, 'End.DX6': SEG6_LOCAL_ACTION_END_DX6, 'End.DX4': SEG6_LOCAL_ACTION_END_DX4, 'End.DT6': SEG6_LOCAL_ACTION_END_DT6, 'End.DT4': SEG6_LOCAL_ACTION_END_DT4, 'End.B6': SEG6_LOCAL_ACTION_END_B6, 'End.B6.Encaps': SEG6_LOCAL_ACTION_END_B6_ENCAP, 'End.BM': SEG6_LOCAL_ACTION_END_BM, 'End.S': SEG6_LOCAL_ACTION_END_S, 'End.AS': SEG6_LOCAL_ACTION_END_AS, 'End.AM': SEG6_LOCAL_ACTION_END_AM, 'End.BPF': SEG6_LOCAL_ACTION_END_BPF, 'End.DT46': SEG6_LOCAL_ACTION_END_DT46, } def encode(self): # Get action type and convert string to value action = self['value'] self['value'] = self.actions.get( action, self.SEG6_LOCAL_ACTION_UNSPEC ) # Convert action 
type to u32 self['value'] = self['value'] & 0xFFFFFFFF # Finally encode as nla nla.encode(self) class iif(nla): __slots__ = () # Index of the incoming interface fields = (('value', 'I'),) class oif(nla): __slots__ = () # Index of the outcoming interface fields = (('value', 'I'),) class nh4(nla_string): __slots__ = () # Nexthop of the IPv4 family def encode(self): # Convert to network byte order self['value'] = inet_pton(AF_INET, self['value']) # Finally encode as nla nla_string.encode(self) def decode(self): # Decode the data nla_string.decode(self) # Convert the packed IP address to its string representation self['value'] = inet_ntop(AF_INET, self['value']) class nh6(nla_string): __slots__ = () # Nexthop of the IPv6 family def encode(self): # Convert to network byte order self['value'] = inet_pton(AF_INET6, self['value']) # Finally encode as nla nla_string.encode(self) def decode(self): # Decode the data nla_string.decode(self) # Convert the packed IP address to its string representation self['value'] = inet_ntop(AF_INET6, self['value']) class vrf_table(nla): __slots__ = () # VRF Table ID fields = (('value', 'I'),) # # TODO: add here other lwtunnel types # encaps = { LWTUNNEL_ENCAP_MPLS: mpls_encap_info, LWTUNNEL_ENCAP_SEG6: seg6_encap_info, LWTUNNEL_ENCAP_BPF: bpf_encap_info, LWTUNNEL_ENCAP_SEG6_LOCAL: seg6local_encap_info, } class rta_mfc_stats(nla): __slots__ = () fields = ( ('mfcs_packets', 'uint64'), ('mfcs_bytes', 'uint64'), ('mfcs_wrong_if', 'uint64'), ) class metrics(nla): __slots__ = () prefix = 'RTAX_' nla_map = ( ('RTAX_UNSPEC', 'none'), ('RTAX_LOCK', 'uint32'), ('RTAX_MTU', 'uint32'), ('RTAX_WINDOW', 'uint32'), ('RTAX_RTT', 'uint32'), ('RTAX_RTTVAR', 'uint32'), ('RTAX_SSTHRESH', 'uint32'), ('RTAX_CWND', 'uint32'), ('RTAX_ADVMSS', 'uint32'), ('RTAX_REORDERING', 'uint32'), ('RTAX_HOPLIMIT', 'uint32'), ('RTAX_INITCWND', 'uint32'), ('RTAX_FEATURES', 'uint32'), ('RTAX_RTO_MIN', 'uint32'), ('RTAX_INITRWND', 'uint32'), ('RTAX_QUICKACK', 'uint32'), ) 
@staticmethod def get_nh(self, *argv, **kwarg): return nh class rtvia(nla_string): __slots__ = () sql_type = 'TEXT' def encode(self): family = self.get('family', AF_UNSPEC) if family in (AF_INET, AF_INET6): addr = inet_pton(family, self['addr']) else: raise TypeError('Family %s not supported for RTA_VIA' % family) self['value'] = struct.pack('H', family) + addr nla_string.encode(self) def decode(self): nla_string.decode(self) family = struct.unpack('H', self['value'][:2])[0] addr = self['value'][2:] if addr: if (family == AF_INET and len(addr) == 4) or ( family == AF_INET6 and len(addr) == 16 ): addr = inet_ntop(family, addr) else: addr = hexdump(addr) self.value = {'family': family, 'addr': addr} class cacheinfo(nla): __slots__ = () fields = ( ('rta_clntref', 'I'), ('rta_lastuse', 'I'), ('rta_expires', 'i'), ('rta_error', 'I'), ('rta_used', 'I'), ('rta_id', 'I'), ('rta_ts', 'I'), ('rta_tsage', 'I'), ) class rtmsg(rtmsg_base, nlmsg): __slots__ = () def encode(self): if self.get('family') == AF_MPLS: # force fields self['dst_len'] = 20 self['table'] = 254 self['type'] = 1 # assert NLA types for n in self.get('attrs', []): if n[0] not in ( 'RTA_OIF', 'RTA_DST', 'RTA_VIA', 'RTA_NEWDST', 'RTA_MULTIPATH', ): raise TypeError('Incorrect NLA type %s for AF_MPLS' % n[0]) super(rtmsg_base, self).encode() class nh(rtmsg_base, nla): __slots__ = () is_nla = False sql_constraints = {} cell_header = (('length', 'H'),) fields = (('flags', 'B'), ('hops', 'B'), ('oif', 'i')) pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/000077500000000000000000000000001455030217500207245ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/README.md000066400000000000000000000000571455030217500222050ustar00rootroot00000000000000See the API description in `sched_template.py` pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/__init__.py000066400000000000000000000052671455030217500230470ustar00rootroot00000000000000import types from pyroute2.netlink import nla, nlmsg from 
pyroute2.netlink.rtnl.tcmsg import ( cls_basic, cls_flow, cls_fw, cls_matchall, cls_u32, sched_bpf, sched_cake, sched_choke, sched_clsact, sched_codel, sched_drr, sched_fq_codel, sched_hfsc, sched_htb, sched_ingress, sched_netem, sched_pfifo, sched_pfifo_fast, sched_plug, sched_sfq, sched_tbf, sched_template, ) plugins = { 'plug': sched_plug, 'sfq': sched_sfq, 'clsact': sched_clsact, 'codel': sched_codel, 'fq_codel': sched_fq_codel, 'hfsc': sched_hfsc, 'htb': sched_htb, 'bpf': sched_bpf, 'tbf': sched_tbf, 'netem': sched_netem, 'fw': cls_fw, 'u32': cls_u32, 'matchall': cls_matchall, 'basic': cls_basic, 'flow': cls_flow, 'ingress': sched_ingress, 'pfifo': sched_pfifo, 'pfifo_fast': sched_pfifo_fast, 'choke': sched_choke, 'drr': sched_drr, 'prio': sched_pfifo_fast, 'cake': sched_cake, } class tcmsg(nlmsg): prefix = 'TCA_' fields = ( ('family', 'B'), ('pad1', 'B'), ('pad2', 'H'), ('index', 'i'), ('handle', 'I'), ('parent', 'I'), ('info', 'I'), ) nla_map = ( ('TCA_UNSPEC', 'none'), ('TCA_KIND', 'asciiz'), ('TCA_OPTIONS', 'get_options'), ('TCA_STATS', 'stats'), ('TCA_XSTATS', 'get_xstats'), ('TCA_RATE', 'hex'), ('TCA_FCNT', 'hex'), ('TCA_STATS2', 'get_stats2'), ('TCA_STAB', 'hex'), ) class stats(nla): fields = ( ('bytes', 'Q'), ('packets', 'I'), ('drop', 'I'), ('overlimits', 'I'), ('bps', 'I'), ('pps', 'I'), ('qlen', 'I'), ('backlog', 'I'), ) def get_plugin(self, plug, *argv, **kwarg): # get the plugin name kind = self.get_attr('TCA_KIND') # get the plugin implementation or the default one p = plugins.get(kind, sched_template) # get the interface interface = getattr(p, plug, getattr(sched_template, plug)) # if it is a method, run and return the result if isinstance(interface, types.FunctionType): return interface(self, *argv, **kwarg) else: return interface @staticmethod def get_stats2(self, *argv, **kwarg): return self.get_plugin('stats2', *argv, **kwarg) @staticmethod def get_xstats(self, *argv, **kwarg): return self.get_plugin('stats', *argv, **kwarg) @staticmethod 
def get_options(self, *argv, **kwarg): return self.get_plugin('options', *argv, **kwarg) pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/act_bpf.py000066400000000000000000000017231455030217500226770ustar00rootroot00000000000000from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg.common import tc_actions class options(nla): nla_map = ( ('TCA_ACT_BPF_UNSPEC', 'none'), ('TCA_ACT_BPF_TM,', 'none'), ('TCA_ACT_BPF_PARMS', 'tca_act_bpf_parms'), ('TCA_ACT_BPF_OPS_LEN', 'uint16'), ('TCA_ACT_BPF_OPS', 'hex'), ('TCA_ACT_BPF_FD', 'uint32'), ('TCA_ACT_BPF_NAME', 'asciiz'), ) class tca_act_bpf_parms(nla): fields = ( ('index', 'I'), ('capab', 'I'), ('action', 'i'), ('refcnt', 'i'), ('bindcnt', 'i'), ) def get_parameters(kwarg): ret = {'attrs': []} if 'fd' in kwarg: ret['attrs'].append(['TCA_ACT_BPF_FD', kwarg['fd']]) if 'name' in kwarg: ret['attrs'].append(['TCA_ACT_BPF_NAME', kwarg['name']]) a = tc_actions[kwarg.get('action', 'drop')] ret['attrs'].append(['TCA_ACT_BPF_PARMS', {'action': a}]) return ret pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/act_connmark.py000066400000000000000000000023221455030217500237340ustar00rootroot00000000000000from pyroute2.netlink import NLA_F_NESTED, nla from pyroute2.netlink.rtnl.tcmsg.common import tc_actions """ connmark - netfilter connmark retriever action see tc-connmark(8) This filter restores the connection mark into the packet mark. Connection marks are typically handled by the CONNMARK iptables module. See iptables-extensions(8). There is no mandatory parameter, but you can specify the action, which defaults to 'pipe', and the conntrack zone (see the manual). """ class options(nla): nla_flags = NLA_F_NESTED nla_map = ( ('TCA_CONNMARK_UNSPEC', 'none'), ('TCA_CONNMARK_PARMS', 'tca_connmark_parms'), ('TCA_CONNMARK_TM', 'none'), ) class tca_connmark_parms(nla): fields = ( ('index', 'I'), ('capab', 'I'), ('action', 'i'), ('refcnt', 'i'), ('bindcnt', 'i'), ('zone', 'H'), ('__padding', 'H'), # XXX is there a better way to do this ? 
) def get_parameters(kwarg): ret = {'attrs': []} parms = { 'action': tc_actions[kwarg.get('action', 'pipe')], 'zone': kwarg.get('zone', 0), } ret['attrs'].append(['TCA_CONNMARK_PARMS', parms]) return ret pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/act_gact.py000066400000000000000000000013051455030217500230420ustar00rootroot00000000000000from pyroute2.netlink import NLA_F_NESTED, nla from pyroute2.netlink.rtnl.tcmsg.common import tc_actions class options(nla): nla_flags = NLA_F_NESTED nla_map = ( ('TCA_GACT_UNSPEC', 'none'), ('TCA_GACT_TM', 'none'), ('TCA_GACT_PARMS', 'tca_gact_parms'), ('TCA_GACT_PROB', 'none'), ) class tca_gact_parms(nla): fields = ( ('index', 'I'), ('capab', 'I'), ('action', 'i'), ('refcnt', 'i'), ('bindcnt', 'i'), ) def get_parameters(kwarg): ret = {'attrs': []} a = tc_actions[kwarg.get('action', 'drop')] ret['attrs'].append(['TCA_GACT_PARMS', {'action': a}]) return ret pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/act_mirred.py000066400000000000000000000032751455030217500234160ustar00rootroot00000000000000from pyroute2.netlink import NLA_F_NESTED, nla from pyroute2.netlink.rtnl.tcmsg.common import tc_actions """ Mirred - mirror/redirect action see tc-mirred(8) Use like any other action, with the following parameters available: - direction (mandatory): ingress or egress - action (mandatory): mirror or redirect - ifindex (mandatory): destination interface for mirrored or redirected packets - index: explicit index for this action """ # see tc_mirred.h MIRRED_EACTIONS = { ("egress", "redirect"): 1, # redirect packet to egress ("egress", "mirror"): 2, # mirror packet to egress ("ingress", "redirect"): 3, # redirect packet to ingress ("ingress", "mirror"): 4, # mirror packet to ingress } class options(nla): nla_flags = NLA_F_NESTED nla_map = ( ('TCA_MIRRED_UNSPEC', 'none'), ('TCA_MIRRED_TM', 'none'), ('TCA_MIRRED_PARMS', 'tca_mirred_parms'), ) class tca_mirred_parms(nla): fields = ( ('index', 'I'), ('capab', 'I'), ('action', 'i'), ('refcnt', 'i'), 
('bindcnt', 'i'), ('eaction', 'i'), ('ifindex', 'I'), ) def get_parameters(kwarg): ret = {'attrs': []} # direction, action and ifindex are mandatory parms = { 'eaction': MIRRED_EACTIONS[(kwarg['direction'], kwarg['action'])], 'ifindex': kwarg['ifindex'], } if 'index' in kwarg: parms['index'] = int(kwarg['index']) # From m_mirred.c if kwarg['action'] == 'redirect': parms['action'] = tc_actions['stolen'] else: # mirror parms['action'] = tc_actions['pipe'] ret['attrs'].append(['TCA_MIRRED_PARMS', parms]) return ret pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/act_police.py000066400000000000000000000035121455030217500234010ustar00rootroot00000000000000from pyroute2.netlink.rtnl.tcmsg.common import ( get_rate_parameters, nla_plus_rtab, ) actions = { 'unspec': -1, # TC_POLICE_UNSPEC 'ok': 0, # TC_POLICE_OK 'reclassify': 1, # TC_POLICE_RECLASSIFY 'shot': 2, # TC_POLICE_SHOT 'drop': 2, # TC_POLICE_SHOT 'pipe': 3, } # TC_POLICE_PIPE class options(nla_plus_rtab): nla_map = ( ('TCA_POLICE_UNSPEC', 'none'), ('TCA_POLICE_TBF', 'police_tbf'), ('TCA_POLICE_RATE', 'rtab'), ('TCA_POLICE_PEAKRATE', 'ptab'), ('TCA_POLICE_AVRATE', 'uint32'), ('TCA_POLICE_RESULT', 'uint32'), ) class police_tbf(nla_plus_rtab.parms): fields = ( ('index', 'I'), ('action', 'i'), ('limit', 'I'), ('burst', 'I'), ('mtu', 'I'), ('rate_cell_log', 'B'), ('rate___reserved', 'B'), ('rate_overhead', 'H'), ('rate_cell_align', 'h'), ('rate_mpu', 'H'), ('rate', 'I'), ('peak_cell_log', 'B'), ('peak___reserved', 'B'), ('peak_overhead', 'H'), ('peak_cell_align', 'h'), ('peak_mpu', 'H'), ('peak', 'I'), ('refcnt', 'i'), ('bindcnt', 'i'), ('capab', 'I'), ) class nla_plus_police(object): class police(options): pass def get_parameters(kwarg): # if no limit specified, set it to zero to make # the next call happy kwarg['limit'] = kwarg.get('limit', 0) tbfp = get_rate_parameters(kwarg) # create an alias -- while TBF uses 'buffer', rate # policy uses 'burst' tbfp['burst'] = tbfp['buffer'] # action resolver tbfp['action'] = 
actions[kwarg.get('action', 'reclassify')] return {'attrs': [['TCA_POLICE_TBF', tbfp], ['TCA_POLICE_RATE', True]]} pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/act_skbedit.py000066400000000000000000000064121455030217500235550ustar00rootroot00000000000000''' skbedit +++++++ Usage:: from pyroute2 import IPRoute # Assume you are working with eth1 interface IFNAME = "eth1" ipr = IPRoute() ifindex = ipr.link_lookup(ifname=IFNAME) # First create parent qdisc ipr.tc("add", "htb", index=ifindex, handle=0x10000) # Then add a matchall filter with skbedit action # Simple action example action = {"kind": "skbedit", "priority": 0x10001 # Also known as "1:1" in TC format } ipr.tc("add-filter", "matchall", index=ifindex, parent=0x10000, prio=1, action=action) # Extended action example action = {"kind": "skbedit", "priority": 0x10001, # Also known as "1:1" in TC format "mark": 0x1337, "mask": 0xFFFFFFFF, "ptype": "host" } ipr.tc("add-filter", "matchall", index=ifindex, parent=0x10000, prio=1, action=action) NOTES: Here is the list of all supported options:: - mark: integer - mask: integer - priority: integer - ptype: "host", "otherhost", "broadcast" or "multicast" - queue: integer ''' from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg.common import tc_actions # Packet types defined in if_packet.h PACKET_HOST = 0 PACKET_BROADCAST = 1 PACKET_MULTICAST = 2 PACKET_OTHERHOST = 3 def convert_ptype(value): types = { 'host': PACKET_HOST, 'otherhost': PACKET_OTHERHOST, 'broadcast': PACKET_BROADCAST, 'multicast': PACKET_MULTICAST, } res = types.get(value.lower()) if res is not None: return res raise ValueError( 'Invalid ptype specified! See tc-skbedit man ' 'page for valid values.' 
) def get_parameters(kwarg): ret = {'attrs': []} attrs_map = ( ('priority', 'TCA_SKBEDIT_PRIORITY'), ('queue', 'TCA_SKBEDIT_QUEUE_MAPPING'), ('mark', 'TCA_SKBEDIT_MARK'), ('ptype', 'TCA_SKBEDIT_PTYPE'), ('mask', 'TCA_SKBEDIT_MASK'), ) # Assign TCA_SKBEDIT_PARMS first parms = {} parms['action'] = tc_actions['pipe'] ret['attrs'].append(['TCA_SKBEDIT_PARMS', parms]) for k, v in attrs_map: r = kwarg.get(k, None) if r is not None: if k == 'ptype': r = convert_ptype(r) ret['attrs'].append([v, r]) return ret class options(nla): nla_map = ( ('TCA_SKBEDIT_UNSPEC', 'none'), ('TCA_SKBEDIT_TM', 'tca_parse_tm'), ('TCA_SKBEDIT_PARMS', 'tca_parse_parms'), ('TCA_SKBEDIT_PRIORITY', 'uint32'), ('TCA_SKBEDIT_QUEUE_MAPPING', 'uint16'), ('TCA_SKBEDIT_MARK', 'uint32'), ('TCA_SKBEDIT_PAD', 'hex'), ('TCA_SKBEDIT_PTYPE', 'uint16'), ('TCA_SKBEDIT_MASK', 'uint32'), ('TCA_SKBEDIT_FLAGS', 'uint64'), ) class tca_parse_parms(nla): # As described in tc_mpls.h, it uses # generic TC action fields fields = ( ('index', 'I'), ('capab', 'I'), ('action', 'i'), ('refcnt', 'i'), ('bindcnt', 'i'), ) class tca_parse_tm(nla): # See struct tcf_t fields = ( ('install', 'Q'), ('lastuse', 'Q'), ('expires', 'Q'), ('firstuse', 'Q'), ) pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/act_vlan.py000066400000000000000000000026711455030217500230730ustar00rootroot00000000000000from socket import htons from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg.common import tc_actions v_actions = {'pop': 1, 'push': 2, 'modify': 3} class options(nla): nla_map = ( ('TCA_VLAN_UNSPEC', 'none'), ('TCA_VLAN_TM', 'none'), ('TCA_VLAN_PARMS', 'tca_vlan_parms'), ('TCA_VLAN_PUSH_VLAN_ID', 'uint16'), ('TCA_VLAN_PUSH_VLAN_PROTOCOL', 'uint16'), ('TCA_VLAN_PAD', 'none'), ('TCA_VLAN_PUSH_VLAN_PRIORITY', 'uint8'), ) class tca_vlan_parms(nla): fields = ( ('index', 'I'), ('capab', 'I'), ('action', 'i'), ('refcnt', 'i'), ('bindcnt', 'i'), ('v_action', 'i'), ) def get_parameters(kwarg): ret = {'attrs': []} parms = {'v_action': 
v_actions[kwarg['v_action']]} parms['action'] = tc_actions[kwarg.get('action', 'pipe')] ret['attrs'].append(['TCA_VLAN_PARMS', parms]) # Vlan id compulsory for "push" and "modify" if kwarg['v_action'] in ['push', 'modify']: ret['attrs'].append(['TCA_VLAN_PUSH_VLAN_ID', kwarg['id']]) if 'priority' in kwarg: ret['attrs'].append(['TCA_VLAN_PUSH_VLAN_PRIORITY', kwarg['priority']]) if kwarg.get('protocol', '802.1Q') == '802.1ad': ret['attrs'].append(['TCA_VLAN_PUSH_VLAN_PROTOCOL', htons(0x88A8)]) else: ret['attrs'].append(['TCA_VLAN_PUSH_VLAN_PROTOCOL', htons(0x8100)]) return ret pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/cls_basic.py000066400000000000000000000204361455030217500232250ustar00rootroot00000000000000''' basic +++++ Basic filter has multiple types supports. Examples with ipset matches:: # Prepare a simple match on an ipset at index 0 src # (the first ipset name that appears when running `ipset list`) match = [{"kind": "ipset", "index": 0, "mode": "src"}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # The same match but inverted, simply add inverse flag match = [{"kind": "ipset", "index": 0, "mode": "src", "inverse": True}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Still one ipset but with multiple dimensions: # comma separated list of modes match = [{"kind": "ipset", "index": 0, "mode": "src,dst"}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Now let's add multiple expressions (ipset 0 src and ipset 1 src) match = [{"kind": "ipset", "index": 0, "mode": "src", "relation": "and"}, {"kind": "ipset", "index": 1, "mode": "src"}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # The same works with OR (ipset 0 src or ipset 1 src) match = [{"kind": "ipset", "index": 0, "mode": "src", "relation": "OR"}, {"kind": "ipset", "index": 1, "mode": "src"}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, 
match=match) Examples with cmp matches:: # Repeating the example given in the man page match = [{"kind": "cmp", "layer": 2, "opnd": "gt", "align": "u16", "offset": 3, "mask": 0xff00, "value": 20}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Now, the same example but with variations # - use layer name instead of enum # - use operand sign instead of name match = [{"kind": "cmp", "layer": "transport", "opnd": ">","align": "u16", "offset": 3, "mask": 0xff00, "value": 20}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Again, the same example with all possible keywords even if they are # ignored match = [{"kind": "cmp", "layer": "tcp", "opnd": ">", "align": "u16", "offset": 3, "mask": 0xff00, "value": 20, "trans": False}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Another example, we want to work at the link layer # and filter incoming packets matching hwaddr 00:DE:AD:C0:DE:00 # OSI model tells us that the source hwaddr is at offset 0 of # the link layer. 
# Size of hwaddr is 6-bytes in length, so I use an u32 then an u16 # to do the complete match match = [{"kind": "cmp", "layer": "link", "opnd": "eq", "align": "u32", "offset": 0, "mask": 0xffffffff, "value": 0x00DEADC0, "relation": "and"}, {"kind": "cmp", "layer": "link", "opnd": "eq", "align": "u16", "offset": 4, "mask": 0xffff, "value": 0xDE00}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # As the man page says, here are the different key-value pairs you can use: # "layer": "link" or "eth" or 0 # "layer": "network" or "ip" or 1 # "layer": "transport" or "tcp" or 2 # "opnd": "eq" or "=" or 0 # "opnd": "gt" or ">" or 1 # "opnd": "lt" or "<" or 2 # "align": "u8" or "u16" or "u32" # "trans": True or False # "offset", "mask" and "value": any integer Examples with meta matches:: # Repeating the example given in the man page match = [{"kind": "meta", "object":{"kind": "nfmark", "opnd": "gt"}, "value": 24, "relation": "and"}, {"kind": "meta", "object":{"kind": "tcindex", "opnd": "eq"}, "value": 0xf0, "mask": 0xf0}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Now, the same example but with variations # - use operand sign instead of name match = [{"kind": "meta", "object":{"kind": "nfmark", "opnd": ">"}, "value": 24, "relation": "and"}, {"kind": "meta", "object":{"kind": "tcindex", "opnd": "="}, "value": 0xf0, "mask": 0xf0}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Another example given by the tc helper # meta(indev shift 1 eq "ppp") match = [{"kind": "meta", "object":{"kind": "dev", "opnd": "eq", "shift": 1}, "value": "ppp"}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match) # Another example, drop every packets arriving on ifb0 match = [{"kind": "meta", "object":{"kind": "dev", "opnd": "eq"}, "value": "ifb0"}] ip.tc("add-filter", "basic", ifb0, parent=0x10000, classid=0x10010, match=match, action="drop") # As the man page 
says, here are the different key-value pairs you can use: # "opnd": "eq" or "=" or 0 # "opnd": "gt" or ">" or 1 # "opnd": "lt" or "<" or 2 # "shift": any integer between 0 and 255 included # "kind" object: see `tc filter add dev iface basic match 'meta(list)'` result # "value": any string if kind matches 'dev' or 'sk_bound_if', # any integer otherwise NOTES: When not specified, `inverse` flag is set to False. Do not specify `relation` keyword on the last expression or if there is only one expression. `relation` can be written using multiple format: "and", "AND", "&&", "or", "OR", "||" You can combine multiple different types of ematch. Here is an example:: match = [{"kind": "cmp", "layer": 2, "opnd": "eq", "align": "u32", "offset": 0, "value": 32, "relation": "&&"}, {"kind": "meta", "object":{"kind": "vlan_tag", "opnd": "eq"}, "value": 100, "relation": "||"}, {"kind": "ipset", "index": 0, "mode": "src", "inverse": True} ] ''' import struct from socket import htons from pyroute2 import protocols from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg.common_act import get_tca_action, tca_act_prio from pyroute2.netlink.rtnl.tcmsg.common_ematch import ( get_tcf_ematches, nla_plus_tcf_ematch_opt, ) def fix_msg(msg, kwarg): msg['info'] = htons( kwarg.get('protocol', protocols.ETH_P_ALL) & 0xFFFF ) | ((kwarg.get('prio', 0) << 16) & 0xFFFF0000) def get_parameters(kwarg): ret = {'attrs': []} attrs_map = (('classid', 'TCA_BASIC_CLASSID'),) if kwarg.get('match'): ret['attrs'].append(['TCA_BASIC_EMATCHES', get_tcf_ematches(kwarg)]) if kwarg.get('action'): ret['attrs'].append(['TCA_BASIC_ACT', get_tca_action(kwarg)]) for k, v in attrs_map: r = kwarg.get(k, None) if r is not None: ret['attrs'].append([v, r]) return ret class options(nla): nla_map = ( ('TCA_BASIC_UNSPEC', 'none'), ('TCA_BASIC_CLASSID', 'uint32'), ('TCA_BASIC_EMATCHES', 'parse_basic_ematch_tree'), ('TCA_BASIC_ACT', 'tca_act_prio'), ('TCA_BASIC_POLICE', 'hex'), ) class parse_basic_ematch_tree(nla): 
nla_map = ( ('TCA_EMATCH_TREE_UNSPEC', 'none'), ('TCA_EMATCH_TREE_HDR', 'tcf_parse_header'), ('TCA_EMATCH_TREE_LIST', '*tcf_parse_list'), ) class tcf_parse_header(nla): fields = (('nmatches', 'H'), ('progid', 'H')) class tcf_parse_list(nla, nla_plus_tcf_ematch_opt): fields = ( ('matchid', 'H'), ('kind', 'H'), ('flags', 'H'), ('pad', 'H'), ('opt', 's'), ) def decode(self): nla.decode(self) size = 0 for field in self.fields + self.header: if 'opt' in field: # Ignore this field as it a hack used to brain encoder continue size += struct.calcsize(field[1]) start = self.offset + size end = self.offset + self.length data = self.data[start:end] self['opt'] = self.parse_ematch_options(self, data) tca_act_prio = tca_act_prio pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/cls_flow.py000066400000000000000000000110051455030217500231030ustar00rootroot00000000000000''' flow ++++ Flow filter supports two types of modes:: - map - hash # Prepare a Qdisc with fq-codel ip.tc("add", "fq_codel", ifb0, parent=0x10001, handle=0x10010) # Create flow filter with hash mode # Single: keys = "src" # Multi (comma separated list of keys): keys = "src,nfct-src" ip.tc("add-filter", "flow", ifb0, mode="hash", keys=keys, divisor=1024, perturb=60, handle=0x10, baseclass=0x10010, parent=0x10001) # Create flow filter with map mode # Simple map dst with no OP: ip.tc("add-filter", "flow", ifb0, mode="map", key="dst", divisor=1024, handle=10 baseclass=0x10010) # Same filter with xor OP: ops = [{"op": "xor", "num": 0xFF}] ip.tc("add-filter", "flow", ifb0, mode="map", key="dst", divisor=1024, handle=10 baseclass=0x10010, ops=ops) # Complex one with addend OP (incl. 
minus support): ops = [{"op": "addend", "num": '-192.168.0.0'}] ip.tc("add-filter", "flow", ifb0, mode="map", key="dst", divisor=1024, handle=10 baseclass=0x10010, ops=ops) # Example with multiple OPS: ops = [{"op": "and", "num": 0xFF}, {"op": "rshift", "num": 4}] ip.tc("add-filter", "flow", ifb0, mode="map", key="dst", divisor=1024, handle=10 baseclass=0x10010, ops=ops) NOTES: When using `map` mode, use the keyword `key` to pass a key. When using `hash` mode, use the keyword `keys` to pass a key even if there is only one key. In `map` mode, the `num` parameter in `OPS` is always an integer unless if you use the OP `addend`, which can be a string IPv4 address. You can also add a minus sign at the beginning of the `num` value even if it is an IPv4 address. ''' from socket import htons from pyroute2 import protocols from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg.common import ( get_tca_keys, get_tca_mode, get_tca_ops, tc_flow_keys, tc_flow_modes, ) from pyroute2.netlink.rtnl.tcmsg.common_act import get_tca_action, tca_act_prio def fix_msg(msg, kwarg): msg['info'] = htons( kwarg.get('protocol', protocols.ETH_P_ALL) & 0xFFFF ) | ((kwarg.get('prio', 0) << 16) & 0xFFFF0000) def get_parameters(kwarg): ret = {'attrs': []} attrs_map = ( ('baseclass', 'TCA_FLOW_BASECLASS'), ('divisor', 'TCA_FLOW_DIVISOR'), ('perturb', 'TCA_FLOW_PERTURB'), ) if kwarg.get('mode'): ret['attrs'].append(['TCA_FLOW_MODE', get_tca_mode(kwarg)]) if kwarg.get('mode') == 'hash': ret['attrs'].append(['TCA_FLOW_KEYS', get_tca_keys(kwarg, 'keys')]) if kwarg.get('mode') == 'map': ret['attrs'].append(['TCA_FLOW_KEYS', get_tca_keys(kwarg, 'key')]) # Check for OPS presence if 'ops' in kwarg: get_tca_ops(kwarg, ret['attrs']) if kwarg.get('action'): ret['attrs'].append(['TCA_FLOW_ACT', get_tca_action(kwarg)]) for k, v in attrs_map: r = kwarg.get(k, None) if r is not None: ret['attrs'].append([v, r]) return ret class options(nla): nla_map = ( ('TCA_FLOW_UNSPEC', 'none'), ('TCA_FLOW_KEYS', 
'tca_parse_keys'), ('TCA_FLOW_MODE', 'tca_parse_mode'), ('TCA_FLOW_BASECLASS', 'uint32'), ('TCA_FLOW_RSHIFT', 'uint32'), ('TCA_FLOW_ADDEND', 'uint32'), ('TCA_FLOW_MASK', 'uint32'), ('TCA_FLOW_XOR', 'uint32'), ('TCA_FLOW_DIVISOR', 'uint32'), ('TCA_FLOW_ACT', 'tca_act_prio'), ('TCA_FLOW_POLICE', 'hex'), ('TCA_FLOW_EMATCHES', 'hex'), ('TCA_FLOW_PERTURB', 'uint32'), ) class tca_parse_mode(nla): fields = (('flow_mode', 'I'),) def decode(self): nla.decode(self) for key, value in tc_flow_modes.items(): if self['flow_mode'] == value: self['flow_mode'] = key break def encode(self): self['flow_mode'] = self['value'] nla.encode(self) class tca_parse_keys(nla): fields = (('flow_keys', 'I'),) def decode(self): nla.decode(self) keys = '' for key, value in tc_flow_keys.items(): if value & self['flow_keys']: keys = '{0},{1}'.format(keys, key) self['flow_keys'] = keys.strip(',') def encode(self): self['flow_keys'] = self['value'] nla.encode(self) tca_act_prio = tca_act_prio pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/cls_fw.py000066400000000000000000000026231455030217500225560ustar00rootroot00000000000000from socket import htons from pyroute2 import protocols from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg.act_police import ( get_parameters as ap_parameters, ) from pyroute2.netlink.rtnl.tcmsg.act_police import nla_plus_police from pyroute2.netlink.rtnl.tcmsg.common_act import get_tca_action, tca_act_prio def fix_msg(msg, kwarg): msg['info'] = htons( kwarg.get('protocol', protocols.ETH_P_ALL) & 0xFFFF ) | ((kwarg.get('prio', 0) << 16) & 0xFFFF0000) def get_parameters(kwarg): ret = {'attrs': []} attrs_map = ( ('classid', 'TCA_FW_CLASSID'), # ('police', 'TCA_FW_POLICE'), # Handled in ap_parameters ('indev', 'TCA_FW_INDEV'), ('mask', 'TCA_FW_MASK'), ) if kwarg.get('rate'): ret['attrs'].append(['TCA_FW_POLICE', ap_parameters(kwarg)]) if kwarg.get('action'): ret['attrs'].append(['TCA_FW_ACT', get_tca_action(kwarg)]) for k, v in attrs_map: r = kwarg.get(k, None) if r is 
not None: ret['attrs'].append([v, r]) return ret class options(nla, nla_plus_police): nla_map = ( ('TCA_FW_UNSPEC', 'none'), ('TCA_FW_CLASSID', 'uint32'), ('TCA_FW_POLICE', 'police'), # TODO string? ('TCA_FW_INDEV', 'hex'), # TODO string ('TCA_FW_ACT', 'tca_act_prio'), ('TCA_FW_MASK', 'uint32'), ) tca_act_prio = tca_act_prio pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/cls_matchall.py000066400000000000000000000017321455030217500237270ustar00rootroot00000000000000from socket import htons from pyroute2 import protocols from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg.common_act import get_tca_action, tca_act_prio def fix_msg(msg, kwarg): msg['info'] = htons( kwarg.get('protocol', protocols.ETH_P_ALL) & 0xFFFF ) | ((kwarg.get('prio', 0) << 16) & 0xFFFF0000) def get_parameters(kwarg): ret = {'attrs': []} attrs_map = ( ('classid', 'TCA_MATCHALL_CLASSID'), ('flags', 'TCA_MATCHALL_FLAGS'), ) if kwarg.get('action'): ret['attrs'].append(['TCA_MATCHALL_ACT', get_tca_action(kwarg)]) for k, v in attrs_map: r = kwarg.get(k, None) if r is not None: ret['attrs'].append([v, r]) return ret class options(nla): nla_map = ( ('TCA_MATCHALL_UNSPEC', 'none'), ('TCA_MATCHALL_CLASSID', 'be32'), ('TCA_MATCHALL_ACT', 'tca_act_prio'), ('TCA_MATCHALL_FLAGS', 'be32'), ) tca_act_prio = tca_act_prio pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/cls_u32.py000066400000000000000000000175411455030217500225600ustar00rootroot00000000000000''' u32 +++ Filters can take an `action` argument, which affects the packet behavior when the filter matches. Currently the gact, bpf, and police action types are supported, and can be attached to the u32 and bpf filter types:: # An action can be a simple string, which translates to a gact type action = "drop" # Or it can be an explicit type (these are equivalent) action = dict(kind="gact", action="drop") # There can also be a chain of actions, which depend on the return # value of the previous action. 
action = [ dict(kind="bpf", fd=fd, name=name, action="ok"), dict(kind="police", rate="10kbit", burst=10240, limit=0), dict(kind="gact", action="ok"), ] # Add the action to a u32 match-all filter ip.tc("add", "htb", eth0, 0x10000, default=0x200000) ip.tc("add-filter", "u32", eth0, parent=0x10000, prio=10, protocol=protocols.ETH_P_ALL, target=0x10020, keys=["0x0/0x0+0"], action=action) # Add two more filters: One to send packets with a src address of # 192.168.0.1/32 into 1:10 and the second to send packets with a # dst address of 192.168.0.0/24 into 1:20 ip.tc("add-filter", "u32", eth0, parent=0x10000, prio=10, protocol=protocols.ETH_P_IP, target=0x10010, keys=["0xc0a80001/0xffffffff+12"]) # 0xc0a800010 = 192.168.0.1 # 0xffffffff = 255.255.255.255 (/32) # 12 = Source network field bit offset ip.tc("add-filter", "u32", eth0, parent=0x10000, prio=10, protocol=protocols.ETH_P_IP, target=0x10020, keys=["0xc0a80000/0xffffff00+16"]) # 0xc0a80000 = 192.168.0.0 # 0xffffff00 = 255.255.255.0 (/24) # 16 = Destination network field bit offset ''' import struct from socket import htons from pyroute2.netlink import nla, nlmsg from pyroute2.netlink.rtnl.tcmsg.act_police import ( get_parameters as ap_parameters, ) from pyroute2.netlink.rtnl.tcmsg.act_police import nla_plus_police from pyroute2.netlink.rtnl.tcmsg.common_act import get_tca_action, tca_act_prio def fix_msg(msg, kwarg): msg['info'] = htons(kwarg.get('protocol', 0) & 0xFFFF) | ( (kwarg.get('prio', 0) << 16) & 0xFFFF0000 ) def get_parameters(kwarg): ret = {'attrs': []} if kwarg.get('rate'): ret['attrs'].append(['TCA_U32_POLICE', ap_parameters(kwarg)]) elif kwarg.get('action'): ret['attrs'].append(['TCA_U32_ACT', get_tca_action(kwarg)]) ret['attrs'].append(['TCA_U32_CLASSID', kwarg['target']]) ret['attrs'].append(['TCA_U32_SEL', {'keys': kwarg['keys']}]) return ret class options(nla, nla_plus_police): nla_map = ( ('TCA_U32_UNSPEC', 'none'), ('TCA_U32_CLASSID', 'uint32'), ('TCA_U32_HASH', 'uint32'), ('TCA_U32_LINK', 
'hex'), ('TCA_U32_DIVISOR', 'uint32'), ('TCA_U32_SEL', 'u32_sel'), ('TCA_U32_POLICE', 'police'), ('TCA_U32_ACT', 'tca_act_prio'), ('TCA_U32_INDEV', 'hex'), ('TCA_U32_PCNT', 'u32_pcnt'), ('TCA_U32_MARK', 'u32_mark'), ) tca_act_prio = tca_act_prio class u32_sel(nla): fields = ( ('flags', 'B'), ('offshift', 'B'), ('nkeys', 'B'), ('__align', 'x'), ('offmask', '>H'), ('off', 'H'), ('offoff', 'h'), ('hoff', 'h'), ('hmask', '>I'), ) class u32_key(nlmsg): header = None fields = ( ('key_mask', '>I'), ('key_val', '>I'), ('key_off', 'i'), ('key_offmask', 'i'), ) def encode(self): ''' Key sample:: 'keys': ['0x0006/0x00ff+8', '0x0000/0xffc0+2', '0x5/0xf+0', '0x10/0xff+33'] => 00060000/00ff0000 + 8 05000000/0f00ffc0 + 0 00100000/00ff0000 + 32 ''' def cut_field(key, separator): ''' split a field from the end of the string ''' field = '0' pos = key.find(separator) new_key = key if pos > 0: field = key[pos + 1 :] new_key = key[:pos] return (new_key, field) # 'header' array to pack keys to header = [(0, 0) for i in range(256)] keys = [] # iterate keys and pack them to the 'header' for key in self['keys']: # TODO tags: filter (key, nh) = cut_field(key, '@') # FIXME: do not ignore nh (key, offset) = cut_field(key, '+') offset = int(offset, 0) # a little trick: if you provide /00ff+8, that # really means /ff+9, so we should take it into # account (key, mask) = cut_field(key, '/') if mask[:2] == '0x': mask = mask[2:] while True: if mask[:2] == '00': offset += 1 mask = mask[2:] else: break mask = '0x' + mask mask = int(mask, 0) value = int(key, 0) bits = 24 if mask == 0 and value == 0: key = self.u32_key(data=self.data) key['key_off'] = offset key['key_mask'] = mask key['key_val'] = value keys.append(key) for bmask in struct.unpack('4B', struct.pack('>I', mask)): if bmask > 0: bvalue = (value & (bmask << bits)) >> bits header[offset] = (bvalue, bmask) offset += 1 bits -= 8 # recalculate keys from 'header' key = None value = 0 mask = 0 for offset in range(256): (bvalue, bmask) = 
header[offset] if bmask > 0 and key is None: key = self.u32_key(data=self.data) key['key_off'] = offset key['key_mask'] = 0 key['key_val'] = 0 bits = 24 if key is not None and bits >= 0: key['key_mask'] |= bmask << bits key['key_val'] |= bvalue << bits bits -= 8 if bits < 0 or offset == 255: keys.append(key) key = None if not keys: raise ValueError('no keys specified') self['nkeys'] = len(keys) # FIXME: do not hardcode flags :) self['flags'] = 1 nla.encode(self) offset = self.offset + 20 # 4 bytes header + 16 bytes fields for key in keys: key.offset = offset key.encode() offset += 16 # keys haven't header self.length = offset - self.offset struct.pack_into('H', self.data, self.offset, offset - self.offset) def decode(self): nla.decode(self) offset = self.offset + 16 self['keys'] = [] nkeys = self['nkeys'] while nkeys: key = self.u32_key(data=self.data, offset=offset) key.decode() offset += 16 self['keys'].append(key) nkeys -= 1 class u32_mark(nla): fields = (('val', 'I'), ('mask', 'I'), ('success', 'I')) class u32_pcnt(nla): fields = (('rcnt', 'Q'), ('rhit', 'Q'), ('kcnts', 'Q')) pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/common.py000066400000000000000000000247441455030217500226010ustar00rootroot00000000000000import logging import os import re import struct from math import log as logfm from socket import inet_aton from pyroute2 import config from pyroute2.common import ( basestring, rate_suffixes, size_suffixes, time_suffixes, ) from pyroute2.netlink import nla, nla_string log = logging.getLogger(__name__) LINKLAYER_UNSPEC = 0 LINKLAYER_ETHERNET = 1 LINKLAYER_ATM = 2 ATM_CELL_SIZE = 53 ATM_CELL_PAYLOAD = 48 TCA_ACT_MAX_PRIO = 32 TIME_UNITS_PER_SEC = 1000000 try: with open('/proc/net/psched', 'r') as psched: [t2us, us2t, clock_res, wee] = [ int(i, 16) for i in psched.read().split() ] clock_factor = float(clock_res) / TIME_UNITS_PER_SEC tick_in_usec = float(t2us) / us2t * clock_factor except IOError as e: if config.uname[0] == 'Linux': log.warning("tcmsg: %s", e) 
log.warning("the tc subsystem functionality is limited") clock_res = 0 clock_factor = 1 tick_in_usec = 1 wee = 1000 _first_letter = re.compile('[^0-9]+') def get_hz(): if clock_res == 1000000: return wee else: return os.environ.get('HZ', 1000) def get_by_suffix(value, default, func): if not isinstance(value, basestring): return value pos = _first_letter.search(value) if pos is None: suffix = default else: pos = pos.start() value, suffix = value[:pos], value[pos:] value = int(value) return func(value, suffix) def get_size(size): return get_by_suffix(size, 'b', lambda x, y: x * size_suffixes[y]) def get_time(lat): return get_by_suffix( lat, 'ms', lambda x, y: (x * TIME_UNITS_PER_SEC) / time_suffixes[y] ) def get_rate(rate): return get_by_suffix(rate, 'bit', lambda x, y: (x * rate_suffixes[y]) / 8) def time2tick(time): # The code is ported from tc utility return int(time) * tick_in_usec def calc_xmittime(rate, size): # The code is ported from tc utility return int(time2tick(TIME_UNITS_PER_SEC * (float(size) / rate))) def percent2u32(pct): '''xlate a percentage to an uint32 value 0% -> 0 100% -> 2**32 - 1''' return int((2**32 - 1) * pct / 100) def red_eval_ewma(qmin, burst, avpkt): # The code is ported from tc utility wlog = 1 W = 0.5 a = float(burst) + 1 - float(qmin) / avpkt if a < 1: raise ValueError(f'wrong a = {a}') while wlog < 32: wlog += 1 W /= 2 if a <= (1 - pow(1 - W, burst)) / W: return wlog return -1 def red_eval_P(qmin, qmax, probability): # The code is ported from tc utility i = qmax - qmin if i <= 0: raise ValueError(f'qmax - qmin must be > 0 (got {i})') probability /= i for i in range(32): if probability > 1: break probability *= 2 return i def red_eval_idle_damping(Wlog, avpkt, bps): # The code is ported from tc utility xmit_time = calc_xmittime(bps, avpkt) lW = -logfm(1.0 - 1.0 / (1 << Wlog)) / xmit_time maxtime = 31.0 / lW sbuf = [] for clog in range(32): if maxtime / (1 << clog) < 512: break if clog >= 32: return -1, sbuf for i in range(255): 
sbuf.append((i << clog) * lW) if sbuf[i] > 31: sbuf[i] = 31 sbuf.append(31) return clog, sbuf def get_rate_parameters(kwarg): # rate and burst are required rate = get_rate(kwarg['rate']) burst = kwarg['burst'] # if peak, mtu is required peak = get_rate(kwarg.get('peak', 0)) mtu = kwarg.get('mtu', 0) if peak: assert mtu # limit OR latency is required limit = kwarg.get('limit', None) latency = get_time(kwarg.get('latency', None)) assert limit is not None or latency is not None # calculate limit from latency if limit is None: rate_limit = rate * float(latency) / TIME_UNITS_PER_SEC + burst if peak: peak_limit = peak * float(latency) / TIME_UNITS_PER_SEC + mtu if rate_limit > peak_limit: rate_limit = peak_limit limit = rate_limit return { 'rate': int(rate), 'mtu': mtu, 'buffer': calc_xmittime(rate, burst), 'limit': int(limit), } tc_flow_keys = { 'src': 0x01, 'dst': 0x02, 'proto': 0x04, 'proto-src': 0x08, 'proto-dst': 0x10, 'iif': 0x20, 'priority': 0x40, 'mark': 0x80, 'nfct': 0x0100, 'nfct-src': 0x0200, 'nfct-dst': 0x0400, 'nfct-proto-src': 0x0800, 'nfct-proto-dst': 0x1000, 'rt-classid': 0x2000, 'sk-uid': 0x4000, 'sk-gid': 0x8000, 'vlan-tag': 0x010000, 'rxhash': 0x020000, } def get_tca_keys(kwarg, name): if name not in kwarg: raise ValueError('Missing attribute: {0}'.format(name)) res = 0 keys = kwarg[name] if name == 'hash': keys = keys.split(',') for key, value in tc_flow_keys.items(): if key in keys: res |= value return res tc_flow_modes = {'map': 0, 'hash': 1} def get_tca_mode(kwarg): if 'mode' not in kwarg: raise ValueError('Missing attribute: mode') for key, value in tc_flow_modes.items(): if key == kwarg['mode']: return value raise ValueError('Unknown flow mode {0}'.format(kwarg['mode'])) def get_tca_ops(kwarg, attrs): xor_value = 0 mask_value = 0 addend_value = 0 rshift_value = 0 for elem in kwarg['ops']: op = elem['op'] num = elem['num'] if op == 'and': mask_value = num attrs.append(['TCA_FLOW_XOR', xor_value]) attrs.append(['TCA_FLOW_MASK', mask_value]) elif op 
== 'or': if mask_value == 0: mask_value = (~num + 1) & 0xFFFFFFFF xor_value = num attrs.append(['TCA_FLOW_XOR', xor_value]) attrs.append(['TCA_FLOW_MASK', mask_value]) elif op == 'xor': if mask_value == 0: mask_value = 0xFFFFFFFF xor_value = num attrs.append(['TCA_FLOW_XOR', xor_value]) attrs.append(['TCA_FLOW_MASK', mask_value]) elif op == 'rshift': rshift_value = num attrs.append(['TCA_FLOW_RSHIFT', rshift_value]) elif op == 'addend': # Check if an IP was specified if type(num) == str and len(num.split('.')) == 4: if num.startswith('-'): inverse = True else: inverse = False ip = num.strip('-') # Convert IP to uint32 ip = inet_aton(ip) ip = struct.unpack('>I', ip)[0] if inverse: ip = (~ip + 1) & 0xFFFFFFFF addend_value = ip else: addend_value = num attrs.append(['TCA_FLOW_ADDEND', addend_value]) tc_actions = { 'unspec': -1, # TC_ACT_UNSPEC 'ok': 0, # TC_ACT_OK 'reclassify': 1, # TC_ACT_RECLASSIFY 'shot': 2, # TC_ACT_SHOT 'drop': 2, # TC_ACT_SHOT 'pipe': 3, # TC_ACT_PIPE 'stolen': 4, # TC_ACT_STOLEN 'queued': 5, # TC_ACT_QUEUED 'repeat': 6, # TC_ACT_REPEAT 'redirect': 7, # TC_ACT_REDIRECT } class nla_plus_rtab(nla): class parms(nla): def adjust_size(self, size, mpu, linklayer): # The current code is ported from tc utility if size < mpu: size = mpu if linklayer == LINKLAYER_ATM: cells = size / ATM_CELL_PAYLOAD if size % ATM_CELL_PAYLOAD > 0: cells += 1 size = cells * ATM_CELL_SIZE return size def calc_rtab(self, kind): # The current code is ported from tc utility rtab = [] mtu = self.get('mtu', 0) or 1600 cell_log = self['%s_cell_log' % (kind)] mpu = self['%s_mpu' % (kind)] rate = self.get(kind, 'rate') # calculate cell_log if cell_log == 0: while (mtu >> cell_log) > 255: cell_log += 1 # fill up the table for i in range(256): size = self.adjust_size( (i + 1) << cell_log, mpu, LINKLAYER_ETHERNET ) rtab.append(calc_xmittime(rate, size)) self['%s_cell_align' % (kind)] = -1 self['%s_cell_log' % (kind)] = cell_log return rtab def encode(self): self.rtab = None self.ptab 
= None if self.get('rate', False): self.rtab = self.calc_rtab('rate') if self.get('peak', False): self.ptab = self.calc_rtab('peak') if self.get('ceil', False): self.ctab = self.calc_rtab('ceil') nla.encode(self) class rtab(nla_string): own_parent = True def encode(self): parms = ( self.parent.get_encoded('TCA_TBF_PARMS') or self.parent.get_encoded('TCA_HTB_PARMS') or self.parent.get_encoded('TCA_POLICE_TBF') ) if parms is not None: self.value = getattr(parms, self.__class__.__name__) self['value'] = struct.pack( 'I' * 256, *(int(x) for x in self.value) ) nla_string.encode(self) def decode(self): nla_string.decode(self) parms = ( self.parent.get_attr('TCA_TBF_PARMS') or self.parent.get_attr('TCA_HTB_PARMS') or self.parent.get_attr('TCA_POLICE_TBF') ) if parms is not None: rtab = struct.unpack( 'I' * (len(self['value']) / 4), self['value'] ) self.value = rtab setattr(parms, self.__class__.__name__, rtab) class ptab(rtab): pass class ctab(rtab): pass class stats2(nla): nla_map = ( ('TCA_STATS_UNSPEC', 'none'), ('TCA_STATS_BASIC', 'basic'), ('TCA_STATS_RATE_EST', 'rate_est'), ('TCA_STATS_QUEUE', 'queue'), ('TCA_STATS_APP', 'stats_app'), ) class basic(nla): fields = (('bytes', 'Q'), ('packets', 'I')) class rate_est(nla): fields = (('bps', 'I'), ('pps', 'I')) class queue(nla): fields = ( ('qlen', 'I'), ('backlog', 'I'), ('drops', 'I'), ('requeues', 'I'), ('overlimits', 'I'), ) class stats_app(nla.hex): pass pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/common_act.py000066400000000000000000000040171455030217500234170ustar00rootroot00000000000000from pyroute2.netlink import nla from pyroute2.netlink.rtnl.tcmsg import ( act_bpf, act_connmark, act_gact, act_mirred, act_police, act_skbedit, act_vlan, ) from pyroute2.netlink.rtnl.tcmsg.common import TCA_ACT_MAX_PRIO, stats2 plugins = { 'gact': act_gact, 'bpf': act_bpf, 'police': act_police, 'mirred': act_mirred, 'connmark': act_connmark, 'vlan': act_vlan, 'skbedit': act_skbedit, } class nla_plus_tca_act_opt(object): 
@staticmethod def get_act_options(self, *argv, **kwarg): kind = self.get_attr('TCA_ACT_KIND') if kind in plugins: return plugins[kind].options return self.hex class tca_act_prio(nla): nla_map = tuple( [('TCA_ACT_PRIO_%i' % x, 'tca_act') for x in range(TCA_ACT_MAX_PRIO)] ) class tca_act(nla, nla_plus_tca_act_opt): nla_map = ( ('TCA_ACT_UNSPEC', 'none'), ('TCA_ACT_KIND', 'asciiz'), ('TCA_ACT_OPTIONS', 'get_act_options'), ('TCA_ACT_INDEX', 'hex'), ('TCA_ACT_STATS', 'stats2'), ) stats2 = stats2 def get_act_parms(kwarg): if 'kind' not in kwarg: raise Exception('action requires "kind" parameter') if kwarg['kind'] in plugins: return plugins[kwarg['kind']].get_parameters(kwarg) else: return [] # All filters can use any act type, this is a generic parser for all def get_tca_action(kwarg): ret = {'attrs': []} act = kwarg.get('action', 'drop') # convert simple action='..' to kwarg style if isinstance(act, str): act = {'kind': 'gact', 'action': act} # convert single dict action to first entry in a list of actions acts = act if isinstance(act, list) else [act] for i, act in enumerate(acts, start=1): opt = { 'attrs': [ ['TCA_ACT_KIND', act['kind']], ['TCA_ACT_OPTIONS', get_act_parms(act)], ] } ret['attrs'].append(['TCA_ACT_PRIO_%d' % i, opt]) return ret pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/common_ematch.py000066400000000000000000000061141455030217500241110ustar00rootroot00000000000000from pyroute2.netlink.rtnl.tcmsg import em_cmp, em_ipset, em_meta plugins = { # 0: em_container, 1: em_cmp, # 2: em_nbyte, # 3: em_u32, 4: em_meta, # 5: em_text, # 6: em_vlan, # 7: em_canid, 8: em_ipset, # 9: em_ipt, } plugins_translate = { 'container': 0, 'cmp': 1, 'nbyte': 2, 'u32': 3, 'meta': 4, 'text': 5, 'vlan': 6, 'canid': 7, 'ipset': 8, 'ipt': 9, } TCF_EM_REL_END = 0 TCF_EM_REL_AND = 1 TCF_EM_REL_OR = 2 TCF_EM_INVERSE_MASK = 4 RELATIONS_DICT = { 'and': TCF_EM_REL_AND, 'AND': TCF_EM_REL_AND, '&&': TCF_EM_REL_AND, 'or': TCF_EM_REL_OR, 'OR': TCF_EM_REL_OR, '||': TCF_EM_REL_OR, } class 
nla_plus_tcf_ematch_opt(object): @staticmethod def parse_ematch_options(self, *argv, **kwarg): if 'kind' not in self: raise ValueError('ematch requires "kind" parameter') kind = self['kind'] if kind in plugins: ret = plugins[kind].data(data=argv[0]) ret.decode() return ret return self.hex def get_ematch_parms(kwarg): if 'kind' not in kwarg: raise ValueError('ematch requires "kind" parameter') if kwarg['kind'] in plugins: return plugins[kwarg['kind']].get_parameters(kwarg) else: return [] def get_tcf_ematches(kwarg): ret = {'attrs': []} matches = [] header = {'nmatches': 0, 'progid': 0} # Get the number of expressions expr_count = len(kwarg['match']) header['nmatches'] = expr_count # Load plugin and transfer data for i in range(0, expr_count): match = {'matchid': 0, 'kind': None, 'flags': 0, 'pad': 0, 'opt': None} cur_match = kwarg['match'][i] # Translate string kind into numeric kind kind = plugins_translate[cur_match['kind']] match['kind'] = kind data = plugins[kind].data() data.setvalue(cur_match) data.encode() # Add ematch encoded data match['opt'] = data.data # Safety check if i == expr_count - 1 and 'relation' in cur_match: raise ValueError('Could not set a relation to the last expression') if i < expr_count - 1 and 'relation' not in cur_match: raise ValueError( 'You must specify a relation for every expression' ' except the last one' ) # Set relation to flags if 'relation' in cur_match: relation = cur_match['relation'] if relation in RELATIONS_DICT: match['flags'] |= RELATIONS_DICT.get(relation) else: raise ValueError('Unknown relation {0}'.format(relation)) else: match['flags'] = TCF_EM_REL_END # Handle inverse flag if 'inverse' in cur_match: if cur_match['inverse']: match['flags'] |= TCF_EM_INVERSE_MASK # Append new match to list of matches matches.append(match) ret['attrs'].append(['TCA_EMATCH_TREE_HDR', header]) ret['attrs'].append(['TCA_EMATCH_TREE_LIST', matches]) return ret 
pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/em_cmp.py000066400000000000000000000053301455030217500225370ustar00rootroot00000000000000from pyroute2.netlink import nla TCF_EM_OPND_EQ = 0 TCF_EM_OPND_GT = 1 TCF_EM_OPND_LT = 2 OPERANDS_DICT = { TCF_EM_OPND_EQ: ('eq', '='), TCF_EM_OPND_GT: ('gt', '>'), TCF_EM_OPND_LT: ('lt', '<'), } # align types TCF_EM_ALIGN_U8 = 1 TCF_EM_ALIGN_U16 = 2 TCF_EM_ALIGN_U32 = 4 ALIGNS_DICT = { TCF_EM_ALIGN_U8: 'u8', TCF_EM_ALIGN_U16: 'u16', TCF_EM_ALIGN_U32: 'u32', } # layer types TCF_LAYER_LINK = 0 TCF_LAYER_NETWORK = 1 TCF_LAYER_TRANSPORT = 2 LAYERS_DICT = { TCF_LAYER_LINK: ('link', 'eth'), TCF_LAYER_NETWORK: ('network', 'ip'), TCF_LAYER_TRANSPORT: ('transport', 'tcp'), } # see tc_em_cmp.h TCF_EM_CMP_TRANS = 1 class data(nla): fields = ( ('val', 'I'), ('mask', 'I'), ('off', 'H'), ('align_flags', 'B'), ('layer_opnd', 'B'), ) def decode(self): self.header = None self.length = 24 nla.decode(self) self['align'] = self['align_flags'] & 0x0F self['flags'] = (self['align_flags'] & 0xF0) >> 4 self['layer'] = self['layer_opnd'] & 0x0F self['opnd'] = (self['layer_opnd'] & 0xF0) >> 4 del self['layer_opnd'] del self['align_flags'] # Perform translation for readability with nldecap self['layer'] = 'TCF_LAYER_{}'.format( LAYERS_DICT[self['layer']][0] ).upper() self['align'] = 'TCF_EM_ALIGN_{}'.format( ALIGNS_DICT[self['align']] ).upper() self['opnd'] = 'TCF_EM_OPND_{}'.format( OPERANDS_DICT[self['opnd']][0] ).upper() def encode(self): # Set default values self['layer_opnd'] = 0 self['align_flags'] = 0 # Build align_flags byte if 'trans' in self: self['align_flags'] = TCF_EM_CMP_TRANS << 4 for k, v in ALIGNS_DICT.items(): if self['align'].lower() == v: self['align_flags'] |= k break # Build layer_opnd byte if isinstance(self['opnd'], int): self['layer_opnd'] = self['opnd'] << 4 else: for k, v in OPERANDS_DICT.items(): if self['opnd'].lower() in v: self['layer_opnd'] = k << 4 break # Layer code if isinstance(self['layer'], int): self['layer_opnd'] |= 
self['layer'] else: for k, v in LAYERS_DICT.items(): if self['layer'].lower() in v: self['layer_opnd'] |= k break self['off'] = self.get('offset', 0) self['val'] = self.get('value', 0) nla.encode(self) # Patch NLA structure self['header']['length'] -= 4 self.data = self.data[4:] pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/em_ipset.py000066400000000000000000000033151455030217500231050ustar00rootroot00000000000000from pyroute2.netlink import nlmsg_base, nlmsg_encoder_generic # see em_ipset.c IPSET_DIM = { 'IPSET_DIM_ZERO': 0, 'IPSET_DIM_ONE': 1, 'IPSET_DIM_TWO': 2, 'IPSET_DIM_THREE': 3, 'IPSET_DIM_MAX': 6, } TCF_IPSET_MODE_DST = 0 TCF_IPSET_MODE_SRC = 2 def get_parameters(kwarg): ret = {'attrs': []} attrs_map = ( ('matchid', 'TCF_EM_MATCHID'), ('kind', 'TCF_EM_KIND'), ('flags', 'TCF_EM_FLAGS'), ('pad', 'TCF_EM_PAD'), ) for k, v in attrs_map: r = kwarg.get(k, None) if r is not None: ret['attrs'].append([v, r]) return ret class data(nlmsg_base, nlmsg_encoder_generic): fields = ( ('ip_set_index', 'H'), ('ip_set_dim', 'B'), ('ip_set_flags', 'B'), ) def encode(self): flags, dim = self._get_ip_set_parms() self['ip_set_index'] = self['index'] self['ip_set_dim'] = dim self['ip_set_flags'] = flags nlmsg_base.encode(self) def _get_ip_set_parms(self): flags = 0 dim = 0 mode = self['mode'] # Split to get dimension modes = mode.split(',') dim = len(modes) if dim > IPSET_DIM['IPSET_DIM_MAX']: raise ValueError( 'IPSet dimension could not be greater than {0}'.format( IPSET_DIM['IPSET_DIM_MAX'] ) ) for i in range(0, dim): if modes[i] == 'dst': flags |= TCF_IPSET_MODE_DST << i elif modes[i] == 'src': flags |= TCF_IPSET_MODE_SRC << i else: raise ValueError('Unknown IP set mode "{0}"'.format(modes[i])) return (flags, dim) pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/em_meta.py000066400000000000000000000134551455030217500227150ustar00rootroot00000000000000from struct import pack, unpack from pyroute2.netlink import nla TCF_EM_OPND_EQ = 0 TCF_EM_OPND_GT = 1 TCF_EM_OPND_LT = 2 OPERANDS_DICT 
= { TCF_EM_OPND_EQ: ('eq', '='), TCF_EM_OPND_GT: ('gt', '>'), TCF_EM_OPND_LT: ('lt', '<'), } # meta types TCF_META_TYPE_VAR = 0 TCF_META_TYPE_INT = 1 TCF_META_ID_MASK = 0x7FF TCF_META_TYPE_MASK = 0xF << 12 # see tc_em_meta.h META_ID = { 'value': 0, 'random': 1, 'loadavg_0': 2, 'loadavg_1': 3, 'loadavg_2': 4, 'dev': 5, 'priority': 6, 'protocol': 7, 'pkttype': 8, 'pktlen': 9, 'datalen': 10, 'maclen': 11, 'nfmark': 12, 'tcindex': 13, 'rtclassid': 14, 'rtiif': 15, 'sk_family': 16, 'sk_state': 17, 'sk_reuse': 18, 'sk_bound_if': 19, 'sk_refcnt': 20, 'sk_shutdown': 21, 'sk_proto': 22, 'sk_type': 23, 'sk_rcvbuf': 24, 'sk_rmem_alloc': 25, 'sk_wmem_alloc': 26, 'sk_omem_alloc': 27, 'sk_wmem_queued': 28, 'sk_rcv_qlen': 29, 'sk_snd_qlen': 30, 'sk_err_qlen': 31, 'sk_forward_allocs': 32, 'sk_sndbuf': 33, 'sk_allocs': 34, 'sk_route_caps': 35, 'sk_hash': 36, 'sk_lingertime': 37, 'sk_ack_backlog': 38, 'sk_max_ack_backlog': 39, 'sk_prio': 40, 'sk_rcvlowat': 41, 'sk_rcvtimeo': 42, 'sk_sndtimeo': 43, 'sk_sendmsg_off': 44, 'sk_write_pending': 45, 'vlan_tag': 46, 'rxhash': 47, } strings_meta = ('dev', 'sk_bound_if') class data(nla): nla_map = ( ('TCA_EM_META_UNSPEC', 'none'), ('TCA_EM_META_HDR', 'tca_em_meta_header_parse'), ('TCA_EM_META_LVALUE', 'uint32'), ('TCA_EM_META_RVALUE', 'hex'), ) def decode(self): self.header = None self.length = 24 nla.decode(self) # Patch to have a better view in nldecap attrs = dict(self['attrs']) rvalue = attrs.get('TCA_EM_META_RVALUE') meta_hdr = attrs.get('TCA_EM_META_HDR') meta_id = meta_hdr['id'] rvalue = bytearray.fromhex(rvalue.replace(':', '')) if meta_id == 'TCF_META_TYPE_VAR': rvalue.decode('utf-8') if meta_id == 'TCF_META_TYPE_INT': rvalue = unpack('> 12 if self['id'] == TCF_META_TYPE_VAR: self['id'] = 'TCF_META_TYPE_VAR' elif self['id'] == TCF_META_TYPE_INT: self['id'] = 'TCF_META_TYPE_INT' else: pass self['kind'] &= TCF_META_ID_MASK for k, v in META_ID.items(): if self['kind'] == v: self['kind'] = 'TCF_META_ID_{}'.format(k.upper()) fmt = 
'TCF_EM_OPND_{}'.format( OPERANDS_DICT[self['opnd']][0].upper() ) self['opnd'] = fmt del self['pad'] def encode(self): if not isinstance(self['kind'], str): raise ValueError("kind' keywords must be set!") kind = self['kind'].lower() if kind in strings_meta: self['id'] = TCF_META_TYPE_VAR else: self['id'] = TCF_META_TYPE_INT self['id'] <<= 12 for k, v in META_ID.items(): if kind == k: self['kind'] = self['id'] | v break if isinstance(self['opnd'], str): for k, v in OPERANDS_DICT.items(): if self['opnd'].lower() in v: self['opnd'] = k break # Perform sanity checks on 'shift' value if isinstance(self['shift'], str): # If it fails, it will raise a ValueError # which is what we want self['shift'] = int(self['shift']) if not 0 <= self['shift'] <= 255: raise ValueError( "'shift' value must be between" "0 and 255 included!" ) nla.encode(self) pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/sched_bpf.py000066400000000000000000000050131455030217500232120ustar00rootroot00000000000000''' ''' from socket import htons from pyroute2.netlink import NLA_F_NESTED, nla from pyroute2.netlink.rtnl import TC_H_ROOT from pyroute2.netlink.rtnl.tcmsg.act_police import ( get_parameters as ap_parameters, ) from pyroute2.netlink.rtnl.tcmsg.act_police import nla_plus_police from pyroute2.netlink.rtnl.tcmsg.common import TCA_ACT_MAX_PRIO, stats2 from pyroute2.netlink.rtnl.tcmsg.common_act import ( get_tca_action, nla_plus_tca_act_opt, ) from pyroute2.protocols import ETH_P_ALL parent = TC_H_ROOT TCA_BPF_FLAG_ACT_DIRECT = 1 def fix_msg(msg, kwarg): if 'info' not in kwarg: msg['info'] = htons(kwarg.pop('protocol', ETH_P_ALL) & 0xFFFF) | ( (kwarg.pop('prio', 0) << 16) & 0xFFFF0000 ) def get_parameters(kwarg): ret = {'attrs': []} attrs_map = ( # ('action', 'TCA_BPF_ACT'), # ('police', 'TCA_BPF_POLICE'), ('classid', 'TCA_BPF_CLASSID'), ('fd', 'TCA_BPF_FD'), ('name', 'TCA_BPF_NAME'), ('flags', 'TCA_BPF_FLAGS'), ) act = kwarg.get('action') if act: ret['attrs'].append(['TCA_BPF_ACT', 
get_tca_action(kwarg)]) if kwarg.get('rate'): ret['attrs'].append(['TCA_BPF_POLICE', ap_parameters(kwarg)]) kwarg['flags'] = kwarg.get('flags', 0) if kwarg.get('direct_action', False): kwarg['flags'] |= TCA_BPF_FLAG_ACT_DIRECT for k, v in attrs_map: r = kwarg.get(k, None) if r is not None: ret['attrs'].append([v, r]) return ret class options(nla, nla_plus_police): nla_map = ( ('TCA_BPF_UNSPEC', 'none'), ('TCA_BPF_ACT', 'bpf_act'), ('TCA_BPF_POLICE', 'police'), ('TCA_BPF_CLASSID', 'uint32'), ('TCA_BPF_OPS_LEN', 'uint32'), ('TCA_BPF_OPS', 'uint32'), ('TCA_BPF_FD', 'uint32'), ('TCA_BPF_NAME', 'asciiz'), ('TCA_BPF_FLAGS', 'uint32'), ) class bpf_act(nla): nla_flags = NLA_F_NESTED nla_map = tuple( [ ('TCA_ACT_PRIO_%i' % x, 'tca_act_bpf') for x in range(TCA_ACT_MAX_PRIO) ] ) class tca_act_bpf(nla, nla_plus_tca_act_opt): nla_map = ( ('TCA_ACT_UNSPEC', 'none'), ('TCA_ACT_KIND', 'asciiz'), ('TCA_ACT_OPTIONS', 'get_act_options'), ('TCA_ACT_INDEX', 'hex'), ('TCA_ACT_STATS', 'get_stats2'), ) @staticmethod def get_stats2(self, *argv, **kwarg): return stats2 pyroute2-0.7.11/pyroute2/netlink/rtnl/tcmsg/sched_cake.py000066400000000000000000000310421455030217500233470ustar00rootroot00000000000000''' cake ++++ Usage: # Imports from pyroute2 import IPRoute # Add cake with 2048kbit of bandwidth capacity with IPRoute() as ipr: # Get interface index index = ipr.link_lookup(ifname='lo') ipr.tc('add', kind='cake', index=index, bandwidth='2048kbit') # Same with 15mbit of bandwidth capacity with IPRoute() as ipr: # Get interface index index = ipr.link_lookup(ifname='lo') ipr.tc('add', kind='cake', index=index, bandwidth='15mbit') # If you don't know the bandwidth capacity, use autorate with IPRoute() as ipr: # Get interface index index = ipr.link_lookup(ifname='lo') ipr.tc('add', kind='cake', index=index, bandwidth='unlimited', autorate=True) # If you want to tune ATM properties use: # atm_mode=False # For no ATM tuning # atm_mode=True # For ADSL tuning # atm_mode='ptm' # For VDSL2 tuning 
with IPRoute() as ipr: # Get interface index index = ipr.link_lookup(ifname='lo') ipr.tc('add', kind='cake', index=index, bandwidth='unlimited', autorate=True, atm_mode=True) # Complex example which has no-sense with IPRoute() as ipr: # Get interface index index = ipr.link_lookup(ifname='lo') ipr.tc('add', kind='cake', index=index, bandwidth='unlimited', autorate=True, nat=True, rtt='interplanetary', target=10000, flow_mode='dsthost', diffserv_mode='precedence', fwmark=0x1337) NOTES: Here is the list of all supported options with their values: - ack_filter: False, True or 'aggressive' (False by default) - atm_mode: False, True or 'ptm' (False by default) - autorate: False or True (False by default) - bandwidth: any integer, 'N[kbit|mbit|gbit]' or 'unlimited' - diffserv_mode: 'diffserv3', 'diffserv4', 'diffserv8', 'besteffort', 'precedence' ('diffserv3' by default) - ingress: False or True (False by default) - overhead: any integer between -64 and 256 inclusive (0 by default) - flow_mode: 'flowblind', 'srchost', 'dsthost', 'hosts', 'flows', 'dual-srchost', 'dual-dsthost', 'triple-isolate' ('triple-isolate' by default) - fwmark: any integer (0 by default) - memlimit: any integer (by default, calculated based on the bandwidth and RTT settings) - mpu: any integer between 0 and 256 inclusive (0 by default) - nat: False or True (False by default) - raw: False or True (True by default) - rtt: any integer or 'datacentre', 'lan', 'metro', 'regional', 'internet', 'oceanic', 'satellite', 'interplanetary' ('internet' by default) - split_gso: False or True (True by default) - target: any integer (5000 by default) - wash: False or True (False by default) ''' from pyroute2.netlink import nla from pyroute2.netlink.rtnl import TC_H_ROOT # Defines from pkt_sched.h CAKE_FLOW_NONE = 0 CAKE_FLOW_SRC_IP = 1 CAKE_FLOW_DST_IP = 2 CAKE_FLOW_HOSTS = 3 CAKE_FLOW_FLOWS = 4 CAKE_FLOW_DUAL_SRC = 5 CAKE_FLOW_DUAL_DST = 6 CAKE_FLOW_TRIPLE = 7 CAKE_DIFFSERV_DIFFSERV3 = 0 CAKE_DIFFSERV_DIFFSERV4 = 
def convert_flowmode(value):
    '''Translate a cake flow isolation mode name into its numeric value.

    :param value: one of 'flowblind', 'srchost', 'dsthost', 'hosts',
        'flows', 'dual-srchost', 'dual-dsthost', 'triple-isolate'
        (case-insensitive)
    :returns: the matching CAKE_FLOW_* constant
    :raises ValueError: on an unknown mode name
    '''
    modes = {
        'flowblind': CAKE_FLOW_NONE,
        'srchost': CAKE_FLOW_SRC_IP,
        'dsthost': CAKE_FLOW_DST_IP,
        'hosts': CAKE_FLOW_HOSTS,
        'flows': CAKE_FLOW_FLOWS,
        'dual-srchost': CAKE_FLOW_DUAL_SRC,
        'dual-dsthost': CAKE_FLOW_DUAL_DST,
        'triple-isolate': CAKE_FLOW_TRIPLE,
    }
    res = modes.get(value.lower())
    # CAKE_FLOW_NONE is 0, which is falsy: the previous `if res:` test
    # wrongly rejected 'flowblind'.  Compare against None instead, the
    # same way convert_diffserv() does.
    if res is not None:
        return res
    raise ValueError(
        'Invalid flow mode specified! See tc-cake man '
        'page for valid values.'
    )
class stats2(nla):
    '''TCA_STATS2 decoder for cake, including the cake-specific
    TCA_STATS_APP payload (global stats plus per-tin stats).

    The field layouts must match the kernel structures byte-for-byte;
    do not reorder entries.
    '''

    nla_map = (
        ('TCA_STATS_UNSPEC', 'none'),
        ('TCA_STATS_BASIC', 'basic'),
        ('TCA_STATS_RATE_EST', 'rate_est'),
        ('TCA_STATS_QUEUE', 'queue'),
        ('TCA_STATS_APP', 'stats_app'),
    )

    class basic(nla):
        # struct gnet_stats_basic
        fields = (('bytes', 'Q'), ('packets', 'I'))

    class rate_est(nla):
        # struct gnet_stats_rate_est
        fields = (('bps', 'I'), ('pps', 'I'))

    class queue(nla):
        # struct gnet_stats_queue
        fields = (
            ('qlen', 'I'),
            ('backlog', 'I'),
            ('drops', 'I'),
            ('requeues', 'I'),
            ('overlimits', 'I'),
        )

    class stats_app(nla):
        '''Cake-specific TCA_CAKE_STATS_* attributes.'''

        nla_map = (
            ('__TCA_CAKE_STATS_INVALID', 'none'),
            ('TCA_CAKE_STATS_PAD', 'hex'),
            ('TCA_CAKE_STATS_CAPACITY_ESTIMATE64', 'uint64'),
            ('TCA_CAKE_STATS_MEMORY_LIMIT', 'uint32'),
            ('TCA_CAKE_STATS_MEMORY_USED', 'uint32'),
            ('TCA_CAKE_STATS_AVG_NETOFF', 'uint32'),
            ('TCA_CAKE_STATS_MAX_NETLEN', 'uint32'),
            ('TCA_CAKE_STATS_MAX_ADJLEN', 'uint32'),
            ('TCA_CAKE_STATS_MIN_NETLEN', 'uint32'),
            ('TCA_CAKE_STATS_MIN_ADJLEN', 'uint32'),
            ('TCA_CAKE_STATS_TIN_STATS', 'tca_parse_tins'),
            ('TCA_CAKE_STATS_DEFICIT', 'uint32'),
            ('TCA_CAKE_STATS_COBALT_COUNT', 'uint32'),
            ('TCA_CAKE_STATS_DROPPING', 'uint32'),
            ('TCA_CAKE_STATS_DROP_NEXT_US', 'uint32'),
            ('TCA_CAKE_STATS_P_DROP', 'uint32'),
            ('TCA_CAKE_STATS_BLUE_TIMER_US', 'uint32'),
        )

        class tca_parse_tins(nla):
            # one nested stats block per tin (traffic class), up to
            # TCA_CAKE_MAX_TINS entries
            nla_map = tuple(
                [
                    ('TCA_CAKE_TIN_STATS_%i' % x, 'tca_parse_tin_stats')
                    for x in range(TCA_CAKE_MAX_TINS)
                ]
            )

            class tca_parse_tin_stats(nla):
                '''Per-tin counters (TCA_CAKE_TIN_STATS_*).'''

                nla_map = (
                    ('__TCA_CAKE_TIN_STATS_INVALID', 'none'),
                    ('TCA_CAKE_TIN_STATS_PAD', 'hex'),
                    ('TCA_CAKE_TIN_STATS_SENT_PACKETS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_SENT_BYTES64', 'uint64'),
                    ('TCA_CAKE_TIN_STATS_DROPPED_PACKETS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_DROPPED_BYTES64', 'uint64'),
                    ('TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64', 'uint64'),
                    ('TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64', 'uint64'),
                    ('TCA_CAKE_TIN_STATS_BACKLOG_PACKETS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_BACKLOG_BYTES', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_THRESHOLD_RATE64', 'uint64'),
                    ('TCA_CAKE_TIN_STATS_TARGET_US', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_INTERVAL_US', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_WAY_MISSES', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_WAY_COLLISIONS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_PEAK_DELAY_US', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_AVG_DELAY_US', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_BASE_DELAY_US', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_SPARSE_FLOWS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_BULK_FLOWS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_MAX_SKBLEN', 'uint32'),
                    ('TCA_CAKE_TIN_STATS_FLOW_QUANTUM', 'uint32'),
                )
def get_parameters(kwarg):
    '''Build the choke qdisc TCA_OPTIONS from keyword arguments.

    Required keys: 'limit' and 'bandwith' (NOTE: the key is
    intentionally misspelled without the second 'd' -- it is part of
    the public API and must not be renamed).

    Optional keys: 'min', 'max', 'avpkt', 'burst', 'probability',
    'ecn'.  Missing RED thresholds are derived from 'limit' the same
    way iproute2 does.

    :raises KeyError: when a required key is missing
    :raises Exception: when the RED parameters are inconsistent or the
        EWMA/probability/idle-damping computation fails
    '''
    # The code is ported from iproute2
    avpkt = 1000
    probability = 0.02
    opt = {
        'limit': kwarg['limit'],  # required
        'qth_min': kwarg.get('min', 0),
        'qth_max': kwarg.get('max', 0),
        'Wlog': 0,
        'Plog': 0,
        'Scell_log': 0,
        'flags': 1 if kwarg.get('ecn') else 0,
    }

    rate = get_rate(kwarg['bandwith'])  # required
    burst = kwarg.get('burst', 0)
    avpkt = get_size(kwarg.get('avpkt', 1000))
    probability = kwarg.get('probability', 0.02)

    # derive missing thresholds from the queue limit (iproute2 defaults)
    if not opt['qth_max']:
        opt['qth_max'] = opt['limit'] // 4
    if not opt['qth_min']:
        opt['qth_min'] = opt['qth_max'] // 3
    if not burst:
        burst = (2 * opt['qth_min'] + opt['qth_max']) // 3

    # sanity checks on the resulting RED thresholds
    if opt['qth_max'] > opt['limit']:
        raise Exception('max is larger than limit')
    if opt['qth_min'] >= opt['qth_max']:
        raise Exception('min is not smaller than max')

    # Wlog: EWMA constant (thresholds are in packets, helpers expect bytes)
    opt['Wlog'] = red_eval_ewma(opt['qth_min'] * avpkt, burst, avpkt)
    if opt['Wlog'] < 0:
        raise Exception('failed to calculate EWMA')
    elif opt['Wlog'] > 10:
        log.warning('choke: burst %s seems to be too large' % burst)

    # Plog: drop probability slope
    opt['Plog'] = red_eval_P(
        opt['qth_min'] * avpkt, opt['qth_max'] * avpkt, probability
    )
    if opt['Plog'] < 0:
        raise Exception('choke: failed to calculate probability')

    # Scell_log, stab: idle-time damping table
    opt['Scell_log'], stab = red_eval_idle_damping(opt['Wlog'], avpkt, rate)
    if opt['Scell_log'] < 0:
        raise Exception('choke: failed to calculate idle damping table')

    return {
        'attrs': [
            ['TCA_CHOKE_PARMS', opt],
            ['TCA_CHOKE_STAB', stab],
            # max_P is a fixed-point probability, scaled by 2**32
            ['TCA_CHOKE_MAX_P', int(probability * pow(2, 32))],
        ]
    }
def get_parameters(kwarg):
    '''Build the codel qdisc TCA_OPTIONS from 'cdl_*' keyword arguments.

    Recognized keys: 'cdl_limit', 'cdl_ecn' (passed through as-is) and
    'cdl_target', 'cdl_ce_threshold', 'cdl_interval' (converted with
    get_time).  The 'cdl_' prefix is stripped to form the matching
    TCA_CODEL_* attribute name.
    '''
    #
    # ACHTUNG: experimental code
    #
    # Parameters naming scheme WILL be changed in next releases
    #
    ret = {'attrs': []}
    # map each accepted key to its value transformation
    transform = {
        'cdl_limit': lambda x: x,
        'cdl_ecn': lambda x: x,
        'cdl_target': get_time,
        'cdl_ce_threshold': get_time,
        'cdl_interval': get_time,
    }
    for key in transform:
        if key in kwarg:
            # the 'cdl_' prefix is a temporary namespacing measure
            log.warning(
                'codel parameters naming will be changed '
                'in next releases (%s)' % key
            )
            ret['attrs'].append(
                ['TCA_CODEL_%s' % key[4:].upper(), transform[key](kwarg[key])]
            )
    return ret
def stats2(msg, *argv, **kwarg):
    '''Pick the TCA_STATS2 decoder matching the message type.

    Qdisc messages carry fq_codel qdisc statistics, every other
    message type carries class statistics.
    '''
    is_qdisc = msg['header']['type'] in (RTM_NEWQDISC, RTM_DELQDISC)
    return qdisc_stats2 if is_qdisc else class_stats2
def get_parameters(kwarg):
    '''Build the hfsc qdisc options: the id of the default class.

    Accepts either 'default' or 'defcls' (the former wins); falls
    back to 0x10 when neither is given.
    '''
    fallback = kwarg.get('defcls', 0x10)
    default_class = kwarg.get('default', fallback)
    # only the lower 16 bits are meaningful for a minor class id
    return {'defcls': default_class & 0xFFFF}
example with htb qdisc, lets assume eth0 == 2:: # u32 --> +--> htb 1:10 --> sfq 10:0 # | | # | | # eth0 -- htb 1:0 -- htb 1:1 # | | # | | # u32 --> +--> htb 1:20 --> sfq 20:0 eth0 = 2 # add root queue 1:0 ip.tc("add", "htb", eth0, 0x10000, default=0x200000) # root class 1:1 ip.tc("add-class", "htb", eth0, 0x10001, parent=0x10000, rate="256kbit", burst=1024 * 6) # two branches: 1:10 and 1:20 ip.tc("add-class", "htb", eth0, 0x10010, parent=0x10001, rate="192kbit", burst=1024 * 6, prio=1) ip.tc("add-class", "htb", eht0, 0x10020, parent=0x10001, rate="128kbit", burst=1024 * 6, prio=2) # two leaves: 10:0 and 20:0 ip.tc("add", "sfq", eth0, 0x100000, parent=0x10010, perturb=10) ip.tc("add", "sfq", eth0, 0x200000, parent=0x10020, perturb=10) # two filters: one to load packets into 1:10 and the # second to 1:20 ip.tc("add-filter", "u32", eth0, parent=0x10000, prio=10, protocol=socket.AF_INET, target=0x10010, keys=["0x0006/0x00ff+8", "0x0000/0xffc0+2"]) ip.tc("add-filter", "u32", eth0, parent=0x10000, prio=10, protocol=socket.AF_INET, target=0x10020, keys=["0x5/0xf+0", "0x10/0xff+33"]) ''' from pyroute2.netlink import nla from pyroute2.netlink.rtnl import RTM_DELQDISC, RTM_NEWQDISC, TC_H_ROOT from pyroute2.netlink.rtnl.tcmsg.common import ( calc_xmittime, get_hz, get_rate, nla_plus_rtab, stats2, ) parent = TC_H_ROOT def get_class_parameters(kwarg): prio = kwarg.get('prio', 0) mtu = kwarg.get('mtu', 1600) mpu = kwarg.get('mpu', 0) overhead = kwarg.get('overhead', 0) quantum = kwarg.get('quantum', 0) rate = get_rate(kwarg.get('rate', None)) ceil = get_rate(kwarg.get('ceil', 0)) or rate burst = ( kwarg.get('burst', None) or kwarg.get('maxburst', None) or kwarg.get('buffer', None) ) if rate is not None: if burst is None: burst = rate / get_hz() + mtu burst = calc_xmittime(rate, burst) cburst = ( kwarg.get('cburst', None) or kwarg.get('cmaxburst', None) or kwarg.get('cbuffer', None) ) if ceil is not None: if cburst is None: cburst = ceil / get_hz() + mtu cburst = 
def get_parameters(kwarg):
    '''Assemble the TCA_HTB_INIT options for the htb qdisc.

    Recognized keys: 'r2q' (rate-to-quantum divisor, default 0xA),
    'version' (default 3) and 'default' (default class id, 0x10).
    '''
    init = {
        'debug': 0,
        'defcls': kwarg.get('default', 0x10),
        'direct_pkts': 0,
        'rate2quantum': kwarg.get('r2q', 0xA),
        'version': kwarg.get('version', 3),
    }
    return {'attrs': [['TCA_HTB_INIT', init]]}
def get_parameters(kwarg):
    '''Build the netem qdisc options from keyword arguments.

    Base fields: 'delay', 'jitter' (microseconds, converted to ticks),
    'limit' (packets), 'loss', 'duplicate' (percentages), 'gap'.
    Optional nested attributes: correlations (TCA_NETEM_CORR),
    reordering (TCA_NETEM_REORDER), corruption (TCA_NETEM_CORRUPT)
    and rate shaping (TCA_NETEM_RATE).

    :raises Exception: when a dependent option is given without its
        prerequisite (e.g. a correlation without the base value)
    '''
    delay = time2tick(kwarg.get('delay', 0))  # in microsecond
    limit = kwarg.get('limit', 1000)  # fifo limit (packets) see netem.c:230
    loss = percent2u32(kwarg.get('loss', 0))  # int percentage
    gap = kwarg.get('gap', 0)
    duplicate = percent2u32(kwarg.get('duplicate', 0))  # int percentage
    jitter = time2tick(kwarg.get('jitter', 0))  # in microsecond

    opts = {
        'delay': delay,
        'limit': limit,
        'loss': loss,
        'gap': gap,
        'duplicate': duplicate,
        'jitter': jitter,
        'attrs': [],
    }

    # correlation (delay, loss, duplicate)
    delay_corr = percent2u32(kwarg.get('delay_corr', 0))
    loss_corr = percent2u32(kwarg.get('loss_corr', 0))
    dup_corr = percent2u32(kwarg.get('dup_corr', 0))
    if delay_corr or loss_corr or dup_corr:
        # delay_corr requires that both jitter and delay are != 0
        if delay_corr and not (delay and jitter):
            raise Exception(
                'delay correlation requires delay' ' and jitter to be set'
            )
        # loss correlation and loss
        if loss_corr and not loss:
            raise Exception('loss correlation requires loss to be set')
        # duplicate correlation and duplicate
        if dup_corr and not duplicate:
            raise Exception(
                'duplicate correlation requires ' 'duplicate to be set'
            )

        opts['attrs'].append(
            [
                'TCA_NETEM_CORR',
                {
                    'delay_corr': delay_corr,
                    'loss_corr': loss_corr,
                    'dup_corr': dup_corr,
                },
            ]
        )

    # reorder (probability, correlation)
    prob_reorder = percent2u32(kwarg.get('prob_reorder', 0))
    corr_reorder = percent2u32(kwarg.get('corr_reorder', 0))
    if prob_reorder != 0:
        # gap defaults to 1 if equal to 0
        if gap == 0:
            opts['gap'] = gap = 1
        opts['attrs'].append(
            [
                'TCA_NETEM_REORDER',
                {'prob_reorder': prob_reorder, 'corr_reorder': corr_reorder},
            ]
        )
    else:
        if gap != 0:
            raise Exception('gap can only be set when prob_reorder is set')
        elif corr_reorder != 0:
            raise Exception(
                'corr_reorder can only be set when ' 'prob_reorder is set'
            )

    # corrupt (probability, correlation)
    prob_corrupt = percent2u32(kwarg.get('prob_corrupt', 0))
    corr_corrupt = percent2u32(kwarg.get('corr_corrupt', 0))
    if prob_corrupt:
        opts['attrs'].append(
            [
                'TCA_NETEM_CORRUPT',
                {'prob_corrupt': prob_corrupt, 'corr_corrupt': corr_corrupt},
            ]
        )
    elif corr_corrupt != 0:
        raise Exception(
            'corr_corrupt can only be set when ' 'prob_corrupt is set'
        )

    # rate (rate, packet_overhead, cell_size, cell_overhead)
    rate = get_rate(kwarg.get('rate', None))
    packet_overhead = kwarg.get('packet_overhead', 0)
    cell_size = kwarg.get('cell_size', 0)
    cell_overhead = kwarg.get('cell_overhead', 0)
    if rate is not None:
        opts['attrs'].append(
            [
                'TCA_NETEM_RATE',
                {
                    'rate': rate,
                    'packet_overhead': packet_overhead,
                    'cell_size': cell_size,
                    'cell_overhead': cell_overhead,
                },
            ]
        )
    elif packet_overhead != 0 or cell_size != 0 or cell_overhead != 0:
        raise Exception(
            'packet_overhead, cell_size and cell_overhead'
            'can only be set when rate is set'
        )

    # TODO
    # delay distribution (dist_size, dist_data)
    return opts
def get_parameters(kwarg):
    '''Build the plug qdisc options.

    'action' is looked up in the module-level ``actions`` mapping by
    its symbolic name (e.g. 'TCQ_PLUG_BUFFER'); unknown or missing
    names fall back to 0 (TCQ_PLUG_BUFFER).  'limit' defaults to 0.
    '''
    return {
        'action': actions.get(kwarg.get('action', 0), 0),
        'limit': kwarg.get('limit', 0),
    }
def get_parameters(kwarg):
    '''Normalize sfq qdisc keyword arguments in place and return them.

    Plain sfq keys: 'quantum', 'perturb'/'perturb_period', 'limit'.
    When 'redflowlimit' is given, the RED-on-sfq parameters are
    derived the same way iproute2 does: 'min', 'max', 'avpkt',
    'burst', 'probability', 'ecn', 'harddrop'.

    :raises ValueError: on inconsistent RED thresholds or when the
        EWMA/probability computation fails
    '''
    kwarg['quantum'] = get_size(kwarg.get('quantum', 0))
    kwarg['perturb_period'] = kwarg.get('perturb', 0) or kwarg.get(
        'perturb_period', 0
    )
    limit = kwarg['limit'] = kwarg.get('limit', 0) or kwarg.get(
        'redflowlimit', 0
    )
    qth_min = kwarg.get('min', 0)
    qth_max = kwarg.get('max', 0)
    avpkt = kwarg.get('avpkt', 1000)
    probability = kwarg.get('probability', 0.02)
    ecn = kwarg.get('ecn', False)
    harddrop = kwarg.get('harddrop', False)
    kwarg['flags'] = kwarg.get('flags', 0)
    if ecn:
        kwarg['flags'] |= TC_RED_ECN
    if harddrop:
        kwarg['flags'] |= TC_RED_HARDDROP
    if kwarg.get('redflowlimit'):
        # derive missing thresholds from the limit (iproute2 defaults)
        qth_max = qth_max or limit / 4
        qth_min = qth_min or qth_max / 3
        # bug fix: 'burst' is optional -- the previous direct
        # kwarg['burst'] access raised KeyError when it was not given
        kwarg['burst'] = kwarg.get('burst', 0) or (2 * qth_min + qth_max) / (
            3 * avpkt
        )
        if limit <= qth_max:
            raise ValueError('limit must be > qth_max')
        if qth_max <= qth_min:
            raise ValueError('qth_max must be > qth_min')
        kwarg['qth_min'] = qth_min
        kwarg['qth_max'] = qth_max
        kwarg['Wlog'] = red_eval_ewma(qth_min, kwarg['burst'], avpkt)
        kwarg['Plog'] = red_eval_P(qth_min, qth_max, probability)
        if kwarg['Wlog'] < 0:
            raise ValueError('Wlog must be > 0')
        if kwarg['Plog'] < 0:
            raise ValueError('Plog must be > 0')
        # fixed-point probability, scaled by 2**23
        kwarg['max_P'] = int(probability * pow(2, 23))
    return kwarg
def get_parameters(kwarg):
    '''Build the tbf qdisc options.

    All the rate/burst/limit computation is delegated to
    get_rate_parameters(); the RTAB placeholder (True) is filled in
    later by the nla_plus_rtab encoder.
    '''
    parms = get_rate_parameters(kwarg)
    # fill parameters
    return {'attrs': [['TCA_TBF_PARMS', parms], ['TCA_TBF_RTAB', True]]}
''' return None class options(nla.hex): ''' The `TCA_OPTIONS` struct, by default not decoded. ''' pass class stats(nla.hex): ''' The struct to decode `TCA_XSTATS`. ''' pass class stats2(common.stats2): ''' The struct to decode `TCA_STATS2`. ''' pass pyroute2-0.7.11/pyroute2/netlink/taskstats/000077500000000000000000000000001455030217500206515ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/taskstats/__init__.py000066400000000000000000000113631455030217500227660ustar00rootroot00000000000000''' TaskStats module ================ All that you should know about TaskStats, is that you should not use it. But if you have to, ok:: import os from pyroute2 import TaskStats ts = TaskStats() ts.get_pid_stat(os.getpid()) It is not implemented normally yet, but some methods are already usable. ''' from pyroute2.netlink import NLM_F_REQUEST, genlmsg, nla, nla_struct from pyroute2.netlink.generic import GenericNetlinkSocket TASKSTATS_CMD_UNSPEC = 0 # Reserved TASKSTATS_CMD_GET = 1 # user->kernel request/get-response TASKSTATS_CMD_NEW = 2 class tcmd(genlmsg): nla_map = ( ('TASKSTATS_CMD_ATTR_UNSPEC', 'none'), ('TASKSTATS_CMD_ATTR_PID', 'uint32'), ('TASKSTATS_CMD_ATTR_TGID', 'uint32'), ('TASKSTATS_CMD_ATTR_REGISTER_CPUMASK', 'asciiz'), ('TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK', 'asciiz'), ) class tstats(nla_struct): fields = ( ('version', 'H'), # 2 ('ac_exitcode', 'I'), # 4 ('ac_flag', 'B'), # 1 ('ac_nice', 'B'), # 1 --- 10 ('cpu_count', 'Q'), # 8 ('cpu_delay_total', 'Q'), # 8 ('blkio_count', 'Q'), # 8 ('blkio_delay_total', 'Q'), # 8 ('swapin_count', 'Q'), # 8 ('swapin_delay_total', 'Q'), # 8 ('cpu_run_real_total', 'Q'), # 8 ('cpu_run_virtual_total', 'Q'), # 8 ('ac_comm', '32s'), # 32 +++ 112 ('ac_sched', 'B'), # 1 ('__ac_pad', '3x'), # 3 # (the ac_uid field is aligned(8), so we add more padding) ('__implicit_pad', '4x'), # 4 ('ac_uid', 'I'), # 4 +++ 120 ('ac_gid', 'I'), # 4 ('ac_pid', 'I'), # 4 ('ac_ppid', 'I'), # 4 ('ac_btime', 'I'), # 4 +++ 136 ('ac_etime', 'Q'), # 8 
+++ 144 ('ac_utime', 'Q'), # 8 ('ac_stime', 'Q'), # 8 ('ac_minflt', 'Q'), # 8 ('ac_majflt', 'Q'), # 8 ('coremem', 'Q'), # 8 ('virtmem', 'Q'), # 8 ('hiwater_rss', 'Q'), # 8 ('hiwater_vm', 'Q'), # 8 ('read_char', 'Q'), # 8 ('write_char', 'Q'), # 8 ('read_syscalls', 'Q'), # 8 ('write_syscalls', 'Q'), # 8 ('read_bytes', 'Q'), # ... ('write_bytes', 'Q'), ('cancelled_write_bytes', 'Q'), ('nvcsw', 'Q'), ('nivcsw', 'Q'), ('ac_utimescaled', 'Q'), ('ac_stimescaled', 'Q'), ('cpu_scaled_run_real_total', 'Q'), ) def decode(self): nla_struct.decode(self) command = self['ac_comm'] if isinstance(command, bytes): command = command.decode('utf-8') self['ac_comm'] = command[: command.find('\0')] class taskstatsmsg(genlmsg): nla_map = ( ('TASKSTATS_TYPE_UNSPEC', 'none'), ('TASKSTATS_TYPE_PID', 'uint32'), ('TASKSTATS_TYPE_TGID', 'uint32'), ('TASKSTATS_TYPE_STATS', 'stats'), ('TASKSTATS_TYPE_AGGR_PID', 'aggr_pid'), ('TASKSTATS_TYPE_AGGR_TGID', 'aggr_tgid'), ) class stats(tstats): pass # FIXME: optimize me! class aggr_id(nla): nla_map = ( ('TASKSTATS_TYPE_UNSPEC', 'none'), ('TASKSTATS_TYPE_PID', 'uint32'), ('TASKSTATS_TYPE_TGID', 'uint32'), ('TASKSTATS_TYPE_STATS', 'stats'), ) class stats(tstats): pass class aggr_pid(aggr_id): pass class aggr_tgid(aggr_id): pass class TaskStats(GenericNetlinkSocket): def bind(self): GenericNetlinkSocket.bind(self, 'TASKSTATS', taskstatsmsg) def get_pid_stat(self, pid): ''' Get taskstats for a process. Pid should be an integer. ''' msg = tcmd() msg['cmd'] = TASKSTATS_CMD_GET msg['version'] = 1 msg['attrs'].append(['TASKSTATS_CMD_ATTR_PID', pid]) return self.nlm_request(msg, self.prid, msg_flags=NLM_F_REQUEST) def _register_mask(self, cmd, mask): msg = tcmd() msg['cmd'] = TASKSTATS_CMD_GET msg['version'] = 1 msg['attrs'].append([cmd, mask]) # there is no response to this request self.put(msg, self.prid, msg_flags=NLM_F_REQUEST) def register_mask(self, mask): ''' Start the accounting for a processors by a mask. 
Mask is a string, e.g.:: 0,1 -- first two CPUs 0-4,6-10 -- CPUs from 0 to 4 and from 6 to 10 Though the kernel has a procedure, that cleans up accounting, when it is not used, it is recommended to run deregister_mask() before process exit. ''' self._register_mask('TASKSTATS_CMD_ATTR_REGISTER_CPUMASK', mask) def deregister_mask(self, mask): ''' Stop the accounting. ''' self._register_mask('TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK', mask) pyroute2-0.7.11/pyroute2/netlink/uevent/000077500000000000000000000000001455030217500201365ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netlink/uevent/__init__.py000066400000000000000000000021101455030217500222410ustar00rootroot00000000000000from pyroute2.netlink import NETLINK_KOBJECT_UEVENT, nlmsg from pyroute2.netlink.nlsocket import Marshal, NetlinkSocket class ueventmsg(nlmsg): pass class MarshalUevent(Marshal): def parse(self, data, seq=None, callback=None): ret = ueventmsg() ret['header']['sequence_number'] = 0 data = data.split(b'\x00') wtf = [] ret['header']['message'] = data[0].decode('utf-8') ret['header']['unparsed'] = b'' for line in data[1:]: if line.find(b'=') <= 0: wtf.append(line) else: if wtf: ret['header']['unparsed'] = b'\x00'.join(wtf) wtf = [] line = line.decode('utf-8').split('=') ret[line[0]] = '='.join(line[1:]) del ret['value'] return [ret] class UeventSocket(NetlinkSocket): def __init__(self): super(UeventSocket, self).__init__(NETLINK_KOBJECT_UEVENT) self.marshal = MarshalUevent() def bind(self): return super(UeventSocket, self).bind(groups=-1) pyroute2-0.7.11/pyroute2/netns/000077500000000000000000000000001455030217500163135ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/netns/__init__.py000066400000000000000000000247651455030217500204420ustar00rootroot00000000000000''' Basic network namespace management ================================== Pyroute2 provides basic namespaces management support. Here's a quick overview of typical netns tasks and related pyroute2 tools. 
Move an interface to a namespace -------------------------------- Though this task is managed not via `netns` module, it should be mentioned here as well. To move an interface to a netns, one should provide IFLA_NET_NS_FD nla in a set link RTNL request. The nla is an open FD number, that refers to already created netns. The pyroute2 library provides also a possibility to specify not a FD number, but a netns name as a string. In that case the library will try to lookup the corresponding netns in the standard location. Create veth and move the peer to a netns with IPRoute:: from pyroute2 import IPRoute ipr = IPRoute() ipr.link('add', ifname='v0p0', kind='veth', peer='v0p1') idx = ipr.link_lookup(ifname='v0p1')[0] ipr.link('set', index=idx, net_ns_fd='netns_name') Create veth and move the peer to a netns with IPDB:: from pyroute2 import IPDB ipdb = IPDB() ipdb.create(ifname='v0p0', kind='veth', peer='v0p1').commit() with ipdb.interfaces.v0p1 as i: i.net_ns_fd = 'netns_name' Manage interfaces within a netns -------------------------------- This task can be done with `NetNS` objects. A `NetNS` object spawns a child and runs it within a netns, providing the same API as `IPRoute` does:: from pyroute2 import NetNS ns = NetNS('netns_name') # do some stuff within the netns ns.close() One can even start `IPDB` on the top of `NetNS`:: from pyroute2 import NetNS from pyroute2 import IPDB ns = NetNS('netns_name') ipdb = IPDB(nl=ns) # do some stuff within the netns ipdb.release() ns.close() Spawn a process within a netns ------------------------------ For that purpose one can use `NSPopen` API. It works just as normal `Popen`, but starts a process within a netns. List, set, create, attach and remove netns ------------------------------------------ These functions are described below. To use them, import `netns` module:: from pyroute2 import netns netns.listnetns() Please be aware, that in order to run system calls the library uses `ctypes` module. 
It can fail on platforms where SELinux is enforced. If the Python interpreter, loading this module, dumps the core, one can check the SELinux state with `getenforce` command. ''' import ctypes import ctypes.util import errno import io import os import os.path import pickle import struct import traceback from pyroute2 import config from pyroute2.common import basestring try: file = file except NameError: file = io.IOBase # FIXME: arch reference __NR = { 'x86_': {'64bit': 308}, 'i386': {'32bit': 346}, 'i686': {'32bit': 346}, 'mips': {'32bit': 4344, '64bit': 5303}, # FIXME: NABI32? 'loon': {'64bit': 268}, 'armv': {'32bit': 375}, 'aarc': {'32bit': 375, '64bit': 268}, # FIXME: EABI vs. OABI? 'ppc6': {'64bit': 350}, 's390': {'64bit': 339}, 'loongarch64': {'64bit': 268}, 'risc': {'64bit': 268}, } __NR_setns = __NR.get(config.machine[:4], {}).get(config.arch, 308) CLONE_NEWNET = 0x40000000 MNT_DETACH = 0x00000002 MS_BIND = 4096 MS_REC = 16384 MS_SHARED = 1 << 20 NETNS_RUN_DIR = '/var/run/netns' __saved_ns = [] __libc = None def _get_netnspath(name): netnspath = name dirname = os.path.dirname(name) if not dirname: netnspath = '%s/%s' % (NETNS_RUN_DIR, name) if hasattr(netnspath, 'encode'): netnspath = netnspath.encode('ascii') return netnspath def _get_libc(libc=None): global __libc if libc is not None: return libc if __libc is None: __libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) return __libc def listnetns(nspath=None): ''' List available network namespaces. 
''' if nspath: nsdir = nspath else: nsdir = NETNS_RUN_DIR try: return os.listdir(nsdir) except FileNotFoundError: return [] def _get_ns_by_inode(nspath=NETNS_RUN_DIR): ''' Return a dict with inode as key and namespace name as value ''' ns_by_dev_inode = {} for ns_name in listnetns(nspath=nspath): ns_path = os.path.join(nspath, ns_name) try: st = os.stat(ns_path) except FileNotFoundError: # The path disappeared from the FS while listing, ignore it continue if st.st_dev not in ns_by_dev_inode: ns_by_dev_inode[st.st_dev] = {} ns_by_dev_inode[st.st_dev][st.st_ino] = ns_name return ns_by_dev_inode def ns_pids(nspath=NETNS_RUN_DIR): ''' List pids in all netns If a pid is in a unknown netns do not return it ''' result = {} ns_by_dev_inode = _get_ns_by_inode(nspath) for pid in os.listdir('/proc'): if not pid.isdigit(): continue try: st = os.stat(os.path.join('/proc', pid, 'ns', 'net')) except OSError as e: if e.errno in (errno.EACCES, errno.ENOENT): continue raise try: ns_name = ns_by_dev_inode[st.st_dev][st.st_ino] except KeyError: continue if ns_name not in result: result[ns_name] = [] result[ns_name].append(int(pid)) return result def pid_to_ns(pid=1, nspath=NETNS_RUN_DIR): ''' Return netns name which matches the given pid, None otherwise ''' try: st = os.stat(os.path.join('/proc', str(pid), 'ns', 'net')) ns_by_dev_inode = _get_ns_by_inode(nspath) return ns_by_dev_inode[st.st_dev][st.st_ino] except OSError as e: if e.errno in (errno.EACCES, errno.ENOENT): return None raise except KeyError: return None def _create(netns, libc=None, pid=None): libc = _get_libc(libc) netnspath = _get_netnspath(netns) netnsdir = os.path.dirname(netnspath) # init netnsdir try: os.mkdir(netnsdir) except OSError as e: if e.errno != errno.EEXIST: raise # this code is ported from iproute2 done = False while libc.mount(b'', netnsdir, b'none', MS_SHARED | MS_REC, None) != 0: if done: raise OSError(ctypes.get_errno(), 'share rundir failed', netns) if ( libc.mount(netnsdir, netnsdir, b'none', 
MS_BIND | MS_REC, None) != 0 ): raise OSError(ctypes.get_errno(), 'mount rundir failed', netns) done = True # create mountpoint os.close(os.open(netnspath, os.O_RDONLY | os.O_CREAT | os.O_EXCL, 0)) # unshare if pid is None: pid = 'self' if libc.unshare(CLONE_NEWNET) < 0: raise OSError(ctypes.get_errno(), 'unshare failed', netns) # bind the namespace if ( libc.mount( '/proc/{}/ns/net'.format(pid).encode('utf-8'), netnspath, b'none', MS_BIND, None, ) < 0 ): raise OSError(ctypes.get_errno(), 'mount failed', netns) def create(netns, libc=None): ''' Create a network namespace. ''' rctl, wctl = os.pipe() pid = os.fork() if pid == 0: # child error = None try: _create(netns, libc) except Exception as e: error = e error.tb = traceback.format_exc() msg = pickle.dumps(error) os.write(wctl, struct.pack('I', len(msg))) os.write(wctl, msg) os._exit(0) else: # parent msglen = struct.unpack('I', os.read(rctl, 4))[0] error = pickle.loads(os.read(rctl, msglen)) os.close(rctl) os.close(wctl) os.waitpid(pid, 0) if error is not None: raise error def attach(netns, pid, libc=None): ''' Attach the network namespace of the process `pid` to `netns` as if it were created with `create`. ''' _create(netns, libc, pid) def remove(netns, libc=None): ''' Remove a network namespace. ''' libc = _get_libc(libc) netnspath = _get_netnspath(netns) libc.umount2(netnspath, MNT_DETACH) os.unlink(netnspath) def setns(netns, flags=os.O_CREAT, libc=None): ''' Set netns for the current process. The flags semantics is the same as for the `open(2)` call: - O_CREAT -- create netns, if doesn't exist - O_CREAT | O_EXCL -- create only if doesn't exist Note that "main" netns has no name. But you can access it with:: setns('foo') # move to netns foo setns('/proc/1/ns/net') # go back to default netns See also `pushns()`/`popns()`/`dropns()` Changed in 0.5.1: the routine closes the ns fd if it's not provided via arguments. 
''' newfd = False libc = _get_libc(libc) if isinstance(netns, basestring): netnspath = _get_netnspath(netns) if os.path.basename(netns) in listnetns(os.path.dirname(netns)): if flags & (os.O_CREAT | os.O_EXCL) == (os.O_CREAT | os.O_EXCL): raise OSError(errno.EEXIST, 'netns exists', netns) else: if flags & os.O_CREAT: create(netns, libc=libc) nsfd = os.open(netnspath, os.O_RDONLY) newfd = True elif isinstance(netns, file): nsfd = netns.fileno() elif isinstance(netns, int): nsfd = netns else: raise RuntimeError('netns should be a string or an open fd') error = libc.syscall(__NR_setns, nsfd, CLONE_NEWNET) if newfd: os.close(nsfd) if error != 0: raise OSError(ctypes.get_errno(), 'failed to open netns', netns) def pushns(newns=None, libc=None): ''' Save the current netns in order to return to it later. If newns is specified, change to it:: # --> the script in the "main" netns netns.pushns("test") # --> changed to "test", the "main" is saved netns.popns() # --> "test" is dropped, back to the "main" ''' global __saved_ns __saved_ns.append(os.open('/proc/self/ns/net', os.O_RDONLY)) if newns is not None: setns(newns, libc=libc) def popns(libc=None): ''' Restore the previously saved netns. 
''' global __saved_ns fd = __saved_ns.pop() try: setns(fd, libc=libc) except Exception: __saved_ns.append(fd) raise os.close(fd) def dropns(libc=None): ''' Discard the last saved with `pushns()` namespace ''' global __saved_ns fd = __saved_ns.pop() try: os.close(fd) except Exception: pass pyroute2-0.7.11/pyroute2/netns/manager.py000066400000000000000000000073331455030217500203050ustar00rootroot00000000000000import errno from pyroute2 import netns from pyroute2.inotify.inotify_fd import Inotify from pyroute2.iproute.linux import IPRoute from pyroute2.netlink.exceptions import NetlinkError, SkipInode from pyroute2.netlink.rtnl import RTM_DELNETNS, RTM_NEWNETNS from pyroute2.netlink.rtnl.nsinfmsg import nsinfmsg class NetNSManager(Inotify): def __init__(self, libc=None, path=None, target='netns_manager'): path = set(path or []) super(NetNSManager, self).__init__(libc, path) if not self.path: for d in ['/var/run/netns', '/var/run/docker/netns']: try: self.register_path(d) except OSError: pass self.ipr = IPRoute(target=target) self.registry = {} self.update() self.target = target def update(self): self.ipr.netns_path = self.path for info in self.ipr.get_netns_info(): self.registry[info.get_attr('NSINFO_PATH')] = info def get(self): for msg in super(NetNSManager, self).get(): info = nsinfmsg() if msg is None: info['header']['error'] = NetlinkError(errno.ECONNRESET) info['header']['type'] = RTM_DELNETNS info['header']['target'] = self.target info['event'] = 'RTM_DELNETNS' yield info return path = '{path}/{name}'.format(**msg) info['header']['error'] = None info['header']['target'] = self.target if path not in self.registry: self.update() if path in self.registry: info.load(self.registry[path]) else: info['attrs'] = [('NSINFO_PATH', path)] del info['value'] if msg['mask'] & 0x200: info['header']['type'] = RTM_DELNETNS info['event'] = 'RTM_DELNETNS' elif not msg['mask'] & 0x100: continue yield info def close(self, code=None): self.ipr.close() super(NetNSManager, 
self).close() def create(self, path): netnspath = netns._get_netnspath(path) try: netns.create(netnspath, self.libc) except OSError as e: raise NetlinkError(e.errno) info = self.ipr._dump_one_ns(netnspath, set()) info['header']['type'] = RTM_NEWNETNS info['header']['target'] = self.target info['event'] = 'RTM_NEWNETNS' del info['value'] return (info,) def remove(self, path): netnspath = netns._get_netnspath(path) info = None try: info = self.ipr._dump_one_ns(netnspath, set()) except SkipInode as e: raise NetlinkError(e.code) info['header']['type'] = RTM_DELNETNS info['header']['target'] = self.target info['event'] = 'RTM_DELNETNS' del info['value'] try: netns.remove(netnspath, self.libc) except OSError as e: raise NetlinkError(e.errno) return (info,) def netns(self, cmd, *argv, **kwarg): path = kwarg.get('path', kwarg.get('NSINFO_PATH')) if path is None: raise ValueError('netns spec is required') netnspath = netns._get_netnspath(path) if cmd == 'add': return self.create(netnspath) elif cmd == 'del': return self.remove(netnspath) elif cmd not in ('get', 'set'): raise ValueError('method not supported') for item in self.dump(): if item.get_attr('NSINFO_PATH') == netnspath: return (item,) return () def dump(self, groups=None): return self.ipr.get_netns_info() pyroute2-0.7.11/pyroute2/nftables/000077500000000000000000000000001455030217500167625ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/nftables/__init__.py000066400000000000000000000000001455030217500210610ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/nftables/expressions.py000066400000000000000000000047421455030217500217250ustar00rootroot00000000000000import socket import struct from collections import OrderedDict ## # Utility functions ## def get_mask(addr): if not addr: return [None, None] ret = addr.split('/') if len(ret) == 2: return ret else: return [addr, None] ## # Expressions generators ## def genex(name, kwarg): exp_data = [] for key, value in kwarg.items(): exp_data.append(('NFTA_%s_%s' % 
(name.upper(), key.upper()), value)) return { 'attrs': [ ('NFTA_EXPR_NAME', name), ('NFTA_EXPR_DATA', {'attrs': exp_data}), ] } def verdict(code): kwarg = OrderedDict() kwarg['dreg'] = 0 # NFT_REG_VERDICT kwarg['data'] = { 'attrs': [ ('NFTA_DATA_VERDICT', {'attrs': [('NFTA_VERDICT_CODE', code)]}) ] } return [genex('immediate', kwarg)] def ipv4addr(src=None, dst=None): if not src and not dst: raise ValueError('must be at least one of src, dst') ret = [] # get masks src, src_mask = get_mask(src) dst, dst_mask = get_mask(dst) # load address(es) into NFT_REG_1 kwarg = OrderedDict() kwarg['dreg'] = 1 # save to NFT_REG_1 kwarg['base'] = 1 # NFT_PAYLOAD_NETWORK_HEADER kwarg['offset'] = 12 if src else 16 kwarg['len'] = 8 if (src and dst) else 4 ret.append(genex('payload', kwarg)) # run bitwise with masks -- if provided if src_mask or dst_mask: mask = b'' if src: if not src_mask: src_mask = '32' src_mask = int('1' * int(src_mask), 2) mask += struct.pack('I', src_mask) if dst: if not dst_mask: dst_mask = '32' dst_mask = int('1' * int(dst_mask), 2) mask += struct.pack('I', dst_mask) xor = '\x00' * len(mask) kwarg = OrderedDict() kwarg['sreg'] = 1 # read from NFT_REG_1 kwarg['dreg'] = 1 # save to NFT_REG_1 kwarg['len'] = 8 if (src and dst) else 4 kwarg['mask'] = {'attrs': [('NFTA_DATA_VALUE', mask)]} kwarg['xor'] = {'attrs': [('NFTA_DATA_VALUE', xor)]} ret.append(genex('bitwise', kwarg)) # run cmp packed = b'' if src: packed += socket.inet_aton(src) if dst: packed += socket.inet_aton(dst) kwarg = OrderedDict() kwarg['sreg'] = 1 # read from NFT_REG_1 kwarg['op'] = 0 # NFT_CMP_EQ kwarg['data'] = {'attrs': [('NFTA_DATA_VALUE', packed)]} ret.append(genex('cmp', kwarg)) return ret pyroute2-0.7.11/pyroute2/nftables/main.py000066400000000000000000000306501455030217500202640ustar00rootroot00000000000000''' ''' from pyroute2.netlink.nfnetlink import nfgen_msg from pyroute2.netlink.nfnetlink.nftsocket import ( DATA_TYPE_ID_TO_NAME, DATA_TYPE_NAME_TO_INFO, NFT_MSG_DELCHAIN, 
NFT_MSG_DELRULE, NFT_MSG_DELSET, NFT_MSG_DELSETELEM, NFT_MSG_DELTABLE, NFT_MSG_GETCHAIN, NFT_MSG_GETRULE, NFT_MSG_GETSET, NFT_MSG_GETSETELEM, NFT_MSG_GETTABLE, NFT_MSG_NEWCHAIN, NFT_MSG_NEWRULE, NFT_MSG_NEWSET, NFT_MSG_NEWSETELEM, NFT_MSG_NEWTABLE, NFTSocket, nft_chain_msg, nft_rule_msg, nft_set_elem_list_msg, nft_set_msg, nft_table_msg, ) class NFTSet: __slots__ = ('table', 'name', 'key_type', 'timeout', 'counter', 'comment') def __init__(self, table, name, **kwargs): self.table = table self.name = name for attrname in self.__slots__: if attrname in kwargs: setattr(self, attrname, kwargs[attrname]) elif attrname not in ("table", "name"): setattr(self, attrname, None) def as_netlink(self): attrs = {"NFTA_SET_TABLE": self.table, "NFTA_SET_NAME": self.name} set_flags = set() if self.key_type is not None: key_type, key_len, _ = DATA_TYPE_NAME_TO_INFO.get(self.key_type) attrs["NFTA_SET_KEY_TYPE"] = key_type attrs["NFTA_SET_KEY_LEN"] = key_len if self.timeout is not None: set_flags.add("NFT_SET_TIMEOUT") attrs["NFTA_SET_TIMEOUT"] = self.timeout if self.counter is True: attrs["NFTA_SET_EXPR"] = {'attrs': [('NFTA_EXPR_NAME', 'counter')]} if self.comment is not None: attrs["NFTA_SET_USERDATA"] = [ ("NFTNL_UDATA_SET_COMMENT", self.comment) ] # ID is used for bulk create, but not implemented attrs['NFTA_SET_ID'] = 1 attrs["NFTA_SET_FLAGS"] = set_flags return attrs @classmethod def from_netlink(cls, msg): data_type_name = DATA_TYPE_ID_TO_NAME.get( msg.get_attr("NFTA_SET_KEY_TYPE"), msg.get_attr("NFTA_SET_KEY_TYPE"), # fallback to raw value ) counter = False expr = msg.get_attr('NFTA_SET_EXPR') if expr: expr = expr.get_attrs('NFTA_EXPR_NAME') if expr and "counter" in expr: counter = True comment = None udata = msg.get_attr("NFTA_SET_USERDATA") if udata: for key, value in udata: if key == "NFTNL_UDATA_SET_COMMENT": comment = value break return cls( table=msg.get_attr('NFTA_SET_TABLE'), name=msg.get_attr('NFTA_SET_NAME'), key_type=data_type_name, 
timeout=msg.get_attr('NFTA_SET_TIMEOUT'), counter=counter, comment=comment, ) @classmethod def from_dict(cls, d): return cls( **{ name: value for name, value in d.items() if name in cls.__slots__ } ) def as_dict(self): return {name: getattr(self, name) for name in self.__slots__} def __repr__(self): return str(self.as_dict()) class NFTSetElem: __slots__ = ( 'value', 'timeout', 'expiration', 'counter_bytes', 'counter_packets', 'comment', ) def __init__(self, value, **kwargs): self.value = value for name in self.__slots__: if name in kwargs: setattr(self, name, kwargs[name]) elif name != "value": setattr(self, name, None) @classmethod def from_netlink(cls, msg, modifier): value = msg.get_attr('NFTA_SET_ELEM_KEY').get_attr("NFTA_DATA_VALUE") if modifier is not None: # Need to find a better way modifier.data = value modifier.length = 4 + len(modifier.data) modifier.decode() value = modifier.value kwarg = { "expiration": msg.get_attr('NFTA_SET_ELEM_EXPIRATION'), "timeout": msg.get_attr('NFTA_SET_ELEM_TIMEOUT'), } elem_expr = msg.get_attr('NFTA_SET_ELEM_EXPR') if elem_expr: if elem_expr.get_attr('NFTA_EXPR_NAME') == "counter": elem_expr = elem_expr.get_attr("NFTA_EXPR_DATA") kwarg.update( { "counter_bytes": elem_expr.get_attr( "NFTA_COUNTER_BYTES" ), "counter_packets": elem_expr.get_attr( "NFTA_COUNTER_PACKETS" ), } ) udata = msg.get_attr('NFTA_SET_ELEM_USERDATA') if udata: for type_name, data in udata: if type_name == "NFTNL_UDATA_SET_ELEM_COMMENT": kwarg["comment"] = data return cls(value=value, **kwarg) def as_netlink(self, modifier): if modifier is not None: modifier.value = self.value modifier.encode() value = modifier["value"] else: value = self.value attrs = [ ['NFTA_SET_ELEM_KEY', {'attrs': [('NFTA_DATA_VALUE', value)]}] ] if self.timeout is not None: attrs.append(['NFTA_SET_ELEM_TIMEOUT', self.timeout]) if self.expiration is not None: attrs.append(['NFTA_SET_ELEM_EXPIRATION', self.expiration]) if self.comment is not None: attrs.append( [ 
'NFTA_SET_ELEM_USERDATA', [("NFTNL_UDATA_SET_ELEM_COMMENT", self.comment)], ] ) return {'attrs': attrs} @classmethod def from_dict(cls, d): return cls( **{ name: value for name, value in d.items() if name in cls.__slots__ } ) def as_dict(self): return {name: getattr(self, name) for name in self.__slots__} def __repr__(self): return str(self.as_dict()) class NFTables(NFTSocket): # TODO: documentation # TODO: tests # TODO: dump()/load() with support for json and xml def get_tables(self): return self.request_get(nfgen_msg(), NFT_MSG_GETTABLE) def get_chains(self): return self.request_get(nfgen_msg(), NFT_MSG_GETCHAIN) def get_rules(self): return self.request_get(nfgen_msg(), NFT_MSG_GETRULE) def get_sets(self): return self.request_get(nfgen_msg(), NFT_MSG_GETSET) # # The nft API is in the prototype stage and may be # changed until the release. The planned release for # the API is 0.5.2 # def table(self, cmd, **kwarg): ''' Example:: nft.table('add', name='test0') ''' commands = { 'add': NFT_MSG_NEWTABLE, 'create': NFT_MSG_NEWTABLE, 'del': NFT_MSG_DELTABLE, 'get': NFT_MSG_GETTABLE, } return self._command(nft_table_msg, commands, cmd, kwarg) def chain(self, cmd, **kwarg): ''' Example:: # # default policy 'drop' for input # nft.chain('add', table='test0', name='test_chain0', hook='input', type='filter', policy=0) ''' commands = { 'add': NFT_MSG_NEWCHAIN, 'create': NFT_MSG_NEWCHAIN, 'del': NFT_MSG_DELCHAIN, 'get': NFT_MSG_GETCHAIN, } # TODO: What about 'ingress' (netdev family)? 
hooks = { 'prerouting': 0, 'input': 1, 'forward': 2, 'output': 3, 'postrouting': 4, } if 'hook' in kwarg: kwarg['hook'] = { 'attrs': [ ['NFTA_HOOK_HOOKNUM', hooks[kwarg['hook']]], ['NFTA_HOOK_PRIORITY', kwarg.pop('priority', 0)], ] } if 'type' not in kwarg: kwarg['type'] = 'filter' return self._command(nft_chain_msg, commands, cmd, kwarg) def rule(self, cmd, **kwarg): ''' Example:: from pyroute2.nftables.expressions import ipv4addr, verdict # # allow all traffic from 192.168.0.0/24 # nft.rule('add', table='test0', chain='test_chain0', expressions=(ipv4addr(src='192.168.0.0/24'), verdict(code=1))) ''' commands = { 'add': NFT_MSG_NEWRULE, 'create': NFT_MSG_NEWRULE, 'insert': NFT_MSG_NEWRULE, 'replace': NFT_MSG_NEWRULE, 'del': NFT_MSG_DELRULE, 'get': NFT_MSG_GETRULE, } if 'expressions' in kwarg: expressions = [] for exp in kwarg['expressions']: expressions.extend(exp) kwarg['expressions'] = expressions return self._command(nft_rule_msg, commands, cmd, kwarg) def sets(self, cmd, **kwarg): ''' Example:: nft.sets("add", table="filter", name="test0", key_type="ipv4_addr", timeout=10000, counter=True, comment="my comment max 252 bytes") nft.sets("get", table="filter", name="test0") nft.sets("del", table="filter", name="test0") my_set = nft.sets("add", set=NFTSet(table="filter", name="test1", key_type="ipv4_addr") nft.sets("del", set=my_set) ''' commands = { 'add': NFT_MSG_NEWSET, 'get': NFT_MSG_GETSET, 'del': NFT_MSG_DELSET, } if "set" in kwarg: nft_set = kwarg.pop("set") else: nft_set = NFTSet(**kwarg) kwarg = nft_set.as_netlink() msg = self._command(nft_set_msg, commands, cmd, kwarg) if cmd == "get": return NFTSet.from_netlink(msg) return nft_set def set_elems(self, cmd, **kwarg): ''' Example:: nft.set_elems("add", table="filter", set="test0", elements={"10.2.3.4", "10.4.3.2"}) nft.set_elems("add", set=NFTSet(table="filter", name="test0"), elements=[{"value": "10.2.3.4", "timeout": 10000}]) nft.set_elems("add", table="filter", set="test0", 
elements=[NFTSetElem(value="10.2.3.4", timeout=10000, comment="hello world")]) nft.set_elems("get", table="filter", set="test0") nft.set_elems("del", table="filter", set="test0", elements=["10.2.3.4"]) ''' commands = { 'add': NFT_MSG_NEWSETELEM, 'get': NFT_MSG_GETSETELEM, 'del': NFT_MSG_DELSETELEM, } if isinstance(kwarg["set"], NFTSet): nft_set = kwarg.pop("set") kwarg["table"] = nft_set.table kwarg["set"] = nft_set.name else: nft_set = self.sets("get", table=kwarg["table"], name=kwarg["set"]) found = DATA_TYPE_NAME_TO_INFO.get(nft_set.key_type) if found: _, _, modifier = found modifier = modifier() modifier.header = None else: modifier = None if cmd == "get": msg = nft_set_elem_list_msg() msg['attrs'] = [ ["NFTA_SET_ELEM_LIST_TABLE", kwarg["table"]], ["NFTA_SET_ELEM_LIST_SET", kwarg["set"]], ] msg = self.request_get(msg, NFT_MSG_GETSETELEM)[0] elements = set() for elem in msg.get_attr('NFTA_SET_ELEM_LIST_ELEMENTS'): elements.add(NFTSetElem.from_netlink(elem, modifier)) return elements elements = [] for elem in kwarg.pop("elements"): if isinstance(elem, dict): elem = NFTSetElem.from_dict(elem) elif not isinstance(elem, NFTSetElem): elem = NFTSetElem(value=elem) elements.append(elem.as_netlink(modifier)) kwarg["elements"] = elements return self._command(nft_set_elem_list_msg, commands, cmd, kwarg) pyroute2-0.7.11/pyroute2/nftables/parser/000077500000000000000000000000001455030217500202565ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/nftables/parser/__init__.py000066400000000000000000000000001455030217500223550ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/nftables/parser/expr.py000066400000000000000000000244421455030217500216140ustar00rootroot00000000000000""" nf_tables expression netlink attributes See EXPRESSIONS in nft(8). 
""" from socket import AF_INET, AF_INET6 from pyroute2.nftables.parser.parser import conv_map_tuple, nfta_nla_parser class NFTReg(object): def __init__(self, num): self.num = num @classmethod def from_netlink(cls, nlval): # please, for more information read nf_tables.h. if nlval == 'NFT_REG_VERDICT': num = 0 else: num = int(nlval.split('_')[-1].lower()) if nlval.startswith('NFT_REG32_'): num += 8 return cls(num=num) @staticmethod def to_netlink(reg): # please, for more information read nf_tables.h. if reg.num == 0: return 'NFT_REG_VERDICT' if reg.num < 8: return 'NFT_REG_{0}'.format(reg.num) return 'NFT_REG32_{0}'.format(reg.num) @classmethod def from_dict(cls, val): return cls(num=val) def to_dict(self): return self.num class NFTVerdict(object): def __init__(self, verdict, chain): self.verdict = verdict self.chain = chain @classmethod def from_netlink(cls, ndmsg): if ndmsg.get_attr('NFTA_VERDICT_CODE') is not None: verdict = ( ndmsg.get_attr('NFTA_VERDICT_CODE').split('_')[-1].lower() ) else: verdict = None chain = ndmsg.get_attr('NFTA_VERDICT_CHAIN') return cls(verdict=verdict, chain=chain) def to_netlink(self): attrs = [('NFTA_VERDICT_CODE', 'NF_' + self.verdict)] if self.chain is not None: attrs.append(('NFTA_VERDICT_CHAIN', self.chain)) return attrs @classmethod def from_dict(cls, d): return cls(verdict=d['verdict'], chain=d.get('chain', None)) def to_dict(self): d = {'verdict': self.verdict} if self.chain is not None: d['chain'] = self.chain return d class NFTData(object): def __init__(self, data_type, data): self.type = data_type self.data = data def to_netlink(self): if self.type == 'value': return ('NFTA_DATA_VALUE', self.data) if self.type == 'verdict': return ('NFTA_DATA_VERDICT', self.data) raise NotImplementedError(self.type) @classmethod def from_netlink(cls, ndmsg): if ndmsg.get_attr('NFTA_DATA_VALUE') is not None: kwargs = { 'data_type': 'value', 'data': ndmsg.get_attr('NFTA_DATA_VALUE'), } elif ndmsg.get_attr('NFTA_DATA_VERDICT') is not None: 
kwargs = { 'data_type': 'verdict', 'data': NFTVerdict.from_netlink( ndmsg.get_attr('NFTA_DATA_VERDICT') ), } else: raise NotImplementedError(ndmsg) return cls(**kwargs) @classmethod def from_dict(cls, data): def from_32hex(val): return bytes(bytearray.fromhex(val[2:]))[::-1] kwargs = {} data = data["reg"] if data['type'] == 'value': value = bytes() for i in range(0, data['len'], 4): value += from_32hex(data['data{0}'.format(i / 4)]) kwargs['data'] = value elif data['type'] == 'verdict': kwargs['data'] = NFTVerdict.from_dict(data) else: raise NotImplementedError() kwargs['data_type'] = data['type'] return cls(**kwargs) def to_dict(self): def to_32hex(s): res = ''.join('{:02x}'.format(c) for c in bytearray(s)[::-1]) return '0x' + res.zfill(8) if self.type == 'value': len_data = len(self.data) d = {'type': 'value', 'len': len_data} for i in range(0, len_data, 4): d['data{0}'.format(i / 4)] = to_32hex(self.data[i : i + 4]) elif self.type == 'verdict': d = self.data.to_dict() d['type'] = 'verdict' else: raise NotImplementedError() return {"reg": d} class NFTRuleExpr(nfta_nla_parser): ####################################################################### conv_maps = (conv_map_tuple('name', 'NFTA_EXPR_NAME', 'type', 'raw'),) ####################################################################### @classmethod def from_netlink(cls, expr_type, ndmsg): inst = super(NFTRuleExpr, cls).from_netlink(ndmsg) inst.name = expr_type return inst cparser_reg = NFTReg cparser_data = NFTData class cparser_extract_str(object): STRVAL = None @classmethod def from_netlink(cls, val): magic = '{0}' left, right = cls.STRVAL.split(magic, 1) if right: val = val[len(left) : -len(right)] else: val = val[len(left) :] return val.lower() @classmethod def to_netlink(cls, val): return cls.STRVAL.format(val).upper() @staticmethod def from_dict(val): return val @staticmethod def to_dict(val): return val class cparser_inet_family(object): @staticmethod def from_netlink(val): if val == AF_INET: return 'ip' 
class ExprPayload(NFTRuleExpr):
    '''
    nft 'payload' expression: conversion maps for dreg/base/offset/len.
    '''

    conv_maps = NFTRuleExpr.conv_maps + (
        conv_map_tuple('dreg', 'NFTA_PAYLOAD_DREG', 'dreg', 'reg'),
        conv_map_tuple('base', 'NFTA_PAYLOAD_BASE', 'base', 'payload_base'),
        conv_map_tuple('offset', 'NFTA_PAYLOAD_OFFSET', 'offset', 'raw'),
        conv_map_tuple('len', 'NFTA_PAYLOAD_LEN', 'len', 'raw'),
    )

    class cparser_payload_base(NFTRuleExpr.cparser_extract_str):
        # netlink constant template; nft says 'link' where the kernel
        # constant abbreviates to 'LL'
        STRVAL = 'NFT_PAYLOAD_{0}_HEADER'

        @classmethod
        def from_netlink(cls, ndmsg):
            base = super(ExprPayload.cparser_payload_base, cls).from_netlink(
                ndmsg
            )
            return 'link' if base == 'll' else base

        @classmethod
        def to_netlink(cls, val):
            return super(ExprPayload.cparser_payload_base, cls).to_netlink(
                'll' if val == 'link' else val
            )

        @staticmethod
        def from_dict(val):
            return val

        @staticmethod
        def to_dict(val):
            return val
def get_expression_from_netlink(ndmsg):
    '''
    Instantiate the proper NFTRuleExpr subclass from a netlink
    NFTA_LIST_ELEM message, dispatching on NFTA_EXPR_NAME.

    Raises NotImplementedError for expression types that are not
    registered in NFTA_EXPR_NAME_MAP.
    '''
    expr_name = ndmsg.get_attr('NFTA_EXPR_NAME')
    expr_cls = NFTA_EXPR_NAME_MAP.get(expr_name)
    if expr_cls is None:
        raise NotImplementedError(
            "can't load rule expression {0} from netlink {1}".format(
                expr_name, ndmsg
            )
        )
    return expr_cls.from_netlink(expr_name, ndmsg.get_attr('NFTA_EXPR_DATA'))
from collections import namedtuple

# one conversion rule: attribute name on the parser object, the netlink
# attribute name, the dict key, and the codec class suffix ('cparser_<x>')
conv_map_tuple = namedtuple(
    'conv_map_tuple', 'has_attr has_netlink has_dict parser_cls'
)


class nfta_nla_parser(object):
    '''
    Generic three-way converter between python attributes, netlink
    attribute lists and nft-style dicts.

    Subclasses declare ``conv_maps`` (a tuple of ``conv_map_tuple``)
    and provide a ``cparser_<name>`` codec class for every referenced
    ``parser_cls``.
    '''

    conv_maps = ()

    def __init__(self, **kwargs):
        # every declared attribute must be present in kwargs
        for spec in self.conv_maps:
            setattr(self, spec.has_attr, kwargs[spec.has_attr])

    def __repr__(self):
        return ''.join(
            'c={0}, VALUE={1}\n'.format(spec, getattr(self, spec.has_attr))
            for spec in self.conv_maps
        )

    @classmethod
    def from_netlink(cls, ndmsg):
        '''Decode a netlink message into a parser instance.'''
        kwargs = {}
        for spec in cls.conv_maps:
            if spec.has_netlink is None:
                continue
            nl_value = ndmsg.get_attr(spec.has_netlink)
            if nl_value is None:
                kwargs[spec.has_attr] = None
            else:
                codec = getattr(cls, 'cparser_' + spec.parser_cls)
                kwargs[spec.has_attr] = codec.from_netlink(nl_value)
        return cls(**kwargs)

    def to_netlink(self):
        '''Encode set attributes into an {'attrs': [...]} netlink dict.'''
        attrs = []
        for spec in self.conv_maps:
            value = getattr(self, spec.has_attr)
            if value is None:
                continue
            codec = getattr(self, 'cparser_' + spec.parser_cls)
            attrs.append((spec.has_netlink, codec.to_netlink(value)))
        return {'attrs': attrs}

    @classmethod
    def from_dict(cls, d):
        '''Decode an nft-style dict; missing keys become None.'''
        kwargs = {}
        for spec in cls.conv_maps:
            if spec.has_dict in d:
                codec = getattr(cls, 'cparser_' + spec.parser_cls)
                kwargs[spec.has_attr] = codec.from_dict(d[spec.has_dict])
            else:
                kwargs[spec.has_attr] = None
        return cls(**kwargs)

    def to_dict(self):
        '''
        Encode to an nft-style dict.  Unset (None) attributes are
        skipped, as are attributes whose codec renders to None.
        '''
        result = {}
        for spec in self.conv_maps:
            raw = getattr(self, spec.has_attr)
            if raw is None:
                continue
            rendered = getattr(self, 'cparser_' + spec.parser_cls).to_dict(raw)
            if rendered is not None:
                result[spec.has_dict] = rendered
        return result

    class cparser_raw(object):
        '''Identity codec: pass values through unchanged.'''

        @staticmethod
        def from_netlink(val):
            return val

        @staticmethod
        def to_netlink(val):
            return val

        @staticmethod
        def from_dict(val):
            return val

        @staticmethod
        def to_dict(val):
            return val
class cparser_user_data(object):
    '''
    Codec for NFTA_RULE_USERDATA; only the 'comment' TLV (type 0)
    is supported.
    '''

    def __init__(self, udata_type, value):
        self.type = udata_type
        self.value = value

    @classmethod
    def from_netlink(cls, userdata):
        # userdata arrives as colon-separated hex octets:
        # <type>:<length>:<payload...>
        octets = [int(part, 16) for part in userdata.split(':')]
        udata_type = octets[0]
        udata_len = octets[1]
        payload = ''.join(chr(code) for code in octets[2 : udata_len + 2])
        if udata_type == 0:  # 0 == COMMENT
            return cls('comment', payload)
        raise NotImplementedError("userdata type: {0}".format(udata_type))

    @staticmethod
    def to_netlink(udata):
        if udata.type != 'comment':
            raise NotImplementedError(
                "userdata type: {0}".format(udata.type)
            )
        encoded = '00:'
        encoded += "%0.2X:" % len(udata.value)
        encoded += ':'.join(["%0.2X" % ord(ch) for ch in udata.value])
        return encoded

    @staticmethod
    def to_dict(udata):
        # Currently nft command to not export userdata to dict
        return None

    @classmethod
    def from_dict(cls, d):
        # See to_dict() method
        return None
NetNS supports standard IPRoute API, so can be used instead of IPRoute, e.g., in IPDB:: # start the main network settings database: ipdb_main = IPDB() # start the same for a netns: ipdb_test = IPDB(nl=NetNS('test')) # create VETH ipdb_main.create(ifname='v0p0', kind='veth', peer='v0p1').commit() # move peer VETH into the netns with ipdb_main.interfaces.v0p1 as veth: veth.net_ns_fd = 'test' # please keep in mind, that netns move clears all the settings # on a VETH interface pair, so one should run netns assignment # as a separate operation only # assign addresses # please notice, that `v0p1` is already in the `test` netns, # so should be accessed via `ipdb_test` with ipdb_main.interfaces.v0p0 as veth: veth.add_ip('172.16.200.1/24') veth.up() with ipdb_test.interfaces.v0p1 as veth: veth.add_ip('172.16.200.2/24') veth.up() Please review also the test code, under `tests/test_netns.py` for more examples. By default, NetNS creates requested netns, if it doesn't exist, or uses existing one. To control this behaviour, one can use flags as for `open(2)` system call:: # create a new netns or fail, if it already exists netns = NetNS('test', flags=os.O_CREAT | os.O_EXCL) # create a new netns or use existing one netns = NetNS('test', flags=os.O_CREAT) # the same as above, the default behaviour netns = NetNS('test') To remove a network namespace:: from pyroute2 import NetNS netns = NetNS('test') netns.close() netns.remove() One should stop it first with `close()`, and only after that run `remove()`. 
class FD(object):
    '''
    Minimal file-like wrapper around a raw file descriptor.

    Exposes read()/write()/close() bound to the fd via os.*, plus
    fileno() and a no-op flush() so the object can be used where a
    buffered file is expected.
    '''

    def __init__(self, fd):
        self.fd = fd
        self.read = partial(os.read, fd)
        self.write = partial(os.write, fd)
        self.close = partial(os.close, fd)

    def fileno(self):
        return self.fd

    def flush(self):
        # nothing buffered on a raw fd
        return None
    '''

    def __init__(
        self,
        netns,
        flags=os.O_CREAT,
        target=None,
        libc=None,
        groups=RTMGRP_DEFAULTS,
    ):
        # Fork a proxy process into the requested netns; the parent and
        # the child talk over two pipe pairs wrapped into Transports.
        self.netns = netns
        self.flags = flags
        target = target or netns
        trnsp_in, self.remote_trnsp_out = [Transport(FD(x)) for x in os.pipe()]
        self.remote_trnsp_in, trnsp_out = [Transport(FD(x)) for x in os.pipe()]
        self.child = os.fork()
        if self.child == 0:
            # child process: drop the parent's transport ends
            trnsp_in.close()
            trnsp_out.close()
            trnsp_in.file_obj.close()
            trnsp_out.file_obj.close()
            try:
                # enter the target network namespace
                setns(self.netns, self.flags, libc=libc)
            except OSError as e:
                # report the failure to the parent, exit with the errno
                (self.remote_trnsp_out.send({'stage': 'init', 'error': e}))
                os._exit(e.errno)
            except Exception as e:
                # wrap non-OSError failures into OSError(ECOMM)
                (
                    self.remote_trnsp_out.send(
                        {
                            'stage': 'init',
                            'error': OSError(errno.ECOMM, str(e), self.netns),
                        }
                    )
                )
                os._exit(255)
            try:
                # serve netlink requests until shutdown
                Server(
                    self.remote_trnsp_in,
                    self.remote_trnsp_out,
                    target=target,
                    groups=groups,
                )
            finally:
                # never fall through into the parent's code path
                os._exit(0)

        # parent process: drop the child's ends, finish initialization
        # over the local transport pair
        try:
            self.remote_trnsp_in.close()
            self.remote_trnsp_out.close()
            super(NetNS, self).__init__(trnsp_in, trnsp_out, groups=groups)
            self.target = target
        except Exception:
            self.close()
            raise
        atexit.register(self.close)
        self.marshal = MarshalRtnl()

    def clone(self):
        # a fresh NetNS for the same namespace, same creation flags
        return type(self)(self.netns, self.flags)

    def _cleanup_atexit(self):
        # drop the atexit hook registered in __init__; the fallback
        # branch covers Python versions without atexit.unregister()
        if hasattr(atexit, 'unregister'):
            atexit.unregister(self.close)
        else:
            try:
                atexit._exithandlers.remove((self.close, (), {}))
            except ValueError:
                pass

    def close(self, code=errno.ECONNRESET):
        self._cleanup_atexit()
        try:
            super(NetNS, self).close(code=code)
        except:
            # something went wrong, force server shutdown
            try:
                self.trnsp_out.send({'stage': 'shutdown'})
            except Exception:
                pass
            log.error('forced shutdown procedure, clean up netns manually')

    def open_file(self, path):
        '''Proxy the open_file method if we are the parent.'''
        if self.child != 0:
            return self.proxy('open_file', path)
        return super(NetNS, self).open_file(path)

    def close_file(self, fd):
        '''Proxy the close_file method if we are the parent.'''
        if self.child != 0:
            return self.proxy('close_file', fd)
        return super(NetNS, self).close_file(fd)
get_pid(self): '''Proxy the get_pid method if we are the parent.''' if self.child != 0: return self.proxy('get_pid') return super(NetNS, self).get_pid() def post_init(self): pass def remove(self): ''' Try to remove this network namespace from the system. ''' remove(self.netns) pyroute2-0.7.11/pyroute2/nslink/nspopen.py000066400000000000000000000262011455030217500205170ustar00rootroot00000000000000''' NSPopen ======= The `NSPopen` class has nothing to do with netlink at all, but it is required to have a reasonable network namespace support. ''' import atexit import fcntl import subprocess import sys import threading import types from pyroute2 import config from pyroute2.common import file, metaclass from pyroute2.netns import setns def _handle(result): if result['code'] == 500: raise result['data'] elif result['code'] == 200: return result['data'] else: raise TypeError('unsupported return code') def _make_fcntl(prime, target): def func(*argv, **kwarg): return target(prime.fileno(), *argv, **kwarg) return func def _make_func(target): def func(*argv, **kwarg): return target(*argv, **kwarg) return func def _make_property(name): def func(self): return getattr(self.prime, name) return property(func) def _map_api(api, obj): for attr_name in dir(obj): attr = getattr(obj, attr_name) api[attr_name] = {'api': None} api[attr_name]['callable'] = hasattr(attr, '__call__') api[attr_name]['doc'] = ( attr.__doc__ if hasattr(attr, '__doc__') else None ) class MetaPopen(type): ''' API definition for NSPopen. All this stuff is required to make `help()` function happy. 
class NSPopenFile(object):
    '''
    Proxy wrapper for a Popen file object (stdin/stdout/stderr).

    Mirrors the public API of the wrapped object and additionally
    exposes fcntl(), ioctl(), flock() and lockf() bound to its fd.
    '''

    def __init__(self, prime):
        self.prime = prime
        # mirror every public member of the wrapped file object
        for attr_name in dir(prime):
            if attr_name.startswith('_'):
                continue
            member = getattr(prime, attr_name)
            if isinstance(member, (types.BuiltinMethodType, types.MethodType)):
                # bound methods become plain callables on the instance
                wrapper = _make_func(member)
                wrapper.__name__ = attr_name
                wrapper.__doc__ = getattr(member, '__doc__', '')
                setattr(self, attr_name, wrapper)
            else:
                # plain attributes become class-level properties that
                # forward to self.prime
                setattr(self.__class__, attr_name, _make_property(attr_name))
        # expose the fcntl family, pre-bound to the wrapped fd
        for call_name in ('fcntl', 'ioctl', 'flock', 'lockf'):
            call = getattr(fcntl, call_name)
            wrapper = _make_fcntl(prime, call)
            wrapper.__name__ = call_name
            wrapper.__doc__ = getattr(call, '__doc__', '')
            setattr(self, call_name, wrapper)
class ObjNS(object):
    # Transparent attribute proxy: attribute access that misses locally
    # is translated into request/response messages over the channel
    # pair towards the NSPopenServer process.
    ns = None

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def __getattribute__(self, key):
        try:
            # local attributes take precedence over the remote API
            return object.__getattribute__(self, key)
        except AttributeError:
            with self.lock:
                if self.released:
                    raise RuntimeError('the object is released')
                if self.api.get(key) and self.api[key]['callable']:
                    # remote callable: return a proxy function that
                    # ships the call and waits for the result
                    def proxy(*argv, **kwarg):
                        self.channel_out.put(
                            {
                                'name': key,
                                'argv': argv,
                                'namespace': self.ns,
                                'kwarg': kwarg,
                            }
                        )
                        return _handle(self.channel_in.get())

                    if key in self.api:
                        proxy.__doc__ = self.api[key]['doc']
                    return proxy
                else:
                    if key in ('stdin', 'stdout', 'stderr'):
                        # nested namespace (file object): wrap in a
                        # child ObjNS sharing the same channels/lock
                        objns = ObjNS()
                        objns.ns = key
                        objns.api = self.api.get(key, {}).get('api', {})
                        objns.channel_out = self.channel_out
                        objns.channel_in = self.channel_in
                        objns.released = self.released
                        objns.lock = self.lock
                        return objns
                    else:
                        # plain remote attribute: fetch its value
                        self.channel_out.put(
                            {'name': key, 'namespace': self.ns}
                        )
                        return _handle(self.channel_in.get())
    Sample to run `ip ad` command in `nsname` network namespace::

        nsp = NSPopen('nsname', ['ip', 'ad'], stdout=subprocess.PIPE)
        print(nsp.communicate())
        nsp.wait()
        nsp.release()

    The `NSPopen` class was intended to be a drop-in replacement for
    the `Popen` class, with one important difference: it implicitly
    spawns a child python process in the target network namespace, and
    the real `Popen()` runs there.  The process diagram for
    `NSPopen('test', ['ip', 'ad'])`::

        +---------------------+     +--------------+     +------------+
        | main python process |<--->| child python |<--->| netns test |
        | NSPopen()           |     | Popen()      |     | $ ip ad    |
        +---------------------+     +--------------+     +------------+

    Thus all the fd numbers of the running `NSPopen` object are
    meaningless in the context of the main process; operating on them
    directly gives 'Bad file descriptor' at best.  As a workaround,
    the file objects `stdin`, `stdout` and `stderr` additionally proxy
    `fcntl()`, `ioctl()`, `flock()` and `lockf()` calls, e.g.::

        from fcntl import F_GETFL
        from pyroute2 import NSPopen
        from subprocess import PIPE

        proc = NSPopen('test', ['my_program'], stdout=PIPE)
        flags = proc.stdout.fcntl(F_GETFL)

    Another additional method is `release()`, which explicitly stops
    the proxy process and releases all the resources.
    '''

    def __init__(self, nsname, *argv, **kwarg):
        '''
        The only differences from the `subprocess.Popen` init are:
        * `nsname` -- network namespace name
        * `flags` keyword argument (as for `open(2)`: `os.O_CREAT`
          creates the netns if missing; add `os.O_EXCL` to fail if it
          already exists)

        All other arguments are passed directly to `subprocess.Popen`.
        '''
        # create a child
        self.nsname = nsname
        if 'flags' in kwarg:
            self.flags = kwarg.pop('flags')
        else:
            self.flags = 0
        # request/response queues towards the proxy process
        self.channel_out = config.MpQueue()
        self.channel_in = config.MpQueue()
        self.lock = threading.Lock()
        self.released = False
        self.server = config.MpProcess(
            target=NSPopenServer,
            args=(
                self.nsname,
                self.flags,
                self.channel_out,
                self.channel_in,
                argv,
                kwarg,
            ),
        )
        # start the child and check the status
        self.server.start()
        response = self.channel_in.get()
        if isinstance(response, Exception):
            # the proxy failed to enter the netns or to start Popen
            self.server.join()
            raise response
        else:
            atexit.register(self.release)

    def release(self):
        '''
        Explicitly stop the proxy process and release all the
        resources. The `NSPopen` object can not be used after
        the `release()` call.
        '''
        with self.lock:
            if self.released:
                return
            self.released = True
            self.channel_out.put({'name': 'release'})
            self.channel_out.close()
            self.channel_in.close()
            self.server.join()
            # clean leftover pipes that would be closed at program exit
            del self.server
            del self.channel_out
            del self.channel_in

    def __dir__(self):
        return list(self.api.keys()) + ['release']
octets in frame sans FCS ETH_DATA_LEN = 1500 # Max. octets in payload ETH_FRAME_LEN = 1514 # Max. octets in frame sans FCS ETH_FCS_LEN = 4 # Octets in the FCS # # These are the defined Ethernet Protocol ID's. # ETH_P_LOOP = 0x0060 # Ethernet Loopback packet ETH_P_PUP = 0x0200 # Xerox PUP packet ETH_P_PUPAT = 0x0201 # Xerox PUP Addr Trans packet ETH_P_IP = 0x0800 # Internet Protocol packet ETH_P_X25 = 0x0805 # CCITT X.25 ETH_P_ARP = 0x0806 # Address Resolution packet ETH_P_BPQ = 0x08FF # G8BPQ AX.25 Ethernet Packet # ^^^ [ NOT AN OFFICIALLY REGISTERED ID ] ETH_P_IEEEPUP = 0x0A00 # Xerox IEEE802.3 PUP packet ETH_P_IEEEPUPAT = 0x0A01 # Xerox IEEE802.3 PUP Addr Trans packet ETH_P_DEC = 0x6000 # DEC Assigned proto ETH_P_DNA_DL = 0x6001 # DEC DNA Dump/Load ETH_P_DNA_RC = 0x6002 # DEC DNA Remote Console ETH_P_DNA_RT = 0x6003 # DEC DNA Routing ETH_P_LAT = 0x6004 # DEC LAT ETH_P_DIAG = 0x6005 # DEC Diagnostics ETH_P_CUST = 0x6006 # DEC Customer use ETH_P_SCA = 0x6007 # DEC Systems Comms Arch ETH_P_TEB = 0x6558 # Trans Ether Bridging ETH_P_RARP = 0x8035 # Reverse Addr Res packet ETH_P_ATALK = 0x809B # Appletalk DDP ETH_P_AARP = 0x80F3 # Appletalk AARP ETH_P_8021Q = 0x8100 # = 802.1Q VLAN Extended Header ETH_P_IPX = 0x8137 # IPX over DIX ETH_P_IPV6 = 0x86DD # IPv6 over bluebook ETH_P_PAUSE = 0x8808 # IEEE Pause frames. See = 802.3 = 31B ETH_P_SLOW = 0x8809 # Slow Protocol. 
See = 802.3ad = 43B ETH_P_WCCP = 0x883E # Web-cache coordination protocol # defined in draft-wilson-wrec-wccp-v2-00.txt ETH_P_PPP_DISC = 0x8863 # PPPoE discovery messages ETH_P_PPP_SES = 0x8864 # PPPoE session messages ETH_P_MPLS_UC = 0x8847 # MPLS Unicast traffic ETH_P_MPLS_MC = 0x8848 # MPLS Multicast traffic ETH_P_ATMMPOA = 0x884C # MultiProtocol Over ATM ETH_P_LINK_CTL = 0x886C # HPNA, wlan link local tunnel ETH_P_ATMFATE = 0x8884 # Frame-based ATM Transport over Ethernet ETH_P_PAE = 0x888E # Port Access Entity (IEEE = 802.1X) ETH_P_AOE = 0x88A2 # ATA over Ethernet ETH_P_8021AD = 0x88A8 # = 802.1ad Service VLAN ETH_P_802_EX1 = 0x88B5 # = 802.1 Local Experimental = 1. ETH_P_TIPC = 0x88CA # TIPC ETH_P_8021AH = 0x88E7 # = 802.1ah Backbone Service Tag ETH_P_1588 = 0x88F7 # IEEE = 1588 Timesync ETH_P_FCOE = 0x8906 # Fibre Channel over Ethernet ETH_P_TDLS = 0x890D # TDLS ETH_P_FIP = 0x8914 # FCoE Initialization Protocol ETH_P_QINQ1 = 0x9100 # deprecated QinQ VLAN # ^^^ [ NOT AN OFFICIALLY REGISTERED ID ] ETH_P_QINQ2 = 0x9200 # deprecated QinQ VLAN # ^^^ [ NOT AN OFFICIALLY REGISTERED ID ] ETH_P_QINQ3 = 0x9300 # deprecated QinQ VLAN # ^^^ [ NOT AN OFFICIALLY REGISTERED ID ] ETH_P_EDSA = 0xDADA # Ethertype DSA # ^^^ [ NOT AN OFFICIALLY REGISTERED ID ] ETH_P_AF_IUCV = 0xFBFB # IBM af_iucv # ^^^ [ NOT AN OFFICIALLY REGISTERED ID ] # # Non DIX types. Won't clash for = 1500 types. # ETH_P_802_3 = 0x0001 # Dummy type for = 802.3 frames ETH_P_AX25 = 0x0002 # Dummy protocol id for AX.25 ETH_P_ALL = 0x0003 # Every packet (be careful!!!) 
class msg(dict):
    '''
    Base class for simple fixed-layout binary messages.

    Subclasses declare ``fields`` as a tuple of
    ``(name, format[, default])`` where ``format`` is either a key of
    the ``types`` registry or a raw struct format string.  ``decode()``
    unpacks ``self.buf`` into dict items; ``encode()`` appends packed
    field values to ``self.buf``.
    '''

    buf = None
    data_len = None
    fields = ()
    _fields_names = ()
    # field codecs: plain struct formats, or dicts with a 'format' and
    # optional 'encode'/'decode' conversion callables
    types = {
        'uint8': 'B',
        'uint16': 'H',
        'uint32': 'I',
        'be16': '>H',
        'ip4addr': {
            'format': '4s',
            'decode': lambda x: inet_ntop(AF_INET, x),
            'encode': lambda x: [inet_pton(AF_INET, x)],
        },
        'l2addr': {
            'format': '6B',
            'decode': lambda x: ':'.join(['%x' % i for i in x]),
            'encode': lambda x: [int(i, 16) for i in x.split(':')],
        },
        'l2paddr': {
            'format': '6B10s',
            'decode': lambda x: ':'.join(['%x' % i for i in x[:6]]),
            'encode': lambda x: [int(i, 16) for i in x.split(':')]
            + [10 * b'\x00'],
        },
    }

    def __init__(self, content=None, buf=b'', offset=0, value=None):
        content = content or {}
        dict.__init__(self, content)
        self.buf = buf
        self.offset = offset
        self.value = value
        self._register_fields()

    def _register_fields(self):
        # cache field names for __getitem__'s None fallback
        self._fields_names = tuple([x[0] for x in self.fields])

    def _get_routine(self, mode, fmt):
        '''Resolve a field format to (struct format, conversion).'''
        fmt = self.types.get(fmt, fmt)
        if isinstance(fmt, dict):
            return (fmt['format'], fmt.get(mode, lambda x: x))
        else:
            return (fmt, lambda x: x)

    def reset(self):
        self.buf = b''

    def decode(self):
        '''Unpack self.buf into fields, advancing self.offset.'''
        self._register_fields()
        for field in self.fields:
            name, sfmt = field[:2]
            fmt, routine = self._get_routine('decode', sfmt)
            size = struct.calcsize(fmt)
            value = struct.unpack(
                fmt, self.buf[self.offset : self.offset + size]
            )
            if len(value) == 1:
                value = value[0]
            if isinstance(value, basestring) and sfmt[-1] == 's':
                # NUL-terminated string field: drop the padding
                value = value[: value.find(b'\x00')]
            if isinstance(sfmt, str) and sfmt[-1] == 's':
                try:
                    value = value.decode('utf-8')
                except UnicodeDecodeError:
                    value = hexdump(value)
            self[name] = routine(value)
            self.offset += size
        return self

    def encode(self):
        '''Pack fields into self.buf (appending).'''
        self._register_fields()
        for field in self.fields:
            name, fmt = field[:2]
            default = b'\x00' if len(field) <= 2 else field[2]
            fmt, routine = self._get_routine('encode', fmt)
            # special case: string
            if fmt == 'string':
                self.buf += routine(self[name])[0]
            else:
                size = struct.calcsize(fmt)
                if self[name] is None:
                    # unset field: emit the default (packed number or
                    # repeated byte-string filler)
                    if not isinstance(default, basestring):
                        self.buf += struct.pack(fmt, default)
                    else:
                        self.buf += default * (size // len(default))
                else:
                    value = routine(self[name])
                    if not isinstance(value, (set, tuple, list)):
                        value = [value]
                    self.buf += struct.pack(fmt, *value)
        return self

    def __getitem__(self, key):
        # declared-but-unset fields read as None instead of KeyError
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            if key in self._fields_names:
                return None
            raise


class ethmsg(msg):
    '''Ethernet II header.'''

    fields = (('dst', 'l2addr'), ('src', 'l2addr'), ('type', 'be16'))


class ip6msg(msg):
    '''IPv6 header.'''

    fields = (
        ('version', 'uint8', 6 << 4),
        ('_flow0', 'uint8'),
        ('_flow1', 'uint8'),
        ('_flow2', 'uint8'),
        # BUGFIX: was the typo 'uin16' -- neither a registered codec nor
        # a valid struct format, so any encode/decode raised struct.error.
        # NOTE(review): network byte order would be 'be16' -- confirm
        # intended wire format before changing further.
        ('plen', 'uint16'),
        ('next_header', 'uint8'),
        ('hop_limit', 'uint8'),
        # NOTE(review): 'ip6addr' has no entry in msg.types, so src/dst
        # still can not be encoded/decoded; a 16-byte codec is required.
        ('src', 'ip6addr'),
        ('dst', 'ip6addr'),
    )


class ip4msg(msg):
    '''IPv4 header.'''

    fields = (
        ('verlen', 'uint8', 0x45),
        ('dsf', 'uint8'),
        ('len', 'be16'),
        ('id', 'be16'),
        ('flags', 'uint16'),
        ('ttl', 'uint8', 128),
        ('proto', 'uint8'),
        ('csum', 'be16'),
        ('src', 'ip4addr'),
        ('dst', 'ip4addr'),
    )


class udp4_pseudo_header(msg):
    '''Pseudo header used for UDP over IPv4 checksum calculation.'''

    fields = (
        ('src', 'ip4addr'),
        ('dst', 'ip4addr'),
        ('pad', 'uint8'),
        ('proto', 'uint8', 17),
        ('len', 'be16'),
    )


class udpmsg(msg):
    '''UDP header.'''

    fields = (
        ('sport', 'be16'),
        ('dport', 'be16'),
        ('len', 'be16'),
        ('csum', 'be16'),
    )
self.closed: os.close(self._pfdw) os.close(self._pfdr) if self.th is not None: self.th.join() self.closed = True if hasattr(self.ch, 'send'): self.ch.send(None) def _monitor_thread(self): while True: msg = self.ch.get().unpickle() if msg is None: raise EOFError() os.write(self._pfdw, msg) @mitogen.core.takes_router def MitogenServer(ch_out, netns, target, router): ch_in = mitogen.core.Receiver(router) ch_out.send(ch_in.to_sender()) trnsp_in = Transport(Channel(ch_in)) trnsp_in.file_obj.start() trnsp_out = Transport(Channel(ch_out)) return Server(trnsp_in, trnsp_out, netns, target) class RemoteIPRoute(RTNL_API, RemoteSocket): def __init__(self, *argv, **kwarg): self._argv = tuple(argv) self._kwarg = dict(kwarg) if 'router' in kwarg: self._mitogen_broker = None self._mitogen_router = kwarg.pop('router') else: self._mitogen_broker = mitogen.master.Broker() self._mitogen_router = mitogen.master.Router(self._mitogen_broker) netns = kwarg.pop('netns', None) target = kwarg.pop('target', 'remote') try: if 'context' in kwarg: context = kwarg['context'] else: protocol = kwarg.pop('protocol', 'local') context = getattr(self._mitogen_router, protocol)( *argv, **kwarg ) ch_in = mitogen.core.Receiver( self._mitogen_router, respondent=context ) self._mitogen_call = context.call_async( MitogenServer, ch_out=ch_in.to_sender(), netns=netns, target=target, ) ch_out = ch_in.get().unpickle() super(RemoteIPRoute, self).__init__( Transport(Channel(ch_in)), Transport(Channel(ch_out)) ) except Exception: if self._mitogen_broker is not None: self._mitogen_broker.shutdown() self._mitogen_broker.join() raise self.marshal = MarshalRtnl() self.target = target def clone(self): return type(self)(*self._argv, **self._kwarg) def close(self, code=errno.ECONNRESET): with self.shutdown_lock: if not self.closed: super(RemoteIPRoute, self).close(code=code) self.closed = True try: self._mitogen_call.get() except mitogen.core.ChannelError: pass if self._mitogen_broker is not None: 
class ShellIPR(RTNL_API, RemoteSocket):
    '''RTNL API served by a ``python -m pyroute2.remote`` child process.

    The remote side is spawned via ``subprocess`` from an arbitrary
    command prefix (e.g. ``ssh host``), and the parent talks to it over
    the child's stdin/stdout pipes.
    '''

    def __init__(self, target):
        # `target` is a command prefix, e.g. "ssh user@host"
        self.target = target
        cmd = '%s python -m pyroute2.remote' % target
        self.shell = subprocess.Popen(
            cmd.split(),
            bufsize=0,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        # child's stdout is our input, child's stdin is our output
        trnsp_in = Transport(self.shell.stdout)
        trnsp_out = Transport(self.shell.stdin)
        try:
            super(ShellIPR, self).__init__(trnsp_in, trnsp_out)
        except Exception:
            # the init handshake failed: reap the child before re-raising
            self.close()
            raise
        atexit.register(self.close)
        self.marshal = MarshalRtnl()

    def clone(self):
        # spawn an independent session to the same target
        return type(self)(self.target)

    def _cleanup_atexit(self):
        # atexit.unregister() is Python 3; fall back to the private
        # _exithandlers list otherwise
        if hasattr(atexit, 'unregister'):
            atexit.unregister(self.close)
        else:
            try:
                atexit._exithandlers.remove((self.close, (), {}))
            except ValueError:
                pass

    def close(self, code=errno.ECONNRESET):
        '''Shut down the remote server and reap the child process.

        A positive ``code`` injects a fake broadcast packet into the
        input queue so a reader blocked on .get() wakes up.
        '''
        self._cleanup_atexit()
        # something went wrong, force server shutdown
        try:
            self.trnsp_out.send({'stage': 'shutdown'})
            if code > 0:
                data = {
                    'stage': 'broadcast',
                    'data': struct.pack('IHHQIQQ', 28, 2, 0, 0, code, 0, 0),
                    'error': None,
                }
                self.trnsp_in.brd_queue.put(data)
        except Exception:
            pass
        # force cleanup command channels
        # NOTE: the loop variable `close` shadows the method name here
        for close in (self.trnsp_in.close, self.trnsp_out.close):
            try:
                close()
            except Exception:
                pass  # Maybe already closed in remote.Client.close
        self.shell.kill()
        self.shell.wait()

    def post_init(self):
        # no post-init steps are required for the shell transport
        pass
class Transport(object):
    '''
    A simple transport protocol to send pickled objects between two
    end-points over an open file-like object supplied at init.
    '''

    def __init__(self, file_obj):
        self.file_obj = file_obj
        self.lock = threading.Lock()
        self.cmd_queue = queue.Queue()
        self.brd_queue = queue.Queue()
        self.run = True

    def fileno(self):
        return self.file_obj.fileno()

    def send(self, obj):
        '''Pickle ``obj`` and write it as one length-prefixed packet.'''
        payload = BytesIO()
        pickle.dump(obj, payload)
        body = payload.getvalue()
        # 8-byte header: total packet length + a reserved zero word
        self.file_obj.write(struct.pack("II", len(body) + 8, 0) + body)
        self.file_obj.flush()

    def _read_packet(self):
        # read exactly one length-prefixed pickle from the stream
        total, _reserved = struct.unpack("II", self.file_obj.read(8))
        raw = BytesIO(self.file_obj.read(total - 8))
        return pickle.load(raw)

    def _m_recv(self, own_queue, other_queue, check):
        '''Receive the next message matching ``check``.

        The broadcast and command readers share one stream: whoever
        grabs the lock reads packets and forwards foreign ones to the
        other queue, while the loser polls its own queue.
        '''
        while self.run:
            if not self.lock.acquire(False):
                # somebody else is reading the stream: poll our queue
                queued = None
                try:
                    queued = own_queue.get(timeout=1)
                except queue.Empty:
                    pass
                if queued is not None:
                    return queued
                continue
            try:
                try:
                    queued = own_queue.get(False)
                    if queued is None:
                        # wake-up marker left by the other reader
                        continue
                    return queued
                except queue.Empty:
                    pass
                packet = self._read_packet()
                if not check(packet['stage']):
                    # not ours -- hand it over and keep reading
                    other_queue.put(packet)
                else:
                    # ours; nudge the other reader to retry the lock
                    other_queue.put(None)
                    return packet
            finally:
                self.lock.release()

    def recv(self):
        return self._m_recv(
            self.brd_queue, self.cmd_queue, lambda x: x == 'broadcast'
        )

    def recv_cmd(self):
        return self._m_recv(
            self.cmd_queue, self.brd_queue, lambda x: x != 'broadcast'
        )

    def close(self):
        # only stop the receive loops; the file object belongs to the caller
        self.run = False
def Server(trnsp_in, trnsp_out, netns=None, target='localhost', groups=0):
    '''Serve a local IPRoute instance over a pair of transports.

    Runs until a 'shutdown' command arrives or the stop signal is
    received.  Command-channel stages handled: 'shutdown',
    'reconstruct' (replay a pickled netlink message into the local
    socket gate) and 'command' (proxy an arbitrary IPRoute method
    call).  Netlink data is relayed on the 'broadcast' stage.
    '''

    def stop_server(signum, frame):
        # signal handler: break the main select() loop
        Server.run = False

    Server.run = True
    signal.signal(config.signal_stop_remote, stop_server)
    try:
        if netns is not None:
            netnsmod.setns(netns)
        ipr = IPRoute(target=target, groups=groups)
        lock = ipr._sproxy.lock
        ipr._s_channel = ProxyChannel(trnsp_out, 'broadcast')
    except Exception as e:
        # init failed: report the error to the client and bail out
        trnsp_out.send({'stage': 'init', 'error': e})
        return 255
    inputs = [ipr.fileno(), trnsp_in.fileno()]
    broadcasts = {ipr.fileno(): ipr}
    outputs = []
    # all is OK so far
    trnsp_out.send({'stage': 'init', 'uname': config.uname, 'error': None})
    # 8<-------------------------------------------------------------
    while Server.run:
        try:
            events, _, _ = select.select(inputs, outputs, inputs)
        except:
            # NOTE(review): bare except -- select() may be interrupted
            # by the stop signal (loop condition re-checked on retry);
            # any other error is silently retried as well
            continue
        for fd in events:
            if fd in broadcasts:
                # netlink data on a local socket: relay it to the client
                sock = broadcasts[fd]
                bufsize = sock.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2
                with lock:
                    error = None
                    data = None
                    try:
                        data = sock.recv(bufsize)
                    except Exception as e:
                        error = e
                        error.tb = traceback.format_exc()
                    trnsp_out.send(
                        {'stage': 'broadcast', 'data': data, 'error': error}
                    )
            elif fd == trnsp_in.fileno():
                cmd = trnsp_in.recv_cmd()
                if cmd['stage'] == 'shutdown':
                    ipr.close()
                    # fake a netlink packet so a blocked reader on the
                    # client side wakes up; 104 is ECONNRESET on Linux
                    data = struct.pack('IHHQIQQ', 28, 2, 0, 0, 104, 0, 0)
                    trnsp_out.send(
                        {'stage': 'broadcast', 'data': data, 'error': None}
                    )
                    return
                elif cmd['stage'] == 'reconstruct':
                    # rebuild a message object from its pickled dump and
                    # push it into the local socket's gate
                    error = None
                    try:
                        msg = cmd['argv'][0]()
                        msg.load(pickle.loads(cmd['argv'][1]))
                        ipr.sendto_gate(msg, cmd['argv'][2])
                    except Exception as e:
                        error = e
                        error.tb = traceback.format_exc()
                    trnsp_out.send(
                        {
                            'stage': 'reconstruct',
                            'error': error,
                            'return': None,
                            'cookie': cmd['cookie'],
                        }
                    )
                elif cmd['stage'] == 'command':
                    # proxy an arbitrary IPRoute method call; remote
                    # exceptions are shipped back with their traceback
                    error = None
                    try:
                        ret = getattr(ipr, cmd['name'])(
                            *cmd['argv'], **cmd['kwarg']
                        )
                        if (
                            cmd['name'] == 'bind'
                            and ipr._brd_socket is not None
                        ):
                            # bind() opened a broadcast socket: start
                            # polling it as well
                            inputs.append(ipr._brd_socket.fileno())
                            broadcasts[
                                ipr._brd_socket.fileno()
                            ] = ipr._brd_socket
                    except Exception as e:
                        ret = None
                        error = e
                        error.tb = traceback.format_exc()
                    trnsp_out.send(
                        {
                            'stage': 'command',
                            'error': error,
                            'return': ret,
                            'cookie': cmd['cookie'],
                        }
                    )
self.remote_trnsp_out, ) # Stop the transport objects. for trnsp in transport_objs: try: if hasattr(trnsp, 'close'): trnsp.close() except Exception: pass # Close the file descriptors. for trnsp in transport_objs: try: trnsp.file_obj.close() except Exception: pass try: os.kill(self.child, config.signal_stop_remote) os.waitpid(self.child, 0) except OSError: pass def proxy(self, cmd, *argv, **kwarg): with self.cmdlock: self.trnsp_out.send( { 'stage': 'command', 'cookie': None, 'name': cmd, 'argv': argv, 'kwarg': kwarg, } ) ret = self.trnsp_in.recv_cmd() if ret['error'] is not None: raise ret['error'] return ret['return'] def fileno(self): return self.trnsp_in.fileno() def bind(self, *argv, **kwarg): if 'async' in kwarg: # FIXME # raise deprecation error after 0.5.3 # log.warning( 'use "async_cache" instead of "async", ' '"async" is a keyword from Python 3.7' ) del kwarg['async'] # do not work with async servers kwarg['async_cache'] = False return self.proxy('bind', *argv, **kwarg) def send(self, *argv, **kwarg): return self.proxy('send', *argv, **kwarg) def sendto(self, *argv, **kwarg): return self.proxy('sendto', *argv, **kwarg) def getsockopt(self, *argv, **kwarg): return self.proxy('getsockopt', *argv, **kwarg) def setsockopt(self, *argv, **kwarg): return self.proxy('setsockopt', *argv, **kwarg) def _sendto(self, *argv, **kwarg): return self.sendto(*argv, **kwarg) def _recv(self, *argv, **kwarg): return self.recv(*argv, **kwarg) pyroute2-0.7.11/pyroute2/requests/000077500000000000000000000000001455030217500170375ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/requests/__init__.py000066400000000000000000000000001455030217500211360ustar00rootroot00000000000000pyroute2-0.7.11/pyroute2/requests/address.py000066400000000000000000000065461455030217500210510ustar00rootroot00000000000000import ipaddress from socket import AF_INET, AF_INET6 from pyroute2.common import dqn2int, get_address_family, getbroadcast from .common import Index, IPRouteFilter, NLAKeyTransform 
class AddressFieldFilter(Index, NLAKeyTransform):
    '''Normalizes address request fields (IFA_* attributes).'''

    _nla_prefix = 'IFA_'

    def set_prefixlen(self, context, value):
        # accept dotted-quad netmasks as well as plain ints / strings
        if isinstance(value, str) and '.' in value:
            value = dqn2int(value)
        return {'prefixlen': int(value)}

    def set_address(self, context, value):
        if not value:
            return {}
        ret = {'address': value}
        if isinstance(value, str):
            # allow "address/prefixlen" notation
            parts = value.split('/')
            ret['address'] = parts[0]
            if len(parts) > 1:
                ret.update(self.set_prefixlen(context, parts[1]))
            if ':' in ret['address']:
                # canonical (compressed) IPv6 form
                ret['address'] = ipaddress.ip_address(
                    ret['address']
                ).compressed
        return ret

    def set_local(self, context, value):
        return {'local': value} if value else {}

    def set_mask(self, context, value):
        return {'prefixlen': value}

    def _with_fresh_cacheinfo(self, key, value):
        # record the lifetime and reset cacheinfo alongside it
        return {key: value, 'cacheinfo': {}}

    def set_preferred_lft(self, context, value):
        return self._with_fresh_cacheinfo('preferred', value)

    def set_preferred(self, context, value):
        return self._with_fresh_cacheinfo('preferred', value)

    def set_valid_lft(self, context, value):
        return self._with_fresh_cacheinfo('valid', value)

    def set_valid(self, context, value):
        return self._with_fresh_cacheinfo('valid', value)
if 'prefixlen' not in context: if context['family'] == AF_INET: context['prefixlen'] = 32 elif context['family'] == AF_INET6: context['prefixlen'] = 128 if ( 'local' not in context and 'address' in context and context['family'] == AF_INET ): # inject IFA_LOCAL, if family is AF_INET and # IFA_LOCAL is not set context['local'] = context['address'] pyroute2-0.7.11/pyroute2/requests/bridge.py000066400000000000000000000121131455030217500206430ustar00rootroot00000000000000from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg, protinfo_bridge from .common import Index, IPRouteFilter, NLAKeyTransform class BridgeFieldFilter(Index, NLAKeyTransform): _nla_prefix = ifinfmsg.prefix class BridgeIPRouteFilter(IPRouteFilter): def build_vlan_info_spec(self, orig_spec): range_vids = [int(i) for i in str(orig_spec['vid']).split('-')] if len(range_vids) == 2: if 0 < int(range_vids[0]) < range_vids[1] < 4095: new_spec = [] new_spec.append( { 'vid': range_vids[0], 'flags': self.convert_flags('range_begin'), } ) new_spec.append( { 'vid': range_vids[1], 'flags': self.convert_flags('range_end'), } ) return new_spec elif len(range_vids) == 1: if 0 < range_vids[0] < 4095: # PVID? if 'pvid' in orig_spec.keys(): if orig_spec['pvid']: orig_spec['flags'] = self.convert_flags( ['pvid', 'untagged'] ) del orig_spec['pvid'] # Make sure the vid is an int. orig_spec['vid'] = range_vids[0] return [orig_spec] return [] def build_vlan_tunnel_info_spec(self, orig_spec): # vlan_tunnel_info uses the same format as vlan_info, # just adds tunnel_id. 
vlan_info_spec = self.build_vlan_info_spec(orig_spec) range_ids = [int(i) for i in str(orig_spec['id']).split('-')] if len(range_ids) == 2 and len(vlan_info_spec) == 2: if 0 < range_ids[0] < range_ids[1] < 16777215: # vid to id mapping range must be the same length if ( vlan_info_spec[1]['vid'] - vlan_info_spec[0]['vid'] == range_ids[1] - range_ids[0] ): vlan_info_spec[0]['id'] = range_ids[0] vlan_info_spec[1]['id'] = range_ids[1] return [ self.create_nla_spec(vlan_info_spec[0]), self.create_nla_spec(vlan_info_spec[1]), ] elif len(range_ids) == 1 and len(vlan_info_spec) == 1: if 0 < range_ids[0] < 4095: vlan_info_spec[0]['id'] = range_ids[0] # Delete flags because vlan_tunnel_info doesn't seem # to use them, except for the RANGE. try: del vlan_info_spec[0]['flags'] except KeyError: pass return [self.create_nla_spec(vlan_info_spec[0])] return [] def create_nla_spec(self, spec): attrs = [] for key in spec.keys(): nla = ifinfmsg.af_spec_bridge.vlan_tunnel_info.name2nla(key) attrs.append([nla, spec[key]]) return {'attrs': attrs} def convert_flags(self, flags): if isinstance(flags, int): return flags elif isinstance(flags, str): return ifinfmsg.af_spec_bridge.vlan_info.names2flags([flags]) elif isinstance(flags, list): return ifinfmsg.af_spec_bridge.vlan_info.names2flags(flags) return 0 def build_spec(self, orig_spec): if 'vid' in orig_spec.keys(): if 'id' in orig_spec.keys(): return self.build_vlan_tunnel_info_spec(orig_spec) else: return self.build_vlan_info_spec(orig_spec) return [] def finalize(self, context): if self.command != 'dump': if 'IFLA_AF_SPEC' not in context: context['IFLA_AF_SPEC'] = {'attrs': []} for key in ('vlan_info', 'vlan_tunnel_info'): if key in context: nla = ifinfmsg.af_spec_bridge.name2nla(key) new_spec = self.build_spec(context[key]) for spec in new_spec: context['IFLA_AF_SPEC']['attrs'].append([nla, spec]) try: del context[key] except KeyError: pass for key in ('mode', 'vlan_flags'): if key in context: nla = 
class MPLSTarget(OrderedDict):
    '''An MPLS label stack entry with defaults.

    ``prime`` may be None (all defaults), a label as int or str, or a
    dict of fields; any other type raises TypeError.  Fields and
    defaults: label=16, tc=0, bos=1, ttl=0.  Equality compares against
    any dict-like, applying the same defaults to missing keys.
    '''

    _defaults = (('label', 16), ('tc', 0), ('bos', 1), ('ttl', 0))

    def __init__(self, prime=None):
        super(MPLSTarget, self).__init__()
        if prime is None:
            prime = {}
        elif isinstance(prime, str):
            prime = {'label': int(prime)}
        elif isinstance(prime, int):
            prime = {'label': prime}
        elif not isinstance(prime, dict):
            raise TypeError()
        for field, default in self._defaults:
            self[field] = prime.get(field, default)

    def __eq__(self, right):
        if not isinstance(right, (dict, MPLSTarget)):
            return False
        return all(
            self[field] == right.get(field, default)
            for field, default in self._defaults
        )

    def __repr__(self):
        return repr(dict(self))
MPLSTarget(value)} # do not overwrite message family for IP VIA return {key: value} try: return self.parse_target(key, context, json.loads(value)) except (json.JSONDecodeError, TypeError): pass if isinstance(value, str): if value == '': return {} labels = value.split('/') if len(labels) > 1: # MPLS label stack simple syntax? e.g.: 16/24, 200/300 etc. try: return self.parse_target(key, context, labels) except ValueError: pass # only simple IP targets are left value, prefixlen = labels ret[key] = value if self.add_defaults: if prefixlen.find('.') > 0: ret[f'{key}_len'] = dqn2int(prefixlen, AF_INET) elif prefixlen.find(':') >= 0: ret[f'{key}_len'] = dqn2int(prefixlen, AF_INET6) else: ret[f'{key}_len'] = int(prefixlen) else: if ( self.add_defaults and key in ('dst', 'src') and f'{key}_len' not in context ): set_full_mask = True if self.add_defaults: ret['family'] = get_address_family(value) if ret['family'] == AF_INET6: ret[key] = ipaddress.ip_address(value).compressed if set_full_mask: if ret['family'] == AF_INET6: ret[f'{key}_len'] = 128 elif ret['family'] == AF_INET: ret[f'{key}_len'] = 32 return ret def set_dst(self, context, value): if value in ('', 'default'): return {'dst': ''} elif value in ('0', '0.0.0.0'): return {'dst': '', 'family': AF_INET} elif value in ('::', '::/0'): return {'dst': '', 'family': AF_INET6} return self.parse_target('dst', context, value) def set_src(self, context, value): return self.parse_target('src', context, value) def set_via(self, context, value): return self.parse_target('via', context, value) def set_newdst(self, context, value): return self.parse_target('newdst', context, value) def set_gateway(self, context, value): return self.parse_target('gateway', context, value) class Index: def set_index(self, context, value): if isinstance(value, (list, tuple)): value = value[0] return {'index': value} class IPRouteFilter: def __init__(self, command): self.command = command def policy(self, key): if self.command == 'add' and key in 
class LinkFieldFilter(Index, NLAKeyTransform):
    '''Normalizes link request fields (IFLA_* attributes).'''

    _nla_prefix = 'IFLA_'

    def _link(self, key, context, value):
        # a link may be given as a dict-like record; use its index
        if isinstance(value, dict):
            value = value['index']
        return {key: value}

    def set_vxlan_link(self, context, value):
        return self._link('vxlan_link', context, value)

    def set_link(self, context, value):
        return self._link('link', context, value)

    def set_master(self, context, value):
        return self._link('master', context, value)

    def set_address(self, context, value):
        '''Normalize a MAC address string to lowercase colon form.'''
        if isinstance(value, str):
            if not value.islower():
                value = value.lower()
            # convert Cisco dotted xxxx.xxxx.xxxx to xx:xx:xx:xx:xx:xx
            if len(value) == 14 and value[4] == value[9] == '.':
                octets = []
                for word in value.split('.'):
                    octets.extend((word[:2], word[2:]))
                value = ':'.join(octets)
        return {'address': value}

    def set_carrier(self, context, value):
        # filtered out: not forwarded into requests
        return {}

    def set_carrier_changes(self, context, value):
        # filtered out: not forwarded into requests
        return {}

    def set_info_slave_kind(self, context, value):
        # filtered out: not forwarded into requests
        return {}

    def set_mask(self, context, value):
        return {'change': value}

    def set_info_kind(self, context, value):
        return {'kind': value}
{'xdp': {'attrs': [('IFLA_XDP_FD', value)]}} def set_vf(self, context, value): return {'IFLA_VFINFO_LIST': self.get_vf(value)} def set_state(self, context, value): ret = {} if self.command == 'dump': return {'state': value} if value == 'up': ret['flags'] = context.get('flags', 0) or 0 | 1 ret['change'] = context.get('change', 0) or 0 | 1 return ret def set_arp(self, context, value): ret = {} if not value: ret['flags'] = context.get('flags', 0) or 0 | IFF_NOARP ret['change'] = context.get('change', 0) or 0 | IFF_NOARP return ret def set_noarp(self, context, value): ret = {} if value: ret['flags'] = context.get('flags', 0) or 0 | IFF_NOARP ret['change'] = context.get('change', 0) or 0 | IFF_NOARP return ret def finalize(self, context): # set interface type specific attributes self.kind = context.pop('kind', None) if self.kind is None: return # load specific NLA names self.specific = {} cls = ifinfmsg.ifinfo.data_map.get(self.kind, None) if cls is not None: prefix = cls.prefix or 'IFLA_' for nla, _ in cls.nla_map: self.specific[nla] = nla self.specific[nla[len(prefix) :].lower()] = nla if self.command == 'dump': context[('linkinfo', 'kind')] = self.kind for key, value in tuple(context.items()): if key in self.specific: context[('linkinfo', 'data', key)] = value try: del context[key] except KeyError: pass return # get common ifinfmsg NLAs self.common = [] for key, _ in ifinfmsg.nla_map: self.common.append(key) self.common.append(key[len(ifinfmsg.prefix) :].lower()) self.common.append('family') self.common.append('ifi_type') self.common.append('index') self.common.append('flags') self.common.append('change') for key in ('index', 'change', 'flags'): if key not in context: context[key] = 0 linkinfo = {'attrs': []} self.linkinfo = linkinfo['attrs'] self._info_data = None self._info_slave_data = None context['IFLA_LINKINFO'] = linkinfo self.linkinfo.append(['IFLA_INFO_KIND', self.kind]) # flush deferred NLAs for key, value in tuple(context.items()): if 
self.push_specific(key, value): try: del context[key] except KeyError: pass def push_specific(self, key, value): # FIXME: vlan hack if self.kind == 'vlan': if key == 'vlan_flags': if isinstance(value, (list, tuple)): if len(value) == 2 and all( (isinstance(x, int) for x in value) ): value = {'flags': value[0], 'mask': value[1]} else: ret = 0 for x in value: ret |= vlan_flags.get(x, 1) value = {'flags': ret, 'mask': ret} elif isinstance(value, int): value = {'flags': value, 'mask': value} elif isinstance(value, str): value = vlan_flags.get(value, 1) value = {'flags': value, 'mask': value} elif not isinstance(value, dict): raise ValueError() elif key in ('vlan_egress_qos', 'vlan_ingress_qos'): if isinstance(value, dict) and {'from', 'to'} == value.keys(): value = {'attrs': (('IFLA_VLAN_QOS_MAPPING', value),)} # the kind is known: lookup the NLA if key in self.specific: # FIXME: slave hack if self.kind.endswith('_slave'): self.info_slave_data.append((self.specific[key], value)) else: self.info_data.append((self.specific[key], value)) return True elif key == 'peer' and self.kind == 'veth': # FIXME: veth hack if isinstance(value, dict): attrs = [] for k, v in value.items(): attrs.append([ifinfmsg.name2nla(k), v]) else: attrs = [['IFLA_IFNAME', value]] nla = ['VETH_INFO_PEER', {'attrs': attrs}] self.info_data.append(nla) return True elif key == 'mode': # FIXME: ipvlan / tuntap / bond hack if self.kind == 'tuntap': nla = ['IFTUN_MODE', value] else: nla = ['IFLA_%s_MODE' % self.kind.upper(), value] self.info_data.append(nla) return True return False @property def info_data(self): if self._info_data is None: info_data = ('IFLA_INFO_DATA', {'attrs': []}) self._info_data = info_data[1]['attrs'] self.linkinfo.append(info_data) return self._info_data @property def info_slave_data(self): if self._info_slave_data is None: info_slave_data = ('IFLA_INFO_SLAVE_DATA', {'attrs': []}) self._info_slave_data = info_slave_data[1]['attrs'] self.linkinfo.append(info_slave_data) return 
self._info_slave_data def get_vf(self, spec): vflist = [] if not isinstance(spec, (list, tuple)): spec = (spec,) for vf in spec: vfcfg = [] # pop VF index vfid = vf.pop('vf') # mandatory # pop VLAN spec vlan = vf.pop('vlan', None) # optional if isinstance(vlan, int): vfcfg.append(('IFLA_VF_VLAN', {'vf': vfid, 'vlan': vlan})) elif isinstance(vlan, dict): vlan['vf'] = vfid vfcfg.append(('IFLA_VF_VLAN', vlan)) elif isinstance(vlan, (list, tuple)): vlist = [] for vspec in vlan: vspec['vf'] = vfid vlist.append(('IFLA_VF_VLAN_INFO', vspec)) vfcfg.append(('IFLA_VF_VLAN_LIST', {'attrs': vlist})) # pop rate spec rate = vf.pop('rate', None) # optional if rate is not None: rate['vf'] = vfid vfcfg.append(('IFLA_VF_RATE', rate)) # create simple VF attrs for attr in vf: vfcfg.append( ( ifinfmsg.vflist.vfinfo.name2nla(attr), {'vf': vfid, attr: vf[attr]}, ) ) vflist.append(('IFLA_VF_INFO', {'attrs': vfcfg})) return {'attrs': vflist} pyroute2-0.7.11/pyroute2/requests/main.py000066400000000000000000000037251455030217500203440ustar00rootroot00000000000000''' General request and RTNL object data filters. 
class RequestProcessor(dict):
    '''A dict that normalizes values through a pluggable field filter.

    Every assignment is routed through the filter's ``set_<key>``
    hooks, which may rename, expand or drop keys.  ``context`` is an
    external read-only overlay the filters consult via ``combined``.
    Assigning ``None`` silently drops the key.
    '''

    def __init__(self, field_filter=None, context=None, prime=None):
        self.field_filter = field_filter
        if isinstance(context, (dict, weakref.ProxyType)):
            self.context = context
        else:
            self.context = {}
        # filters see our own keys first, then the external context
        self.combined = ChainMap(self, self.context)
        if isinstance(prime, dict):
            self.update(prime)

    def __setitem__(self, key, value):
        # None values are silently dropped
        if value is None:
            return
        if key in self:
            del self[key]
        for out_key, out_value in self.filter(key, value).items():
            super(RequestProcessor, self).__setitem__(out_key, out_value)

    def filter(self, key, value):
        '''Run one key/value pair through the field filter.'''
        ff = self.field_filter
        if hasattr(ff, '_key_transform'):
            key = ff._key_transform(key)
        # a filter may restrict the allowed key set ...
        if hasattr(ff, '_allowed') and key not in ff._allowed:
            return {}
        # ... or veto keys per command via a policy hook
        if hasattr(ff, 'policy') and not ff.policy(key):
            return {}
        handler = getattr(ff, f'set_{key}', None)
        if handler is None:
            # no hook: pass the pair through unchanged
            return {key: value}
        return handler(self.combined, value)

    def update(self, prime):
        for key, value in tuple(prime.items()):
            self[key] = value

    def set_filter(self, field_filter):
        self.field_filter = field_filter
        return self

    def apply_filter(self, field_filter):
        # install the filter and re-run existing keys through it
        self.field_filter = field_filter
        self.update(self)
        return self

    def finalize(self, cmd_context=None):
        ff = self.field_filter
        if hasattr(ff, 'finalize_for_iproute'):
            # old interface
            ff.finalize_for_iproute(self.combined, cmd_context)
        if hasattr(ff, 'finalize'):
            # new interface
            ff.finalize(self.combined)
        return self
class NeighbourIPRouteFilter(IPRouteFilter):
    '''Command-level defaults for neighbour requests.'''

    def set_dst(self, context, value):
        # derive the address family from the address unless already set
        result = {'dst': value}
        if 'family' not in context:
            result['family'] = get_address_family(value)
        return result

    def finalize(self, context):
        '''Fill in defaults for write commands (not dump/get).'''
        if self.command in ('dump', 'get'):
            return
        if 'state' not in context:
            context['state'] = ndmsg.NUD_PERMANENT
        if 'family' not in context:
            context['family'] = AF_INET
value == AF_MPLS: return {'family': AF_MPLS, 'dst_len': 20, 'table': 254, 'type': 1} return {'family': value} def set_priority(self, context, value): ''' In the kernel: .. code-block:: c static int inet6_rtm_newroute(...) { ... if (cfg.fc_metric == 0) cfg.fc_metric = IP6_RT_PRIO_USER; ... } ''' if context.get('family') == AF_INET6 and value == 0: return {'priority': IP6_RT_PRIO_USER} return {'priority': value} def set_flags(self, context, value): if context.get('family') == AF_MPLS: return {} if isinstance(value, (list, tuple, str)): return {'flags': rtmsg.names2flags(value)} return {'flags': value} def set_encap(self, context, value): # FIXME: planned for the next refactoring cycle if isinstance(value, dict) and value.get('type') == 'mpls': na = [] target = None labels = value.get('labels', []) if isinstance(labels, (dict, int)): labels = [labels] if isinstance(labels, str): labels = labels.split('/') for label in labels: target = MPLSTarget(label) target['bos'] = 0 na.append(target) target['bos'] = 1 return {'encap_type': LWTUNNEL_ENCAP_MPLS, 'encap': na} return {'encap': value} def set_scope(self, context, value): if isinstance(value, str): return {'scope': rt_scope[value]} return {'scope': value} def set_proto(self, context, value): if isinstance(value, str): return {'proto': rt_proto[value]} return {'proto': value} def set_encap_type(self, context, value): if isinstance(value, str): return {'encap_type': encap_type[value]} return {'encap_type': value} def set_type(self, context, value): if isinstance(value, str): return {'type': rt_type[value]} return {'type': value} class RouteIPRouteFilter(IPRouteFilter): def set_metrics(self, context, value): if value and 'attrs' not in value: metrics = {'attrs': []} for name, metric in value.items(): rtax = rtmsg.metrics.name2nla(name) if metric is not None: metrics['attrs'].append([rtax, metric]) if metrics['attrs']: return {'metrics': metrics} return {} def set_multipath(self, context, value): if value: ret = [] for v in 
value: if 'attrs' in v: ret.append(v) continue nh = {'attrs': []} nh_fields = [x[0] for x in nh_header.fields] for name in nh_fields: nh[name] = v.get(name, 0) for name in v: if name in nh_fields or v[name] is None: continue if name == 'encap' and isinstance(v[name], dict): if ( v[name].get('type', None) is None or v[name].get('labels', None) is None ): continue nh['attrs'].append( [ 'RTA_ENCAP_TYPE', encap_types.get( v[name]['type'], v[name]['type'] ), ] ) nh['attrs'].append( ['RTA_ENCAP', self.encap_header(v[name])] ) elif name == 'newdst': nh['attrs'].append( ['RTA_NEWDST', self.mpls_rta(v[name])] ) else: rta = rtmsg.name2nla(name) nh['attrs'].append([rta, v[name]]) ret.append(nh) if ret: return {'multipath': ret} return {} def set_encap(self, context, value): if ( isinstance(value, (list, tuple)) and context.get('encap_type') == LWTUNNEL_ENCAP_MPLS ): return {'encap': {'attrs': [['MPLS_IPTUNNEL_DST', value]]}} elif isinstance(value, dict): # human-friendly form: # # 'encap': {'type': 'mpls', # 'labels': '200/300'} # # 'type' is mandatory if 'type' in value and 'labels' in value: return { 'encap_type': encap_types.get( value['type'], value['type'] ), 'encap': self.encap_header(value), } # human-friendly form: # # 'encap': {'type': 'seg6', # 'mode': 'encap' # 'segs': '2000::5,2000::6'} # # 'encap': {'type': 'seg6', # 'mode': 'inline' # 'segs': '2000::5,2000::6' # 'hmac': 1} # # 'encap': {'type': 'seg6', # 'mode': 'encap' # 'segs': '2000::5,2000::6' # 'hmac': 0xf} # # 'encap': {'type': 'seg6', # 'mode': 'inline' # 'segs': ['2000::5', '2000::6']} # # 'type', 'mode' and 'segs' are mandatory if 'type' in value and 'mode' in value and 'segs' in value: return { 'encap_type': encap_types.get( value['type'], value['type'] ), 'encap': self.encap_header(value), } elif 'type' in value and ( 'in' in value or 'out' in value or 'xmit' in value ): return { 'encap_type': encap_types.get( value['type'], value['type'] ), 'encap': self.encap_header(value), } # human-friendly form: 
# # 'encap': {'type': 'seg6local', # 'action': 'End'} # # 'encap': {'type': 'seg6local', # 'action': 'End.DT6', # 'table': '10'} # # 'encap': {'type': 'seg6local', # 'action': 'End.DT4', # 'vrf_table': 10} # # 'encap': {'type': 'seg6local', # 'action': 'End.DT46', # 'vrf_table': 10} # # 'encap': {'type': 'seg6local', # 'action': 'End.DX6', # 'nh6': '2000::5'} # # 'encap': {'type': 'seg6local', # 'action': 'End.B6' # 'srh': {'segs': '2000::5,2000::6', # 'hmac': 0xf}} # # 'type' and 'action' are mandatory elif 'type' in value and 'action' in value: return { 'encap_type': encap_types.get( value['type'], value['type'] ), 'encap': self.encap_header(value), } return {} def finalize(self, context): for key in context: if context[key] in ('', None): try: del context[key] except KeyError: pass def mpls_rta(self, value): # FIXME: planned for the next refactoring cycle ret = [] if not isinstance(value, (list, tuple, set)): value = (value,) for label in value: ret.append(MPLSTarget(label)) if ret: ret[-1]['bos'] = 1 return ret def encap_header(self, header): ''' Encap header transform. Format samples: {'type': 'mpls', 'labels': '200/300'} {'type': AF_MPLS, 'labels': (200, 300)} {'type': 'mpls', 'labels': 200} {'type': AF_MPLS, 'labels': [{'bos': 0, 'label': 200, 'ttl': 16}, {'bos': 1, 'label': 300, 'ttl': 16}]} ''' if isinstance(header['type'], int) or ( header['type'] in ('mpls', AF_MPLS, LWTUNNEL_ENCAP_MPLS) ): ret = [] override_bos = True labels = header['labels'] if isinstance(labels, str): labels = labels.split('/') if not isinstance(labels, (tuple, list, set)): labels = (labels,) for label in labels: if isinstance(label, dict): # dicts append intact override_bos = False ret.append(label) else: # otherwise construct label dict if isinstance(label, str): label = int(label) ret.append({'bos': 0, 'label': label}) # the last label becomes bottom-of-stack if override_bos: ret[-1]['bos'] = 1 return {'attrs': [['MPLS_IPTUNNEL_DST', ret]]} ''' Seg6 encap header transform. 
Format samples: {'type': 'seg6', 'mode': 'encap', 'segs': '2000::5,2000::6'} {'type': 'seg6', 'mode': 'encap' 'segs': '2000::5,2000::6', 'hmac': 1} ''' if header['type'] == 'seg6': # Init step ret = {} # Parse segs segs = header['segs'] # If they are in the form in_addr6,in_addr6 if isinstance(segs, str): # Create an array with the splitted values temp = segs.split(',') # Init segs segs = [] # Iterate over the values for seg in temp: # Discard empty string if seg != '': # Add seg to segs segs.append(seg) # Retrieve mode mode = header['mode'] # hmac is optional and contains the hmac key hmac = header.get('hmac', None) # Construct the new object ret = {'mode': mode, 'segs': segs} # If hmac is present convert to u32 if hmac: # Add to ret the hmac key ret['hmac'] = hmac & 0xFFFFFFFF # Done return the object return {'attrs': [['SEG6_IPTUNNEL_SRH', ret]]} ''' BPF encap header transform. Format samples: {'type': 'bpf', 'in': {'fd':4, 'name':'firewall'}} {'type': 'bpf', 'in' : {'fd':4, 'name':'firewall'}, 'out' : {'fd':5, 'name':'stats'}, 'xmit': {'fd':6, 'name':'vlan_push', 'headroom':4}} ''' if header['type'] == 'bpf': attrs = {} for key, value in header.items(): if key not in ['in', 'out', 'xmit']: continue obj = [ ['LWT_BPF_PROG_FD', value['fd']], ['LWT_BPF_PROG_NAME', value['name']], ] if key == 'in': attrs['LWT_BPF_IN'] = {'attrs': obj} elif key == 'out': attrs['LWT_BPF_OUT'] = {'attrs': obj} elif key == 'xmit': attrs['LWT_BPF_XMIT'] = {'attrs': obj} if 'headroom' in value: attrs['LWT_BPF_XMIT_HEADROOM'] = value['headroom'] return {'attrs': list(attrs.items())} ''' Seg6 encap header transform. 
Format samples: {'type': 'seg6local', 'action': 'End.DT6', 'table': '10'} {'type': 'seg6local', 'action': 'End.DT4', 'vrf_table': 10} {'type': 'seg6local', 'action': 'End.DT46', 'vrf_table': 10} {'type': 'seg6local', 'action': 'End.B6', 'table': '10' 'srh': {'segs': '2000::5,2000::6'}} ''' if header['type'] == 'seg6local': # Init step ret = {} table = None nh4 = None nh6 = None iif = None # Actually not used oif = None srh = {} segs = [] hmac = None prog_fd = None prog_name = None vrf_table = None # Parse segs if srh: segs = header['srh']['segs'] # If they are in the form in_addr6,in_addr6 if isinstance(segs, str): # Create an array with the splitted values temp = segs.split(',') # Init segs segs = [] # Iterate over the values for seg in temp: # Discard empty string if seg != '': # Add seg to segs segs.append(seg) # hmac is optional and contains the hmac key hmac = header.get('hmac', None) # Retrieve action action = header['action'] if action == 'End.X': # Retrieve nh6 nh6 = header['nh6'] elif action == 'End.T': # Retrieve table and convert to u32 table = header['table'] & 0xFFFFFFFF elif action == 'End.DX2': # Retrieve oif and convert to u32 oif = header['oif'] & 0xFFFFFFFF elif action == 'End.DX6': # Retrieve nh6 nh6 = header['nh6'] elif action == 'End.DX4': # Retrieve nh6 nh4 = header['nh4'] elif action == 'End.DT6': # Retrieve table table = header['table'] elif action == 'End.DT4': # Retrieve vrf_table vrf_table = header['vrf_table'] elif action == 'End.DT46': # Retrieve vrf_table vrf_table = header['vrf_table'] elif action == 'End.B6': # Parse segs segs = header['srh']['segs'] # If they are in the form in_addr6,in_addr6 if isinstance(segs, str): # Create an array with the splitted values temp = segs.split(',') # Init segs segs = [] # Iterate over the values for seg in temp: # Discard empty string if seg != '': # Add seg to segs segs.append(seg) # hmac is optional and contains the hmac key hmac = header.get('hmac', None) srh['segs'] = segs # If hmac is present 
convert to u32 if hmac: # Add to ret the hmac key srh['hmac'] = hmac & 0xFFFFFFFF srh['mode'] = 'inline' elif action == 'End.B6.Encaps': # Parse segs segs = header['srh']['segs'] # If they are in the form in_addr6,in_addr6 if isinstance(segs, str): # Create an array with the splitted values temp = segs.split(',') # Init segs segs = [] # Iterate over the values for seg in temp: # Discard empty string if seg != '': # Add seg to segs segs.append(seg) # hmac is optional and contains the hmac key hmac = header.get('hmac', None) srh['segs'] = segs if hmac: # Add to ret the hmac key srh['hmac'] = hmac & 0xFFFFFFFF srh['mode'] = 'encap' elif action == 'End.BPF': prog_fd = header['bpf']['fd'] prog_name = header['bpf']['name'] # Construct the new object ret = [] ret.append(['SEG6_LOCAL_ACTION', {'value': action}]) if table: # Add the table to ret ret.append(['SEG6_LOCAL_TABLE', {'value': table}]) if vrf_table: # Add the vrf_table to ret ret.append(['SEG6_LOCAL_VRFTABLE', {'value': vrf_table}]) if nh4: # Add the nh4 to ret ret.append(['SEG6_LOCAL_NH4', {'value': nh4}]) if nh6: # Add the nh6 to ret ret.append(['SEG6_LOCAL_NH6', {'value': nh6}]) if iif: # Add the iif to ret ret.append(['SEG6_LOCAL_IIF', {'value': iif}]) if oif: # Add the oif to ret ret.append(['SEG6_LOCAL_OIF', {'value': oif}]) if srh: # Add the srh to ret ret.append(['SEG6_LOCAL_SRH', srh]) if prog_fd and prog_name: # Add the prog_fd and prog_name to ret ret.append( [ 'SEG6_LOCAL_BPF', { 'attrs': [ ['LWT_BPF_PROG_FD', prog_fd], ['LWT_BPF_PROG_NAME', prog_name], ] }, ] ) # Done return the object return {'attrs': ret} pyroute2-0.7.11/pyroute2/requests/rule.py000066400000000000000000000023151455030217500203610ustar00rootroot00000000000000import socket from pyroute2.netlink.rtnl.fibmsg import FR_ACT_NAMES, fibmsg from .common import Index, IPRouteFilter, IPTargets, NLAKeyTransform DEFAULT_FRA_PRIORITY = 32000 class RuleFieldFilter(IPTargets, Index, NLAKeyTransform): _nla_prefix = fibmsg.prefix class 
RuleIPRouteFilter(IPRouteFilter): def set_action(self, context, value): if isinstance(value, str): return { 'action': FR_ACT_NAMES.get( value, FR_ACT_NAMES.get('FR_ACT_' + value.upper(), value) ) } return {'action': value} def finalize(self, context): if self.command != 'dump': if 'family' not in context: context['family'] = socket.AF_INET if 'priority' not in context: context['priority'] = DEFAULT_FRA_PRIORITY if 'table' in context and 'action' not in context: context['action'] = 'to_tbl' for key in ('src_len', 'dst_len'): if context.get(key, None) is None and key[:3] in context: context[key] = {socket.AF_INET6: 128, socket.AF_INET: 32}[ context['family'] ] pyroute2-0.7.11/pyroute2/wiset.py000066400000000000000000000462461455030217500167050ustar00rootroot00000000000000''' High level ipset support. When :doc:`ipset` is providing a direct netlink socket with low level functions, a :class:`WiSet` object is built to map ipset objects from kernel. It helps to add/remove entries, list content, etc. For example, adding an entry with :class:`pyroute2.ipset.IPSet` object implies to set a various number of parameters: .. doctest:: :skipif: True >>> ipset = IPSet() >>> ipset.add("foo", "1.2.3.4/24", etype="net") >>> ipset.close() When they are discovered by a :class:`WiSet`: .. doctest:: :skipif: True >>> wiset = load_ipset("foo") >>> wiset.add("1.2.3.4/24") Listing entries is also easier using :class:`WiSet`, since it parses for you netlink messages: .. 
doctest:: :skipif: True >>> wiset.content {'1.2.3.0/24': IPStats(packets=None, bytes=None, comment=None, timeout=None, skbmark=None, physdev=False)} ''' import errno import uuid from collections import namedtuple from inspect import getcallargs from socket import AF_INET from pyroute2.common import basestring from pyroute2.ipset import IPSet from pyroute2.netlink.exceptions import IPSetError from pyroute2.netlink.nfnetlink.ipset import ( IPSET_FLAG_IFACE_WILDCARD, IPSET_FLAG_PHYSDEV, IPSET_FLAG_WITH_COMMENT, IPSET_FLAG_WITH_COUNTERS, IPSET_FLAG_WITH_SKBINFO, ) from pyroute2.netlink.nfnetlink.nfctsocket import IP_PROTOCOLS # Debug variable to detect netlink socket leaks COUNT = {"count": 0} def need_ipset_socket(fun): """Decorator to create netlink socket if needed. In many of our helpers, we need to open a netlink socket. This can be expensive for someone using many times the functions: instead to have only one socket and use several requests, we will open it again and again. This helper allow our functions to be flexible: the caller can pass an optional socket, or do nothing. In this last case, this decorator will open a socket for the caller (and close it after call) It also help to mix helpers. One helper can call another one: the socket will be opened only once. We just have to pass the ipset variable. Note that all functions using this helper *must* use ipset as variable name for the socket. 
""" def wrap(*args, **kwargs): callargs = getcallargs(fun, *args, **kwargs) if callargs["sock"] is None: # This variable is used only to debug leak in tests COUNT['count'] += 1 with IPSet() as sock: callargs["sock"] = sock # We must pop kwargs here, else the function will receive # a dict of dict if "kwargs" in callargs: callargs.update(callargs.pop("kwargs")) return fun(**callargs) # pylint:disable=star-args return fun(*args, **kwargs) return wrap class IPStats( namedtuple( "IPStats", [ "packets", "bytes", "comment", "timeout", "skbmark", "physdev", "wildcard", ], ) ): __slots__ = () def __new__( cls, packets, bytes, comment, timeout, skbmark, physdev=False, wildcard=False, ): return super(IPStats, cls).__new__( cls, packets, bytes, comment, timeout, skbmark, physdev=physdev, wildcard=wildcard, ) # pylint: disable=too-many-instance-attributes class WiSet(object): """Main high level ipset manipulation class. Every high level ipset operation should be possible with this class, you probably don't need other helpers of this module, except tools to load data from kernel (:func:`load_all_ipsets` and :func:`load_ipset`) For example, you can create and an entry in a ipset just with: .. doctest:: :skipif: True >>> with WiSet(name="mysuperipset") as myset: >>> myset.create() # add the ipset in the kernel >>> myset.add("198.51.100.1") # add one IP to the set Netlink sockets are opened by __enter__ and __exit__ function, so you don't have to manage it manually if you use the "with" keyword. If you want to manage it manually (for example for long operation in a daemon), you can do the following: .. doctest:: :skipif: True >>> myset = WiSet(name="mysuperipset") >>> myset.open_netlink() >>> # do stuff >>> myset.close_netlink() You can also don't initiate at all any netlink socket, this code will work: .. doctest:: :skipif: True >>> myset = WiSet(name="mysuperipset") >>> myset.create() >>> myset.destroy() But do it very carefully. 
In that case, a netlink socket will be opened in background for any operation. No socket will be leaked, but that can consume resources. You can also instantiate WiSet objects with :func:`load_all_ipsets` and :func:`load_ipset`: .. doctest:: :skipif: True >>> all_sets_dict = load_all_ipsets() >>> one_set = load_ipset(name="myset") Have a look on content variable if you need list of entries in the Set. """ # pylint: disable=too-many-arguments def __init__( self, name=None, attr_type='hash:ip', family=AF_INET, sock=None, timeout=None, counters=False, comment=False, hashsize=None, revision=None, skbinfo=False, ): self.name = name self.hashsize = hashsize self._attr_type = None self.entry_type = None self.attr_type = attr_type self.family = family self._content = None self.sock = sock self.timeout = timeout self.counters = counters self.comment = comment self.revision = revision self.index = None self.skbinfo = skbinfo def open_netlink(self): """ Open manually a netlink socket. You can use "with WiSet()" statement instead. 
""" if self.sock is None: self.sock = IPSet() def close_netlink(self): """Clone any opened netlink socket""" if self.sock is not None: self.sock.close() self.sock = None @property def attr_type(self): return self._attr_type @attr_type.setter def attr_type(self, value): self._attr_type = value self.entry_type = value.split(":", 1)[1] def __enter__(self): self.open_netlink() return self def __exit__(self, exc_type, exc_value, traceback): self.close_netlink() @classmethod def from_netlink(cls, ndmsg, content=False): """Create a ipset objects based on a parsed netlink message :param ndmsg: the netlink message to parse :param content: should we fill (and parse) entries info (can be slow on very large set) :type content: bool """ self = cls() self.attr_type = ndmsg.get_attr("IPSET_ATTR_TYPENAME") self.name = ndmsg.get_attr("IPSET_ATTR_SETNAME") self.hashsize = ndmsg.get_attr("IPSET_ATTR_HASHSIZE") self.family = ndmsg.get_attr("IPSET_ATTR_FAMILY") self.revision = ndmsg.get_attr("IPSET_ATTR_REVISION") self.index = ndmsg.get_attr("IPSET_ATTR_INDEX") data = ndmsg.get_attr("IPSET_ATTR_DATA") self.timeout = data.get_attr("IPSET_ATTR_TIMEOUT") flags = data.get_attr("IPSET_ATTR_CADT_FLAGS") if flags is not None: self.counters = bool(flags & IPSET_FLAG_WITH_COUNTERS) self.comment = bool(flags & IPSET_FLAG_WITH_COMMENT) self.skbinfo = bool(flags & IPSET_FLAG_WITH_SKBINFO) if content: self.update_dict_content(ndmsg) return self def update_dict_content(self, ndmsg): """Update a dictionary statistics with values sent in netlink message :param ndmsg: the netlink message :type ndmsg: netlink message """ family = "IPSET_ATTR_IPADDR_IPV4" ip_attr = "IPSET_ATTR_IP_FROM" if self._content is None: self._content = {} timeout = None entries = ndmsg.get_attr("IPSET_ATTR_ADT").get_attrs("IPSET_ATTR_DATA") for entry in entries: key = "" for parse_type in self.entry_type.split(","): if parse_type == "ip": ip = entry.get_attr(ip_attr).get_attr(family) key += ip elif parse_type == "net": ip = 
entry.get_attr(ip_attr).get_attr(family) key += ip cidr = entry.get_attr("IPSET_ATTR_CIDR") if cidr is not None: key += "/{0}".format(cidr) elif parse_type == "iface": key += entry.get_attr("IPSET_ATTR_IFACE") elif parse_type == "set": key += entry.get_attr("IPSET_ATTR_NAME") elif parse_type == "mark": key += str(hex(entry.get_attr("IPSET_ATTR_MARK"))) elif parse_type == "port": proto = entry.get_attr('IPSET_ATTR_PROTO') if proto is not None: proto = IP_PROTOCOLS.get(proto, str(proto)).lower() key += '{proto}:'.format(proto=proto) key += str(entry.get_attr("IPSET_ATTR_PORT_FROM")) elif parse_type == "mac": key += entry.get_attr("IPSET_ATTR_ETHER") key += "," key = key.strip(",") if self.timeout is not None: timeout = entry.get_attr("IPSET_ATTR_TIMEOUT") skbmark = entry.get_attr("IPSET_ATTR_SKBMARK") if skbmark is not None: # Convert integer to hex for mark/mask # Only display mask if != 0xffffffff if skbmark[1] != (2**32 - 1): skbmark = "/".join([str(hex(mark)) for mark in skbmark]) else: skbmark = str(hex(skbmark[0])) entry_flag_parsed = {"physdev": False} flags = entry.get_attr("IPSET_ATTR_CADT_FLAGS") if flags is not None: entry_flag_parsed["physdev"] = bool(flags & IPSET_FLAG_PHYSDEV) entry_flag_parsed["wildcard"] = bool( flags & IPSET_FLAG_IFACE_WILDCARD ) value = IPStats( packets=entry.get_attr("IPSET_ATTR_PACKETS"), bytes=entry.get_attr("IPSET_ATTR_BYTES"), comment=entry.get_attr("IPSET_ATTR_COMMENT"), skbmark=skbmark, timeout=timeout, **entry_flag_parsed ) self._content[key] = value def create(self, **kwargs): """Insert this Set in the kernel Many options are set with python object attributes (like comments, counters, etc). For non-supported type, kwargs are provided. See :doc:`ipset` documentation for more information. 
""" create_ipset( self.name, stype=self.attr_type, family=self.family, sock=self.sock, timeout=self.timeout, comment=self.comment, counters=self.counters, hashsize=self.hashsize, skbinfo=self.skbinfo, **kwargs ) def destroy(self): """Destroy this ipset in the kernel list. It does not delete this python object (any content or other stored values are keep in memory). This function will fail if the ipset is still referenced (by example in iptables rules), you have been warned. """ destroy_ipset(self.name, sock=self.sock) def add(self, entry, **kwargs): """Add an entry in this ipset. If counters are enabled on the set, reset by default the value when we add the element. Without this reset, kernel sometimes store old values and can add very strange behavior on counters. """ if isinstance(entry, dict): kwargs.update(entry) entry = kwargs.pop("entry") if self.counters: kwargs["packets"] = kwargs.pop("packets", 0) kwargs["bytes"] = kwargs.pop("bytes", 0) skbmark = kwargs.get("skbmark") if isinstance(skbmark, basestring): skbmark = skbmark.split('/') mark = int(skbmark[0], 16) try: mask = int(skbmark[1], 16) except IndexError: mask = int("0xffffffff", 16) kwargs["skbmark"] = (mark, mask) add_ipset_entry( self.name, entry, etype=self.entry_type, sock=self.sock, **kwargs ) def delete(self, entry, **kwargs): """Delete/remove an entry in this ipset""" delete_ipset_entry( self.name, entry, etype=self.entry_type, sock=self.sock, **kwargs ) def test(self, entry, **kwargs): """Test if an entry is in this ipset""" return test_ipset_entry( self.name, entry, etype=self.entry_type, sock=self.sock, **kwargs ) def test_list(self, entries, **kwargs): """Test if a list of a set of entries is in this ipset Return a set of entries found in the IPSet """ return test_ipset_entries( self.name, entries, etype=self.entry_type, sock=self.sock, **kwargs ) def update_content(self): """Update the content dictionary with values from kernel""" self._content = {} update_wiset_content(self, 
sock=self.sock) def flush(self): """Flush entries of the ipset""" flush_ipset(self.name, sock=self.sock) @property def content(self): """Dictionary of entries in the set. Keys are IP addresses (as string), values are IPStats tuples. """ if self._content is None: self.update_content() return self._content def insert_list(self, entries): """Just a small helper to reduce the number of loops in main code.""" for entry in entries: self.add(entry) def replace_entries(self, new_list): """Replace the content of an ipset with a new list of entries. This operation is like a flush() and adding all entries one by one. But this call is atomic: it creates a temporary ipset and swap the content. :param new_list: list of entries to add :type new_list: list or :py:class:`set` of basestring or of keyword arguments dict """ temp_name = str(uuid.uuid4())[0:8] # Get a copy of ourself temp = load_ipset(self.name, sock=self.sock) temp.name = temp_name temp.sock = self.sock temp.create() temp.insert_list(new_list) swap_ipsets(self.name, temp_name, sock=self.sock) temp.destroy() @need_ipset_socket def create_ipset( name, stype=None, family=AF_INET, exclusive=False, sock=None, **kwargs ): """Create an ipset.""" sock.create( name, stype=stype, family=family, exclusive=exclusive, **kwargs ) @need_ipset_socket def load_all_ipsets(content=False, sock=None, inherit_sock=False, prefix=None): """List all ipset as WiSet objects. Get full ipset data from kernel and parse it in WiSet objects. Result is a dictionary with ipset names as keys, and WiSet objects as values. 
:param content: parse the list of entries and fill it in WiSet content dictionary :type content: bool :param inherit_sock: use the netlink sock passed in ipset arg to fill WiSets sock :type inherit_sock: bool :param prefix: filter out all ipset with a name not beginning by this prefix :type prefix: str or None """ res = {} for myset in sock.list(): # on large sets, we can receive data in several messages name = myset.get_attr("IPSET_ATTR_SETNAME") if prefix is not None and not name.startswith(prefix): continue if name not in res: wiset = WiSet.from_netlink(myset, content=content) if inherit_sock: wiset.sock = sock res[wiset.name] = wiset elif content: res[wiset.name].update_dict_content(myset) return res @need_ipset_socket def load_ipset(name, content=False, sock=None, inherit_sock=False): """Get one ipset as WiSet object Helper to get current WiSet object. More efficient that :func:`load_all_ipsets` since the kernel does the filtering itself. Return None if the ipset does not exist :param name: name of the ipset :type name: str :param content: parse or not content and statistics on entries :type content: bool :param inherit_sock: use the netlink sock passed in ipset arg to fill WiSet sock :type inherit_sock: bool """ res = None try: messages = sock.list(name=name) except IPSetError as e: if e.code == errno.ENOENT: return res raise for msg in messages: if res is None: res = WiSet.from_netlink(msg, content=content) if inherit_sock: res.sock = sock elif content: res.update_dict_content(msg) return res @need_ipset_socket def update_wiset_content(wiset, sock=None): """Update content/statistics of a wiset. You should never call yourself this function. It is only a helper to use the :func:`need_ipset_socket` decorator out of WiSet object. 
""" for msg in sock.list(name=wiset.name): wiset.update_dict_content(msg) @need_ipset_socket def destroy_ipset(name, sock=None): """Remove an ipset in the kernel.""" sock.destroy(name) @need_ipset_socket def add_ipset_entry(name, entry, sock=None, **kwargs): """Add an entry""" sock.add(name, entry, **kwargs) @need_ipset_socket def delete_ipset_entry(name, entry, sock=None, **kwargs): """Remove one entry""" sock.delete(name, entry, **kwargs) @need_ipset_socket def test_ipset_exist(name, sock=None): """Test if the given ipset exist""" try: sock.headers(name) return True except IPSetError as e: if e.code == errno.ENOENT: return False raise @need_ipset_socket def test_ipset_entry(name, entry, sock=None, **kwargs): """Test if an entry is in one ipset""" return sock.test(name, entry, **kwargs) @need_ipset_socket def test_ipset_entries(name, entries, sock=None, **kwargs): """Test a list (or a set) of entries.""" res = set() for entry in entries: if sock.test(name, entry, **kwargs): res.add(entry) return res @need_ipset_socket def flush_ipset(name, sock=None): """Flush all ipset content""" sock.flush(name) @need_ipset_socket def swap_ipsets(name_a, name_b, sock=None): """Swap the content of ipset a and b. ipsets must have compatible content. 
""" sock.swap(name_a, name_b) def get_ipset_socket(**kwargs): """Get a socket that one can pass to several WiSet objects""" return IPSet(**kwargs) pyroute2-0.7.11/requirements.dev.txt000066400000000000000000000001041455030217500174270ustar00rootroot00000000000000build twine flake8 netaddr pytest pytest-cov pre-commit findimports pyroute2-0.7.11/requirements.docs.txt000066400000000000000000000001131455030217500176010ustar00rootroot00000000000000build twine aafigure sphinx==5.1.1 sphinx-code-include pre-commit docutils pyroute2-0.7.11/requirements.repo.txt000066400000000000000000000000261455030217500176210ustar00rootroot00000000000000nox pytest pytest-cov pyroute2-0.7.11/setup.cfg000066400000000000000000000031001455030217500152060ustar00rootroot00000000000000[metadata] name = pyroute2 version = file: VERSION description = Python Netlink library long_description = file: README.rst author = Peter Saveliev author_email = peter@svinota.eu long_description_content_type = text/x-rst url = https://github.com/svinota/pyroute2 license = GPL-2.0-or-later OR Apache-2.0 classifiers = License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+) License :: OSI Approved :: Apache Software License Programming Language :: Python Topic :: Software Development :: Libraries :: Python Modules Topic :: System :: Networking Topic :: System :: Systems Administration Operating System :: POSIX :: Linux Intended Audience :: Developers Intended Audience :: System Administrators Intended Audience :: Telecommunications Industry Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Development Status :: 4 - Beta [options] install_requires = win_inet_pton ; platform_system == "Windows" importlib-metadata ; python_version < "3.8" 
packages_dir = =pyroute2 packages = find: [options.entry_points] console_scripts = ss2 = pyroute2.netlink.diag.ss2:run [psutil] pyroute2-cli = pyroute2.ndb.cli:run pyroute2-dhcp-client = pyroute2.dhcp.client:run pyroute2-test-platform = pyroute2.config.test_platform:run pyroute2-0.7.11/setup.minimal.cfg000066400000000000000000000037461455030217500166530ustar00rootroot00000000000000[metadata] name = pyroute2.minimal version = file: VERSION description = Python Netlink library: minimal distribution long_description = file: README.minimal.rst author = Peter Saveliev author_email = peter@svinota.eu long_description_content_type = text/x-rst url = https://github.com/svinota/pyroute2 license = GPL-2.0-or-later OR Apache-2.0 classifiers = License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+) License :: OSI Approved :: Apache Software License Programming Language :: Python Topic :: Software Development :: Libraries :: Python Modules Topic :: System :: Networking Topic :: System :: Systems Administration Operating System :: POSIX :: Linux Intended Audience :: Developers Intended Audience :: System Administrators Intended Audience :: Telecommunications Industry Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Development Status :: 4 - Beta [options] install_requires = win_inet_pton ; platform_system == "Windows" importlib-metadata ; python_version < "3.8" packages_dir = =pyroute2 packages = pr2modules pyroute2 pyroute2.bsd pyroute2.bsd.pf_route pyroute2.bsd.rtmsocket pyroute2.config pyroute2.inotify pyroute2.iproute pyroute2.netlink pyroute2.netlink.devlink pyroute2.netlink.diag pyroute2.netlink.event pyroute2.netlink.generic pyroute2.netlink.ipq pyroute2.netlink.nfnetlink pyroute2.netlink.nl80211 
pyroute2.netlink.rtnl pyroute2.netlink.rtnl.ifinfmsg pyroute2.netlink.rtnl.ifinfmsg.plugins pyroute2.netlink.rtnl.tcmsg pyroute2.netlink.taskstats pyroute2.netlink.uevent pyroute2.netns pyroute2.protocols pyroute2.requests pyroute2-0.7.11/setup.py000066400000000000000000000000461455030217500151050ustar00rootroot00000000000000from setuptools import setup setup() pyroute2-0.7.11/stubs/000077500000000000000000000000001455030217500145335ustar00rootroot00000000000000pyroute2-0.7.11/stubs/.gitkeep000066400000000000000000000000001455030217500161520ustar00rootroot00000000000000pyroute2-0.7.11/tests/000077500000000000000000000000001455030217500145355ustar00rootroot00000000000000pyroute2-0.7.11/tests/README.md000066400000000000000000000025011455030217500160120ustar00rootroot00000000000000Test modules ============ * `test_limits` -- resource limits, fd leaks, etc * `test_linux` -- functional tests for Linux, may require root * `test_minimal` -- test pyroute2.minimal package * `test_neutron` -- integration with OpenStack Neutron * `test_repo` -- repository checks * `test_openbsd` -- functional tests for OpenBSD * `test_unit` -- unittests Functional tests under `test_linux` directory require root access to create, destroy and set up network objects -- routes, addresses, interfaces, etc. They use mainly dummy interfaces, but the main OS setup may be affected. Requirements ============ * nox * python >= 3.6 * `-r requirements.dev.txt` Run tests ========= All the tests should be started via corresponding nox session, see `noxfile.py`. 
Alternatively there is a `make` target left for those who prefer:: # using nox $ nox --list $ nox -e unit # run only unit tests $ nox -e unit -- '{"pdb": true}' # provide a session config $ nox # run all the tests # using make $ sudo make test # run the default sessions $ make nox session=unit # run only unit tests $ make nox session=openbsd # OpenBSD tests Get code coverage and run PDB on failures:: $ nox -e linux -- '{"pdb": true, "coverage": true}' pyroute2-0.7.11/tests/decoder/000077500000000000000000000000001455030217500161425ustar00rootroot00000000000000pyroute2-0.7.11/tests/decoder/README.md000066400000000000000000000025071455030217500174250ustar00rootroot00000000000000Description ----------- This is the data directory for the parser testing. The test module: `general/test_parser.py` Data format ----------- The data files can be in two formats. The strace hex dump: ``` \x00\x00\x00... ``` And the pyroute2 hex dump: ``` 00:00:00... ``` When the data file gets loaded, all the spaces, comments and new lines are ignored. There can be several packets in the same file, the parser deals with it. Comments should start with `#` or `;`: ``` # field one 00:00:00 # field two 00:00:00 ... ``` All the data after `.` is also ignored. It can be used to provide detailed descriptions of the file after the dump data: ``` \x00\x00\x00... . Here goes the data description ``` How to collect -------------- To collect the data, one can use either of two approaches. First, use strace: ``` $ strace -e trace=network -f -x -s 4096 netlink_utility ... sendto(3, "\x28\x00\x00\x00\x12\x00\x01\x03\x67\x9a..."... ) ``` Then just copy and paste to the data file strings from `sendto()` and `recvmsg()` calls. Or one can use packets parsed with pyroute2: ``` >>> from pyroute2 import IPRoute >>> from pyroute2.common import hexdump >>> ipr = IPRoute() >>> pkts = ipr.get_addr() >>> hexdump(pkts[0].raw) '4c:00:00:00:14:00:02:00:ff:00:00:00:...' 
``` pyroute2-0.7.11/tests/decoder/decoder.py000066400000000000000000000015451455030217500201260ustar00rootroot00000000000000#!/usr/bin/python ''' Usage:: ./decoder.py [module] [data_file] Sample:: ./decoder.py pyroute2.netlink.rtnl.tcmsg.tcmsg ./sample_packet_01.data ./decoder.py pyroute2.netlink.nl80211.nl80211cmd ./nl80211.data Module is a name within rtnl hierarchy. File should be a binary data in the escaped string format (see samples). ''' import sys from importlib import import_module from pprint import pprint from pyroute2.common import hexdump, load_dump mod = sys.argv[1] mod = mod.replace('/', '.') f = open(sys.argv[2], 'r') s = mod.split('.') package = '.'.join(s[:-1]) module = s[-1] m = import_module(package) met = getattr(m, module) data = load_dump(f) offset = 0 inbox = [] while offset < len(data): msg = met(data[offset:]) msg.decode() print(hexdump(msg.data)) pprint(msg) print('.' * 40) offset += msg['header']['length'] pyroute2-0.7.11/tests/mocklib/000077500000000000000000000000001455030217500161555ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/dateutil/000077500000000000000000000000001455030217500177705ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/dateutil/__init__.py000066400000000000000000000000001455030217500220670ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/dateutil/parser.py000066400000000000000000000003531455030217500216370ustar00rootroot00000000000000from unittest import mock class parse(mock.Mock): def __init__(self, *argv, **kwarg): super().__init__(*argv, **kwarg) self.timestamp = mock.Mock(name='timestamp') self.timestamp.return_value = 3313938316 
pyroute2-0.7.11/tests/mocklib/keystoneauth1/000077500000000000000000000000001455030217500207615ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneauth1/__init__.py000066400000000000000000000000001455030217500230600ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneauth1/identity/000077500000000000000000000000001455030217500226125ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneauth1/identity/__init__.py000066400000000000000000000000001455030217500247110ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneauth1/identity/v3.py000066400000000000000000000000621455030217500235120ustar00rootroot00000000000000from unittest import mock Password = mock.Mock() pyroute2-0.7.11/tests/mocklib/keystoneauth1/session/000077500000000000000000000000001455030217500224445ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneauth1/session/__init__.py000066400000000000000000000000611455030217500245520ustar00rootroot00000000000000from unittest import mock Session = mock.Mock() pyroute2-0.7.11/tests/mocklib/keystoneclient/000077500000000000000000000000001455030217500212155ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneclient/__init__.py000066400000000000000000000000001455030217500233140ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneclient/v3/000077500000000000000000000000001455030217500215455ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneclient/v3/__init__.py000066400000000000000000000000001455030217500236440ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/keystoneclient/v3/client.py000066400000000000000000000000601455030217500233710ustar00rootroot00000000000000from unittest import mock Client = mock.Mock() pyroute2-0.7.11/tests/mocklib/keystoneclient/v3/tokens.py000066400000000000000000000004141455030217500234210ustar00rootroot00000000000000from unittest import mock class TokenManager(mock.Mock): def __init__(self, *argv, **kwarg): 
super().__init__(*argv, **kwarg) self.validate = mock.Mock(name='validate') self.validate.return_value = {'expires_at': '3022.07.04 00:00 CEST'} pyroute2-0.7.11/tests/mocklib/pyrad/000077500000000000000000000000001455030217500172745ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/pyrad/__init__.py000066400000000000000000000000001455030217500213730ustar00rootroot00000000000000pyroute2-0.7.11/tests/mocklib/pyrad/client.py000066400000000000000000000005021455030217500211210ustar00rootroot00000000000000import collections from unittest import mock class Client(mock.MagicMock): def __init__(self, *argv, **kwarg): super().__init__(*argv, **kwarg) self.SendPacket = mock.Mock(name='SendPacket') self.SendPacket.return_value = collections.namedtuple( 'reply', ['code'] )(True) pyroute2-0.7.11/tests/mocklib/pyrad/dictionary.py000066400000000000000000000000641455030217500220130ustar00rootroot00000000000000from unittest import mock Dictionary = mock.Mock() pyroute2-0.7.11/tests/mocklib/pyrad/packet.py000066400000000000000000000000511455030217500211110ustar00rootroot00000000000000AccessRequest = True AccessAccept = True pyroute2-0.7.11/tests/test_integration/000077500000000000000000000000001455030217500201175ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_integration/test_kuryr.py000066400000000000000000000003271455030217500227060ustar00rootroot00000000000000import pyroute2 def test_exceptions(): assert issubclass(pyroute2.NetlinkError, Exception) assert issubclass(pyroute2.CreateException, Exception) assert issubclass(pyroute2.CommitException, Exception) pyroute2-0.7.11/tests/test_integration/test_lnst.py000066400000000000000000000060611455030217500225130ustar00rootroot00000000000000import select import pytest # NetlinkError, NetlinkDecodeError # IPRoute # IPRSocket from pyroute2 import IPRoute as IPRoute0 from pyroute2 import IPRSocket as IPRSocket0 from pyroute2 import NetlinkDecodeError as NetlinkDecodeError0 from pyroute2 import NetlinkError as NetlinkError0 
from pyroute2.iproute import IPRoute as IPRoute1 # nlmsg # flags from pyroute2.netlink import ( NLM_F_DUMP, NLM_F_MATCH, NLM_F_REQUEST, NLM_F_ROOT, NLMSG_DONE, NLMSG_ERROR, ) from pyroute2.netlink import NetlinkDecodeError as NetlinkDecodeError1 from pyroute2.netlink import NetlinkError as NetlinkError1 from pyroute2.netlink import nlmsg from pyroute2.netlink.rtnl import ( RTM_DELADDR, RTM_DELLINK, RTM_GETADDR, RTM_GETLINK, RTM_NEWADDR, RTM_NEWLINK, RTMGRP_IPV4_IFADDR, RTMGRP_IPV6_IFADDR, RTMGRP_LINK, ) from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.netlink.rtnl.iprsocket import IPRSocket as IPRSocket1 from pyroute2.netlink.rtnl.rtmsg import rtmsg def test_exceptions_compat(): with pytest.raises(NetlinkError1): raise NetlinkError1(code=99) with pytest.raises(NetlinkDecodeError1): raise NetlinkDecodeError1(exception=Exception()) def test_exceptions(): with pytest.raises(NetlinkError0): raise NetlinkError0(code=99) with pytest.raises(NetlinkDecodeError0): raise NetlinkDecodeError0(exception=Exception()) def test_constants(): assert issubclass(ifinfmsg, nlmsg) assert NLM_F_REQUEST == 1 assert NLM_F_ROOT == 0x100 assert NLM_F_MATCH == 0x200 assert NLM_F_DUMP == (NLM_F_ROOT | NLM_F_MATCH) assert NLMSG_DONE == 0x3 assert NLMSG_ERROR == 0x2 assert RTM_NEWLINK == 0x10 assert RTM_DELLINK == 0x11 assert RTM_GETLINK == 0x12 assert RTM_NEWADDR == 0x14 assert RTM_DELADDR == 0x15 assert RTM_GETADDR == 0x16 assert RTMGRP_LINK == 0x1 assert RTMGRP_IPV4_IFADDR == 0x10 assert RTMGRP_IPV6_IFADDR == 0x100 @pytest.mark.parametrize('socket_class', (IPRSocket0, IPRSocket1)) def test_basic(socket_class): ip = socket_class() ip.bind() # check the `socket` interface compliance poll = select.poll() poll.register(ip, select.POLLIN | select.POLLPRI) poll.unregister(ip) ip.close() @pytest.mark.parametrize('iproute_class', (IPRoute0, IPRoute1)) def test_iproute_message_classes(iproute_class): with iproute_class() as ip: 
assert {ifaddrmsg, ifinfmsg, rtmsg} < {type(x) for x in ip.dump()} @pytest.mark.parametrize('iproute_class', (IPRoute0, IPRoute1)) def test_iproute_message_subclass(iproute_class): with iproute_class() as ip: assert all([issubclass(type(x), nlmsg) for x in ip.dump()]) @pytest.mark.parametrize('iprsocket_class', (IPRSocket0, IPRSocket1)) def test_iprsocket_put(iprsocket_class): NL_GROUPS = RTMGRP_IPV4_IFADDR | RTMGRP_IPV6_IFADDR | RTMGRP_LINK with iprsocket_class() as iprs: iprs.bind(groups=NL_GROUPS) iprs.put(None, RTM_GETLINK, msg_flags=NLM_F_REQUEST | NLM_F_DUMP) pyroute2-0.7.11/tests/test_lab/000077500000000000000000000000001455030217500163325ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_lab/conftest.py000066400000000000000000000000341455030217500205260ustar00rootroot00000000000000pytest_plugins = "pytester" pyroute2-0.7.11/tests/test_lab/test_code_blocks.py000066400000000000000000000011071455030217500222110ustar00rootroot00000000000000import os import pathlib import sys import pytest def get_examples(*argv): root = pathlib.Path(os.environ['WORKSPACE']) examples = [ example for example in root.joinpath(*argv).iterdir() if example.is_dir() ] return { 'argnames': 'example', 'argvalues': examples, 'ids': [x.name for x in examples], } @pytest.mark.parametrize(**get_examples('examples', 'lab')) def test_block(example, pytester): os.chdir(example.as_posix()) result = pytester.run(sys.executable, 'check.py') assert result.ret == 0 pyroute2-0.7.11/tests/test_limits/000077500000000000000000000000001455030217500170755ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_limits/test_nl.py000066400000000000000000000027261455030217500211260ustar00rootroot00000000000000import resource import socket import pytest from pyroute2.netlink.nlsocket import NetlinkSocket def test_ports_auto(): # create two sockets s1 = NetlinkSocket() s2 = NetlinkSocket() # both bind() should succeed s1.bind() s2.bind() # check that ports are different assert s1.port != s2.port s1.close() 
s2.close() def test_ports_fail(): s1 = NetlinkSocket(port=0x10) s2 = NetlinkSocket(port=0x10) # check if ports are set assert s1.port == s2.port # bind the first socket, must succeed s1.bind() # bind the second, must fail exception = None with pytest.raises(socket.error) as exception: s2.bind() # socket.error / OSError(98, 'Address already in use') assert exception.value.errno == 98 s1.close() s2.close() def test_no_free_ports(): soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) try: resource.setrlimit(resource.RLIMIT_NOFILE, (4096, 4096)) except ValueError: pytest.skip('cannot set RLIMIT_NOFILE') # create and bind 1024 sockets ports = [NetlinkSocket() for x in range(1024)] for port in ports: port.bind() # create an extra socket fail = NetlinkSocket() # bind must fail with KeyError: no free ports available with pytest.raises(KeyError): fail.bind() # cleanup for port in ports: port.close() fail.close() resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard)) pyroute2-0.7.11/tests/test_limits/test_stress.py000066400000000000000000000050231455030217500220310ustar00rootroot00000000000000import atexit import errno import gc import getpass import os import resource import pytest from pyroute2 import NDB, IPRoute, NetNS from pyroute2.common import uifname RESPAWNS = 200 pytestmark = [ pytest.mark.skipif(getpass.getuser() != 'root', reason='no root access') ] @pytest.fixture def fds(): soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) try: resource.setrlimit( resource.RLIMIT_NOFILE, (min(soft, RESPAWNS // 2), min(hard, RESPAWNS // 2)), ) except ValueError: pytest.skip('cannot set RLIMIT_NOFILE') fds_before = os.listdir(f'/proc/{os.getpid()}/fd/') yield fds_before gc.collect() fds_after = os.listdir(f'/proc/{os.getpid()}/fd/') assert len(fds_after) <= len(fds_before) def test_respawn_iproute_sync(fds): for _ in range(RESPAWNS): with IPRoute() as i: i.bind() i.link_lookup(ifname='lo') def test_respawn_iproute_async(fds): for _ in range(RESPAWNS): with IPRoute() 
as i: i.bind(async_cache=True) i.link_lookup(ifname='lo') def test_respawn_ndb(fds): for _ in range(RESPAWNS): with NDB() as i: assert i.interfaces.count() > 0 assert i.addresses.count() > 0 assert i.routes.count() > 0 assert i.neighbours.count() > 0 def test_bridge_fd_leaks(fds): ifs = [] for _ in range(RESPAWNS): ifs.append(uifname()) with NDB() as ndb: for name in ifs: ndb.interfaces.create(ifname=name, kind='bridge').apply() with NDB() as ndb: for name in ifs: ndb.interfaces[name].remove().apply() def test_tuntap_fd_leaks(fds): ifs = [] for _ in range(RESPAWNS): ifs.append(uifname()) with NDB() as ndb: for name in ifs: ndb.interfaces.create( ifname=name, kind='tuntap', mode='tun' ).apply() with NDB() as ndb: for name in ifs: ndb.interfaces[name].remove().apply() def test_fd_leaks(fds): for i in range(RESPAWNS): nsid = 'leak_%i' % i ns = NetNS(nsid) ns.close() ns.remove() if hasattr(atexit, '_exithandlers'): assert ns.close not in atexit._exithandlers def test_fd_leaks_nonexistent_ns(fds): for i in range(RESPAWNS): nsid = 'non_existent_leak_%i' % i try: with NetNS(nsid, flags=0): pass except OSError as e: assert e.errno in (errno.ENOENT, errno.EPIPE) pyroute2-0.7.11/tests/test_linux/000077500000000000000000000000001455030217500167335ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/conftest.py000066400000000000000000000026641455030217500211420ustar00rootroot00000000000000import errno from uuid import uuid4 import pytest from pr2test.context_manager import NDBContextManager, SpecContextManager from utils import require_user from pyroute2 import config from pyroute2.ipset import IPSet, IPSetError from pyroute2.wiset import COUNT config.nlm_generator = True pytest_plugins = "pytester" @pytest.fixture def context(request, tmpdir): ''' This fixture is used to prepare the environment and to clean it up after each test. 
https://docs.pytest.org/en/stable/fixture.html ''' # test stage: # ctx = NDBContextManager(request, tmpdir) # setup yield ctx # execute ctx.teardown() # cleanup @pytest.fixture def spec(request, tmpdir): ''' A simple fixture with only some variables set ''' ctx = SpecContextManager(request, tmpdir) yield ctx ctx.teardown() @pytest.fixture def ipset(): require_user('root') sock = IPSet() yield sock sock.close() @pytest.fixture def ipset_name(ipset): name = str(uuid4())[:16] yield name try: ipset.destroy(name) except IPSetError as e: if e.code != errno.ENOENT: raise @pytest.fixture(params=(None, IPSet)) def wiset_sock(request): if request.param is None: yield None else: before_count = COUNT["count"] with IPSet() as sock: yield sock assert before_count == COUNT['count'] pyroute2-0.7.11/tests/test_linux/pr2test/000077500000000000000000000000001455030217500203365ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/pr2test/__init__.py000066400000000000000000000000001455030217500224350ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/pr2test/context_manager.py000066400000000000000000000265451455030217500241020ustar00rootroot00000000000000import errno import functools import getpass import itertools import logging import os import sys import uuid from collections import namedtuple from socket import AF_INET, AF_INET6 import pytest from utils import allocate_network, free_network from pyroute2 import netns from pyroute2.common import basestring, uifname from pyroute2.iproute.linux import IPRoute from pyroute2.ndb.main import NDB from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.generic.wireguard import WireGuard from pyroute2.nslink.nslink import NetNS def skip_if_not_implemented(func): @functools.wraps(func) def test_wrapper(context): try: return func(context) except (AttributeError, NotImplementedError): pytest.skip('feature not implemented') return test_wrapper def skip_if_not_supported(func): @functools.wraps(func) def 
test_wrapper(*argv, **kwarg): try: return func(*argv, **kwarg) except NetlinkError as e: if e.code in {errno.EOPNOTSUPP, errno.ENOENT}: pytest.skip('feature not supported by platform') raise except RuntimeError as e: pytest.skip(*e.args) return test_wrapper def make_test_matrix(targets=None, tables=None, dbs=None, types=None): targets = targets or ['local'] tables = tables or [None] types = types or [None] dbs = dbs or ['sqlite3/:memory:'] ret = [] skipdb = list(filter(lambda x: x, os.environ.get('SKIPDB', '').split(':'))) for db in dbs: db_provider, db_spec = db.split('/') if any(map(db_provider.startswith, skipdb)): continue if db_provider != 'sqlite3': db_spec = {'dbname': db_spec} user = os.environ.get('PGUSER') port = os.environ.get('PGPORT') host = os.environ.get('PGHOST') if user: db_spec['user'] = user if host: if not port: db_spec['port'] = 5432 db_spec['host'] = host if port: if not host: db_spec['host'] = 'localhost' db_spec['port'] = port for target in targets: for table in tables: for kind in types: param_id = f'db={db} ' f'target={target}' if table is not None: param_id += f' table={table}' if kind is not None: param_id += f' kind={kind}' param = pytest.param( ContextParams( db_provider, db_spec, target, table, kind ), id=param_id, ) ret.append(param) return ret ContextParams = namedtuple( 'ContextParams', ('db_provider', 'db_spec', 'target', 'table', 'kind') ) Interface = namedtuple('Interface', ('index', 'ifname')) Network = namedtuple('Network', ('family', 'network', 'netmask')) class SpecContextManager(object): ''' Prepare simple common variables ''' def __init__(self, request, tmpdir): self.uid = str(uuid.uuid4()) pid = os.getpid() self.log_base = f'{tmpdir}/ndb-{pid}' self.log_spec = (f'{self.log_base}-{self.uid}.log', logging.DEBUG) self.db_spec = f'{self.log_base}-{self.uid}.sql' def teardown(self): pass class NDBContextManager(object): ''' This class is used to manage fixture contexts. 
* create log spec * create NDB with specified parameters * provide methods to register interfaces * automatically remove registered interfaces ''' def __init__(self, request, tmpdir, **kwarg): self.spec = SpecContextManager(request, tmpdir) self.netns = None # # the cleanup registry self.interfaces = {} self.namespaces = {} if 'log' not in kwarg: kwarg['log'] = self.spec.log_spec if 'rtnl_debug' not in kwarg: kwarg['rtnl_debug'] = True target = 'local' self.table = None self.kind = None kwarg['db_provider'] = 'sqlite3' kwarg['db_spec'] = ':memory:' if hasattr(request, 'param'): if isinstance(request.param, ContextParams): target = request.param.target self.table = request.param.table self.kind = request.param.kind kwarg['db_provider'] = request.param.db_provider kwarg['db_spec'] = request.param.db_spec elif isinstance(request.param, (tuple, list)): target, self.table = request.param else: target = request.param if target == 'local': sources = [{'target': 'localhost', 'kind': 'local'}] elif target == 'netns': self.netns = self.new_nsname sources = [ {'target': 'localhost', 'kind': 'netns', 'netns': self.netns} ] else: sources = None if sources is not None: kwarg['sources'] = sources # # select the DB to work on db_name = os.environ.get('PYROUTE2_TEST_DBNAME') if isinstance(db_name, basestring) and len(db_name): kwarg['db_provider'] = 'psycopg2' kwarg['db_spec'] = {'dbname': db_name} # # this instance is to be tested, so do NOT use it # in utility methods self.db_provider = kwarg['db_provider'] self.ndb = NDB(**kwarg) self.ipr = self.ndb.sources['localhost'].nl.clone() self.wg = WireGuard() # # IPAM self.ipnets = [allocate_network() for _ in range(3)] self.ipranges = [[str(x) for x in net] for net in self.ipnets] self.ip6net = allocate_network(AF_INET6) self.ip6counter = itertools.count(1024) self.allocated_networks = {AF_INET: [], AF_INET6: []} # # RPDB objects for cleanup self.rules = [] # # default interface (if running as root) if getpass.getuser() == 'root': 
ifname = self.new_ifname index = self.ndb.interfaces.create( ifname=ifname, kind='dummy', state='up' ).commit()['index'] self.default_interface = Interface(index, ifname) else: self.default_interface = Interface(1, 'lo') def register(self, ifname=None, netns=None): ''' Register an interface in `self.interfaces`. If no interface name specified, create a random one. All the saved interfaces will be removed on `teardown()` ''' if ifname is None: ifname = uifname() self.interfaces[ifname] = netns return ifname def register_netns(self, netns=None): ''' Register netns in `self.namespaces`. If no netns name is specified, create a random one. All the save namespaces will be removed on `teardown()` ''' if netns is None: netns = str(uuid.uuid4()) self.namespaces[netns] = None return netns def register_rule(self, spec, netns=None): ''' Register IP rule for cleanup on `teardown()`. ''' self.rules.append((netns, spec)) return spec def register_network(self, family=AF_INET, network=None): ''' Register or allocate a network. All the allocated networks should be deallocated on `teardown()`. ''' if network is None: network = allocate_network(family) # regsiter for cleanup self.allocated_networks[family].append(network) # return a simple convenient named tuple return Network(family, network.network.format(), network.prefixlen) def get_ipaddr(self, r=0): ''' Returns an ip address from the specified range. ''' return str(self.ipranges[r].pop()) def get_ip6addr(self, r=0): ''' Returns an ip6 address from the specified range. 
''' return str(self.ip6net[next(self.ip6counter)]) @property def new_log(self, uid=None): uid = uid or str(uuid.uuid4()) return f'{self.spec.log_base}-{uid}.log' @property def new_ifname(self): ''' Returns a new unique ifname and registers it to be cleaned up on `self.teardown()` ''' return self.register() @property def new_ipaddr(self): ''' Returns a new ipaddr from the configured range ''' return self.get_ipaddr() @property def new_ip6addr(self): ''' Returns a new ip6addr from the configured range ''' return self.get_ip6addr() @property def new_ip4net(self): ''' Returns a new IPv4 network ''' return self.register_network(family=AF_INET) @property def new_ip6net(self): ''' Returns a new IPv6 network ''' return self.register_network(family=AF_INET6) @property def new_nsname(self): ''' Returns a new unique nsname and registers it to be removed on `self.teardown()` ''' return self.register_netns() def teardown(self): ''' 1. close the test NDB 2. remove the registered interfaces, ignore not existing ''' # save postmortem DB for SQLite3 if self.db_provider == 'sqlite3' and sys.version_info >= (3, 7): self.ndb.backup(f'{self.spec.uid}-post.db') self.ndb.close() self.ipr.close() self.wg.close() for ifname, nsname in self.interfaces.items(): try: ipr = None # # spawn ipr to remove the interface if nsname is not None: ipr = NetNS(nsname) else: ipr = IPRoute() # # lookup the interface index index = list(ipr.link_lookup(ifname=ifname)) if len(index): index = index[0] else: # # ignore not existing interfaces continue # # try to remove it ipr.link('del', index=index) except NetlinkError as e: # # ignore if removed (t.ex. 
by another process) if e.code != errno.ENODEV: raise finally: if ipr is not None: ipr.close() for nsname in self.namespaces: try: netns.remove(nsname) except FileNotFoundError: pass for nsname, rule in self.rules: try: ipr = None if nsname is not None: ipr = NetNS(nsname) else: ipr = IPRoute() ipr.rule('del', **rule) except NetlinkError as e: if e.code != errno.ENOENT: raise finally: if ipr is not None: ipr.close() for net in self.ipnets: free_network(net) for family, networks in self.allocated_networks.items(): for net in networks: free_network(net, family) pyroute2-0.7.11/tests/test_linux/pr2test/custom_link_kind/000077500000000000000000000000001455030217500236725ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/pr2test/custom_link_kind/__init__.py000066400000000000000000000000001455030217500257710ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/pr2test/custom_link_kind/foo.py000066400000000000000000000012311455030217500250240ustar00rootroot00000000000000from pyroute2.netlink import nla register_kind = 'vlan' class vlan(nla): prefix = 'IFLA_' nla_map = ( ('IFLA_FOO_UNSPEC', 'none'), ('IFLA_FOO_ID', 'uint16'), ('IFLA_FOO_FLAGS', 'vlan_flags'), ('IFLA_FOO_EGRESS_QOS', 'qos'), ('IFLA_FOO_INGRESS_QOS', 'qos'), ('IFLA_FOO_PROTOCOL', 'be16'), ) class vlan_flags(nla): fields = (('flags', 'I'), ('mask', 'I')) class qos(nla): nla_map = ( ('IFLA_VLAN_QOS_UNSPEC', 'none'), ('IFLA_VLAN_QOS_MAPPING', 'qos_mapping'), ) class qos_mapping(nla): fields = (('from', 'I'), ('to', 'I')) pyroute2-0.7.11/tests/test_linux/pr2test/marks.py000066400000000000000000000003441455030217500220260ustar00rootroot00000000000000import getpass import pytest def require_root(func=None): mark = pytest.mark.skipif( getpass.getuser() != 'root', reason='no root access' ) if func: return mark(func) else: return mark pyroute2-0.7.11/tests/test_linux/pr2test/tools.py000066400000000000000000000051431455030217500220530ustar00rootroot00000000000000from pyroute2.iproute.linux import 
IPRoute from pyroute2.nslink.nslink import NetNS def interface_exists(netns=None, *argv, **kwarg): ret = 0 ipr = None if netns is None: ipr = IPRoute() else: ipr = NetNS(netns) spec = {} spec.update(kwarg) ret = list(ipr.link_lookup(*argv, **spec)) ipr.close() return len(ret) >= 1 def address_exists(netns=None, **kwarg): ret = 0 ipr = None if netns is None: ipr = IPRoute() else: ipr = NetNS(netns) if 'match' in kwarg: nkw = kwarg['match'] else: nkw = dict(kwarg) for key in ('address', 'local'): if key in nkw: nkw[key] = nkw[key].split('/')[0] if 'ifname' in kwarg: links = list(ipr.link_lookup(ifname=kwarg['ifname'])) if links: nkw['index'] = links[0] nkw.pop('ifname') else: ipr.close() return 0 ret = list(ipr.addr('dump', match=nkw)) ipr.close() return len(ret) == 1 def rtnl_object_exists(api, netns, record_filter): ret = 0 ipr = None if netns is None: ipr = IPRoute() else: ipr = NetNS(netns) ret = list(getattr(ipr, api)('dump', **record_filter)) ipr.close() return len(ret) >= 1 def neighbour_exists(netns=None, **kwarg): return rtnl_object_exists('neigh', netns, kwarg) def route_exists(netns=None, **kwarg): return rtnl_object_exists('route', netns, kwarg) def rule_exists(netns=None, **kwarg): return rtnl_object_exists('rule', netns, kwarg) def fdb_record_exists(netns=None, **kwarg): return rtnl_object_exists('fdb', netns, kwarg) def qdisc_exists(netns=None, kind=None, **kwarg): if netns is None: ipr = IPRoute() else: ipr = NetNS(netns) opts = {} with ipr: if 'ifname' in kwarg: opts['index'] = ipr.link_lookup(ifname=kwarg.pop('ifname'))[0] ret = list(ipr.get_qdiscs(**opts)) if kind is not None: ret = [x for x in ret if x.get_attr('TCA_KIND') == kind] if kwarg: pre = ret ret = [] for qdisc in pre: options = qdisc.get_attr('TCA_OPTIONS') if 'attrs' in options: options = dict(options['attrs']) for opt in kwarg: if kwarg[opt] not in ( options.get(opt), options.get(qdisc.name2nla(opt)), ): break else: ret.append(qdisc) return ret 
pyroute2-0.7.11/tests/test_linux/test_api/000077500000000000000000000000001455030217500205435ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_api/__init__.py000066400000000000000000000000001455030217500226420ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_connector/000077500000000000000000000000001455030217500217645ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_connector/test_cn_proc.py000066400000000000000000000035041455030217500250220ustar00rootroot00000000000000import os import signal import subprocess import pytest from pr2test.marks import require_root from pyroute2 import ProcEventSocket from pyroute2.netlink.connector.cn_proc import ( PROC_EVENT_EXEC, PROC_EVENT_EXIT, PROC_EVENT_FORK, ) pytestmark = [require_root()] class ProcContext: def __init__(self, ps): self.ps = ps self.ps.bind() self.ps.control(listen=True) self.child = subprocess.Popen('true') self.child.wait() self.events = [] for _ in range(1000): if self.match( what=PROC_EVENT_EXIT, process_pid=self.child.pid, source=self.ps.get(), ): return def push(self, event): self.events.append(event) def match(self, what, source=None, **kwarg): if source: source = tuple(source) self.events.extend(source) for event in source or self.events: if event['what'] == what: for key, value in kwarg.items(): if event[key] != value: break else: return True return False @pytest.fixture def cn_proc_context(): with ProcEventSocket() as ps: yield ProcContext(ps) def test_event_fork(cn_proc_context): assert cn_proc_context.match( what=PROC_EVENT_FORK, parent_pid=os.getpid(), child_pid=cn_proc_context.child.pid, ) def test_event_exec(cn_proc_context): assert cn_proc_context.match( what=PROC_EVENT_EXEC, process_pid=cn_proc_context.child.pid ) def test_event_exit(cn_proc_context): assert cn_proc_context.match( what=PROC_EVENT_EXIT, process_pid=cn_proc_context.child.pid, exit_code=0, exit_signal=signal.SIGCHLD, ) 
pyroute2-0.7.11/tests/test_linux/test_conntrack.py000066400000000000000000000113701455030217500223300ustar00rootroot00000000000000import socket import subprocess import threading import time import pytest from pr2test.marks import require_root from pyroute2 import Conntrack, NFCTSocket, config from pyroute2.netlink.nfnetlink.nfctsocket import NFCTAttrTuple pytestmark = [ pytest.mark.skipif( int(config.uname[2][0]) < 5, reason='skip conntrack tests on kernels < 5 for the time being', ), require_root(), ] def server(address, port, env): ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ss.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) ss.bind((address, port)) ss.listen(1) conn, cadd = ss.accept() env['client'] = cadd conn.recv(16) conn.shutdown(socket.SHUT_RDWR) conn.close() ss.close() class Client: def __init__(self, address, port): self.ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.ss.connect((address, port)) def stop(self): self.ss.send(b'\x00' * 16) self.ss.shutdown(socket.SHUT_RDWR) self.ss.close() class BasicSetup: def __init__(self, request, tmpdir): # run server / client self.env = {} self.ct = Conntrack() self.nfct = NFCTSocket() if self.ct.count() == 0: self.ct.close() self.nfct.close() pytest.skip('conntrack modules are not supported') self.server = threading.Thread( target=server, args=('127.0.0.1', 5591, self.env) ) self.server.start() e = None for x in range(5): try: self.client = Client('127.0.0.1', 5591) time.sleep(1) break except Exception as exc: e = exc else: raise e def teardown(self): self.nfct.close() self.ct.close() self.client.stop() self.server.join() self.env = {} class CheckEntries: def add_tuple(self, saddr, daddr, proto, sport, dport): tuple_orig = NFCTAttrTuple( saddr=saddr, daddr=daddr, proto=proto, sport=sport, dport=dport ) self.tuples.append(tuple_orig) return tuple_orig def __init__(self, request, tmpdir): self.tuples = [] self.COUNT_CT = 20 self.ct = Conntrack() for sport in range(20000, 20000 + 
self.COUNT_CT): tuple_orig = self.add_tuple( saddr='192.168.122.1', daddr='192.168.122.67', proto=socket.IPPROTO_TCP, sport=sport, dport=5599, ) self.ct.entry( 'add', timeout=60, tuple_orig=tuple_orig, tuple_reply=tuple_orig.reverse(), ) def teardown(self): for tuple_orig in self.tuples: self.ct.entry('del', tuple_orig=tuple_orig) self.ct.close() @pytest.fixture def ct_basic(request, tmpdir): ctx = BasicSetup(request, tmpdir) yield ctx ctx.teardown() @pytest.fixture def ct_inject(request, tmpdir): ctx = CheckEntries(request, tmpdir) yield ctx ctx.teardown() def test_stat(ct_basic): stat = ct_basic.ct.stat() cpus = [ x for x in ( subprocess.check_output('cat /proc/cpuinfo', shell=True).split( b'\n' ) ) if x.startswith(b'processor') ] assert len(stat) == len(cpus) def test_count_dump(ct_basic): # These values should be pretty the same, but the call is not atomic # so some sessions may end or begin that time. assert len(list(ct_basic.ct.dump())) > 0 assert ct_basic.ct.count() > 0 def test_nfct_dump(ct_basic): # "grep" our client / server connection from the dump for connection in ct_basic.nfct.dump(): addr = ( connection.get_attr('CTA_TUPLE_ORIG') .get_attr('CTA_TUPLE_IP') .get_attr('CTA_IP_V4_SRC') ) port = ( connection.get_attr('CTA_TUPLE_ORIG') .get_attr('CTA_TUPLE_PROTO') .get_attr('CTA_PROTO_SRC_PORT') ) if ct_basic.env['client'] == (addr, port): break else: raise Exception('connection not found') def test_ct_dump(ct_inject): tuple_match = NFCTAttrTuple(saddr='192.168.122.1', daddr='192.168.122.67') count_found = 0 tuple_filter = tuple_match for entry in ct_inject.ct.dump_entries(tuple_orig=tuple_match): count_found += 1 assert count_found == ct_inject.COUNT_CT count_found = 0 tuple_filter = NFCTAttrTuple(proto=socket.IPPROTO_TCP) for entry in ct_inject.ct.dump_entries(tuple_orig=tuple_filter): if tuple_match == entry.tuple_orig: count_found += 1 assert count_found == ct_inject.COUNT_CT 
pyroute2-0.7.11/tests/test_linux/test_devlink.py000066400000000000000000000004271455030217500220030ustar00rootroot00000000000000from pr2test.context_manager import skip_if_not_supported from pyroute2 import DL @skip_if_not_supported def test_list(): with DL() as dl: dls = dl.get_dump() if not dls: raise RuntimeError('no devlink devices found') assert dl.list() pyroute2-0.7.11/tests/test_linux/test_diag.py000066400000000000000000000012171455030217500212510ustar00rootroot00000000000000from socket import AF_UNIX from pr2test.marks import require_root from pyroute2 import DiagSocket pytestmark = [require_root()] def test_basic(): sstats_set = set() pstats_set = set() sstats = None fd = None with DiagSocket() as ds: ds.bind() sstats = ds.get_sock_stats(family=AF_UNIX) for s in sstats: sstats_set.add(s['udiag_ino']) with open('/proc/net/unix') as fd: for line in fd.readlines(): line = line.split() try: pstats_set.add(int(line[6])) except ValueError: pass assert sstats_set == pstats_set pyroute2-0.7.11/tests/test_linux/test_dquot/000077500000000000000000000000001455030217500211265ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_dquot/dquot.img.gz000066400000000000000000000035071455030217500234040ustar00rootroot00000000000000‹¨ŸYdquot.imgíŨMoEāŨĩë$Mځ_Ą…BBĩĶĀri $@âķ’S”ĻÛ/Ĩvˆm'Ō ‰#'*$$Ē åp@Š„¸åD•P9páf×vä¸4BÛ´~éõØŗë]īØīhŧã(úÕRvQŒĸ'CņS!ŠBw¯°§Ųzé'cĶ΍M—ĸ••7¯Äųzoœ›îŦÚšßp{ģO‡HB„ÍFŖ_˙üņą}ĪũųŅŨĨŗ;Μ˙Ą÷qTŽ×NĨ•…4mT^[ŦLįõĘÂ{‹ĩf#Ŧ4Ōz¸yĒÚØčh ë.}ņÃÁķĮ†^~æƒŌŗ_Ŋzį[gw¤ŊŦû8ūOņ5ę_÷Ōc (…1˜į˙BČ ĸF€ÛĖJpā÷Mmciw]nĸĢû ŋ˛?›˙ęčüv(ĘįŗšÅb;ZKŠųÜa&Üc¨Æu°t:\L‹Wŋūã|Nl3Õŧ[ŪÅŦ˙™ˆĸ—>í}ū“5}ÎŊ!î qˆBŒ…x0Äî¨õ‰‡B<boˆGūÅūŋ8Ũ*Ÿ˙llē×_§˙‹7yŒžmŧ?~Íūw9ŸōŨÛŗ˙åB˛:ŧ=ÄcÉß˙ņô•Ëãëå_hĐĄĐī~ŽŖŪÕuüûBL†(‡¨´ž˛hÔúÜĘFfž|áûņ›Øū×i˙ą˙CëėØz˛īŽDqR^Ŋž$årë7ŧīŠļ'ķĩzã‰ŖĩfõH툲īá“ėᏺåf=],WĶwĸĄū›Âp<ûvŗÖŽĪë.Fģëōu­c¸'˙˙(´ōč~ōä? ˙ųČ@ūō˙€üä? 
˙[FĄŨlĶĐg’ËYŪg˙1øË™áŪĨĨKY÷˙—iļŧžÎŸ¨6ßÕl°…5ĢsĩęŅÕôČLsĒvød:טYœjVįg§ķĄ˛1UĪūB5ēc÷Å_ŗrij(<įÛÛ0€Ûėô§îS%Í}ÅH6Ē{ęīīˇ\hõ!…ŽÛ{ŧŠ€[6īūāü?ô/Cuč_Î˙C˙2Æ’žq@įûĢįķŽ]˙ģKãĶQ´öüÜŊū 6…[)īīĪ2ÛŽķöw„‰â¤Ę$ŋž$årVæõņD(GÛ×÷‡rWˆ q<éŠøGūĨ+‹pyroute2-0.7.11/tests/test_linux/test_dquot/test_dquot.py000066400000000000000000000042371455030217500237010ustar00rootroot00000000000000import os import subprocess import sys import pytest from pr2test.marks import require_root from pyroute2 import DQuotSocket pytestmark = [ pytest.mark.skipif( sys.version_info < (3, 7), reason='the test module requires Python > 3.6', ), require_root(), ] class DQuotContextManager: def __init__(self): self.root = 'test_linux/test_dquot' self.gz_image = f'{self.root}/dquot.img.gz' self.image = f'{self.root}/dquot.img' self.loop = None self.mnt = f'{self.root}/mnt' self.ds = DQuotSocket() def setup(self): self.run(f'gunzip -c {self.gz_image} >{self.image}', shell=True) self.loop = self.run(['losetup', '-f']).stdout.strip().decode('utf-8') st_rdev = os.stat(self.loop).st_rdev self.major = os.major(st_rdev) self.minor = os.minor(st_rdev) self.run(['losetup', self.loop, self.image]) self.run(['mkdir', '-p', self.mnt]) self.run(['mount', self.loop, self.mnt, '-o', 'quota']) self.run(['quotacheck', self.mnt]) self.run(['quotaon', self.mnt]) def run(self, cmd, shell=False, check=True, capture_output=True): return subprocess.run( cmd, shell=shell, check=check, capture_output=capture_output ) def teardown(self): self.ds.close() self.run(['quotaoff', self.mnt], check=False) self.run(['umount', self.mnt], check=False) self.run(['losetup', '-d', self.loop], check=False) os.unlink(self.image) def __enter__(self): self.setup() return self def __exit__(self, exc_type, exc_value, traceback): self.teardown() def get_one_msg(self): for msg in self.ds.get(): return msg def remove_file(self, fname): os.unlink(f'{self.mnt}/{fname}') @pytest.fixture def mnt(): with DQuotContextManager() as cm: yield cm def test_basic(mnt): 
mnt.remove_file('test/0') msg = mnt.get_one_msg() assert msg.get_attr('QUOTA_NL_A_EXCESS_ID') == os.getuid() assert msg.get_attr('QUOTA_NL_A_DEV_MAJOR') == mnt.major assert msg.get_attr('QUOTA_NL_A_DEV_MINOR') == mnt.minor pyroute2-0.7.11/tests/test_linux/test_ethtool.py000066400000000000000000000007201455030217500220210ustar00rootroot00000000000000import gc import os from pyroute2 import Ethtool def get_fds(): fd = os.open(f'/proc/{os.getpid()}/fd', os.O_RDONLY) try: return set(os.listdir(fd)) - {fd} finally: os.close(fd) def test_pipe_leak(): fds = get_fds() etht = Ethtool() etht.close() gc.collect() assert get_fds() == fds def test_context_manager(): fds = get_fds() with Ethtool(): pass gc.collect() assert get_fds() == fds pyroute2-0.7.11/tests/test_linux/test_generic/000077500000000000000000000000001455030217500214065ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_generic/__init__.py000066400000000000000000000000001455030217500235050ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_generic/test_basic.py000066400000000000000000000010431455030217500240760ustar00rootroot00000000000000import os from contextlib import ExitStack import pytest from pyroute2 import GenericNetlinkSocket, TaskStats from pyroute2.netlink import nlmsg def test_bind_first(): with ExitStack() as sockets: ts = sockets.enter_context(TaskStats()) gs = sockets.enter_context(GenericNetlinkSocket()) with pytest.raises(RuntimeError) as ets: ts.get_pid_stat(os.getpid()) with pytest.raises(RuntimeError) as egs: gs.nlm_request(nlmsg(), gs.prid) assert ets.value.args == egs.value.args pyroute2-0.7.11/tests/test_linux/test_generic/test_l2tp.py000066400000000000000000000070361455030217500237060ustar00rootroot00000000000000import errno import time import pytest from pr2test.marks import require_root from pyroute2 import L2tp, NetlinkError pytestmark = [require_root()] @pytest.fixture def l2ctx(context): try: context.l2tp = L2tp() except NetlinkError as e: if e.code == 
errno.ENOENT: pytest.skip('L2TP netlink API not available') raise context.local_ip = context.get_ipaddr(r=0) # <- 0.255 gateway = context.get_ipaddr(r=0) # <- 0.254 (gateway can not be 0.255) context.remote_ip = context.get_ipaddr(r=1) context.l2tpeth0 = context.new_ifname context.ndb.interfaces[context.default_interface.ifname].add_ip( f'{context.local_ip}/24' ).commit() context.ndb.routes.create( dst=str(context.ipnets[1].network), dst_len=24, gateway=gateway ).commit() yield context try: context.l2tp.delete_session(tunnel_id=2324, session_id=3435) except Exception: pass try: context.l2tp.delete_tunnel(tunnel_id=2324) except Exception: pass context.l2tp.close() @pytest.mark.xfail(reason='flaky test, only to collect failure logs') def test_complete(l2ctx): # 1. create tunnel l2ctx.l2tp.create_tunnel( tunnel_id=2324, peer_tunnel_id=2425, remote=l2ctx.remote_ip, local=l2ctx.local_ip, udp_dport=32000, udp_sport=32000, encap="udp", ) tunnel = l2ctx.l2tp.get_tunnel(tunnel_id=2324) assert tunnel.get_attr("L2TP_ATTR_CONN_ID") == 2324 assert tunnel.get_attr("L2TP_ATTR_PEER_CONN_ID") == 2425 assert tunnel.get_attr("L2TP_ATTR_IP_DADDR") == l2ctx.remote_ip assert tunnel.get_attr("L2TP_ATTR_IP_SADDR") == l2ctx.local_ip assert tunnel.get_attr("L2TP_ATTR_UDP_DPORT") == 32000 assert tunnel.get_attr("L2TP_ATTR_UDP_SPORT") == 32000 assert tunnel.get_attr("L2TP_ATTR_ENCAP_TYPE") == 0 # 0 == UDP assert tunnel.get_attr("L2TP_ATTR_DEBUG") == 0 # 2. create session l2ctx.l2tp.create_session( tunnel_id=2324, session_id=3435, peer_session_id=3536, ifname=l2ctx.l2tpeth0, ) session = l2ctx.l2tp.get_session(tunnel_id=2324, session_id=3435) assert session.get_attr("L2TP_ATTR_SESSION_ID") == 3435 assert session.get_attr("L2TP_ATTR_PEER_SESSION_ID") == 3536 assert session.get_attr("L2TP_ATTR_DEBUG") == 0 # setting up DEBUG -> 95, operation not supported; review the test # # 3. 
modify session # l2ctx.l2tp.modify_session(tunnel_id=2324, session_id=3435, debug=True) # session = l2ctx.l2tp.get_session(tunnel_id=2324, session_id=3435) # assert session[0].get_attr("L2TP_ATTR_DEBUG") == 1 # setting up DEBUG -> 95, operation not supported; review the test # # 4. modify tunnel # l2ctx.l2tp.modify_tunnel(tunnel_id=2324, debug=True) # tunnel = l2ctx.l2tp.get_tunnel(tunnel_id=2324) # assert tunnel[0].get_attr("L2TP_ATTR_DEBUG") == 1 # 5. destroy session l2ctx.l2tp.delete_session(tunnel_id=2324, session_id=3435) for _ in range(5): try: l2ctx.l2tp.get_session(tunnel_id=2324, session_id=3435) except NetlinkError: time.sleep(0.1) continue break else: raise Exception('could not remove L2TP session') # 6. destroy tunnel l2ctx.l2tp.delete_tunnel(tunnel_id=2324) for _ in range(5): try: l2ctx.l2tp.get_tunnel(tunnel_id=2324) except NetlinkError: time.sleep(0.1) continue break else: raise Exception('could not remove L2TP tunnel') pyroute2-0.7.11/tests/test_linux/test_generic/test_mptcp.py000066400000000000000000000027131455030217500241450ustar00rootroot00000000000000from pr2test.context_manager import skip_if_not_supported from pr2test.marks import require_root from pyroute2 import MPTCP pytestmark = [require_root()] def get_endpoints(mptcp): return dict( ( x.get_nested('MPTCP_PM_ATTR_ADDR', 'MPTCP_PM_ADDR_ATTR_ADDR4'), x.get_nested('MPTCP_PM_ATTR_ADDR', 'MPTCP_PM_ADDR_ATTR_ID'), ) for x in mptcp.endpoint('show') ) def get_limits(mptcp): return [ ( x.get_attr('MPTCP_PM_ATTR_SUBFLOWS'), x.get_attr('MPTCP_PM_ATTR_RCV_ADD_ADDRS'), ) for x in mptcp.limits('show') ][0] @skip_if_not_supported def test_enpoint_add_addr4(context): with MPTCP() as mptcp: ipaddrs = [context.new_ipaddr for _ in range(3)] for ipaddr in ipaddrs: mptcp.endpoint('add', addr=ipaddr) mapping = get_endpoints(mptcp) assert set(mapping) >= set(ipaddrs) for ipaddr in ipaddrs: mptcp.endpoint('del', addr=ipaddr, id=mapping[ipaddr]) assert not set(get_endpoints(mptcp)).intersection(set(ipaddrs)) 
@skip_if_not_supported def test_limits(context): with MPTCP() as mptcp: save_subflows, save_rcv_add = get_limits(mptcp) mptcp.limits('set', subflows=2, rcv_add_addrs=3) assert get_limits(mptcp) == (2, 3) mptcp.limits('set', subflows=save_subflows, rcv_add_addrs=save_rcv_add) assert get_limits(mptcp) == (save_subflows, save_rcv_add) pyroute2-0.7.11/tests/test_linux/test_generic/test_taskstats.py000066400000000000000000000010741455030217500250420ustar00rootroot00000000000000import os from pr2test.marks import require_root from pyroute2 import TaskStats pytestmark = [require_root()] def test_basic(): with TaskStats() as ts: ts.bind() ret = ts.get_pid_stat(os.getpid())[0] pid = ret.get_nested('TASKSTATS_TYPE_AGGR_PID', 'TASKSTATS_TYPE_PID') stats = ret.get_nested( 'TASKSTATS_TYPE_AGGR_PID', 'TASKSTATS_TYPE_STATS' ) assert stats['cpu_count'] > 0 assert stats['ac_pid'] == pid == os.getpid() assert stats['coremem'] > 0 assert stats['virtmem'] > 0 pyroute2-0.7.11/tests/test_linux/test_integration/000077500000000000000000000000001455030217500223155ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_integration/test_serialize.py000066400000000000000000000016121455030217500257150ustar00rootroot00000000000000import json import pickle def _check(context, loaded): names = set([x.get_attr('IFLA_IFNAME') for x in loaded]) indices = set([x['index'] for x in loaded]) assert names == {x.ifname for x in context.ndb.interfaces.dump()} assert indices == {x.index for x in context.ndb.interfaces.dump()} def test_pickle(context): links = tuple(context.ipr.link('dump')) saved = pickle.dumps(links) loaded = pickle.loads(saved) _check(context, loaded) def test_json(context): links = tuple(context.ipr.link('dump')) saved = json.dumps([x.dump() for x in links]) msg_type = type(links[0]) loaded = [msg_type().load(x) for x in json.loads(saved)] _check(context, loaded) def test_dump(context): links = tuple(context.ipr.link('dump')) saved = [(type(x), x.dump()) for x in links] loaded 
= [x[0]().load(x[1]) for x in saved] _check(context, loaded) pyroute2-0.7.11/tests/test_linux/test_ipdb.py000066400000000000000000000043461455030217500212710ustar00rootroot00000000000000import pytest from pr2test.marks import require_root from pyroute2 import IPDB pytestmark = [require_root()] @pytest.fixture def ictx(context): context.ipdb = IPDB(deprecation_warning=False) yield context context.ipdb.release() def test_interface_dummy(ictx): ifname = ictx.new_ifname ipaddr = ictx.new_ipaddr interface = ictx.ipdb.create(ifname=ifname, kind='dummy') interface.up() interface.add_ip(f'{ipaddr}/24') interface.commit() ictx.ndb.interfaces.wait(action='add', ifname=ifname, timeout=3) ictx.ndb.addresses.wait(action='add', address=ipaddr, timeout=3) assert ictx.ndb.interfaces[ifname]['state'] == 'up' assert ( ictx.ndb.addresses.wait(action='add', address=ipaddr, prefixlen=24)[ 'index' ] == interface['index'] ) interface.del_ip(f'{ipaddr}/24') interface.commit() ictx.ndb.addresses.wait( action='remove', address=ipaddr, prefixlen=24, timeout=3 ) def test_interface_veth(ictx): netns = ictx.new_nsname ictx.ndb.sources.add(netns=netns) v0 = ictx.new_ifname v1 = ictx.new_ifname veth0 = ictx.ipdb.create(ifname=v0, kind='veth', peer=v1) veth0.up() veth0.commit() veth1 = ictx.ipdb.interfaces[v1] veth1['net_ns_fd'] = netns veth1.commit() ictx.ndb.interfaces.wait(ifname=v0, target='localhost', timeout=3) ictx.ndb.interfaces.wait(ifname=v1, target=netns, timeout=3) def test_interface_bridge(ictx): ifname = ictx.new_ifname with ictx.ipdb.create(ifname=ifname, kind='bridge') as i: i.up() i['address'] = '00:11:22:33:44:55' i['br_stp_state'] = 1 i['br_forward_delay'] = 1000 i = ictx.ndb.interfaces.wait(ifname=ifname, timeout=3) assert i['state'] == 'up' assert i['address'] == '00:11:22:33:44:55' assert i['br_stp_state'] == 1 assert i['br_forward_delay'] == 1000 def test_route_basic(ictx): ipaddr = ictx.new_ipaddr gateway = ictx.new_ipaddr net = ictx.new_ip4net ifname = 
ictx.default_interface.ifname with ictx.ipdb.interfaces[ifname] as i: i.up() i.add_ip(f'{ipaddr}/24') ictx.ipdb.routes.add( gateway=gateway, dst=f'{net.network}/{net.netmask}' ).commit() pyroute2-0.7.11/tests/test_linux/test_ipr/000077500000000000000000000000001455030217500205645ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_ipr/__init__.py000066400000000000000000000000001455030217500226630ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_ipr/test_addr.py000066400000000000000000000123071455030217500231120ustar00rootroot00000000000000import errno import time import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pyroute2 import NetlinkError pytestmark = [require_root()] wait_timeout = 30 test_matrix = make_test_matrix(targets=['local', 'netns']) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_addr_add(context): index, ifname = context.default_interface ipaddr = context.new_ipaddr ipr = context.ipr ndb = context.ndb ipr.addr('add', index=index, address=ipaddr, prefixlen=24) ndb.addresses.wait(index=index, address=ipaddr, timeout=wait_timeout) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_addr_replace(context): index, ifname = context.default_interface ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr ipr = context.ipr ndb = context.ndb ipr.addr('add', index=index, address=ipaddr1, prefixlen=24) ndb.addresses.wait(index=index, address=ipaddr1, timeout=wait_timeout) ipr.addr('replace', index=index, address=ipaddr2, prefixlen=24) ndb.addresses.wait(index=index, address=ipaddr2, timeout=wait_timeout) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_addr_add_local(context): index, ifname = context.default_interface ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr ipr = context.ipr ndb = context.ndb 
ipr.addr('add', index=index, address=ipaddr1, local=ipaddr2, prefixlen=24) ndb.addresses.wait( index=index, address=ipaddr1, local=ipaddr2, timeout=wait_timeout ) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_addr_add_broadcast(context): index, ifname = context.default_interface ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr ipr = context.ipr ndb = context.ndb ipr.addr( 'add', index=index, address=ipaddr1, broadcast=ipaddr2, prefixlen=24 ) ndb.addresses.wait( index=index, address=ipaddr1, broadcast=ipaddr2, timeout=wait_timeout ) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_addr_add_broadcast_default(context): index, ifname = context.default_interface ipaddr = context.new_ipaddr ipr = context.ipr ndb = context.ndb ipr.addr('add', index=index, address=ipaddr, broadcast=True, prefixlen=24) interface = ndb.addresses.wait( index=index, address=ipaddr, timeout=wait_timeout ) assert interface['broadcast'] is not None @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_addr_filter(context): index, ifname = context.default_interface ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr ipaddrB = context.new_ipaddr ipr = context.ipr ndb = context.ndb ipr.addr( 'add', index=index, address=ipaddr1, broadcast=ipaddrB, prefixlen=24 ) ipr.addr( 'add', index=index, address=ipaddr2, broadcast=ipaddrB, prefixlen=24 ) ndb.addresses.wait(index=index, address=ipaddr1, timeout=wait_timeout) ndb.addresses.wait(index=index, address=ipaddr2, timeout=wait_timeout) assert ( len(tuple(ipr.get_addr(index=index))) >= 2 ) # remember link-local IPv6 assert len(tuple(ipr.get_addr(address=ipaddr1))) == 1 assert len(tuple(ipr.get_addr(broadcast=ipaddrB))) == 2 assert len(tuple(ipr.get_addr(match=lambda x: x['index'] == index))) >= 2 @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def 
test_addr_flush(context): index, ifname = context.default_interface addresses = [ context.new_ipaddr, context.new_ipaddr, context.new_ipaddr, context.new_ipaddr, ] ipr = context.ipr ndb = context.ndb counter = 5 for ipaddr in addresses: ipr.addr('add', index=index, address=ipaddr, prefixlen=24) for ipaddr in addresses: ndb.addresses.wait(index=index, address=ipaddr, timeout=wait_timeout) ipr.flush_addr(index=index) while counter: for ipaddr in tuple(addresses): if ipaddr not in ndb.addresses: addresses.remove(ipaddr) if not addresses: break time.sleep(1) counter -= 1 else: raise Exception() @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_fail_no_such_device(context): ifaddr = context.new_ipaddr index = sorted([i['index'] for i in context.ipr.get_links()])[-1] + 10 with pytest.raises(NetlinkError) as e: context.ipr.addr('add', index=index, address=ifaddr, prefixlen=24) assert e.value.code == errno.ENODEV @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_symbolic_flags(context): ipaddr = context.new_ipaddr index, ifname = context.default_interface context.ipr.link('set', index=index, state='up') context.ipr.addr('add', index=index, address=ipaddr, prefixlen=24) addr = [ x for x in context.ipr.get_addr() if x.get_attr('IFA_LOCAL') == ipaddr ][0] assert 'IFA_F_PERMANENT' in addr.flags2names(addr['flags']) pyroute2-0.7.11/tests/test_linux/test_ipr/test_basic.py000066400000000000000000000030121455030217500232520ustar00rootroot00000000000000import socket import pytest from pyroute2 import IPRoute from pyroute2.netlink import nlmsg def test_context_manager(): with IPRoute() as ipr: ipr.get_links() def test_multiple_instances(): ipr1 = IPRoute() ipr2 = IPRoute() ipr1.close() ipr2.close() def test_close(): ipr = IPRoute() ipr.get_links() ipr.close() # Shouldn't be able to use the socket after closing with pytest.raises(socket.error): ipr.get_links() def test_fileno(): ipr1 = IPRoute() ipr2 = IPRoute(fileno=ipr1.fileno()) 
ipr1.close() with pytest.raises(OSError) as e: ipr2.get_links() assert e.value.errno == 9 # sendto -> Bad file descriptor with pytest.raises(OSError) as e: ipr2.close() assert e.value.errno == 9 # close -> Bad file descriptor def test_get_policy_map(context): assert isinstance(context.ipr.get_policy_map(), dict) def test_register_policy(context): context.ipr.register_policy(100, nlmsg) context.ipr.register_policy({101: nlmsg}) context.ipr.register_policy(102, nlmsg) assert context.ipr.get_policy_map()[100] == nlmsg assert context.ipr.get_policy_map(101)[101] == nlmsg assert context.ipr.get_policy_map([102])[102] == nlmsg context.ipr.unregister_policy(100) context.ipr.unregister_policy([101]) context.ipr.unregister_policy({102: nlmsg}) assert 100 not in context.ipr.get_policy_map() assert 101 not in context.ipr.get_policy_map() assert 102 not in context.ipr.get_policy_map() pyroute2-0.7.11/tests/test_linux/test_ipr/test_callbacks.py000066400000000000000000000025261455030217500241210ustar00rootroot00000000000000from pr2test.marks import require_root pytestmark = [require_root()] def callback(msg, cb_context): cb_context['counter'] += 1 def test_callbacks_positive(context): ifname = context.new_ifname cb_context = {'counter': 0} interface = context.ndb.interfaces.create( ifname=ifname, kind='dummy' ).commit() context.ipr.register_callback( callback, lambda x: x.get('index', None) == interface['index'], (cb_context,), ) context.ipr.link('set', index=interface['index'], state='up') context.ipr.link('get', index=interface['index']) counter = cb_context['counter'] assert counter > 0 context.ipr.unregister_callback(callback) context.ipr.link('set', index=interface['index'], state='down') context.ipr.link('get', index=interface['index']) assert cb_context['counter'] == counter def test_callbacks_negative(context): ifname = context.new_ifname cb_context = {'counter': 0} interface = context.ndb.interfaces.create( ifname=ifname, kind='dummy' ).commit() 
context.ipr.register_callback( callback, lambda x: x.get('index', None) == -1, (cb_context,) ) context.ipr.link('set', index=interface['index'], state='up') context.ipr.link('get', index=interface['index']) counter = cb_context['counter'] assert counter == 0 pyroute2-0.7.11/tests/test_linux/test_ipr/test_compile.py000066400000000000000000000033211455030217500236240ustar00rootroot00000000000000import struct import pytest from pyroute2 import IPRoute from pyroute2.netlink import NLM_F_DUMP, NLM_F_REQUEST from pyroute2.netlink.rtnl import ( RTM_GETLINK, RTM_GETROUTE, RTM_NEWLINK, RTM_NEWROUTE, ) @pytest.fixture def ipr(): with IPRoute() as iproute: yield iproute test_config = ( 'name,argv,kwarg,msg_type,msg_flags', ( ( 'link', ('get',), {'index': 1}, (RTM_GETLINK, RTM_NEWLINK), NLM_F_REQUEST, ), ( 'link', ('dump',), {}, (RTM_GETLINK, RTM_NEWLINK), NLM_F_DUMP | NLM_F_REQUEST, ), ( 'route', ('dump',), {}, (RTM_GETROUTE, RTM_NEWROUTE), NLM_F_DUMP | NLM_F_REQUEST, ), ), ) @pytest.mark.parametrize(*test_config) def test_compile_call(ipr, name, argv, kwarg, msg_type, msg_flags): compiler_context = ipr.compile() data = getattr(ipr, name)(*argv, **kwarg) assert msg_type[0], msg_flags == struct.unpack_from( 'HH', data[0], offset=4 ) compiler_context.close() assert ipr.compiled is None for msg in getattr(ipr, name)(*argv, **kwarg): assert msg['header']['type'] == msg_type[1] @pytest.mark.parametrize(*test_config) def test_compile_context_manager(ipr, name, argv, kwarg, msg_type, msg_flags): with ipr.compile(): data = getattr(ipr, name)(*argv, **kwarg) assert msg_type[0], msg_flags == struct.unpack_from( 'HH', data[0], offset=4 ) assert ipr.compiled is None for msg in getattr(ipr, name)(*argv, **kwarg): assert msg['header']['type'] == msg_type[1] pyroute2-0.7.11/tests/test_linux/test_ipr/test_config.py000066400000000000000000000014511455030217500234430ustar00rootroot00000000000000import pytest from pyroute2 import IPRoute @pytest.mark.parametrize('nlm_echo', (True, False)) def 
test_echo_route(context, nlm_echo): index, ifname = context.default_interface address = context.new_ipaddr gateway = context.get_ipaddr(r=0) target = context.get_ipaddr(r=1) spec = {'dst': target, 'dst_len': 32, 'gateway': gateway, 'oif': index} nla_check = {} for key, value in spec.items(): nla_check[key] = value if nlm_echo else None with IPRoute(nlm_echo=nlm_echo) as ipr: context.ipr.addr('add', index=index, address=address, prefixlen=24) context.ipr.poll(context.ipr.addr, 'dump', address=address) response = tuple(ipr.route('add', **spec))[0] for key, value in nla_check.items(): assert response.get(key) == value pyroute2-0.7.11/tests/test_linux/test_ipr/test_fdb.py000066400000000000000000000037761455030217500227450ustar00rootroot00000000000000from pr2test.marks import require_root pytestmark = [require_root()] def test_fdb_vxlan(context): ipaddr = context.new_ipaddr host_if = context.new_ifname vxlan_if = context.new_ifname context.ndb.interfaces.create(ifname=host_if, kind='dummy').commit() host_idx = context.ndb.interfaces[host_if]['index'] ( context.ndb.interfaces.create( ifname=vxlan_if, kind='vxlan', vxlan_link=host_idx, vxlan_id=500 ).commit() ) vxlan_idx = context.ndb.interfaces[vxlan_if]['index'] # create FDB record l2 = '00:11:22:33:44:55' ( context.ipr.fdb( 'add', lladdr=l2, ifindex=vxlan_idx, vni=600, port=5678, dst=ipaddr ) ) # dump r = tuple(context.ipr.fdb('dump', ifindex=vxlan_idx, lladdr=l2)) assert len(r) == 1 assert r[0]['ifindex'] == vxlan_idx assert r[0].get_attr('NDA_LLADDR') == l2 assert r[0].get_attr('NDA_DST') == ipaddr assert r[0].get_attr('NDA_PORT') == 5678 assert r[0].get_attr('NDA_VNI') == 600 def test_fdb_bridge_simple(context): ifname = context.new_ifname ( context.ndb.interfaces.create( ifname=ifname, kind='bridge', state='up' ).commit() ) idx = context.ndb.interfaces[ifname]['index'] # create FDB record l2 = '00:11:22:33:44:55' context.ipr.fdb('add', lladdr=l2, ifindex=idx) # dump FDB r = tuple(context.ipr.fdb('dump', 
ifindex=idx, lladdr=l2)) # one vlan == 1, one w/o vlan assert len(r) == 2 assert len(list(filter(lambda x: x['ifindex'] == idx, r))) == 2 assert len(list(filter(lambda x: x.get_attr('NDA_VLAN'), r))) == 1 assert len(list(filter(lambda x: x.get_attr('NDA_MASTER') == idx, r))) == 2 assert len(list(filter(lambda x: x.get_attr('NDA_LLADDR') == l2, r))) == 2 r = tuple(context.ipr.fdb('dump', ifindex=idx, lladdr=l2, vlan=1)) assert len(r) == 1 assert r[0].get_attr('NDA_VLAN') == 1 assert r[0].get_attr('NDA_MASTER') == idx assert r[0].get_attr('NDA_LLADDR') == l2 pyroute2-0.7.11/tests/test_linux/test_ipr/test_ipbatch.py000066400000000000000000000005741455030217500236150ustar00rootroot00000000000000from pr2test.marks import require_root from pyroute2 import IPBatch pytestmark = [require_root()] def test_link_add(context): ifname = context.new_ifname ipb = IPBatch() ipb.link('add', ifname=ifname, kind='dummy') data = ipb.batch ipb.reset() ipb.close() context.ipr.sendto(data, (0, 0)) context.ndb.interfaces.wait(ifname=ifname, timeout=3) pyroute2-0.7.11/tests/test_linux/test_ipr/test_link.py000066400000000000000000000137271455030217500231440ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pyroute2 import NetlinkError from pyroute2.netlink.rtnl.ifinfmsg import IFF_NOARP pytestmark = [require_root()] test_matrix = make_test_matrix(targets=['local', 'netns']) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_updown_link(context): index, ifname = context.default_interface context.ipr.link('set', index=index, state='up') assert context.ipr.get_links(ifname=ifname)[0]['flags'] & 1 context.ipr.link('set', index=index, state='down') assert not (context.ipr.get_links(ifname=ifname)[0]['flags'] & 1) @skip_if_not_supported @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_link_altname_lookup(context): altname = context.new_ifname 
index, ifname = context.default_interface context.ipr.link('property_add', index=index, altname=altname) assert len(context.ipr.link('get', altname=altname)) == 1 assert context.ipr.link_lookup(ifname=ifname) == [index] assert context.ipr.link_lookup(altname=altname) == [index] @skip_if_not_supported @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_link_altname(context): altname1 = context.new_ifname altname2 = context.new_ifname weird_name = "test_with_a_very_long_string" "_and_♄⚕⚚_utf8_symbol" index, ifname = context.default_interface for name in (altname1, altname2, weird_name): with pytest.raises(NetlinkError): context.ipr.link("get", altname=name) context.ipr.link("property_add", index=index, altname=[altname1, altname2]) assert len(context.ipr.link("get", altname=altname1)) == 1 assert len(context.ipr.link("get", altname=altname2)) == 1 context.ipr.link("property_del", index=index, altname=[altname1, altname2]) for name in (altname1, altname2): with pytest.raises(NetlinkError): context.ipr.link("get", altname=name) context.ipr.link("property_add", index=index, altname=weird_name) assert len(context.ipr.link("get", altname=weird_name)) == 1 context.ipr.link("property_del", index=index, altname=weird_name) assert len(tuple(context.ipr.link("dump", altname=weird_name))) == 0 with pytest.raises(NetlinkError): context.ipr.link("get", altname=weird_name) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_link_filter(context): links = tuple(context.ipr.link('dump', ifname='lo')) assert len(links) == 1 assert links[0].get_attr('IFLA_IFNAME') == 'lo' @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_link_legacy_nla(context): index, ifname = context.default_interface new_ifname = context.new_ifname context.ipr.link('set', index=index, state='down') context.ipr.link('set', index=index, IFLA_IFNAME=new_ifname) assert context.ipr.link_lookup(ifname=new_ifname) == [index] context.ipr.link('set', 
index=index, ifname=ifname) assert context.ipr.link_lookup(ifname=ifname) == [index] @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_link_rename(context): index, ifname = context.default_interface new_ifname = context.new_ifname context.ndb.interfaces[ifname].set('state', 'down').commit() context.ipr.link('set', index=index, ifname=new_ifname) assert context.ipr.link_lookup(ifname=new_ifname) == [index] context.ipr.link('set', index=index, ifname=ifname) assert context.ipr.link_lookup(ifname=ifname) == [index] @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_link_arp_flag(context): index, _ = context.default_interface # by default dummy interface have NOARP set assert context.ipr.get_links(index)[0]['flags'] & IFF_NOARP context.ipr.link('set', index=index, arp=True) assert not context.ipr.get_links(index)[0]['flags'] & IFF_NOARP context.ipr.link('set', index=index, arp=False) assert context.ipr.get_links(index)[0]['flags'] & IFF_NOARP context.ipr.link('set', index=index, noarp=False) assert not context.ipr.get_links(index)[0]['flags'] & IFF_NOARP context.ipr.link('set', index=index, noarp=True) assert context.ipr.get_links(index)[0]['flags'] & IFF_NOARP @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_symbolic_flags_ifinfmsg(context): index, _ = context.default_interface context.ipr.link('set', index=index, flags=['IFF_UP']) iface = context.ipr.get_links(index)[0] assert iface['flags'] & 1 assert 'IFF_UP' in iface.flags2names(iface['flags']) context.ipr.link('set', index=index, flags=['!IFF_UP']) assert not (context.ipr.get_links(index)[0]['flags'] & 1) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_remove_link(context): index, ifname = context.default_interface context.ipr.link('del', index=index) assert len(context.ipr.link_lookup(ifname=ifname)) == 0 assert len(context.ipr.link_lookup(index=index)) == 0 @pytest.mark.parametrize('context', test_matrix, 
indirect=True) def test_brport_basic(context): bridge = context.new_ifname port = context.new_ifname context.ndb.interfaces.create( ifname=bridge, kind='bridge', state='up' ).commit() context.ndb.interfaces.create( ifname=port, kind='dummy', state='up' ).commit() context.ipr.link( 'set', index=context.ndb.interfaces[port]['index'], master=context.ndb.interfaces[bridge]['index'], ) context.ipr.brport( 'set', index=context.ndb.interfaces[port]['index'], unicast_flood=0, cost=200, proxyarp=1, ) port = tuple( context.ipr.brport('dump', index=context.ndb.interfaces[port]['index']) )[0] protinfo = port.get_attr('IFLA_PROTINFO') assert protinfo.get_attr('IFLA_BRPORT_COST') == 200 assert protinfo.get_attr('IFLA_BRPORT_PROXYARP') == 1 assert protinfo.get_attr('IFLA_BRPORT_UNICAST_FLOOD') == 0 pyroute2-0.7.11/tests/test_linux/test_ipr/test_link_create.py000066400000000000000000000016121455030217500244550ustar00rootroot00000000000000import pytest from pr2test.marks import require_root from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg pytestmark = [require_root()] @pytest.mark.parametrize('smode', ('IPVLAN_MODE_L2', 'IPVLAN_MODE_L3')) def test_create_ipvlan(context, smode): master = context.new_ifname ipvlan = context.new_ifname # create the master link index = context.ndb.interfaces.create( ifname=master, kind='dummy' ).commit()['index'] # check modes # maybe move modes dict somewhere else? 
cmode = ifinfmsg.ifinfo.data_map['ipvlan'].modes[smode] assert ifinfmsg.ifinfo.data_map['ipvlan'].modes[cmode] == smode # create ipvlan context.ipr.link( 'add', ifname=ipvlan, kind='ipvlan', link=index, mode=cmode ) interface = context.ndb.interfaces.wait(ifname=ipvlan, timeout=5) assert interface['link'] == index assert interface['ipvlan_mode'] == cmode pyroute2-0.7.11/tests/test_linux/test_ipr/test_link_custom_kind.py000066400000000000000000000043131455030217500255320ustar00rootroot00000000000000from pr2test import custom_link_kind from pr2test.custom_link_kind.foo import vlan as foo_vlan from pr2test.marks import require_root from pyroute2 import NetlinkError pytestmark = [require_root()] def test_register_fail(context): ifname = context.new_ifname try: context.ipr.link( 'add', ifname=ifname, link=context.default_interface[0], kind='vlan', foo_id=101, ) except NetlinkError as e: if e.code == 22: # Invalid argument return raise Exception('test failed') def test_register_path(context): ifname = context.new_ifname old = context.ipr.list_link_kind()['vlan'] context.ipr.register_link_kind(path='test_linux/pr2test/custom_link_kind/') context.ipr.link( 'add', ifname=ifname, link=context.default_interface[0], kind='vlan', foo_id=101, ) assert ( context.ipr.link('get', ifname=ifname)[0].get_nested( 'IFLA_LINKINFO', 'IFLA_INFO_DATA', 'IFLA_FOO_ID' ) == 101 ) context.ipr.register_link_kind(module={'vlan': old}) def test_register_pkg(context): ifname = context.new_ifname old = context.ipr.list_link_kind()['vlan'] context.ipr.register_link_kind(pkg=custom_link_kind) context.ipr.link( 'add', ifname=ifname, link=context.default_interface[0], kind='vlan', foo_id=101, ) assert ( context.ipr.link('get', ifname=ifname)[0].get_nested( 'IFLA_LINKINFO', 'IFLA_INFO_DATA', 'IFLA_FOO_ID' ) == 101 ) context.ipr.register_link_kind(module={'vlan': old}) def test_register_module(context): ifname = context.new_ifname old = context.ipr.list_link_kind()['vlan'] 
context.ipr.register_link_kind(module={'vlan': foo_vlan}) context.ipr.link( 'add', ifname=ifname, link=context.default_interface[0], kind='vlan', foo_id=101, ) assert ( context.ipr.link('get', ifname=ifname)[0].get_nested( 'IFLA_LINKINFO', 'IFLA_INFO_DATA', 'IFLA_FOO_ID' ) == 101 ) context.ipr.register_link_kind(module={'vlan': old}) pyroute2-0.7.11/tests/test_linux/test_ipr/test_match.py000066400000000000000000000002101455030217500232620ustar00rootroot00000000000000from functools import partial def test_match_callable(context): assert len(context.ipr.get_links(match=partial(lambda x: x))) > 0 pyroute2-0.7.11/tests/test_linux/test_ipr/test_neigh.py000066400000000000000000000055121455030217500232720ustar00rootroot00000000000000import time from socket import AF_INET, AF_INET6 import pytest from pr2test.context_manager import skip_if_not_supported from pr2test.marks import require_root pytestmark = [require_root()] def test_real_links(context): links = set([x['index'] for x in context.ipr.get_links()]) neigh = set([x['ifindex'] for x in context.ipr.get_neighbours()]) assert neigh <= links def test_filter(context): ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr index, ifname = context.default_interface lladdr = '00:11:22:33:44:55' # this is required -- the default interface takes time to setup time.sleep(0.5) # inject arp records context.ipr.neigh('add', dst=ipaddr1, lladdr=lladdr, ifindex=index) context.ipr.neigh('add', dst=ipaddr2, lladdr=lladdr, ifindex=index) # assert two arp records on the interface assert ( len(tuple(context.ipr.get_neighbours(ifindex=index, family=AF_INET))) == 2 ) # filter by dst assert len(tuple(context.ipr.get_neighbours(dst=ipaddr1))) == 1 # filter with lambda assert ( len( tuple( context.ipr.get_neighbours( match=lambda x: x['ifindex'] == index and x['family'] == AF_INET ) ) ) == 2 ) @skip_if_not_supported def test_get(context): ipaddr1 = context.new_ipaddr index, ifname = context.default_interface lladdr = '00:11:22:33:44:55' # 
this is required -- the default interface takes time to setup time.sleep(0.5) context.ipr.neigh('add', dst=ipaddr1, lladdr=lladdr, ifindex=index) res = context.ipr.neigh('get', dst=ipaddr1, ifindex=index) assert res[0].get_attr("NDA_DST") == ipaddr1 @pytest.mark.parametrize( 'family,ipaddr_source,prefixlen', ((AF_INET, 'new_ipaddr', 24), (AF_INET6, 'new_ip6addr', 64)), ) def test_dump(context, family, ipaddr_source, prefixlen): index, ifname = context.default_interface ipaddr1 = getattr(context, ipaddr_source) # wait for the link context.ipr.poll(context.ipr.link, 'dump', index=index, state='up') context.ipr.addr( 'add', index=index, family=family, address=ipaddr1, prefixlen=prefixlen ) # wait for the address context.ipr.poll(context.ipr.addr, 'dump', index=index, address=ipaddr1) # now add neighbours; to keep it simpler, don't take care if we loose # some of the neighbour records, enough to have there at least one, so # add some and continue for last_byte in range(32): l2addr = f'00:11:22:33:44:{last_byte:02}' context.ipr.neigh( 'add', dst=getattr(context, ipaddr_source), lladdr=l2addr, ifindex=index, ) # ok, now the dump should not be empty assert len(tuple(context.ipr.neigh('dump', family=family))) > 0 pyroute2-0.7.11/tests/test_linux/test_ipr/test_netns.py000066400000000000000000000016071455030217500233300ustar00rootroot00000000000000from pr2test.marks import require_root from pyroute2 import NetNS pytestmark = [require_root()] def test_get_netns_info(context): nsname = context.new_nsname peer_name = context.new_ifname host_name = context.new_ifname with NetNS(nsname): ( context.ndb.interfaces.create( ifname=host_name, kind='veth', peer={'ifname': peer_name, 'net_ns_fd': nsname}, ).commit() ) # get veth veth = context.ipr.link('get', ifname=host_name)[0] target = veth.get_attr('IFLA_LINK_NETNSID') for info in context.ipr.get_netns_info(): path = info.get_attr('NSINFO_PATH') assert path.endswith(nsname) netnsid = info['netnsid'] if target == netnsid: break else: 
raise KeyError('peer netns not found') pyroute2-0.7.11/tests/test_linux/test_ipr/test_ntables.py000066400000000000000000000005171455030217500236300ustar00rootroot00000000000000def _test_ntables(self): setA = set( filter( lambda x: x is not None, [ x.get_attr('NDTA_PARMS').get_attr('NDTPA_IFINDEX') for x in self.ip.get_ntables() ], ) ) setB = set([x['index'] for x in self.ip.get_links()]) assert setA == setB pyroute2-0.7.11/tests/test_linux/test_ipr/test_route.py000066400000000000000000000337051455030217500233430ustar00rootroot00000000000000import errno import socket import time import pytest from pr2test.context_manager import skip_if_not_supported from pr2test.marks import require_root from utils import require_kernel from pyroute2 import IPRoute, NetlinkError from pyroute2.common import AF_MPLS from pyroute2.netlink.rtnl.rtmsg import RTNH_F_ONLINK pytestmark = [require_root()] def test_route_get_target_strict_check(context): if not context.ipr.get_default_routes(table=254): pytest.skip('no default IPv4 routes') require_kernel(4, 20) with IPRoute(strict_check=True) as ip: rts = ip.get_routes(family=socket.AF_INET, dst='8.8.8.8', table=254) assert len(tuple(rts)) > 0 def test_extended_error_on_route(context): require_kernel(4, 20) # specific flags, cannot use context.ip with IPRoute(ext_ack=True, strict_check=True) as ip: with pytest.raises(NetlinkError) as e: ip.route("get", dst="1.2.3.4", table=254, dst_len=0) assert abs(e.value.code) == errno.EINVAL # on 5.10 kernel, full message is 'ipv4: rtm_src_len and # rtm_dst_len must be 32 for IPv4' assert "rtm_dst_len" in str(e.value) @pytest.mark.parametrize( 'proto', (('static', 'boot'), (4, 3), ('boot', 4), (3, 'static')) ) def test_route_proto(context, proto): proto, fake = proto ipaddr = context.new_ipaddr gateway = context.new_ipaddr ipnet = context.new_ip4net ifname = context.new_ifname ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(f'{ipaddr}/24') .commit() ) spec = { 'dst': 
ipnet.network, 'dst_len': ipnet.netmask, 'gateway': gateway, 'proto': proto, } context.ndb.routes.create(**spec).commit() with pytest.raises(NetlinkError): context.ipr.route( 'del', dst=f'{ipnet.network}/{ipnet.netmask}', gateway=f'{gateway}', proto=fake, ) context.ipr.route( 'del', dst=f'{ipnet.network}/{ipnet.netmask}', gateway=f'{gateway}', proto=proto, ) def test_route_oif_as_iterable(context): index, ifname = context.default_interface ipnet = context.new_ip4net spec = {'dst': ipnet.network, 'dst_len': ipnet.netmask, 'oif': (index,)} context.ndb.interfaces[ifname].set('state', 'up').commit() context.ipr.route('add', **spec) route = context.ndb.routes.wait( dst=ipnet.network, dst_len=ipnet.netmask, timeout=5 ) assert route['oif'] == index def test_route_get_target(context): if not context.ipr.get_default_routes(table=254): pytest.skip('no default IPv4 routes') rts = context.ipr.get_routes( family=socket.AF_INET, dst='8.8.8.8', table=254 ) assert len(tuple(rts)) > 0 def test_route_get_target_default_ipv4(context): rts = context.ipr.get_routes(dst='127.0.0.1') assert len(tuple(rts)) > 0 def test_route_get_target_default_ipv6(context): rts = context.ipr.get_routes(dst='::1') assert len(tuple(rts)) > 0 @skip_if_not_supported @pytest.mark.parametrize('family', (socket.AF_INET, socket.AF_INET6)) def test_route_mpls_via(context, family): if family == socket.AF_INET: address = context.new_ipaddr else: ip6net = context.new_ip6net address = str(ip6net.network) + '7c32' label = 0x20 index, ifname = context.default_interface context.ndb.interfaces[ifname].set('state', 'up').commit() context.ipr.route( 'add', **{ 'family': AF_MPLS, 'oif': index, 'via': {'family': family, 'addr': address}, 'newdst': {'label': label, 'bos': 1}, }, ) rt = tuple(context.ipr.get_routes(oif=index, family=AF_MPLS))[0] assert rt.get_attr('RTA_VIA')['addr'] == address assert rt.get_attr('RTA_VIA')['family'] == family assert rt.get_attr('RTA_NEWDST')[0]['label'] == label assert 
len(rt.get_attr('RTA_NEWDST')) == 1 context.ipr.route( 'del', **{ 'family': AF_MPLS, 'oif': index, 'dst': {'label': 0x10, 'bos': 1}, 'via': {'family': family, 'addr': address}, 'newdst': {'label': label, 'bos': 1}, }, ) assert len(tuple(context.ipr.get_routes(oif=index, family=AF_MPLS))) == 0 @skip_if_not_supported @pytest.mark.parametrize( 'newdst', ({'label': 0x21, 'bos': 1}, [{'label': 0x21, 'bos': 1}]) ) def test_route_mpls_swap_newdst(context, newdst): index, _ = context.default_interface req = { 'family': AF_MPLS, 'oif': index, 'dst': {'label': 0x20, 'bos': 1}, 'newdst': newdst, } context.ipr.route('add', **req) rt = tuple(context.ipr.get_routes(oif=index, family=AF_MPLS))[0] assert rt.get_attr('RTA_DST')[0]['label'] == 0x20 assert len(rt.get_attr('RTA_DST')) == 1 assert rt.get_attr('RTA_NEWDST')[0]['label'] == 0x21 assert len(rt.get_attr('RTA_NEWDST')) == 1 context.ipr.route('del', **req) assert len(tuple(context.ipr.get_routes(oif=index, family=AF_MPLS))) == 0 @pytest.mark.parametrize('mode', ('normal', 'raw')) def test_route_multipath(context, mode): index, ifname = context.default_interface ipaddr = context.new_ipaddr gateway1 = context.new_ipaddr gateway2 = context.new_ipaddr ip4net = context.new_ip4net if mode == 'normal': multipath = [{'gateway': gateway1}, {'gateway': gateway2}] elif mode == 'raw': multipath = [ {'hops': 20, 'oif': index, 'attrs': [['RTA_GATEWAY', gateway1]]}, {'hops': 30, 'oif': index, 'attrs': [['RTA_GATEWAY', gateway2]]}, ] context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() context.ipr.route( 'add', dst=ip4net.network, dst_len=ip4net.netmask, multipath=multipath ) route = context.ndb.routes.wait( dst=ip4net.network, dst_len=ip4net.netmask, timeout=5 ) nh1 = route['multipath'][0] nh2 = route['multipath'][1] assert nh1['gateway'] == gateway1 assert nh2['gateway'] == gateway2 @pytest.mark.parametrize('flags', (RTNH_F_ONLINK, ['onlink'])) def test_route_onlink(context, flags): ip4net = context.new_ip4net ipaddr = 
context.new_ipaddr index, ifname = context.default_interface context.ipr.route( 'add', dst=ip4net.network, dst_len=ip4net.netmask, gateway=ipaddr, oif=index, flags=flags, ) route = context.ndb.routes.wait( dst=ip4net.network, dst_len=ip4net.netmask, timeout=5 ) assert route['oif'] == index route.remove().commit() @pytest.mark.parametrize('flags', (RTNH_F_ONLINK, ['onlink'])) def test_route_onlink_multipath(context, flags): ip4net = context.new_ip4net gateway1 = context.new_ipaddr gateway2 = context.new_ipaddr index, ifname = context.default_interface context.ipr.route( 'add', dst=ip4net.network, dst_len=ip4net.netmask, multipath=[ {'gateway': gateway1, 'oif': 1, 'flags': flags}, {'gateway': gateway2, 'oif': 1, 'flags': flags}, ], ) route = context.ndb.routes.wait( dst=ip4net.network, dst_len=ip4net.netmask, timeout=5 ) nh1 = route['multipath'][0] nh2 = route['multipath'][1] assert nh1['gateway'] == gateway1 assert nh2['gateway'] == gateway2 route.remove().commit() @skip_if_not_supported def _test_lwtunnel_multipath_mpls(context): ip4net = context.new_ip4net index, ifname = context.default_interface gateway = context.new_ipaddr ipaddr = context.new_ipaddr context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() context.ipr.route( 'add', dst=f'{ip4net.network}/{ip4net.netmask}', multipath=[ {'encap': {'type': 'mpls', 'labels': 500}, 'oif': index}, { 'encap': {'type': 'mpls', 'labels': '600/700'}, 'gateway': gateway, }, ], ) routes = tuple( context.ipr.route('dump', dst=ip4net.network, dst_len=ip4net.netmask) ) assert len(routes) == 1 mp = routes[0].get_attr('RTA_MULTIPATH') assert len(mp) == 2 assert mp[0]['oif'] == 1 assert mp[0].get_attr('RTA_ENCAP_TYPE') == 1 labels = mp[0].get_attr('RTA_ENCAP').get_attr('MPLS_IPTUNNEL_DST') assert len(labels) == 1 assert labels[0]['bos'] == 1 assert labels[0]['label'] == 500 assert mp[1].get_attr('RTA_ENCAP_TYPE') == 1 labels = mp[1].get_attr('RTA_ENCAP').get_attr('MPLS_IPTUNNEL_DST') assert len(labels) == 2 assert 
labels[0]['bos'] == 0 assert labels[0]['label'] == 600 assert labels[1]['bos'] == 1 assert labels[1]['label'] == 700 @skip_if_not_supported @pytest.mark.parametrize( 'lid,lnum,labels', ( ('list+dict', 2, [{'bos': 0, 'label': 226}, {'bos': 1, 'label': 227}]), ('list+int', 2, [226, 227]), ('str', 2, '226/227'), ('list+dict', 1, [{'bos': 1, 'label': 227}]), ('list+int', 1, [227]), ('str', 1, '227'), ('dict', 1, {'bos': 1, 'label': 227}), ('int', 1, 227), ), ) def test_lwtunnel_mpls_labels(context, lid, lnum, labels): ip4net = context.new_ip4net ipaddr = context.new_ipaddr gateway = context.new_ipaddr index, ifname = context.default_interface context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() context.ipr.route( 'add', dst=ip4net.network, dst_len=ip4net.netmask, encap={'type': 'mpls', 'labels': labels}, gateway=gateway, ) routes = tuple( context.ipr.route('dump', dst=ip4net.network, dst_len=ip4net.netmask) ) assert len(routes) == 1 route = routes[0] assert route.get_attr('RTA_ENCAP_TYPE') == 1 assert route.get_attr('RTA_GATEWAY') == gateway labels = route.get_attr('RTA_ENCAP').get_attr('MPLS_IPTUNNEL_DST') assert len(labels) == lnum if lnum == 2: assert labels[0]['bos'] == 0 assert labels[0]['label'] == 226 assert labels[1]['bos'] == 1 assert labels[1]['label'] == 227 else: assert labels[0]['bos'] == 1 assert labels[0]['label'] == 227 context.ipr.route('del', dst=f'{ip4net.network}/{ip4net.netmask}') def test_route_change_existing(context): index, ifname = context.default_interface ipaddr = context.new_ipaddr gateway1 = context.new_ipaddr gateway2 = context.new_ipaddr ip4net = context.new_ip4net context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() context.ipr.route( 'add', dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway1 ) context.ndb.routes.wait( dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway1, timeout=5 ) context.ipr.route( 'change', dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway2 ) context.ndb.routes.wait( 
dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway2, timeout=5 ) def test_route_change_not_existing_fail(context): # route('change', ...) should fail, if no route exists index, ifname = context.default_interface ipaddr = context.new_ipaddr gateway2 = context.new_ipaddr ip4net = context.new_ip4net context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() with pytest.raises(NetlinkError) as e: context.ipr.route( 'change', dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway2, ) assert e.value.code == errno.ENOENT def test_route_replace_existing(context): # route('replace', ...) should succeed, if route exists index, ifname = context.default_interface ipaddr = context.new_ipaddr gateway1 = context.new_ipaddr gateway2 = context.new_ipaddr ip4net = context.new_ip4net context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() context.ipr.route( 'add', dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway1 ) context.ndb.routes.wait( dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway1, timeout=5 ) context.ipr.route( 'replace', dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway2 ) context.ndb.routes.wait( dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway2, timeout=5 ) def test_route_replace_not_existing(context): # route('replace', ...) 
should succeed, if route doesn't exist index, ifname = context.default_interface ipaddr = context.new_ipaddr gateway2 = context.new_ipaddr ip4net = context.new_ip4net context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() context.ipr.route( 'replace', dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway2 ) context.ndb.routes.wait( dst=ip4net.network, dst_len=ip4net.netmask, gateway=gateway2, timeout=5 ) def test_flush_routes(context): index, ifname = context.default_interface ipaddr = context.new_ipaddr gateway = context.new_ipaddr context.ndb.interfaces[ifname].add_ip(f'{ipaddr}/24').commit() for net in [context.new_ip4net for _ in range(10)]: context.ipr.route( 'add', dst=net.network, dst_len=net.netmask, gateway=gateway, table=10101, oif=index, ) context.ndb.routes.wait( dst=net.network, dst_len=net.netmask, table=10101, timeout=5 ) with context.ndb.routes.summary() as summary: summary.select_records(table=10101) assert len(tuple(summary)) == 10 context.ipr.flush_routes(table=10101, family=socket.AF_INET6) with context.ndb.routes.summary() as summary: summary.select_records(table=10101) assert len(tuple(summary)) == 10 context.ipr.flush_routes(table=10101, family=socket.AF_INET) for _ in range(5): with context.ndb.routes.summary() as summary: summary.select_records(table=10101) if len(tuple(summary)) == 0: break time.sleep(0.1) else: raise Exception('route table not flushed') pyroute2-0.7.11/tests/test_linux/test_ipr/test_rule.py000066400000000000000000000144121455030217500231460ustar00rootroot00000000000000import socket import struct import pytest from pr2test.marks import require_root pytestmark = [require_root()] def test_flush_rules(context): ifaddr1 = context.new_ipaddr ifaddr2 = context.new_ipaddr init = len(context.ipr.get_rules(family=socket.AF_INET)) assert len(context.ipr.get_rules(priority=lambda x: 100 < x < 500)) == 0 context.ipr.rule('add', table=10, priority=110) context.ipr.rule('add', table=15, priority=150, 
action='FR_ACT_PROHIBIT') context.ipr.rule('add', table=20, priority=200, src=ifaddr1) context.ipr.rule('add', table=25, priority=250, dst=ifaddr2) assert len(context.ipr.get_rules(priority=lambda x: 100 < x < 500)) == 4 assert len(context.ipr.get_rules(src=ifaddr1)) == 1 assert len(context.ipr.get_rules(dst=ifaddr2)) == 1 context.ipr.flush_rules( family=socket.AF_INET, priority=lambda x: 100 < x < 500 ) assert len(context.ipr.get_rules(priority=lambda x: 100 < x < 500)) == 0 assert len(context.ipr.get_rules(src=ifaddr1)) == 0 assert len(context.ipr.get_rules(dst=ifaddr2)) == 0 assert len(context.ipr.get_rules(family=socket.AF_INET)) == init def test_basic(context): context.ipr.rule('add', table=10, priority=32000) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32000 and x.get_attr('FRA_TABLE') == 10 ] ) == 1 ) context.ipr.rule('delete', table=10, priority=32000) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32000 and x.get_attr('FRA_TABLE') == 10 ] ) == 0 ) def test_fwmark(context): context.ipr.rule('add', table=15, priority=32006, fwmark=10) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32006 and x.get_attr('FRA_TABLE') == 15 and x.get_attr('FRA_FWMARK') ] ) == 1 ) context.ipr.rule('delete', table=15, priority=32006, fwmark=10) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32006 and x.get_attr('FRA_TABLE') == 15 and x.get_attr('FRA_FWMARK') ] ) == 0 ) def test_fwmark_mask_normalized(context): context.ipr.rule('add', table=15, priority=32006, fwmark=10, fwmask=20) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32006 and x.get_attr('FRA_TABLE') == 15 and x.get_attr('FRA_FWMARK') and x.get_attr('FRA_FWMASK') ] ) == 1 ) context.ipr.rule('delete', table=15, priority=32006, fwmark=10, fwmask=20) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32006 
and x.get_attr('FRA_TABLE') == 15 and x.get_attr('FRA_FWMARK') and x.get_attr('FRA_FWMASK') ] ) == 0 ) def test_fwmark_mask_raw(context): context.ipr.rule('add', table=15, priority=32006, fwmark=10, FRA_FWMASK=20) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32006 and x.get_attr('FRA_TABLE') == 15 and x.get_attr('FRA_FWMARK') and x.get_attr('FRA_FWMASK') ] ) == 1 ) context.ipr.rule( 'delete', table=15, priority=32006, fwmark=10, FRA_FWMASK=20 ) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32006 and x.get_attr('FRA_TABLE') == 15 and x.get_attr('FRA_FWMARK') and x.get_attr('FRA_FWMASK') ] ) == 0 ) def test_bad_table(context): with pytest.raises(struct.error): context.ipr.rule('add', table=-1, priority=32000) def test_big_table(context): context.ipr.rule('add', table=1024, priority=32000) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32000 and x.get_attr('FRA_TABLE') == 1024 ] ) == 1 ) context.ipr.rule('delete', table=1024, priority=32000) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32000 and x.get_attr('FRA_TABLE') == 1024 ] ) == 0 ) def test_src_dst(context): context.ipr.rule( 'add', table=17, priority=32005, src='10.0.0.0', src_len=24, dst='10.1.0.0', dst_len=24, ) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32005 and x.get_attr('FRA_TABLE') == 17 and x.get_attr('FRA_SRC') == '10.0.0.0' and x.get_attr('FRA_DST') == '10.1.0.0' and x['src_len'] == 24 and x['dst_len'] == 24 ] ) == 1 ) context.ipr.rule( 'del', table=17, priority=32005, src='10.0.0.0', src_len=24, dst='10.1.0.0', dst_len=24, ) assert ( len( [ x for x in context.ipr.get_rules() if x.get_attr('FRA_PRIORITY') == 32005 and x.get_attr('FRA_TABLE') == 17 and x.get_attr('FRA_SRC') == '10.0.0.0' and x.get_attr('FRA_DST') == '10.1.0.0' and x['src_len'] == 24 and x['dst_len'] == 24 ] ) == 0 ) 
pyroute2-0.7.11/tests/test_linux/test_ipr/test_stress.py000066400000000000000000000016551455030217500235270ustar00rootroot00000000000000import os import socket from pr2test.marks import require_root from pyroute2 import NetlinkDumpInterrupted pytestmark = [require_root()] def test_mass_ipv6(context): # ipv6net = context.new_ip6net base = str(ipv6net.network) + '{0}' limit = int(os.environ.get('PYROUTE2_SLIMIT', '0x800'), 16) index, ifname = context.default_interface # add addresses for idx in range(limit): context.ipr.addr( 'add', index=index, family=socket.AF_INET6, address=base.format(hex(idx)[2:]), prefixlen=48, ) # assert addresses in two steps, to ease debug addrs = [] for _ in range(3): try: addrs = tuple(context.ipr.get_addr(family=socket.AF_INET6)) break except NetlinkDumpInterrupted: pass else: raise Exception('could not dump addresses') assert len(addrs) >= limit pyroute2-0.7.11/tests/test_linux/test_ipr/test_vlan.py000066400000000000000000000104111455030217500231320ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root pytestmark = [require_root()] test_matrix = make_test_matrix(targets=['local', 'netns']) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_add(context): index, ifname = context.default_interface vlan_name = context.new_ifname vlan_id = 101 context.ipr.link( 'add', ifname=vlan_name, kind='vlan', link=index, vlan_id=vlan_id ) (vlan,) = context.ipr.poll(context.ipr.link, 'dump', ifname=vlan_name) assert vlan.get('ifname') == vlan_name assert vlan.get('link') == index assert vlan.get(('linkinfo', 'data', 'vlan_id')) == vlan_id @pytest.mark.parametrize( 'spec,key,check', ( ( {'vlan_egress_qos': {'from': 0, 'to': 3}}, ('linkinfo', 'data', 'vlan_egress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 3}, ), ( {'vlan_ingress_qos': {'from': 0, 'to': 4}}, ('linkinfo', 'data', 'vlan_ingress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 4}, ), ( { 
'vlan_egress_qos': { 'attrs': (('IFLA_VLAN_QOS_MAPPING', {'from': 0, 'to': 5}),) } }, ('linkinfo', 'data', 'vlan_egress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 5}, ), ( { 'vlan_ingress_qos': { 'attrs': (('IFLA_VLAN_QOS_MAPPING', {'from': 0, 'to': 6}),) } }, ('linkinfo', 'data', 'vlan_ingress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 6}, ), ), ids=[ 'egress-short-0:3', 'ingress-short-0:4', 'egress-full-0:5', 'egress-full-0:6', ], ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_qos_add(context, spec, key, check): index, ifname = context.default_interface vlan_name = context.new_ifname vlan_id = 101 context.ipr.link( 'add', ifname=vlan_name, kind='vlan', link=index, vlan_id=vlan_id, **spec ) (vlan,) = context.ipr.poll(context.ipr.link, 'dump', ifname=vlan_name) assert vlan.get('ifname') == vlan_name assert vlan.get('link') == index assert vlan.get(('linkinfo', 'data', 'vlan_id')) == vlan_id assert vlan.get(key) == check @pytest.mark.parametrize( 'spec,key,check', ( ( {'vlan_egress_qos': {'from': 0, 'to': 3}}, ('linkinfo', 'data', 'vlan_egress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 3}, ), ( {'vlan_ingress_qos': {'from': 0, 'to': 4}}, ('linkinfo', 'data', 'vlan_ingress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 4}, ), ( { 'vlan_egress_qos': { 'attrs': (('IFLA_VLAN_QOS_MAPPING', {'from': 0, 'to': 5}),) } }, ('linkinfo', 'data', 'vlan_egress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 5}, ), ( { 'vlan_ingress_qos': { 'attrs': (('IFLA_VLAN_QOS_MAPPING', {'from': 0, 'to': 6}),) } }, ('linkinfo', 'data', 'vlan_ingress_qos', 'vlan_qos_mapping'), {'from': 0, 'to': 6}, ), ), ids=[ 'egress-short-0:3', 'ingress-short-0:4', 'egress-full-0:5', 'egress-full-0:6', ], ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_qos_set(context, spec, key, check): index, ifname = context.default_interface vlan_name = context.new_ifname vlan_id = 101 context.ipr.link( 'add', ifname=vlan_name, kind='vlan', link=index, 
vlan_id=vlan_id ) (vlan,) = context.ipr.poll(context.ipr.link, 'dump', ifname=vlan_name) assert vlan.get('ifname') == vlan_name assert vlan.get('link') == index assert vlan.get(('linkinfo', 'data', 'vlan_id')) == vlan_id assert vlan.get(key) is None context.ipr.link('set', index=vlan['index'], kind='vlan', **spec) (vlan,) = context.ipr.poll(context.ipr.link, 'dump', ifname=vlan_name) assert vlan.get(key) == check pyroute2-0.7.11/tests/test_linux/test_ipr/test_vlan_filter.py000066400000000000000000000033711455030217500245060ustar00rootroot00000000000000import pytest from pr2test.marks import require_root pytestmark = [require_root()] def test_vlan_filter_dump(context): ifname1 = context.new_ifname ifname2 = context.new_ifname context.ndb.interfaces.create( ifname=ifname1, kind='bridge', state='up' ).commit() context.ndb.interfaces.create( ifname=ifname2, kind='bridge', state='up' ).commit() assert len(tuple(context.ipr.get_vlans())) >= 2 for name in (ifname1, ifname2): assert len(tuple(context.ipr.get_vlans(ifname=name))) == 1 assert ( tuple(context.ipr.get_vlans(ifname=name))[0].get_attr( 'IFLA_IFNAME' ) ) == name assert ( tuple(context.ipr.get_vlans(ifname=name))[0].get_nested( 'IFLA_AF_SPEC', 'IFLA_BRIDGE_VLAN_INFO' ) )['vid'] == 1 @pytest.mark.parametrize( 'arg_name,vid_spec,vid', ( ('vlan_info', {'vid': 568}, 568), ('af_spec', {'attrs': [['IFLA_BRIDGE_VLAN_INFO', {'vid': 567}]]}, 567), ), ) def _test_vlan_filter_add(context, arg_name, vid_spec, vid): ifname_port = context.new_ifname ifname_bridge = context.new_ifname port = context.ndb.interfaces.create( ifname=ifname_port, kind='dummy', state='up' ).commit() ( context.ndb.interfaces.create( ifname=ifname_bridge, kind='bridge', state='up' ) .add_port(ifname_port) .commit() ) assert vid not in context.ndb.vlans spec = {'index': port['index'], arg_name: vid_spec} context.ipr.vlan_filter('add', **spec) assert context.ndb.vlans.wait(vid=vid, timeout=5) context.ipr.vlan_filter('del', **spec) assert 
context.ndb.vlans.wait(vid=vid, timeout=5, action='remove') pyroute2-0.7.11/tests/test_linux/test_ipset.py000066400000000000000000000333531455030217500214770ustar00rootroot00000000000000import errno import socket from time import sleep from uuid import uuid4 import pytest from pr2test.marks import require_root from pyroute2.ipset import IPSet, IPSetError, PortEntry, PortRange from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.nfnetlink.ipset import ( IPSET_ERR_TYPE_SPECIFIC, IPSET_FLAG_WITH_FORCEADD, ) pytestmark = [require_root()] def parse_ip(entry): ip_from = entry.get_attr("IPSET_ATTR_IP_FROM") return ip_from.get_attr("IPSET_ATTR_IPADDR_IPV4") def parse_net(entry): net = parse_ip(entry) cidr = entry.get_attr("IPSET_ATTR_CIDR") if cidr is not None: net += "/{0}".format(cidr) return net def ipset_type_to_entry_type(ipset_type): return ipset_type.split(":", 1)[1].split(",") def list_ipset(ipset_name): with IPSet() as sock: res = {} msg_list = sock.list(ipset_name) adt = "IPSET_ATTR_ADT" proto = "IPSET_ATTR_DATA" stype = "IPSET_ATTR_TYPENAME" for msg in msg_list: for x in msg.get_attr(adt).get_attrs(proto): entry = "" msg_stypes = msg.get_attr(stype) if msg_stypes is None: msg_stypes = "hash:ip" for st in ipset_type_to_entry_type(msg_stypes): if st == "ip": entry = parse_ip(x) elif st == "net": entry = parse_net(x) elif st == "iface": entry += x.get_attr("IPSET_ATTR_IFACE") elif st == "set": entry += x.get_attr("IPSET_ATTR_NAME") entry += "," entry = entry.strip(",") res[entry] = ( x.get_attr("IPSET_ATTR_PACKETS"), x.get_attr("IPSET_ATTR_BYTES"), x.get_attr("IPSET_ATTR_COMMENT"), x.get_attr("IPSET_ATTR_TIMEOUT"), x.get_attr("IPSET_ATTR_SKBMARK"), x.get_attr("IPSET_ATTR_SKBPRIO"), x.get_attr("IPSET_ATTR_SKBQUEUE"), ) return res def ipset_exists(ipset_name): with IPSet() as sock: try: sock.headers(ipset_name) return True except IPSetError as e: if e.code == errno.ENOENT: return False raise def test_create_exclusive_fail(ipset, ipset_name): 
ipset.create(ipset_name) assert ipset_exists(ipset_name) try: ipset.create(ipset_name) except NetlinkError as e: if e.code != errno.EEXIST: raise def test_create_exclusive_success(ipset, ipset_name): ipset.create(ipset_name) assert ipset_exists(ipset_name) ipset.create(ipset_name, exclusive=False) # do not fail def test_add_exclusive_fail(ipset, ipset_name): ipaddr = "172.16.202.202" ipset.create(ipset_name) ipset.add(ipset_name, ipaddr) assert ipaddr in list_ipset(ipset_name) try: ipset.add(ipset_name, ipaddr) except NetlinkError: pass def test_add_exclusive_success(ipset, ipset_name): ipaddr = "172.16.202.202" ipset.create(ipset_name) ipset.add(ipset_name, ipaddr) assert ipaddr in list_ipset(ipset_name) ipset.add(ipset_name, ipaddr, exclusive=False) def test_create_destroy(ipset, ipset_name): # create ipset ipset.create(ipset_name) # assert it exists assert ipset_exists(ipset_name) # remove ipset ipset.destroy(ipset_name) # assert it is removed assert not ipset_exists(ipset_name) def test_add_delete(ipset, ipset_name): ipaddr = "192.168.1.1" # create ipset ipset.create(ipset_name) assert ipset_exists(ipset_name) # add an entry ipset.add(ipset_name, ipaddr) # check it assert ipaddr in list_ipset(ipset_name) # delete an entry ipset.delete(ipset_name, ipaddr) # check it assert ipaddr not in list_ipset(ipset_name) def test_swap(ipset): ipset_name_a = str(uuid4())[:16] ipset_name_b = str(uuid4())[:16] ipaddr_a = "192.168.1.1" ipaddr_b = "10.0.0.1" # create sets ipset.create(ipset_name_a) ipset.create(ipset_name_b) # add ips ipset.add(ipset_name_a, ipaddr_a) ipset.add(ipset_name_b, ipaddr_b) assert ipaddr_a in list_ipset(ipset_name_a) assert ipaddr_b in list_ipset(ipset_name_b) # swap sets ipset.swap(ipset_name_a, ipset_name_b) assert ipaddr_a in list_ipset(ipset_name_b) assert ipaddr_b in list_ipset(ipset_name_a) # remove sets ipset.destroy(ipset_name_a) ipset.destroy(ipset_name_b) def test_counters(ipset, ipset_name): ipaddr = "172.16.202.202" 
ipset.create(ipset_name, counters=True) ipset.add(ipset_name, ipaddr) assert ipaddr in list_ipset(ipset_name) assert list_ipset(ipset_name)[ipaddr][0] == 0 # Bytes assert list_ipset(ipset_name)[ipaddr][1] == 0 # Packets ipset.destroy(ipset_name) ipset.create(ipset_name, counters=False) ipset.add(ipset_name, ipaddr) assert ipaddr in list_ipset(ipset_name) assert list_ipset(ipset_name)[ipaddr][0] is None assert list_ipset(ipset_name)[ipaddr][1] is None def test_comments(ipset, ipset_name): ipaddr = "172.16.202.202" comment = "a very simple comment" ipset.create(ipset_name, comment=True) ipset.add(ipset_name, ipaddr, comment=comment) assert ipaddr in list_ipset(ipset_name) assert list_ipset(ipset_name)[ipaddr][2] == comment def test_skbmark(ipset, ipset_name): ipaddr = "172.16.202.202" skbmark = (0x100, 0xFFFFFFFF) ipset.create(ipset_name, skbinfo=True) ipset.add(ipset_name, ipaddr, skbmark=skbmark) assert ipaddr in list_ipset(ipset_name) assert list_ipset(ipset_name)[ipaddr][4] == skbmark def test_skbprio(ipset, ipset_name): ipaddr = "172.16.202.202" skbprio = (1, 10) ipset.create(ipset_name, skbinfo=True) ipset.add(ipset_name, ipaddr, skbprio=skbprio) assert ipaddr in list_ipset(ipset_name) assert list_ipset(ipset_name)[ipaddr][5] == skbprio def test_skbqueue(ipset, ipset_name): ipaddr = "172.16.202.202" skbqueue = 1 ipset.create(ipset_name, skbinfo=True) ipset.add(ipset_name, ipaddr, skbqueue=skbqueue) assert ipaddr in list_ipset(ipset_name) assert list_ipset(ipset_name)[ipaddr][6] == skbqueue def test_maxelem(ipset, ipset_name): ipset.create(ipset_name, maxelem=1) data = ipset.list(ipset_name)[0].get_attr("IPSET_ATTR_DATA") maxelem = data.get_attr("IPSET_ATTR_MAXELEM") assert maxelem == 1 def test_hashsize(ipset, ipset_name): min_size = 64 ipset.create(ipset_name, hashsize=min_size) data = ipset.list(ipset_name)[0].get_attr("IPSET_ATTR_DATA") hashsize = data.get_attr("IPSET_ATTR_HASHSIZE") assert hashsize == min_size def test_forceadd(ipset, ipset_name): 
ipset.create(ipset_name, forceadd=True) res = ipset.list(ipset_name)[0].get_attr("IPSET_ATTR_DATA") flags = res.get_attr("IPSET_ATTR_CADT_FLAGS") assert flags & IPSET_FLAG_WITH_FORCEADD def test_flush(ipset, ipset_name): ipset.create(ipset_name) ip_a = "1.1.1.1" ip_b = "1.2.3.4" ipset.add(ipset_name, ip_a) ipset.add(ipset_name, ip_b) assert ip_a in list_ipset(ipset_name) assert ip_b in list_ipset(ipset_name) ipset.flush(ipset_name) assert ip_a not in list_ipset(ipset_name) assert ip_b not in list_ipset(ipset_name) def test_rename(ipset, ipset_name): ipset_name_bis = str(uuid4())[:16] ipset.create(ipset_name) ipset.rename(ipset_name, ipset_name_bis) assert ipset_exists(ipset_name_bis) assert not ipset_exists(ipset_name) def test_timeout(ipset, ipset_name): ip = "1.2.3.4" ipset.create(ipset_name, timeout=1) ipset.add(ipset_name, ip) sleep(2) assert ip not in list_ipset(ipset_name) # check that we can overwrite default timeout value ipset.add(ipset_name, ip, timeout=5) sleep(2) assert ip in list_ipset(ipset_name) assert list_ipset(ipset_name)[ip][3] > 0 # timeout sleep(3) assert ip not in list_ipset(ipset_name) def test_net_and_iface_stypes(ipset, ipset_name): test_values = ( ("hash:net", ("192.168.1.0/31", "192.168.12.0/24")), ("hash:net,iface", ("192.168.1.0/24,eth0", "192.168.2.0/24,wlan0")), ) for stype, test_values in test_values: ipset.create(ipset_name, stype=stype) etype = stype.split(":", 1)[1] assert ipset_exists(ipset_name) for entry in test_values: ipset.add(ipset_name, entry, etype=etype) assert entry in list_ipset(ipset_name) ipset.delete(ipset_name, entry, etype=etype) assert entry not in list_ipset(ipset_name) ipset.destroy(ipset_name) assert not ipset_exists(ipset_name) def test_tuple_support(ipset, ipset_name): test_values = ( ( "hash:net,iface", (("192.168.1.0/24", "eth0"), ("192.168.2.0/24", "wlan0")), ), ) for stype, test_values in test_values: ipset.create(ipset_name, stype=stype) etype = stype.split(":", 1)[1] assert ipset_exists(ipset_name) for 
entry in test_values: ipset.add(ipset_name, entry, etype=etype) assert ipset.test(ipset_name, entry, etype=etype) ipset.delete(ipset_name, entry, etype=etype) assert not ipset.test(ipset_name, entry, etype=etype) ipset.destroy(ipset_name) def test_net_with_dash(ipset, ipset_name): stype = "hash:net" ipset.create(ipset_name, stype=stype) # The kernel will split this kind of strings to subnets ipset.add(ipset_name, "192.168.1.0-192.168.1.33", etype="net") assert "192.168.1.0/27" in list_ipset(ipset_name) assert "192.168.1.32/31" in list_ipset(ipset_name) def test_double_net(ipset, ipset_name): stype = "hash:net,port,net" etype = "net,port,net" ipset.create(ipset_name, stype=stype) port = PortEntry(80, protocol=socket.getprotobyname("tcp")) ipset.add( ipset_name, ("192.168.0.0/24", port, "192.168.2.0/24"), etype=etype ) def test_custom_hash_values(ipset, ipset_name): stype = "hash:net" maxelem = 16384 hashsize = 64 ipset.create(ipset_name, stype=stype, maxelem=maxelem, hashsize=hashsize) res = ipset.list(ipset_name)[0].get_attr("IPSET_ATTR_DATA") assert res.get_attr("IPSET_ATTR_HASHSIZE") == hashsize assert res.get_attr("IPSET_ATTR_MAXELEM") == maxelem assert res.get_attr("IPSET_ATTR_REFERENCES") == 0 def test_list_set(ipset, ipset_name): subname = str(uuid4())[:16] subtype = "hash:net" ipset.create(subname, stype=subtype) ipset.create(ipset_name, "list:set") ipset.add(ipset_name, subname, etype="set") assert subname in list_ipset(ipset_name) assert ipset.test(ipset_name, subname, etype="set") res = ipset.list(subname)[0].get_attr("IPSET_ATTR_DATA") assert res.get_attr("IPSET_ATTR_REFERENCES") == 1 ipset.delete(ipset_name, subname, etype="set") assert subname not in list_ipset(ipset_name) ipset.destroy(subname) def test_bitmap_port(ipset, ipset_name): ipset_type = "bitmap:port" etype = "port" port_range = (1000, 6000) ipset.create(ipset_name, stype=ipset_type, bitmap_ports_range=port_range) ipset.add(ipset_name, 1002, etype=etype) assert ipset.test(ipset_name, 1002, 
etype=etype) add_range = PortRange(2000, 3000, protocol=None) ipset.add(ipset_name, add_range, etype=etype) assert ipset.test(ipset_name, 2001, etype=etype) assert ipset.test(ipset_name, 3000, etype=etype) assert not ipset.test(ipset_name, 4000, etype=etype) # Check that delete is working as well ipset.delete(ipset_name, add_range, etype=etype) assert not ipset.test(ipset_name, 2001, etype=etype) # Test PortEntry without protocol set port_entry = PortEntry(2001) ipset.add(ipset_name, port_entry, etype=etype) try: ipset.add(ipset_name, 18, etype=etype) assert False except NetlinkError as e: assert e.code == IPSET_ERR_TYPE_SPECIFIC def test_port_range_with_proto(ipset, ipset_name): ipset_type = "hash:net,port" etype = "net,port" port_range = PortRange(1000, 2000, protocol=socket.IPPROTO_UDP) port_entry = PortEntry(1001, protocol=socket.IPPROTO_UDP) ipset.create(ipset_name, stype=ipset_type) ipset.add(ipset_name, ("192.0.2.0/24", port_range), etype=etype) assert ipset.test(ipset_name, ("192.0.2.0/24", port_range), etype=etype) assert ipset.test(ipset_name, ("192.0.2.2/32", port_entry), etype=etype) # change protocol, that should not be in port_range.protocol = socket.IPPROTO_TCP assert not ipset.test( ipset_name, ("192.0.2.0/24", port_range), etype="net,port" ) port_entry.port = 2 assert not ipset.test( ipset_name, ("192.0.2.0/24", port_entry), etype="net,port" ) # same example than in ipset man pages proto = socket.getprotobyname("vrrp") port_entry.port = 0 port_entry.protocol = proto ipset.add(ipset_name, ("192.0.2.0/24", port_entry), etype=etype) ipset.test(ipset_name, ("192.0.2.0/24", port_entry), etype=etype) def test_set_by(ipset, ipset_name): old_vers = ipset._proto_version # check revision supported by kernel msg = ipset.get_proto_version() version = msg[0].get_attr("IPSET_ATTR_PROTOCOL") if version < 7: pytest.skip("Kernel does not support this feature") # set version ipset._proto_version = 7 # create set ipset.create(ipset_name) # get index msg = 
ipset.get_set_byname(ipset_name) idx = msg[0].get_attr("IPSET_ATTR_INDEX") # get set name by index msg = ipset.get_set_byindex(idx) name_found = msg[0].get_attr("IPSET_ATTR_SETNAME") # restore version back to original ipset._proto_version = old_vers assert ipset_name == name_found pyroute2-0.7.11/tests/test_linux/test_iwutil.py000066400000000000000000000022631455030217500216640ustar00rootroot00000000000000import collections import errno import pytest from pr2test.marks import require_root from pyroute2 import IW, IPRoute from pyroute2.netlink.exceptions import NetlinkError @pytest.fixture def ctx(): iw = None ifname = None wiphy = None index = None try: iw = IW() except NetlinkError as e: if e.code == errno.ENOENT: pytest.skip('nl80211 not supported') raise ifaces = iw.get_interfaces_dump() if not ifaces: raise pytest.skip('no wireless interfaces found') for i in ifaces: ifname = i.get_attr('NL80211_ATTR_IFNAME') index = i.get_attr('NL80211_ATTR_IFINDEX') wiphy = i.get_attr('NL80211_ATTR_WIPHY') if index: break else: pytest.skip('can not detect the interface to use') yield collections.namedtuple( 'WirelessContext', ['iw', 'ifname', 'index', 'wiphy'] )(iw, ifname, index, wiphy) iw.close() def test_list_wiphy(ctx): ctx.iw.list_wiphy() def test_list_dev(ctx): ctx.iw.list_dev() @require_root def test_scan(ctx): with IPRoute() as ipr: ipr.link('set', index=ctx.index, state='up') ctx.iw.scan(ctx.index) pyroute2-0.7.11/tests/test_linux/test_ndb/000077500000000000000000000000001455030217500205355ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_ndb/__init__.py000066400000000000000000000000001455030217500226340ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_ndb/test_address.py000066400000000000000000000122501455030217500235730ustar00rootroot00000000000000from socket import AF_INET, AF_INET6 import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import address_exists, 
interface_exists pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize( 'ipam,prefixlen', (('new_ipaddr', 24), ('new_ip6addr', 64)) ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_add_del_ip_dict(context, ipam, prefixlen): ifname = context.new_ifname ifaddr1 = getattr(context, ipam) ifaddr2 = getattr(context, ipam) ( context.ndb.interfaces.create( ifname=ifname, kind='dummy', state='down' ) .add_ip({'address': ifaddr1, 'prefixlen': prefixlen}) .add_ip({'address': ifaddr2, 'prefixlen': prefixlen}) .commit() ) assert address_exists(context.netns, ifname=ifname, address=ifaddr1) assert address_exists(context.netns, ifname=ifname, address=ifaddr2) ( context.ndb.interfaces[{'ifname': ifname}] .del_ip({'address': ifaddr2, 'prefixlen': prefixlen}) .del_ip({'address': ifaddr1, 'prefixlen': prefixlen}) .commit() ) assert not address_exists(context.netns, ifname=ifname, address=ifaddr1) assert not address_exists(context.netns, ifname=ifname, address=ifaddr2) @pytest.mark.parametrize( 'ipam,prefixlen', (('new_ipaddr', 24), ('new_ip6addr', 64)) ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_add_del_ip_string(context, ipam, prefixlen): ifname = context.new_ifname ifaddr1 = f'{getattr(context, ipam)}/{prefixlen}' ifaddr2 = f'{getattr(context, ipam)}/{prefixlen}' ( context.ndb.interfaces.create( ifname=ifname, kind='dummy', state='down' ) .add_ip(ifaddr1) .add_ip(ifaddr2) .commit() ) assert address_exists(context.netns, ifname=ifname, address=ifaddr1) assert address_exists(context.netns, ifname=ifname, address=ifaddr2) ( context.ndb.interfaces[{'ifname': ifname}] .del_ip(ifaddr2) .del_ip(ifaddr1) .commit() ) assert not address_exists(context.netns, ifname=ifname, address=ifaddr1) assert not address_exists(context.netns, ifname=ifname, address=ifaddr2) @pytest.mark.parametrize( 'ipam,prefixlen,family', (('new_ipaddr', 
24, AF_INET), ('new_ip6addr', 64, AF_INET6)), ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_del_ip_match(context, ipam, prefixlen, family): ifname = context.new_ifname ipaddr1 = getattr(context, ipam) ipaddr2 = getattr(context, ipam) ipaddr3 = getattr(context, ipam) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr1, prefixlen=prefixlen) .add_ip(address=ipaddr2, prefixlen=prefixlen) .add_ip(address=ipaddr3, prefixlen=prefixlen) .commit() ) assert address_exists(context.netns, ifname=ifname, address=ipaddr1) assert address_exists(context.netns, ifname=ifname, address=ipaddr2) assert address_exists(context.netns, ifname=ifname, address=ipaddr3) (context.ndb.interfaces[ifname].del_ip(family=family).commit()) assert not address_exists(context.netns, ifname=ifname, address=ipaddr1) assert not address_exists(context.netns, ifname=ifname, address=ipaddr2) assert not address_exists(context.netns, ifname=ifname, address=ipaddr3) @pytest.mark.parametrize( 'ipam,prefixlen', (('new_ipaddr', 24), ('new_ip6addr', 64)) ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_del_ip_fail(context, ipam, prefixlen): ifname = context.new_ifname ipaddr = f'{getattr(context, ipam)}/{prefixlen}' ipaddr_fail = f'{getattr(context, ipam)}/{prefixlen}' ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(ipaddr) .commit() ) assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ipaddr) with pytest.raises(KeyError): (context.ndb.interfaces[ifname].del_ip(ipaddr_fail).commit()) assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ipaddr) @pytest.mark.parametrize( 'ipam,prefixlen', (('new_ipaddr', 24), ('new_ip6addr', 64)) ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_del_ip_match_fail(context, ipam, prefixlen): ifname = 
context.new_ifname ipaddr = getattr(context, ipam) ipaddr_fail = getattr(context, ipam) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=prefixlen) .commit() ) assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ipaddr) with pytest.raises(KeyError): (context.ndb.interfaces[ifname].del_ip(address=ipaddr_fail).commit()) assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ipaddr) pyroute2-0.7.11/tests/test_linux/test_ndb/test_altnames.py000066400000000000000000000022661455030217500237600ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pr2test.tools import interface_exists pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_altname_complex(context): index, ifname = context.default_interface altname1 = context.new_ifname altname2 = context.new_ifname with context.ndb.interfaces[ifname] as i: i.add_altname(altname1) assert interface_exists(context.netns, altname=altname1) assert not interface_exists(context.netns, altname=altname2) with context.ndb.interfaces[ifname] as i: i.del_altname(altname1) i.add_altname(altname2) assert interface_exists(context.netns, altname=altname2) assert not interface_exists(context.netns, altname=altname1) with context.ndb.interfaces[ifname] as i: i.del_altname(altname2) assert not interface_exists(context.netns, altname=altname1) assert not interface_exists(context.netns, altname=altname2) pyroute2-0.7.11/tests/test_linux/test_ndb/test_backup.py000066400000000000000000000012301455030217500234070ustar00rootroot00000000000000import sqlite3 import sys import uuid import 
pytest @pytest.mark.skipif( sys.version_info < (3, 7), reason='SQLite3 backup not supported on this Python version', ) def test_file_backup(context): filename = str(uuid.uuid4()) + '-backup.db' context.ndb.backup(filename) backup = sqlite3.connect(filename) cursor = backup.cursor() cursor.execute('SELECT f_IFLA_IFNAME FROM interfaces WHERE f_index > 0') interfaces_from_backup = {x[0] for x in cursor.fetchall()} with context.ndb.interfaces.summary() as summary: interfaces_from_ndb = {x.ifname for x in summary} assert interfaces_from_ndb == interfaces_from_backup pyroute2-0.7.11/tests/test_linux/test_ndb/test_chaotic.py000066400000000000000000000030051455030217500235560ustar00rootroot00000000000000import pytest from pr2test.marks import require_root from pr2test.tools import address_exists from pyroute2 import NDB pytestmark = [require_root()] @pytest.mark.xfail(reason='flaky test, only to collect failure logs') def __test_add_del_ip_dict(context): ifname = context.new_ifname ifaddr1 = context.new_ipaddr ifaddr2 = context.new_ipaddr log_spec = ( context.spec.log_spec[0] + '.chaotic', context.spec.log_spec[1], ) with NDB( log=log_spec, sources=[ { 'target': 'localhost', 'kind': 'ChaoticIPRoute', 'success_rate': 0.98, } ], ) as test_ndb: ( test_ndb.interfaces.create( ifname=ifname, kind='dummy', state='down' ) .add_ip({'address': ifaddr1, 'prefixlen': 24}) .add_ip({'address': ifaddr2, 'prefixlen': 24}) .commit() ) assert address_exists(context.netns, ifname=ifname, address=ifaddr1) assert address_exists(context.netns, ifname=ifname, address=ifaddr2) ( test_ndb.interfaces[{'ifname': ifname}] .del_ip({'address': ifaddr2, 'prefixlen': 24}) .del_ip({'address': ifaddr1, 'prefixlen': 24}) .commit() ) assert not address_exists( context.netns, ifname=ifname, address=ifaddr1 ) assert not address_exists( context.netns, ifname=ifname, address=ifaddr2 ) 
pyroute2-0.7.11/tests/test_linux/test_ndb/test_db.py000066400000000000000000000021051455030217500225310ustar00rootroot00000000000000import sqlite3 import pytest from pyroute2 import NDB try: import psycopg2 except ImportError: pytest.skip('no psycopg2 module installed', allow_module_level=True) def test_no_cleanup(spec): # start and stop the DB, leaving all the data in the DB file NDB( db_provider='sqlite3', db_spec=spec.db_spec, db_cleanup=False, log=spec.log_spec, ).close() # open the DB file db = sqlite3.connect(spec.db_spec) cursor = db.cursor() cursor.execute('SELECT * FROM interfaces') interfaces = cursor.fetchall() # at least two records: idx 0 and loopback assert len(interfaces) > 1 # all the interfaces must be of the same source, 'localhost' assert set([x[0] for x in interfaces]) == set(('localhost',)) def test_postgres_fail(spec): try: NDB( db_provider='postgres', db_spec={'dbname': 'some-nonsense-db-name'}, log=spec.log_spec, ).close() except psycopg2.OperationalError: return raise Exception('postgresql exception was expected') pyroute2-0.7.11/tests/test_linux/test_ndb/test_examples.py000066400000000000000000000031031455030217500237610ustar00rootroot00000000000000import os import pathlib import sys import pytest from pr2test.marks import require_root pytestmark = [require_root()] def get_examples(*argv): root = pathlib.Path(os.environ['WORKSPACE']) examples = [ file for file in root.joinpath(*argv).iterdir() if not file.name.endswith('.swp') ] return { 'argnames': 'example', 'argvalues': examples, 'ids': [x.name for x in examples], } @pytest.mark.parametrize(**get_examples('examples', 'pyroute2-cli')) def test_cli_examples(example, pytester, context): with example.open('r') as text: result = pytester.run('pyroute2-cli', stdin=text) assert result.ret == 0 @pytest.mark.parametrize(**get_examples('examples', 'ndb')) def test_ndb_examples(example, pytester, context): argv = [] with example.open('r') as text: for line in text.readlines(): line = line.strip() 
if line == ':notest:': pytest.skip() elif line.startswith(':test:argv:'): argv.append(line.split(':')[-1]) elif line.startswith(':test:environ:'): key, value = line.split(':')[-1].split('=') os.environ[key] = value result = pytester.run(sys.executable, example.as_posix(), *argv) assert result.ret == 0 def test_basic(tmpdir, pytester, context): pytester.makefile('.pr2', test='interfaces lo mtu') with open('test.pr2', 'r') as text: result = pytester.run("pyroute2-cli", stdin=text) assert result.ret == 0 assert result.outlines == ['65536'] pyroute2-0.7.11/tests/test_linux/test_ndb/test_fdb.py000066400000000000000000000013211455030217500226760ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import fdb_record_exists pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], tables=[None], dbs=['sqlite3/:memory:', 'postgres/pr2test'], ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_fdb_create(context): spec = { 'ifindex': context.default_interface.index, 'lladdr': '00:11:22:33:44:55', } context.ndb.fdb.create(**spec).commit() assert fdb_record_exists(context.netns, **spec) context.ndb.fdb[spec].remove().commit() assert not fdb_record_exists(context.netns, **spec) pyroute2-0.7.11/tests/test_linux/test_ndb/test_init.py000066400000000000000000000020471455030217500231140ustar00rootroot00000000000000import uuid from socket import AF_INET, AF_INET6 import pytest from pr2test.marks import require_root from pyroute2 import NDB from pyroute2.netlink.rtnl import RTMGRP_IPV4_IFADDR, RTMGRP_LINK pytestmark = [require_root()] @pytest.mark.parametrize('kind', ('local', 'netns')) def test_netlink_groups(kind): spec = { 'target': 'localhost', 'kind': kind, 'groups': RTMGRP_LINK | RTMGRP_IPV4_IFADDR, } if kind == 'netns': spec['netns'] = str(uuid.uuid4()) with NDB(sources=[spec]) as ndb: assert 'lo' in ndb.interfaces with 
ndb.interfaces['lo'] as lo: lo.set(state='up') addresses4 = ndb.addresses.dump() addresses4.select_records(family=AF_INET) assert addresses4.count() > 0 addresses6 = ndb.addresses.dump() addresses6.select_records(family=AF_INET6) assert addresses6.count() == 0 routes = ndb.routes.dump() assert routes.count() == 0 neighbours = ndb.neighbours.dump() assert neighbours.count() == 0 pyroute2-0.7.11/tests/test_linux/test_ndb/test_interface_create.py000066400000000000000000000154531455030217500254410ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pr2test.tools import address_exists, interface_exists from pyroute2 import NetlinkError pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_context_manager(context): ifname = context.new_ifname address = '00:11:22:36:47:58' spec = {'ifname': ifname, 'kind': 'dummy'} ifobj = context.ndb.interfaces.create(**spec) with ifobj: pass assert interface_exists(context.netns, ifname=ifname, state='down') with ifobj: ifobj['state'] = 'up' ifobj['address'] = address assert interface_exists( context.netns, ifname=ifname, address=address, state='up' ) with ifobj: ifobj.remove() assert not interface_exists(context.netns, ifname=ifname) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_fail(context): ifname = context.new_ifname kind = context.new_ifname spec = {'ifname': ifname, 'kind': kind} ifobj = context.ndb.interfaces.create(**spec) save = dict(ifobj) try: ifobj.commit() except NetlinkError as e: assert e.code == 95 # Operation not supported assert save == dict(ifobj) assert ifobj.state == 'invalid' assert not interface_exists(context.netns, ifname=ifname) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_veth_simple(context): 
ifname = context.new_ifname peername = context.new_ifname spec = {'ifname': ifname, 'peer': peername, 'kind': 'veth'} context.ndb.interfaces.create(**spec).commit() spec_ifl = {'ifname': ifname} spec_pl = {'ifname': peername} iflink = context.ndb.interfaces[spec_ifl]['link'] plink = context.ndb.interfaces[spec_pl]['link'] assert iflink == context.ndb.interfaces[spec_pl]['index'] assert plink == context.ndb.interfaces[spec_ifl]['index'] assert interface_exists(context.netns, ifname=ifname) assert interface_exists(context.netns, ifname=peername) context.ndb.interfaces[spec_ifl].remove().commit() assert not interface_exists(context.netns, ifname=ifname) assert not interface_exists(context.netns, ifname=peername) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_veth_spec(context): ifname = context.new_ifname peername = context.new_ifname nsname = context.new_nsname context.ndb.sources.add(netns=nsname) spec = { 'ifname': ifname, 'kind': 'veth', 'peer': { 'ifname': peername, 'address': '00:11:22:33:44:55', 'net_ns_fd': nsname, }, } context.ndb.interfaces.create(**spec).commit() context.ndb.interfaces.wait(target=nsname, ifname=peername, timeout=5) iflink = context.ndb.interfaces[{'ifname': ifname}]['link'] plink = context.ndb.interfaces[{'target': nsname, 'ifname': peername}][ 'link' ] assert iflink == ( context.ndb.interfaces[{'target': nsname, 'ifname': peername}]['index'] ) assert plink == (context.ndb.interfaces[{'ifname': ifname}]['index']) assert interface_exists(context.netns, ifname=ifname) assert interface_exists(nsname, ifname=peername) assert not interface_exists(nsname, ifname=ifname) assert not interface_exists(context.netns, ifname=peername) (context.ndb.interfaces[{'ifname': ifname}].remove().commit()) assert not interface_exists(context.netns, ifname=ifname) assert not interface_exists(nsname, ifname=ifname) assert not interface_exists(context.netns, ifname=peername) assert not interface_exists(nsname, ifname=peername) 
context.ndb.sources.remove(nsname) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_dummy(context): ifname = context.new_ifname spec = {'ifname': ifname, 'kind': 'dummy', 'address': '00:11:22:33:44:55'} context.ndb.interfaces.create(**spec).commit() assert interface_exists( context.netns, ifname=ifname, address='00:11:22:33:44:55' ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_bridge(context): bridge = context.new_ifname brport = context.new_ifname spec_br = {'ifname': bridge, 'kind': 'bridge'} spec_pt = {'ifname': brport, 'kind': 'dummy'} (context.ndb.interfaces.create(**spec_br).commit()) ( context.ndb.interfaces.create(**spec_pt) .set('master', context.ndb.interfaces[spec_br]['index']) .commit() ) assert interface_exists(context.netns, ifname=bridge) assert interface_exists( context.netns, ifname=brport, master=context.ndb.interfaces[spec_br]['index'], ) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_vrf(context): vrf = context.new_ifname spec = {'ifname': vrf, 'kind': 'vrf'} (context.ndb.interfaces.create(**spec).set('vrf_table', 42).commit()) assert interface_exists(context.netns, ifname=vrf) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_vlan(context): host = context.new_ifname vlan = context.new_ifname spec_host = {'ifname': host, 'kind': 'dummy'} spec_vlan = {'ifname': vlan, 'kind': 'vlan'} (context.ndb.interfaces.create(**spec_host).commit()) ( context.ndb.interfaces.create(**spec_vlan) .set('link', context.ndb.interfaces[spec_host]['index']) .set('vlan_id', 101) .commit() ) assert interface_exists(context.netns, ifname=vlan) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_vxlan(context): host = context.new_ifname vxlan = context.new_ifname spec_host = {'ifname': host, 'kind': 'dummy'} spec_vxlan = {'ifname': vxlan, 'kind': 'vxlan'} (context.ndb.interfaces.create(**spec_host).commit()) ( 
context.ndb.interfaces.create(**spec_vxlan) .set('vxlan_link', context.ndb.interfaces[spec_host]['index']) .set('vxlan_id', 101) .set('vxlan_group', '239.1.1.1') .set('vxlan_ttl', 16) .commit() ) assert interface_exists(context.netns, ifname=vxlan) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_basic_address(context): ifaddr = context.new_ipaddr ifname = context.new_ifname spec_if = {'ifname': ifname, 'kind': 'dummy', 'state': 'up'} i = context.ndb.interfaces.create(**spec_if) i.commit() spec_ad = {'index': i['index'], 'address': ifaddr, 'prefixlen': 24} a = context.ndb.addresses.create(**spec_ad) a.commit() assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ifaddr) pyroute2-0.7.11/tests/test_linux/test_ndb/test_interface_set.py000066400000000000000000000033721455030217500247660ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import interface_exists pytestmark = [require_root()] tnl_matrix = make_test_matrix( targets=['local', 'netns'], types=['gre', 'ipip', 'sit'], dbs=['sqlite3/:memory:', 'postgres/pr2test'], ) def _test_tunnel_endpoints(context, state): ifname = context.new_ifname ipaddr_local1 = context.new_ipaddr ipaddr_local2 = context.new_ipaddr ipaddr_remote = context.new_ipaddr kind = context.kind ( context.ndb.interfaces.create( **{ 'ifname': ifname, 'state': state, 'kind': kind, f'{kind}_local': ipaddr_local1, f'{kind}_remote': ipaddr_remote, } ).commit() ) def match(ifname, ipaddr): return ( lambda x: x.get_nested('IFLA_LINKINFO', 'IFLA_INFO_KIND') == kind and x.get_attr('IFLA_IFNAME') == ifname and x.get_nested( 'IFLA_LINKINFO', 'IFLA_INFO_DATA', 'IFLA_%s_LOCAL' % kind.upper(), ) == ipaddr ) assert interface_exists(context.netns, match(ifname, ipaddr_local1)) ( context.ndb.interfaces[ifname] .set(f'{kind}_local', ipaddr_local2) .commit() ) assert 
interface_exists(context.netns, match(ifname, ipaddr_local2)) @pytest.mark.parametrize('context', tnl_matrix, indirect=True) def test_tunnel_endpoints_down(context): return _test_tunnel_endpoints(context, 'down') @pytest.mark.parametrize('context', tnl_matrix, indirect=True) def test_tunnel_endpoints_up(context): return _test_tunnel_endpoints(context, 'up') pyroute2-0.7.11/tests/test_linux/test_ndb/test_mpls.py000066400000000000000000000033721455030217500231260ustar00rootroot00000000000000from socket import AF_INET import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pyroute2.common import AF_MPLS pytestmark = [require_root()] test_matrix = make_test_matrix(dbs=['sqlite3/:memory:', 'postgres/pr2test']) def get_mpls_routes(context): return len(tuple(context.ndb.routes.getmany({'family': AF_MPLS}))) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_via_ipv4(context): ifname = context.new_ifname ifaddr = context.new_ipaddr router = context.new_ipaddr l1 = get_mpls_routes(context) i = ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip('%s/24' % (ifaddr,)) .commit() ) rt_spec = { 'family': AF_MPLS, 'oif': i['index'], 'via': {'family': AF_INET, 'addr': router}, 'newdst': {'label': 0x20}, } rt = context.ndb.routes.create(**rt_spec).commit() l2 = get_mpls_routes(context) assert l2 > l1 rt.remove().commit() l3 = get_mpls_routes(context) assert l3 < l2 assert rt.state == 'invalid' @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_encap_mpls(context): ifname = context.new_ifname ifaddr = context.new_ipaddr gateway = context.new_ipaddr ipnet = str(context.ipnets[1].network) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip('%s/24' % (ifaddr,)) .commit() ) rt_spec = { 'dst': '%s/24' % ipnet, 'gateway': gateway, 'encap': {'type': 'mpls', 'labels': 
[20, 30]}, } (context.ndb.routes.create(**rt_spec).commit()) pyroute2-0.7.11/tests/test_linux/test_ndb/test_neighbour.py000066400000000000000000000044121455030217500241310ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_implemented from pr2test.marks import require_root from pr2test.tools import neighbour_exists pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_add_neighbour_simple(context): ifname = context.new_ifname ipaddr = context.new_ipaddr neighbour = context.new_ipaddr ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=24) .commit() ) ( context.ndb.neighbours.create( ifindex=context.ndb.interfaces[ifname]['index'], dst=neighbour, lladdr='00:11:22:33:44:55', ).commit() ) assert neighbour_exists( context.netns, ifindex=context.ndb.interfaces[ifname]['index'], dst=neighbour, lladdr='00:11:22:33:44:55', ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_add_neighbour_chain(context): ifname = context.new_ifname ipaddr = context.new_ipaddr neighbour = context.new_ipaddr ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .ipaddr.create(address=ipaddr, prefixlen=24) .commit() .chain.neighbours.create(dst=neighbour, lladdr='00:11:22:33:44:55') .commit() ) assert neighbour_exists( context.netns, ifindex=context.ndb.interfaces[ifname]['index'], dst=neighbour, lladdr='00:11:22:33:44:55', ) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_implemented def test_add_neighbour_method(context): ifname = context.new_ifname ipaddr = context.new_ipaddr neighbour = context.new_ipaddr ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=24) .add_neighbour(dst=neighbour, 
lladdr='00:11:22:33:44:55') .commit() ) assert neighbour_exists( context.netns, ifindex=context.ndb.interfaces[ifname]['index'], dst=neighbour, lladdr='00:11:22:33:44:55', ) pyroute2-0.7.11/tests/test_linux/test_ndb/test_netns.py000066400000000000000000000103541455030217500233000ustar00rootroot00000000000000import logging import uuid import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import address_exists, interface_exists from pyroute2 import NDB, netns pytestmark = [require_root()] test_matrix = make_test_matrix(dbs=['sqlite3/:memory:', 'postgres/pr2test']) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_create_remove(context): nsname = context.new_nsname with NDB(log=(context.new_log, logging.DEBUG)) as ndb: # create a netns via ndb.netns ndb.netns.create(nsname).commit() assert nsname in netns.listnetns() # remove the netns ndb.netns[nsname].remove().commit() assert nsname not in netns.listnetns() @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_views_contain(context): nsname = context.new_nsname v0 = context.new_ifname v1 = context.new_ifname context.ndb.sources.add(netns=nsname) context.ndb.interfaces.create( **{ 'ifname': v0, 'kind': 'veth', 'peer': {'ifname': v1, 'net_ns_fd': nsname}, } ).commit() assert v0 in context.ndb.interfaces assert v1 in context.ndb.interfaces # should be fixed? 
assert {'ifname': v0, 'target': 'localhost'} in context.ndb.interfaces assert {'ifname': v1, 'target': nsname} in context.ndb.interfaces @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_interface_move(context): ifname = context.new_ifname ifaddr = context.new_ipaddr nsname = context.new_nsname context.ndb.sources.add(netns=nsname) # create the interface (context.ndb.interfaces.create(ifname=ifname, kind='dummy').commit()) # move it to a netns (context.ndb.interfaces[ifname].set('net_ns_fd', nsname).commit()) # setup the interface only when it is moved ( context.ndb.interfaces.wait(target=nsname, ifname=ifname) .set('state', 'up') .set('address', '00:11:22:33:44:55') .add_ip('%s/24' % ifaddr) .commit() ) assert interface_exists( nsname, ifname=ifname, state='up', address='00:11:22:33:44:55' ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_source_basic(context): ifname = context.new_ifname ifaddr1 = context.new_ipaddr ifaddr2 = context.new_ipaddr ifaddr3 = context.new_ipaddr nsname = context.new_nsname context.ndb.sources.add(netns=nsname) ( context.ndb.interfaces.create( target=nsname, ifname=ifname, kind='dummy' ) .ipaddr.create(address=ifaddr1, prefixlen=24) .create(address=ifaddr2, prefixlen=24) .create(address=ifaddr3, prefixlen=24) .commit() ) with NDB( sources=[{'target': 'localhost', 'netns': nsname, 'kind': 'netns'}] ) as ndb: if_idx = ndb.interfaces[ifname]['index'] addr1_idx = ndb.addresses['%s/24' % ifaddr1]['index'] addr2_idx = ndb.addresses['%s/24' % ifaddr2]['index'] addr3_idx = ndb.addresses['%s/24' % ifaddr3]['index'] assert if_idx == addr1_idx == addr2_idx == addr3_idx @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_localhost_implicit(context): ifname = context.new_ifname ipaddr = context.new_ipaddr nsname = context.new_nsname context.ndb.sources.add(netns=nsname) context.ndb.localhost = nsname ( context.ndb.interfaces.create(ifname=ifname, kind='dummy') 
.add_ip(address=ipaddr, prefixlen=24) .commit() ) assert interface_exists(nsname, ifname=ifname) assert address_exists(nsname, ifname=ifname, address=ipaddr) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_localhost_explicit(context): ifname = context.new_ifname ipaddr = context.new_ipaddr nsname = context.new_nsname target = str(uuid.uuid4()) context.ndb.sources.add(netns=nsname, target=target) context.ndb.localhost = target ( context.ndb.interfaces.create(ifname=ifname, kind='dummy') .add_ip(address=ipaddr, prefixlen=24) .commit() ) assert interface_exists(nsname, ifname=ifname) assert address_exists(nsname, ifname=ifname, address=ipaddr) pyroute2-0.7.11/tests/test_linux/test_ndb/test_reports.py000066400000000000000000000131441455030217500236470ustar00rootroot00000000000000import csv import json from socket import AF_INET import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pyroute2.common import basestring from pyroute2.ndb.objects import RTNL_Object from pyroute2.ndb.report import Record, RecordSet pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize( 'view,key,item', ( ('interfaces', 'ifname', 'lo'), ('routes', 'dst', '127.0.0.0/8'), ('addresses', 'address', '127.0.0.1/8'), ), ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_contains(context, view, key, item): context.ndb.interfaces['lo'].set('state', 'up').commit() getattr(context.ndb, view).wait(**{key: item, 'timeout': 10}) assert item in getattr(context.ndb, view) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_types(context): # check for the report type here assert isinstance(context.ndb.interfaces.summary(), RecordSet) # repr must be a string assert isinstance(repr(context.ndb.interfaces.summary()), basestring) 
@pytest.mark.parametrize('context', test_matrix, indirect=True) def test_iter_keys(context): for name in ('interfaces', 'addresses', 'neighbours', 'routes', 'rules'): view = getattr(context.ndb, name) for key in view: assert isinstance(key, Record) obj = view.get(key) if obj is not None: assert isinstance(obj, RTNL_Object) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_slices(context): a = list(context.ndb.rules.dump()) ln = len(a) - 1 # simple indices assert a[0] == context.ndb.rules.dump()[0] assert a[1] == context.ndb.rules.dump()[1] assert a[-1] == context.ndb.rules.dump()[-1] assert context.ndb.rules.dump()[ln] == a[-1] try: context.ndb.rules.dump()[len(a)] except IndexError: pass # slices assert a[0:] == context.ndb.rules.dump()[0:] assert a[:3] == context.ndb.rules.dump()[:3] assert a[0:3] == context.ndb.rules.dump()[0:3] assert a[1:3] == context.ndb.rules.dump()[1:3] # negative slices assert a[-3:] == context.ndb.rules.dump()[-3:] assert a[-3:-1] == context.ndb.rules.dump()[-3:-1] # mixed assert a[-ln : ln - 1] == context.ndb.rules.dump()[-ln : ln - 1] # step assert a[2:ln:2] == context.ndb.rules.dump()[2:ln:2] @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_report_chains(context): ipnet = str(context.ipnets[1].network) ipaddr = context.new_ipaddr router = context.new_ipaddr ifname = context.new_ifname ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=24) .commit() ) ( context.ndb.routes.create( dst=ipnet, dst_len=24, gateway=router, encap={'type': 'mpls', 'labels': [20, 30]}, ).commit() ) with context.ndb.routes.dump() as dump: dump.select_records(oif=context.ndb.interfaces[ifname]['index']) dump.select_records(lambda x: x.encap is not None) dump.select_fields('encap') for record in dump: encap = json.loads(record.encap) break assert isinstance(encap, list) assert encap[0]['label'] == 20 assert encap[0]['bos'] == 0 assert 
encap[1]['label'] == 30 assert encap[1]['bos'] == 1 @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_json(context): data = json.loads(''.join(context.ndb.interfaces.summary().format('json'))) assert isinstance(data, list) for row in data: assert isinstance(row, dict) class MD(csv.Dialect): quotechar = "'" doublequote = False quoting = csv.QUOTE_MINIMAL delimiter = "," lineterminator = "\n" @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_csv(context): record_length = 0 for record in context.ndb.routes.dump(): if record_length == 0: record_length = len(record) else: assert len(record) == record_length reader = csv.reader(context.ndb.routes.dump().format('csv'), dialect=MD()) for record in reader: assert len(record) == record_length @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_nested_ipaddr(context): ifname = context.new_ifname ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr with context.ndb.interfaces.create( ifname=ifname, kind='dummy', state='up' ) as interface: interface.add_ip(address=ipaddr1, prefixlen=24) interface.add_ip(address=ipaddr2, prefixlen=24) with context.ndb.interfaces[ifname].ipaddr.dump() as dump: dump.select_records(lambda x: x.family == AF_INET) assert len(repr(dump).split('\n')) == 2 @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_nested_ports(context): ifbr0 = context.new_ifname ifbr0p0 = context.new_ifname ifbr0p1 = context.new_ifname with context.ndb.interfaces as i: i.create(ifname=ifbr0p0, kind='dummy').commit() i.create(ifname=ifbr0p1, kind='dummy').commit() ( i.create(ifname=ifbr0, kind='bridge') .add_port(ifbr0p0) .add_port(ifbr0p1) .commit() ) records = len( repr(context.ndb.interfaces[ifbr0].ports.summary()).split('\n') ) # 1 port assert records == 2 pyroute2-0.7.11/tests/test_linux/test_ndb/test_rollback.py000066400000000000000000000167071455030217500237520ustar00rootroot00000000000000import pytest from 
pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import address_exists, interface_exists, route_exists pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_create(context): ifname = context.new_ifname iface = context.ndb.interfaces.create(ifname=ifname, kind='dummy').commit() assert interface_exists(context.netns, ifname=ifname) iface.rollback() assert not interface_exists(context.netns, ifname=ifname) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_remove(context): ifname = context.new_ifname iface = context.ndb.interfaces.create(ifname=ifname, kind='dummy').commit() assert interface_exists(context.netns, ifname=ifname) iface.remove().commit() assert not interface_exists(context.netns, ifname=ifname) iface.rollback() assert interface_exists(context.netns, ifname=ifname) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_set(context): ifname = context.new_ifname ( context.ndb.interfaces.create( ifname=ifname, kind='dummy', address='00:11:22:33:44:55' ).commit() ) assert interface_exists( context.netns, ifname=ifname, address='00:11:22:33:44:55' ) ( context.ndb.interfaces[ifname] .set('address', '00:11:22:aa:aa:aa') .commit() ) assert not interface_exists( context.netns, ifname=ifname, address='00:11:22:33:44:55' ) assert interface_exists( context.netns, ifname=ifname, address='00:11:22:aa:aa:aa' ) (context.ndb.interfaces[ifname].rollback()) assert not interface_exists( context.netns, ifname=ifname, address='00:11:22:aa:aa:aa' ) assert interface_exists( context.netns, ifname=ifname, address='00:11:22:33:44:55' ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_simple_deps(context): ifname = context.new_ifname ipaddr = context.new_ipaddr router = context.new_ipaddr dst = 
str(context.ipnets[1].network) # # simple dummy interface with one address and # one dependent route # ( context.ndb.interfaces.create(ifname=ifname, kind='dummy') .set('state', 'up') .add_ip(address=ipaddr, prefixlen=24) .commit() ) (context.ndb.routes.create(dst=dst, dst_len=24, gateway=router).commit()) # check everything is in place assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ipaddr) assert route_exists(context.netns, gateway=router, dst=dst, dst_len=24) # remove the interface iface = context.ndb.interfaces[ifname].remove().commit() # check there is no interface, no route assert not interface_exists(context.netns, ifname=ifname) assert not address_exists(context.netns, ifname=ifname, address=ipaddr) assert not route_exists(context.netns, gateway=router, dst=dst, dst_len=24) # revert the changes using the implicit last_save iface.rollback() assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ipaddr) assert route_exists(context.netns, gateway=router, dst=dst, dst_len=24) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_bridge_deps(context): if_br0 = context.new_ifname if_br0p0 = context.new_ifname if_br0p1 = context.new_ifname ifaddr1 = context.new_ipaddr ifaddr2 = context.new_ipaddr router = context.new_ipaddr dst = str(context.ipnets[1].network) with context.ndb.interfaces as i: i.create(ifname=if_br0p0, kind='dummy', state='up').commit() i.create(ifname=if_br0p1, kind='dummy', state='up').commit() ( i.create(ifname=if_br0, kind='bridge', state='up') .add_port(if_br0p0) .add_port(if_br0p1) .add_ip(address=ifaddr1, prefixlen=24) .add_ip(address=ifaddr2, prefixlen=24) .commit() ) (context.ndb.routes.create(dst=dst, dst_len=24, gateway=router).commit()) assert interface_exists(context.netns, ifname=if_br0) assert interface_exists(context.netns, ifname=if_br0p0) assert interface_exists(context.netns, 
ifname=if_br0p1) assert address_exists(context.netns, ifname=if_br0, address=ifaddr1) assert address_exists(context.netns, ifname=if_br0, address=ifaddr2) assert route_exists(context.netns, gateway=router, dst=dst, dst_len=24) # remove the interface iface = context.ndb.interfaces[if_br0].remove().commit() assert not interface_exists(context.netns, ifname=if_br0) assert not address_exists(context.netns, ifname=if_br0, address=ifaddr1) assert not address_exists(context.netns, ifname=if_br0, address=ifaddr2) assert not route_exists(context.netns, gateway=router, dst=dst, dst_len=24) # revert the changes using the implicit last_save iface.rollback() assert interface_exists(context.netns, ifname=if_br0) assert interface_exists(context.netns, ifname=if_br0p0) assert interface_exists(context.netns, ifname=if_br0p1) assert address_exists(context.netns, ifname=if_br0, address=ifaddr1) assert address_exists(context.netns, ifname=if_br0, address=ifaddr2) assert route_exists(context.netns, gateway=router, dst=dst, dst_len=24) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_vlan_deps(context): if_host = context.new_ifname if_vlan = context.new_ifname ifaddr1 = context.new_ipaddr ifaddr2 = context.new_ipaddr router = context.new_ipaddr dst = str(context.ipnets[1].network) ( context.ndb.interfaces.create( ifname=if_host, kind='dummy', state='up' ).commit() ) ( context.ndb.interfaces.create( ifname=if_vlan, kind='vlan', state='up', vlan_id=1001, link=if_host ) .add_ip(address=ifaddr1, prefixlen=24) .add_ip(address=ifaddr2, prefixlen=24) .commit() ) (context.ndb.routes.create(dst=dst, dst_len=24, gateway=router).commit()) # check everything is in place assert interface_exists(context.netns, ifname=if_host) assert interface_exists(context.netns, ifname=if_vlan) assert address_exists(context.netns, ifname=if_vlan, address=ifaddr1) assert address_exists(context.netns, ifname=if_vlan, address=ifaddr2) assert route_exists(context.netns, dst=dst, gateway=router) 
# remove the interface iface = context.ndb.interfaces[if_host].remove().commit() # check there is no interface, no route assert not interface_exists(context.netns, ifname=if_host) assert not interface_exists(context.netns, ifname=if_vlan) assert not address_exists(context.netns, ifname=if_vlan, address=ifaddr1) assert not address_exists(context.netns, ifname=if_vlan, address=ifaddr2) assert not route_exists(context.netns, dst=dst, gateway=router) # revert the changes using the implicit last_save iface.rollback() assert interface_exists(context.netns, ifname=if_host) assert interface_exists(context.netns, ifname=if_vlan) assert address_exists(context.netns, ifname=if_vlan, address=ifaddr1) assert address_exists(context.netns, ifname=if_vlan, address=ifaddr2) assert route_exists(context.netns, dst=dst, gateway=router) pyroute2-0.7.11/tests/test_linux/test_ndb/test_routes.py000066400000000000000000000362661455030217500235040ustar00rootroot00000000000000import random from functools import partial import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import address_exists, interface_exists, route_exists from pyroute2.ndb.objects.route import Metrics, MetricsStub from pyroute2.netlink.rtnl.rtmsg import IP6_RT_PRIO_USER, rtmsg pytestmark = [require_root()] test_matrix_simple = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize('context', test_matrix_simple, indirect=True) def test_table_undef(context): ipaddr1 = context.new_ip6addr ipaddr2 = context.new_ip6addr index, ifname = context.default_interface ( context.ndb.routes.create( dst=ipaddr1, dst_len=128, table=5000, oif=index ).commit() ) assert route_exists(context.netns, dst=ipaddr1, table=5000) assert not route_exists(context.netns, dst=ipaddr2, table=5000) assert context.ndb.routes[f'{ipaddr1}/128']['oif'] == index with pytest.raises(KeyError): context.ndb.routes[f'{ipaddr2}/128'] 
test_matrix_scopes = make_test_matrix( targets=['local', 'netns'], tables=[ (None, 0), (None, 200), (None, 253), (6001, 0), (6001, 200), (6001, 253), (None, 'universe'), (None, 'site'), (None, 'link'), (6001, 'universe'), (6001, 'site'), (6001, 'link'), ], dbs=['sqlite3/:memory:', 'postgres/pr2test'], ) @pytest.mark.parametrize('context', test_matrix_scopes, indirect=True) def test_scopes(context): ipaddr = context.new_ipaddr ifname = context.new_ifname table, scope = context.table dst = '172.24.200.142' ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=24) .commit() ) spec = { 'dst': dst, 'oif': context.ndb.interfaces[ifname]['index'], 'dst_len': 32, 'scope': scope, } if table: spec['table'] = table (context.ndb.routes.create(**spec).commit()) assert interface_exists(context.netns, ifname=ifname) assert route_exists(context.netns, **spec) (context.ndb.routes[spec].remove().commit()) assert not route_exists(context.netns, **spec) test_matrix_flags = make_test_matrix( targets=['local', 'netns'], tables=[ (None, 0), (None, 4), (None, 'onlink'), (None, ['onlink']), (6001, 0), (6001, 4), (6001, 'onlink'), (6001, ['onlink']), ], dbs=['sqlite3/:memory:', 'postgres/pr2test'], ) @pytest.mark.parametrize('context', test_matrix_flags, indirect=True) def test_flags(context): ipaddr = context.new_ipaddr ifname = context.new_ifname table, flags = context.table dst = '172.24.200.142' ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=24) .commit() ) spec = { 'dst': dst, 'oif': context.ndb.interfaces[ifname]['index'], 'dst_len': 32, 'flags': flags, 'gateway': context.new_ipaddr, } if table: spec['table'] = table (context.ndb.routes.create(**spec).commit()) assert interface_exists(context.netns, ifname=ifname) assert route_exists(context.netns, **spec) (context.ndb.routes[spec].remove().commit()) assert not route_exists(context.netns, **spec) test_matrix = 
make_test_matrix( targets=['local', 'netns'], tables=[None, 501, 5001], dbs=['sqlite3/:memory:', 'postgres/pr2test'], ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_ipv6_default_priority(context): ifname = context.new_ifname ipaddr = context.new_ip6addr table = context.table ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(f'{ipaddr}/64') .commit() ) dst = 'beef:feed:fade::' parameters = { 'dst': f'{dst}/112', 'oif': context.ndb.interfaces[ifname]['index'], 'priority': 0, 'table': table, } context.ndb.routes.create(**parameters).commit() assert route_exists(context.netns, dst=dst, table=table or 254) assert context.ndb.routes[parameters]['priority'] == IP6_RT_PRIO_USER @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_empty_target(context): ipaddr = context.new_ip6addr table = context.table index, ifname = context.default_interface ( context.ndb.routes.create( dst=ipaddr, dst_len=128, oif=index, table=table ).commit() ) assert route_exists(context.netns, dst=ipaddr, table=table or 254) ( context.ndb.routes[{'table': table, 'dst': f'{ipaddr}/128'}] .remove() .commit() ) assert not route_exists(context.netns, dst=ipaddr, table=table or 254) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_basic(context): ifaddr = context.new_ipaddr router = context.new_ipaddr ifname = context.new_ifname ipnet = str(context.ipnets[1].network) table = context.table ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .ipaddr.create(address=ifaddr, prefixlen=24) .commit() ) spec = {'dst_len': 24, 'dst': ipnet, 'gateway': router} if table: spec['table'] = table (context.ndb.routes.create(**spec).commit()) assert interface_exists(context.netns, ifname=ifname) assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists(context.netns, dst=ipnet, table=table or 254) @pytest.mark.parametrize('context', test_matrix, indirect=True) def 
test_default(context): ifaddr = context.new_ipaddr router = context.new_ipaddr ifname = context.new_ifname random.seed() tnum = random.randint(500, 600) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip('%s/24' % ifaddr) .commit() ) spec = {'dst': 'default', 'gateway': router} if context.table: table = context.table else: table = tnum spec['table'] = table (context.ndb.routes.create(**spec).commit()) assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists(context.netns, gateway=router, table=table) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_spec(context): ipaddr = context.new_ipaddr router = context.new_ipaddr ifname = context.new_ifname net = str(context.ipnets[1].network) table = context.table or 24000 ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip('%s/24' % ipaddr) .commit() ) ( context.ndb.routes.create( table=table, dst='default', gateway=router ).commit() ) (context.ndb.routes.create(dst=net, dst_len=24, gateway=router).commit()) assert route_exists(context.netns, gateway=router, table=table) assert context.ndb.routes['default'] # !!! 
the system must have this assert context.ndb.routes[{'dst': 'default', 'table': table}] assert context.ndb.routes['%s/24' % net] assert context.ndb.routes[{'dst': net, 'dst_len': 24}] @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_multipath_ipv4(context): ifname = context.new_ifname ifaddr = context.new_ipaddr hop1 = context.new_ipaddr hop2 = context.new_ipaddr ipnet = str(context.ipnets[1].network) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .ipaddr.create(address=ifaddr, prefixlen=24) .commit() ) spec = { 'dst_len': 24, 'dst': ipnet, 'multipath': [{'gateway': hop1}, {'gateway': hop2}], } if context.table: spec['table'] = context.table (context.ndb.routes.create(**spec).commit()) def match_multipath(msg): if msg.get_attr('RTA_DST') != ipnet: return False gws_match = set((hop1, hop2)) mp = msg.get_attr('RTA_MULTIPATH') if mp is None: return False gws_msg = set([x.get_attr('RTA_GATEWAY') for x in mp]) return gws_match == gws_msg assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists(context.netns, match=match_multipath) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_update_set(context): ifaddr = context.new_ipaddr router1 = context.new_ipaddr router2 = context.new_ipaddr ifname = context.new_ifname network = str(context.ipnets[1].network) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .ipaddr.create(address=ifaddr, prefixlen=24) .commit() ) spec = {'dst_len': 24, 'dst': network, 'gateway': router1} if context.table: spec['table'] = context.table r = context.ndb.routes.create(**spec).commit() assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists(context.netns, dst=network, gateway=router1) r.set('gateway', router2).commit() assert route_exists(context.netns, dst=network, gateway=router2) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_update_replace(context): ifaddr = 
context.new_ipaddr router = context.new_ipaddr ifname = context.new_ifname network = str(context.ipnets[1].network) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .ipaddr.create(address=ifaddr, prefixlen=24) .commit() ) spec = {'dst_len': 24, 'dst': network, 'priority': 10, 'gateway': router} if context.table: spec['table'] = context.table (context.ndb.routes.create(**spec).commit()) assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists(context.netns, dst=network, priority=10) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_same_multipath(context): ifaddr = context.new_ipaddr gateway1 = context.new_ipaddr gateway2 = context.new_ipaddr ifname = context.new_ifname ipnet1 = str(context.ipnets[1].network) ipnet2 = str(context.ipnets[2].network) ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip({'address': ifaddr, 'prefixlen': 24}) .commit() ) # first route with these gateways ( context.ndb.routes.create( dst=ipnet1, dst_len=24, multipath=[{'gateway': gateway1}, {'gateway': gateway2}], ).commit() ) # second route with these gateways ( context.ndb.routes.create( dst=ipnet2, dst_len=24, multipath=[{'gateway': gateway1}, {'gateway': gateway2}], ).commit() ) def match_multipath(msg): if msg.get_attr('RTA_DST') != ipnet2: return False gws_match = set((gateway1, gateway2)) mp = msg.get_attr('RTA_MULTIPATH') if mp is None: return False gws_msg = set([x.get_attr('RTA_GATEWAY') for x in mp]) return gws_match == gws_msg assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists(context.netns, match=match_multipath) def match_metrics(target, gateway, msg): if msg.get_attr('RTA_GATEWAY') != gateway: return False mtu = msg.get_attr('RTA_METRICS', rtmsg()).get_attr('RTAX_MTU', 0) return mtu == target @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_same_metrics(context): ifaddr = context.new_ipaddr gateway1 = 
context.new_ipaddr gateway2 = context.new_ipaddr ifname = context.new_ifname ipnet1 = str(context.ipnets[1].network) ipnet2 = str(context.ipnets[2].network) target = 1300 ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip({'address': ifaddr, 'prefixlen': 24}) .commit() ) # first route with these metrics ( context.ndb.routes.create( dst=ipnet1, dst_len=24, gateway=gateway1, metrics={'mtu': target} ).commit() ) # second route with these metrics ( context.ndb.routes.create( dst=ipnet2, dst_len=24, gateway=gateway2, metrics={'mtu': target} ).commit() ) # at this point it's already ok - otherwise the test # would explode on the second routes.create() # but lets double check assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists( context.netns, match=partial(match_metrics, target, gateway2) ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_metrics_set(context): index, ifname = context.default_interface ifaddr = context.new_ipaddr gateway = context.new_ipaddr ipnet = str(context.ipnets[1].network) target = 1280 with context.ndb.interfaces[ifname] as dummy: dummy.add_ip(address=ifaddr, prefixlen=24) dummy.set(state='up') route = context.ndb.routes.create(dst=ipnet, dst_len=24, gateway=gateway) route.commit() assert route_exists(context.netns, dst=ipnet, dst_len=24, gateway=gateway) with pytest.raises(KeyError): assert route['metrics']['mtu'] assert isinstance(route['metrics'], MetricsStub) route['metrics']['mtu'] = target assert isinstance(route['metrics'], Metrics) route.commit() assert route_exists( context.netns, match=partial(match_metrics, target, gateway) ) def _test_metrics_update(context, method): ifaddr = context.new_ipaddr gateway1 = context.new_ipaddr ifname = context.new_ifname ipnet = str(context.ipnets[1].network) target = 1300 ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .ipaddr.create(address=ifaddr, prefixlen=24) .commit() ) spec = { 
'dst_len': 24, 'dst': ipnet, 'gateway': gateway1, 'metrics': {'mtu': target}, } if context.table: spec['table'] = context.table (context.ndb.routes.create(**spec).commit()) def match_metrics(msg): if msg.get_attr('RTA_GATEWAY') != gateway1: return False mtu = msg.get_attr('RTA_METRICS', rtmsg()).get_attr('RTAX_MTU', 0) return mtu == target assert address_exists(context.netns, ifname=ifname, address=ifaddr) assert route_exists(context.netns, match=match_metrics) target = 1500 # # referencing the route via full spec instead of a # local variable is important here for the test # purposes: thus we check if the cache is working # properly and by the spec we hit the same object # every time context.ndb.routes[spec]['metrics']['mtu'] = target getattr(context.ndb.routes[spec], method)() assert route_exists(context.netns, match=match_metrics) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_metrics_update_apply(context): return _test_metrics_update(context, 'apply') @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_metrics_update_commit(context): return _test_metrics_update(context, 'commit') pyroute2-0.7.11/tests/test_linux/test_ndb/test_rules.py000066400000000000000000000027221455030217500233030ustar00rootroot00000000000000from socket import AF_INET6 import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import rule_exists pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], tables=[100, 10000], dbs=['sqlite3/:memory:', 'postgres/pr2test'], ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_explicit_ipv6_src(context): ipnet = context.new_ip6net table = context.table spec = { 'family': AF_INET6, 'src': ipnet.network, 'src_len': ipnet.netmask, 'table': table, 'priority': 50, } context.register_rule(spec) context.ndb.rules.create(**spec).commit() assert rule_exists(context.netns, **spec) 
context.ndb.rules[spec].remove().commit() assert not rule_exists(context.netns, **spec) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_implicit_ipv6_src(context): ipnet = context.new_ip6net table = context.table spec = { 'src': ipnet.network, 'src_len': ipnet.netmask, 'table': table, 'priority': 50, } search_spec = spec.copy() search_spec['family'] = AF_INET6 context.register_rule(search_spec) context.ndb.rules.create(**spec).commit() assert rule_exists(context.netns, **search_spec) context.ndb.rules[spec].remove().commit() assert not rule_exists(context.netns, **search_spec) pyroute2-0.7.11/tests/test_linux/test_ndb/test_sources.py000066400000000000000000000146611455030217500236410ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import interface_exists from utils import require_user from pyroute2 import NDB pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) def test_multiple_sources(context): ''' NDB should work with multiple netlink sources Check that it actually works: * with multiple sources of different kind * without the default "localhost" RTNL source ''' nsname = context.new_nsname # # NB: no 'localhost' record -- important ! 
sources = [ {'target': 'localhost0', 'kind': 'local'}, {'target': 'localhost1', 'kind': 'netns', 'netns': nsname}, {'target': 'localhost2', 'kind': 'local'}, ] ndb = None # # check that all the view has length > 0 # that means that the sources are working with NDB(sources=sources) as ndb: assert len(list(ndb.interfaces.dump())) assert len(list(ndb.neighbours.dump())) assert len(list(ndb.addresses.dump())) assert len(list(ndb.routes.dump())) assert len(ndb.sources) == len(sources) # here NDB() gets closed # # # the `ndb` variable still references the closed # NDB() object from the code block above, check # that all the sources are closed too for source in ndb.sources: assert ndb.sources[source].nl.closed @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_source_localhost_restart(context): ''' The database must be operational after a complete restart of any source. ''' require_user('root') ifname1 = context.new_ifname ifname2 = context.new_ifname ndb = context.ndb # # check that there are existing interfaces # loaded into the DB assert len(list(ndb.interfaces.dump())) # # create a dummy interface to prove the # source working (ndb.interfaces.create(ifname=ifname1, kind='dummy', state='up').commit()) # # an external check assert interface_exists(context.netns, ifname=ifname1, state='up') # # internal checks assert ifname1 in ndb.interfaces assert ndb.interfaces[ifname1]['state'] == 'up' # # now restart the source # the reason should be visible in the log ndb.sources['localhost'].restart(reason='test') # # the interface must be in the DB (after the # source restart) assert ifname1 in ndb.interfaces # # create another one ( ndb.interfaces.create( ifname=ifname2, kind='dummy', state='down' ).commit() ) # # check the interface both externally and internally assert interface_exists(context.netns, ifname=ifname2, state='down') assert ifname2 in ndb.interfaces assert ndb.interfaces[ifname2]['state'] == 'down' # # cleanup 
ndb.interfaces[ifname1].remove().commit() ndb.interfaces[ifname2].remove().commit() # # check assert not interface_exists(ifname=ifname1) assert not interface_exists(ifname=ifname2) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_source_netns_restart(context): ''' Netns sources should be operational after restart as well ''' require_user('root') nsname = context.new_nsname # # simple `context.new_ifname` returns ifname only for the main # netns, if we want to register the name in a netns, we should # use `context.register(netns=...)` ifname = context.register(netns=nsname) ndb = context.ndb # # add a netns source, the netns will be created automatically ndb.sources.add(netns=nsname) # # check the interfaces from the netns are loaded into the DB with ndb.interfaces.dump() as dump: dump.select_records(target=nsname) assert len(list(dump)) # # restart the DB ndb.sources[nsname].restart(reason='test') # # check the netns interfaces again with ndb.interfaces.dump() as dump: dump.select_records(target=nsname) assert len(list(dump)) # # create an interface in the netns ( ndb.interfaces.create( target=nsname, ifname=ifname, kind='dummy', state='up' ).commit() ) # # check the interface assert interface_exists(nsname, ifname=ifname) assert ( ndb.interfaces[{'target': nsname, 'ifname': ifname}]['state'] == 'up' ) # # netns will be remove automatically by the fixture as well # as interfaces inside the netns def count_interfaces(ndb, target): return ( ndb.task_manager.db_fetchone( ''' SELECT count(*) FROM interfaces WHERE f_target = '%s' AND f_index != 0 ''' % target ) )[0] @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_disconnect_localhost(context): ''' Disconnecting the `localhost` source should not break the DB ''' require_user('root') nsname = context.new_nsname localhost_ifnum = 0 nsname_ifnum = 0 total_ifnum = 0 # # attach the NetNS source context.ndb.sources.add(netns=nsname) # # lock the DB total_ifnum = 
context.ndb.interfaces.dump().count() with context.ndb.interfaces.dump() as dump: dump.select_records(target='localhost') localhost_ifnum = len(tuple(dump)) with context.ndb.interfaces.dump() as dump: dump.select_records(target=nsname) nsname_ifnum = len(tuple(dump)) assert localhost_ifnum == count_interfaces(context.ndb, 'localhost') assert nsname_ifnum == count_interfaces(context.ndb, nsname) assert 0 < count_interfaces(context.ndb, 'localhost') < total_ifnum assert 0 < count_interfaces(context.ndb, nsname) < total_ifnum context.ndb.sources.remove('localhost') # # the number of 'localhost' interfaces must be 0 here with context.ndb.interfaces.dump() as dump: dump.select_records(target='localhost') assert len(list(dump)) == 0 assert count_interfaces(context.ndb, 'localhost') == 0 # # the number of `nsname` interfaces must remain the same as before with context.ndb.interfaces.dump() as dump: dump.select_records(target=nsname) s = len(list(dump)) assert s > 0 assert count_interfaces(context.ndb, nsname) == s pyroute2-0.7.11/tests/test_linux/test_ndb/test_syntax.py000066400000000000000000000100211455030217500234660ustar00rootroot00000000000000from pr2test.marks import require_root from pr2test.tools import address_exists, interface_exists, route_exists pytestmark = [require_root()] def _do_test_cm_interface_create(context): ifname = context.new_ifname with context.ndb.interfaces.create( ifname=ifname, kind='dummy', state='down' ): pass assert interface_exists(context.netns, ifname=ifname, state='down') return ifname def test_cm_interface_create(context): ''' Create an interface using context manager syntax ''' _do_test_cm_interface_create(context) def test_cm_address_create(context): ''' Create an address using context manager syntax ''' ifname = _do_test_cm_interface_create(context) ipaddr = context.new_ipaddr with context.ndb.addresses.create( index=context.ndb.interfaces[ifname]['index'], address=ipaddr, prefixlen=24, ): pass assert address_exists(context.netns, 
ifname=ifname, address=ipaddr) def test_cm_interface_change_assign(context): ''' :: with interface as i: i['state'] = 'up' ''' ifname = _do_test_cm_interface_create(context) with context.ndb.interfaces[ifname] as i: i['state'] = 'up' assert interface_exists(context.netns, ifname=ifname, state='up') def test_cm_interface_change_set_argv(context): ''' :: with interface as i: i.set('state', 'up') ''' ifname = _do_test_cm_interface_create(context) with context.ndb.interfaces[ifname] as i: i.set('state', 'up') assert interface_exists(context.netns, ifname=ifname, state='up') def test_cm_interface_change_set_kwarg(context): ''' :: with interface as i: i.set(state='up') ''' ifname = _do_test_cm_interface_create(context) with context.ndb.interfaces[ifname] as i: i.set(state='up') assert interface_exists(context.netns, ifname=ifname, state='up') def test_routes_spec_dst_len(context): ipaddr = context.new_ipaddr gateway = context.new_ipaddr ifname = context.new_ifname ipnet = str(context.ipnets[1].network) table = 24000 ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=24) .commit() ) ( context.ndb.routes.create( dst=ipnet, dst_len=24, gateway=gateway, table=table ).commit() ) assert route_exists(context.netns, dst=ipnet, table=table) r1 = context.ndb.routes.get('%s/24' % ipnet) r2 = context.ndb.routes.get({'dst': '%s/24' % ipnet}) r3 = context.ndb.routes.get({'dst': ipnet, 'dst_len': 24}) r4 = context.ndb.routes['%s/24' % ipnet] r5 = context.ndb.routes[{'dst': '%s/24' % ipnet}] r6 = context.ndb.routes[{'dst': ipnet, 'dst_len': 24}] assert r1 == r2 == r3 == r4 == r5 == r6 def test_string_key_in_interfaces(context): ifname = context.new_ifname address = '00:11:22:33:44:55' f_ifname = context.new_ifname f_address = '00:11:22:33:66:66' ( context.ndb.interfaces.create( ifname=ifname, kind='dummy', state='up', address=address ).commit() ) assert ifname in context.ndb.interfaces assert address in context.ndb.interfaces 
assert f_ifname not in context.ndb.interfaces assert f_address not in context.ndb.interfaces def test_string_key_in_addresses(context): ifname = context.new_ifname ipaddr = context.new_ipaddr f_ipaddr = context.new_ipaddr ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(address=ipaddr, prefixlen=24) .commit() ) assert ipaddr in context.ndb.addresses assert '%s/%i' % (ipaddr, 24) in context.ndb.addresses assert f_ipaddr not in context.ndb.addresses assert '%s/%i' % (f_ipaddr, 24) not in context.ndb.addresses assert '%s/%i' % (ipaddr, 16) not in context.ndb.addresses assert '%s/%i' % (ipaddr, 28) not in context.ndb.addresses assert '%s/%i' % (ipaddr, 32) not in context.ndb.addresses pyroute2-0.7.11/tests/test_linux/test_ndb/test_transaction.py000066400000000000000000000132501455030217500244740ustar00rootroot00000000000000import uuid import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root from pr2test.tools import address_exists, interface_exists from pyroute2.ndb.transaction import ( CheckProcess, CheckProcessException, Not, PingAddress, ) pytestmark = [require_root()] def test_check_process_basic(): test = CheckProcess('true') assert test.return_code is None test.commit() test.rollback() assert test.return_code == 0 def test_check_process_fail(): test = CheckProcess('false') assert test.return_code is None with pytest.raises(CheckProcessException): test.commit() assert test.return_code == 1 def test_check_process_file_not_found(): test = CheckProcess(str(uuid.uuid4())) assert test.return_code is None with pytest.raises(FileNotFoundError): test.commit() assert test.return_code is None def test_check_process_timeout(): test = CheckProcess('sleep 10', timeout=1) with pytest.raises(CheckProcessException): test.commit() @pytest.mark.parametrize('command', (None, '', -1, ['s1', 's2'], True)) def test_check_process_wrong_command(command): with pytest.raises(TypeError): CheckProcess(command) 
def test_negation(): test = CheckProcess('false') with pytest.raises(CheckProcessException): test.commit() Not(test).commit() def test_ping_ok(): test = PingAddress('127.0.0.1') test.commit() def test_ping_unreachable(): test = PingAddress('128.0.0.1') with pytest.raises(CheckProcessException): test.commit() def test_ping_unknown(): test = PingAddress(str(uuid.uuid4())) with pytest.raises(CheckProcessException): test.commit() test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_multiple_interfaces(context): ifname1 = context.new_ifname ifname2 = context.new_ifname ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr ( context.ndb.begin() .push( context.ndb.interfaces.create(ifname=ifname1, kind='dummy') .set(state='up') .set(address='00:11:22:aa:aa:aa') .add_ip(address=ipaddr1, prefixlen=24), context.ndb.interfaces.create(ifname=ifname2, kind='dummy') .set(state='up') .set(address='00:11:22:bb:bb:bb') .add_ip(address=ipaddr2, prefixlen=24), ) .commit() ) assert interface_exists( context.netns, ifname=ifname1, address='00:11:22:aa:aa:aa' ) assert interface_exists( context.netns, ifname=ifname2, address='00:11:22:bb:bb:bb' ) assert address_exists(context.netns, ifname=ifname1, address=ipaddr1) assert address_exists(context.netns, ifname=ifname2, address=ipaddr2) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_check_context_manager(context): ifname1 = context.new_ifname ifname2 = context.new_ifname with context.ndb.begin() as ctx: ctx.push(context.ndb.interfaces.create(ifname=ifname1, kind='dummy')) ctx.push(context.ndb.interfaces.create(ifname=ifname2, kind='dummy')) assert interface_exists(context.netns, ifname=ifname1) assert interface_exists(context.netns, ifname=ifname2) def test_interfaces_ping(context): ifname1 = context.new_ifname ifname2 = context.new_ifname ipaddr1 = context.new_ipaddr ipaddr2 = 
context.new_ipaddr with pytest.raises(CheckProcessException): PingAddress(ipaddr1).commit() with pytest.raises(CheckProcessException): PingAddress(ipaddr2).commit() ( context.ndb.begin() .push( context.ndb.interfaces.create(ifname=ifname1, kind='dummy') .set(state='up') .add_ip(address=ipaddr1, prefixlen=24), context.ndb.interfaces.create(ifname=ifname2, kind='dummy') .set(state='up') .add_ip(address=ipaddr2, prefixlen=24), PingAddress(ipaddr1, log=context.ndb.log.channel('util')), PingAddress(ipaddr2, log=context.ndb.log.channel('util')), ) .commit() ) assert interface_exists(ifname=ifname1) assert interface_exists(ifname=ifname2) assert address_exists(ifname=ifname1, address=ipaddr1) assert address_exists(ifname=ifname2, address=ipaddr2) def test_interfaces_ping_fail(context): ifname1 = context.new_ifname ifname2 = context.new_ifname ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr with pytest.raises(CheckProcessException): PingAddress(ipaddr1).commit() with pytest.raises(CheckProcessException): PingAddress(ipaddr2).commit() ( context.ndb.begin() .push( context.ndb.interfaces.create( ifname=ifname1, kind='dummy', state='up' ), context.ndb.interfaces.create( ifname=ifname2, kind='dummy', state='up' ), ) .commit() ) assert interface_exists(ifname=ifname1) assert interface_exists(ifname=ifname2) assert not address_exists(address=ipaddr1) assert not address_exists(address=ipaddr2) with pytest.raises(CheckProcessException): ( context.ndb.begin() .push( context.ndb.interfaces[ifname1].add_ip( address=ipaddr1, prefixlen=24 ), PingAddress(ipaddr2, log=context.ndb.log.channel('util')), ) .commit() ) assert not address_exists(address=ipaddr1) assert not address_exists(address=ipaddr2) pyroute2-0.7.11/tests/test_linux/test_ndb/test_views.py000066400000000000000000000116751455030217500233150ustar00rootroot00000000000000import time from functools import partial import pytest from pr2test.context_manager import make_test_matrix from pr2test.marks import require_root 
from pr2test.tools import interface_exists from utils import require_user from pyroute2 import config pytestmark = [require_root()] test_matrix = make_test_matrix( targets=['local', 'netns'], dbs=['sqlite3/:memory:', 'postgres/pr2test'] ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_view_cache(context): ''' NDB stores all the info in an SQL database, and instantiates python objects only upon request, since it isn't cheap. References to the created objects are stored in the object cache until expired. This test checks is the cache works as expected. Initially there should be no references in the cache, check if the references are properly cached and expired in time. ''' require_user('root') ifname1 = context.new_ifname ifname2 = context.new_ifname ndb = context.ndb # # the cache is empty from the beginning assert len(list(ndb.interfaces.cache)) == 0 # # create test interfaces ndb.interfaces.create(ifname=ifname1, kind='dummy').commit() ndb.interfaces.create(ifname=ifname2, kind='dummy').commit() assert interface_exists(context.netns, ifname=ifname1) assert interface_exists(context.netns, ifname=ifname2) # # the interface object must not be cached, as they # weren't referenced yet assert len(list(ndb.interfaces.cache)) == 0 # # setup the cache expiration time ce = config.cache_expire # save the old value config.cache_expire = 1 # set the new one # # access the interfaces via __getitem__() -- this must # create objects and cache the references assert ndb.interfaces[ifname1] is not None assert ndb.interfaces[ifname2] is not None # # both references must be in the cache now assert len(list(ndb.interfaces.cache)) == 2 # # expire the cache time.sleep(1) # # access the second interface to trigger the # cache invalidation assert ndb.interfaces[ifname2] is not None # # ifname1 must be out of the cache now as not # accessed within the timeout # # only ifname2 must remain assert len(list(ndb.interfaces.cache)) == 1 assert 
list(ndb.interfaces.cache.items())[0][1]['ifname'] == ifname2 # # restore the environment config.cache_expire = ce ndb.interfaces[ifname1].remove().commit() ndb.interfaces[ifname2].remove().commit() # # check that the interfaces are cleaned up from the system assert not interface_exists(context.netns, ifname=ifname1) assert not interface_exists(context.netns, ifname=ifname2) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_readonly(context): readonly = context.ndb.readonly() with pytest.raises(PermissionError): readonly.interfaces.create(ifname='test', kind='dummy') with readonly.interfaces.summary() as summary: summary.select_records(ifname='lo') selection = list(summary) assert len(selection) == 1 assert selection[0].ifname == 'lo' assert selection[0].address == '00:00:00:00:00:00' assert selection[0].target == 'localhost' @pytest.mark.parametrize('method', ('dump', 'summary')) @pytest.mark.parametrize( 'view,sub,func', ( ('routes', 'routes', lambda index, x: x.oif == index), ('addresses', 'ipaddr', lambda index, x: x.index == index), ('interfaces', 'ports', lambda index, x: x.master == index), ('neighbours', 'neighbours', lambda index, x: x.ifindex == index), ('vlans', 'vlans', lambda index, x: x.index == index), ), ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_nested_count(context, view, sub, func, method): br0 = context.new_ifname br0p0 = context.new_ifname br0p1 = context.new_ifname ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr gateway = context.new_ipaddr net = context.new_ip4net context.ndb.interfaces.create( ifname=br0p0, kind='dummy', state='up' ).commit() context.ndb.interfaces.create( ifname=br0p1, kind='dummy', state='up' ).commit() ( context.ndb.interfaces.create(ifname=br0, kind='bridge', state='up') .add_port(br0p0) .add_port(br0p1) .add_ip(f'{ipaddr1}/24') .add_ip(f'{ipaddr2}/24') .commit() ) context.ndb.routes.create( dst=net.network, dst_len=net.netmask, gateway=gateway ).commit() 
with getattr(context.ndb, view).dump() as records_a: records_a.select_records( partial(func, context.ndb.interfaces[br0]['index']) ) records_b = getattr(getattr(context.ndb.interfaces[br0], sub), method)() count = getattr(context.ndb.interfaces[br0], sub).count() assert records_b.count() == records_a.count() == count assert count < getattr(context.ndb, view).count() or count == 0 pyroute2-0.7.11/tests/test_linux/test_netns/000077500000000000000000000000001455030217500211215ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_netns/test_nspopen.py000066400000000000000000000041661455030217500242230ustar00rootroot00000000000000import fcntl import os import subprocess import pytest from pr2test.marks import require_root from pyroute2 import NSPopen pytestmark = [require_root()] def test_basic(context): nsid = context.new_nsname # create NS and run a child nsp = NSPopen( nsid, ['ip', '-o', 'link'], stdout=subprocess.PIPE, flags=os.O_CREAT ) ret = nsp.communicate()[0].decode('utf-8') host_links = [x.ifname for x in context.ndb.interfaces] netns_links = [ x.split(':')[1].split('@')[0].strip() for x in ret.split('\n') if len(x) ] assert nsp.wait() == nsp.returncode == 0 assert set(host_links) & set(netns_links) == set(netns_links) assert set(netns_links) < set(host_links) assert not set(netns_links) > set(host_links) nsp.release() def test_release(context): nsid = context.new_nsname nsp = NSPopen(nsid, ['true'], flags=os.O_CREAT, stdout=subprocess.PIPE) nsp.communicate() nsp.wait() nsp.release() with pytest.raises(RuntimeError): assert nsp.returncode def test_stdio(context): nsid = context.new_nsname nsp = NSPopen(nsid, ['ip', 'ad'], flags=os.O_CREAT, stdout=subprocess.PIPE) output = nsp.stdout.read() nsp.release() assert output is not None def test_fcntl(context): nsid = context.new_nsname nsp = NSPopen(nsid, ['ip', 'ad'], flags=os.O_CREAT, stdout=subprocess.PIPE) flags = nsp.stdout.fcntl(fcntl.F_GETFL) nsp.release() assert flags == 0 def 
test_api_class(context): api_nspopen = set(dir(NSPopen)) api_popen = set(dir(subprocess.Popen)) assert api_nspopen & api_popen == api_popen def test_api_object(context): nsid = context.new_nsname nsp = NSPopen(nsid, ['true'], flags=os.O_CREAT, stdout=subprocess.PIPE) smp = subprocess.Popen(['true'], stdout=subprocess.PIPE) nsp.communicate() smp.communicate() api_nspopen = set(dir(nsp)) api_popen = set(dir(smp)) minimal = set(('communicate', 'kill', 'wait')) assert minimal & (api_nspopen & api_popen) == minimal smp.wait() nsp.wait() assert nsp.returncode == smp.returncode == 0 nsp.release() pyroute2-0.7.11/tests/test_linux/test_nlmsg/000077500000000000000000000000001455030217500211125ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_nlmsg/test_nlmsg.py000066400000000000000000000027271455030217500236530ustar00rootroot00000000000000from pr2test.marks import require_root pytestmark = [require_root()] def test_nlmsg_operators(context): ifname = context.new_ifname ipaddr1 = context.new_ipaddr ipaddr2 = context.new_ipaddr interface = ( context.ndb.interfaces.create(ifname=ifname, kind='dummy', state='up') .add_ip(f'{ipaddr1}/24') .add_ip(f'{ipaddr2}/24') .commit() ) r = tuple(context.ipr.addr('dump', index=interface['index'])) complement = r[0] - r[1] intersection = r[0] & r[1] assert complement.get_attr('IFA_ADDRESS') == ipaddr1 assert complement.get_attr('IFA_LABEL') is None assert 'prefixlen' not in complement assert 'index' not in complement assert intersection.get_attr('IFA_ADDRESS') is None assert intersection.get_attr('IFA_LABEL') == ifname assert intersection['prefixlen'] == 24 assert intersection['index'] == context.ndb.interfaces[ifname]['index'] def test_nlmsg_compare_equal(context): lvalue = tuple(context.ipr.get_links())[0] rvalue = tuple(context.ipr.get_links())[0] assert lvalue is not rvalue assert lvalue == rvalue def test_nlmsg_compare_not_equal(context): lvalue = tuple(context.ipr.get_links())[0] rvalue = 
tuple(context.ipr.get_links())[1] assert lvalue is not rvalue assert lvalue != rvalue def test_nlmsg_compare_int(context): lvalue = tuple(context.ipr.get_links())[0] rvalue = 42 assert lvalue is not rvalue assert lvalue != rvalue pyroute2-0.7.11/tests/test_linux/test_raw/000077500000000000000000000000001455030217500205635ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_raw/test_dhcp.py000066400000000000000000000035061455030217500231160ustar00rootroot00000000000000import collections import json import subprocess import pytest from pr2test.marks import require_root from pyroute2 import NDB from pyroute2.common import dqn2int, hexdump, hexload from pyroute2.dhcp import client pytestmark = [require_root()] @pytest.fixture def ctx(): ndb = NDB() index = 0 ifname = '' # get a DHCP default route, if exists with ndb.routes.dump() as dump: dump.select_records(proto=16, dst='') for route in dump: index = route.oif ifname = ndb.interfaces[index]['ifname'] break yield collections.namedtuple('Context', ['ndb', 'index', 'ifname'])( ndb, index, ifname ) ndb.close() def _do_test_client_module(ctx): if ctx.index == 0: pytest.skip('no DHCP interfaces detected') response = client.action(ctx.ifname) options = response['options'] router = response['options']['router'][0] prefixlen = dqn2int(response['options']['subnet_mask']) address = response['yiaddr'] l2addr = response['chaddr'] # convert addresses like 96:0:1:45:fa:6c into 96:00:01:45:fa:6c assert ( hexdump(hexload(l2addr)) == ctx.ndb.interfaces[ctx.ifname]['address'] ) assert router == ctx.ndb.routes['default']['gateway'] assert { 'address': address, 'prefixlen': prefixlen, 'index': ctx.index, } in ctx.ndb.addresses assert options['lease_time'] > 0 return response def test_client_module(ctx): _do_test_client_module(ctx) def test_client_console(ctx): response_from_module = json.loads(json.dumps(_do_test_client_module(ctx))) client = subprocess.run( ['pyroute2-dhcp-client', ctx.ifname], stdout=subprocess.PIPE ) 
response_from_console = json.loads(client.stdout) assert response_from_module == response_from_console pyroute2-0.7.11/tests/test_linux/test_remote.py000066400000000000000000000006021455030217500216350ustar00rootroot00000000000000from pr2test.context_manager import skip_if_not_supported from pyroute2 import IPRoute, RemoteIPRoute @skip_if_not_supported def test_links(): with IPRoute() as ipr: links1 = set([x.get_attr('IFLA_IFNAME') for x in ipr.get_links()]) with RemoteIPRoute() as ipr: links2 = set([x.get_attr('IFLA_IFNAME') for x in ipr.get_links()]) assert links1 == links2 pyroute2-0.7.11/tests/test_linux/test_tc/000077500000000000000000000000001455030217500204005ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_tc/__init__.py000066400000000000000000000000001455030217500224770ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_tc/test_actions.py000066400000000000000000000064161455030217500234600ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pyroute2 import protocols pytestmark = [require_root()] test_matrix = make_test_matrix(targets=['local', 'netns']) def find_action(context, prio=1, filter_name="u32"): """Returns the first action with priority `prio` under the filter with `filter_name` """ # Fetch filters filts = context.ipr.get_filters(index=context.default_interface.index) # Find our action for i in filts: try: act = ( i.get_attr('TCA_OPTIONS') .get_attr('TCA_%s_ACT' % filter_name.upper()) .get_attr('TCA_ACT_PRIO_%d' % prio) ) assert act return act except AttributeError: continue raise FileNotFoundError('Action not found') @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_mirred(context): index, ifname = context.default_interface # add a htb root context.ipr.tc('add', 'htb', index=index, handle='1:', default='20:0') # mirred action actions = [ dict( kind='mirred', 
direction='egress', action='mirror', ifindex=index ), dict( kind='mirred', direction='egress', action='redirect', ifindex=index ), ] # create a filter with this action context.ipr.tc( 'add-filter', 'u32', index=index, handle='0:0', parent='1:0', prio=10, protocol=protocols.ETH_P_IP, target='1:10', keys=['0x0006/0x00ff+8'], action=actions, ) # Check that we have two mirred actions with the right parameters act = find_action(context, 1) assert act.get_attr('TCA_ACT_KIND') == 'mirred' parms = act.get_attr('TCA_ACT_OPTIONS').get_attr('TCA_MIRRED_PARMS') assert parms['eaction'] == 2 # egress mirror, see act_mirred.py assert parms['ifindex'] == index assert parms['action'] == 3 # TC_ACT_PIPE because action == mirror act = find_action(context, 2) assert act.get_attr('TCA_ACT_KIND') == 'mirred' parms = act.get_attr('TCA_ACT_OPTIONS').get_attr('TCA_MIRRED_PARMS') assert parms['eaction'] == 1 # egress redirect, see act_mirred.py assert parms['ifindex'] == index assert parms['action'] == 4 # TC_ACT_STOLEN because action == redirect @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_connmark(context): index, ifname = context.default_interface # add a htb root context.ipr.tc('add', 'htb', index=index, handle='1:', default='20:0') # connmark action action = {'kind': 'connmark', 'zone': 63} # create a filter with this action context.ipr.tc( 'add-filter', 'u32', index=index, handle='0:0', parent='1:0', prio=10, protocol=protocols.ETH_P_IP, target='1:10', keys=['0x0006/0x00ff+8'], action=action, ) act = find_action(context) # Check that it is a connmark action with the right parameters assert act.get_attr('TCA_ACT_KIND') == 'connmark' parms = act.get_attr('TCA_ACT_OPTIONS').get_attr('TCA_CONNMARK_PARMS') assert parms['zone'] == 63 pyroute2-0.7.11/tests/test_linux/test_tc/test_basic.py000066400000000000000000000061711455030217500230770ustar00rootroot00000000000000import errno import pytest from pr2test.context_manager import 
make_test_matrix from pr2test.marks import require_root from pr2test.tools import qdisc_exists from pyroute2 import NetlinkError pytestmark = [require_root()] test_matrix = make_test_matrix(targets=['local', 'netns']) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_pfifo(context): index, ifname = context.default_interface context.ipr.tc('add', 'pfifo', index=index, limit=700) assert qdisc_exists(context.netns, 'pfifo', ifname=ifname, limit=700) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_pfifo_fast(context): index, ifname = context.default_interface context.ipr.tc('add', 'pfifo_fast', index=index, handle=0) ret = qdisc_exists(context.netns, 'pfifo_fast', ifname=ifname)[0] assert ret.get_attr('TCA_OPTIONS')['priomap'] @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_plug(context): index, ifname = context.default_interface context.ipr.tc('add', 'plug', index=index, limit=13107) assert qdisc_exists(context.netns, 'plug', ifname=ifname) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_blackhole(context): index, ifname = context.default_interface context.ipr.tc('add', 'blackhole', index=index) assert qdisc_exists(context.netns, 'blackhole', ifname=ifname) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_codel(context): index, ifname = context.default_interface context.ipr.tc( 'add', 'codel', index=index, handle='1:0', cdl_interval='40ms', cdl_target='2ms', cdl_limit=5000, cdl_ecn=1, ) assert qdisc_exists( context.netns, 'codel', ifname=ifname, codel_ecn=1, codel_limit=5000 ) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_sfq(context): index, ifname = context.default_interface context.ipr.tc('add', 'sfq', index=index, handle=0, perturb=10) assert qdisc_exists(context.netns, 'sfq', ifname=ifname, perturb_period=10) @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_tbf(context): index, ifname = 
context.default_interface context.ipr.tc( 'add', 'tbf', index=index, handle=0, rate='220kbit', latency='50ms', burst=1540, ) opts = qdisc_exists(context.netns, 'tbf', ifname=ifname)[0].get_nested( 'TCA_OPTIONS', 'TCA_TBF_PARMS' ) assert opts['rate'] == 27500 @pytest.mark.parametrize('context', test_matrix, indirect=True) def test_choke(context): index, ifname = context.default_interface try: context.ipr.tc( 'add', 'choke', index=index, limit=5500, bandwith=3000, ecn=True ) except NetlinkError as e: if e.code == errno.ENOENT: pytest.skip('qdisc not supported: choke') raise opts = qdisc_exists(context.netns, 'choke', ifname=ifname)[0].get_nested( 'TCA_OPTIONS', 'TCA_CHOKE_PARMS' ) assert opts['limit'] == 5500 assert opts['qth_max'] == 1375 assert opts['qth_min'] == 458 pyroute2-0.7.11/tests/test_linux/test_tc/test_bpf.py000066400000000000000000000142451455030217500225660ustar00rootroot00000000000000import ctypes import ctypes.util import re import subprocess import pytest from pr2test.context_manager import skip_if_not_supported from pr2test.marks import require_root from pyroute2 import protocols from pyroute2.netlink.rtnl import TC_H_INGRESS pytestmark = [require_root()] def get_bpf_syscall_num(): # determine bpf syscall number prog = """ #include #define XSTR(x) STR(x) #define STR(x) #x #pragma message "__NR_bpf=" XSTR(__NR_bpf) """ cmd = ['gcc', '-x', 'c', '-c', '-', '-o', '/dev/null'] gcc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE) out = gcc.communicate(input=prog.encode('ascii'))[1] m = re.search('__NR_bpf=([0-9]+)', str(out)) if not m: pytest.skip('bpf syscall not available') return int(m.group(1)) def get_simple_bpf_program(prog_type): NR_bpf = get_bpf_syscall_num() class BPFAttr(ctypes.Structure): _fields_ = [ ('prog_type', ctypes.c_uint), ('insn_cnt', ctypes.c_uint), ('insns', ctypes.POINTER(ctypes.c_ulonglong)), ('license', ctypes.c_char_p), ('log_level', ctypes.c_uint), ('log_size', ctypes.c_uint), ('log_buf', ctypes.c_char_p), 
@skip_if_not_supported
def test_simple(ingress):
    """The ingress qdisc must be reported with the fixed handle/parent."""
    target_index = ingress.default_interface.index
    qdiscs = [
        msg
        for msg in ingress.ipr.get_qdiscs()
        if msg['index'] == target_index
    ]
    # the interface must report at least one qdisc
    assert qdiscs
    # locate the ingress qdisc among them
    ingress_qdisc = next(
        (msg for msg in qdiscs if msg.get_attr('TCA_KIND') == 'ingress'),
        None,
    )
    if ingress_qdisc is None:
        raise Exception('no ingress qdisc found')
    # the kernel assigns the fixed ingress handle and parent
    assert ingress_qdisc['handle'] == 0xFFFF0000
    assert ingress_qdisc['parent'] == TC_H_INGRESS
@skip_if_not_supported
def test_filter_sched_act(ingress, bpf_cls, bpf_act):
    """Attach a BPF classifier plus a u32 filter with a BPF action.

    Verifies that the BPF filter is reported back with its program
    name and that the generic action (gact) parameters carry 0,
    which corresponds to the 'ok' verdict requested below.
    """
    index, ifname = ingress.default_interface
    # a plain BPF classifier on the ingress qdisc
    ingress.ipr.tc(
        'add-filter',
        'bpf',
        index=index,
        handle=0,
        fd=bpf_cls,
        name='my_func',
        parent=0xFFFF0000,
        action='ok',
        classid=1,
    )
    # a u32 match-all filter carrying a BPF program as its action
    action = {'kind': 'bpf', 'fd': bpf_act, 'name': 'my_func', 'action': 'ok'}
    ingress.ipr.tc(
        'add-filter',
        'u32',
        index=index,
        handle=1,
        protocol=protocols.ETH_P_ALL,
        parent=0xFFFF0000,
        target=0x10002,
        keys=['0x0/0x0+0'],
        action=action,
    )
    fls = ingress.ipr.get_filters(index=index, parent=0xFFFF0000)
    assert fls
    # pick the filter that carries a BPF action attribute
    bpf_filter = [
        x
        for x in fls
        if x.get_attr('TCA_OPTIONS') is not None
        and (x.get_attr('TCA_OPTIONS').get_attr('TCA_BPF_ACT') is not None)
    ][0]
    bpf_options = bpf_filter.get_attr('TCA_OPTIONS')
    assert bpf_options.get_attr('TCA_BPF_NAME') == 'my_func'
    # dig out the gact parameters of the first action in the chain
    gact_parms = (
        bpf_options.get_attr('TCA_BPF_ACT')
        .get_attr('TCA_ACT_PRIO_1')
        .get_attr('TCA_ACT_OPTIONS')
        .get_attr('TCA_GACT_PARMS')
    )
    # 0 matches the action='ok' requested above
    assert gact_parms['action'] == 0
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_drr(context):
    """Create a DRR qdisc with two classes and verify their quantums."""
    index, ifname = context.default_interface
    # the drr scheduler module may be missing from the kernel
    try:
        context.ipr.tc('add', 'drr', index=index, handle='1:')
    except NetlinkError as e:
        if e.code != errno.ENOENT:
            raise
        pytest.skip('qdisc not supported: drr')
    quantums = (20, 30)
    for quantum in quantums:
        context.ipr.tc(
            'add-class',
            'drr',
            index=index,
            handle='1:%d' % quantum,
            quantum=quantum,
        )
    assert qdisc_exists(context.netns, 'drr', ifname=ifname)
    classes = context.ipr.get_classes(index=index)
    assert len(classes) == len(quantums)
    # each class must come back as drr with its configured quantum
    for msg, quantum in zip(classes, quantums):
        assert msg.get_attr('TCA_KIND') == 'drr'
        assert msg.get_attr('TCA_OPTIONS').get_attr('TCA_DRR_QUANTUM') == (
            quantum
        )
curve['m2'] == 375000 assert cls[1].get_attr('TCA_OPTIONS').get_attr('TCA_HFSC_FSC') is None assert cls[1].get_attr('TCA_OPTIONS').get_attr('TCA_HFSC_USC') is None pyroute2-0.7.11/tests/test_linux/test_tc/test_htb.py000066400000000000000000000067521455030217500226000ustar00rootroot00000000000000import pytest from pr2test.context_manager import make_test_matrix, skip_if_not_supported from pr2test.marks import require_root from pr2test.tools import qdisc_exists from pyroute2 import protocols pytestmark = [require_root()] test_matrix = make_test_matrix(targets=['local', 'netns']) @pytest.mark.parametrize('context', test_matrix, indirect=True) @skip_if_not_supported def test_htb(context): index, ifname = context.default_interface # 8<----------------------------------------------------- # root queue, '1:0' handle notation context.ipr.tc('add', 'htb', index=index, handle='1:', default='20:0') assert qdisc_exists(context.netns, 'htb', ifname=ifname) # 8<----------------------------------------------------- # classes, both string and int handle notation context.ipr.tc( 'add-class', 'htb', index=index, handle='1:1', parent='1:0', rate='256kbit', burst=1024 * 6, ) context.ipr.tc( 'add-class', 'htb', index=index, handle=0x10010, parent=0x10001, rate='192kbit', burst=1024 * 6, prio=1, ) context.ipr.tc( 'add-class', 'htb', index=index, handle='1:20', parent='1:1', rate='128kbit', burst=1024 * 6, prio=2, ) cls = context.ipr.get_classes(index=index) assert len(cls) == 3 # 8<----------------------------------------------------- # leaves, both string and int handle notation context.ipr.tc( 'add', 'sfq', index=index, handle='10:', parent='1:10', perturb=10 ) context.ipr.tc( 'add', 'sfq', index=index, handle=0x200000, parent=0x10020, perturb=10 ) qds = [x for x in context.ipr.get_qdiscs() if x['index'] == index] types = set([x.get_attr('TCA_KIND') for x in qds]) assert types == set(('htb', 'sfq')) # 8<----------------------------------------------------- # filters, both string and 
@pytest.mark.parametrize('context', test_matrix, indirect=True)
@skip_if_not_supported
def test_replace(context):
    """Replace an HTB class and verify the updated parameters.

    Reuses ``test_htb`` to build the initial qdisc/class/filter
    layout, then issues ``replace-class`` on class 1:10 (0x10010).
    """
    test_htb(context)
    index, ifname = context.default_interface
    # change class: new rate and priority for 1:10
    context.ipr.tc(
        'replace-class',
        'htb',
        index=index,
        handle=0x10010,
        parent=0x10001,
        rate='102kbit',
        burst=1024 * 6,
        prio=3,
    )
    clss = context.ipr.get_classes(index=index)
    for cls in clss:
        if cls['handle'] == 0x10010:
            break
    else:
        raise Exception('target class not found')
    opts = cls.get_attr('TCA_OPTIONS')
    params = opts.get_attr('TCA_HTB_PARMS')
    assert params['prio'] == 3
    # NOTE(review): 102kbit == 12750 B/s; the quantum looks derived as
    # rate / r2q (default r2q == 10), i.e. 1275 bytes == 10200 bits --
    # confirm against tc-htb(8)
    assert params['quantum'] * 8 == 10200
@pytest.mark.parametrize('context', test_matrix, indirect=True)
def test_filter(context):
    """Attach a u32 policer filter to ingress and verify its TBF params.

    Handles both attribute layouts: old kernels report the policer
    under TCA_U32_POLICE, modern ones under TCA_U32_ACT.
    """
    index, ifname = _do_test_simple(context)
    context.ipr.tc(
        'add-filter',
        'u32',
        index=index,
        protocol=AF_INET,
        parent=0xFFFF0000,
        action='drop',
        target=0x1,
        rate='10kbit',
        burst=10240,
        limit=0,
        prio=50,
        keys=['0x0/0x0+12'],
    )
    fls = context.ipr.get_filters(index=index, parent=0xFFFF0000)
    # assert there are filters
    assert fls
    # assert there is one police rule:
    prs = [
        x
        for x in fls
        if x.get_attr('TCA_OPTIONS') is not None
        and (
            x.get_attr('TCA_OPTIONS').get_attr('TCA_U32_POLICE') is not None
            or x.get_attr('TCA_OPTIONS').get_attr('TCA_U32_ACT') is not None
        )
    ][0]
    # assert the police rule has specified parameters
    options = prs.get_attr('TCA_OPTIONS')
    police_u32 = options.get_attr('TCA_U32_POLICE')
    # on modern kernels there is no TCA_U32_POLICE under
    # TCA_OPTIONS, but there is TCA_U32_ACT
    if police_u32 is None:
        police_u32 = (
            options.get_attr('TCA_U32_ACT')
            .get_attr('TCA_ACT_PRIO_0')
            .get_attr('TCA_ACT_OPTIONS')
        )
    police_tbf = police_u32.get_attr('TCA_POLICE_TBF')
    # 10kbit == 1250 bytes/s; mtu is echoed back from the setup
    assert police_tbf['rate'] == 1250
    assert police_tbf['mtu'] == 2040
gact.get_attr('TCA_ACT_KIND') == 'gact' # assert our gact has stats assert gact.get_attr('TCA_ACT_STATS') pyroute2-0.7.11/tests/test_linux/test_wireguard/000077500000000000000000000000001455030217500217635ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_wireguard/__init__.py000066400000000000000000000000001455030217500240620ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_linux/test_wireguard/test_peer.py000066400000000000000000000067751455030217500243460ustar00rootroot00000000000000from socket import AF_INET, AF_INET6 from pr2test.marks import require_root pytestmark = [require_root()] def test_peer_ipv4(context): ifname = context.new_ifname ipaddr = context.new_ipaddr port = 9999 listen = 2525 peer_ip_1 = context.new_ipaddr peer_ip_2 = context.new_ipaddr allowed_ip_1 = str(context.ipnets[1]) allowed_ip_2 = str(context.ipnets[2]) ( context.ndb.interfaces.create(ifname=ifname, kind='wireguard') .add_ip(f'{ipaddr}/24') .set('state', 'up') .commit() ) peer_1 = { 'public_key': 'TGFHcm9zc2VCaWNoZV9DJ2VzdExhUGx1c0JlbGxlPDM=', 'endpoint_addr': peer_ip_1, 'endpoint_port': port, 'persistent_keepalive': 15, 'allowed_ips': [f'{allowed_ip_1}'], } peer_2 = { 'public_key': 'AGFHcm9zc2VCaWNoZV9DJ2VzdExhUGx1c0JlbGxlPDM=', 'endpoint_addr': peer_ip_2, 'endpoint_port': port, 'persistent_keepalive': 15, 'allowed_ips': [f'{allowed_ip_2}'], } ( context.wg.set( ifname, private_key='RCdhcHJlc0JpY2hlLEplU2VyYWlzTGFQbHVzQm9ubmU=', fwmark=0x1337, listen_port=listen, peer=peer_1, ) ) ( context.wg.set( ifname, private_key='RCdhcHJlc0JpY2hlLEplU2VyYWlzTGFQbHVzQm9ubmU=', fwmark=0x1337, listen_port=listen, peer=peer_2, ) ) for peer in context.wg.info(ifname)[0].get_attr('WGDEVICE_A_PEERS'): endpoint = peer.get_attr('WGPEER_A_ENDPOINT') allowed = peer.get_attr('WGPEER_A_ALLOWEDIPS') assert endpoint['family'] == AF_INET assert endpoint['port'] == port assert endpoint['addr'] in (peer_ip_1, peer_ip_2) assert allowed[0]['addr'] in (allowed_ip_1, allowed_ip_2) def 
def test_create_one_ipset(ipset_name, wiset_sock):
    """Create an ipset, check its visibility, destroy it.

    The set must be reported by both ``test_ipset_exist`` and
    ``load_all_ipsets`` while it exists, and disappear from both
    after ``destroy()``.
    """
    with WiSet(name=ipset_name, sock=wiset_sock) as myset:
        myset.create()
        # snapshot of all sets taken while this one still exists
        list_wiset = load_all_ipsets(sock=wiset_sock)
        assert ipset_exists(ipset_name, sock=wiset_sock)
        myset.destroy()
        assert not ipset_exists(ipset_name, sock=wiset_sock)
    # the earlier snapshot still contains the set...
    assert ipset_name in list_wiset
    # ...but a fresh listing no longer does
    assert ipset_name not in load_all_ipsets(sock=wiset_sock)
def test_flush(ipset_name, wiset_sock):
    """An ipset flush must remove every entry at once."""
    addresses = ["1.2.3.4", "1.1.1.1", "7.7.7.7"]
    with WiSet(name=ipset_name, sock=wiset_sock) as wset:
        wset.create()
        # every address must be visible right after being added
        for address in addresses:
            wset.add(address)
            assert wset.test(address)
        wset.flush()
        # and none of them may survive the flush
        assert not any(wset.test(address) for address in addresses)
def test_replace_content(ipset_name, wiset_sock):
    """replace_entries() must swap the whole set content."""
    list_a = ["1.2.3.4", "2.3.4.5", "6.7.8.9"]
    list_b = ["1.1.1.1", "2.2.2.2", "3.3.3.3"]

    def test_replace(content_a, content_b):
        # helper: fill with content_a, replace by content_b, verify
        myset = WiSet(name=ipset_name, sock=wiset_sock)
        myset.create()
        myset.insert_list(content_a)
        myset.update_content()
        for value in content_a:
            assert value in myset.content
        myset.replace_entries(content_b)
        myset.update_content()
        # zip() pairing is arbitrary when sets are passed in, but the
        # two assertions are independent per element, so that is fine
        for old, new in zip(content_a, content_b):
            assert old not in myset.content
            assert new in myset.content
        myset.destroy()

    # the API must accept both lists and sets
    test_replace(list_a, list_b)
    test_replace(set(list_a), set(list_b))
def test_stats_consistency(ipset_name, wiset_sock):
    """Test several way to fill the statistics of one IPSet"""
    entries = ["1.2.3.4", "1.2.3.5", "1.2.3.6"]
    wset = WiSet(name=ipset_name, sock=wiset_sock)
    wset.create()
    wset.insert_list(entries)
    # the set loaded from the full listing...
    via_all = load_all_ipsets(sock=wiset_sock, content=True)[ipset_name]
    # ...and the set loaded directly by name...
    via_one = load_ipset(ipset_name, sock=wiset_sock, content=True)
    # ...must both expose every inserted entry
    for loaded in (via_all, via_one):
        for entry in entries:
            assert entry in loaded.content
def test_dump(ipr):
    """Every message of a full netlink dump must be an nlmsg instance.

    Materializes the dump once and checks it is non-empty first:
    the original ``all([...])`` would pass vacuously on an empty
    dump, and the throwaway list inside ``all()`` was unnecessary.
    """
    messages = list(ipr.dump())
    # a live system always reports at least some objects
    assert messages
    assert all(isinstance(message, nlmsg) for message in messages)
def parameters(func):
    """Return the set of parameter names of *func*.

    Skips the current test when the signature cannot be introspected
    (``inspect.signature`` raises ValueError for some builtins).
    """
    try:
        return set(signature(func).parameters.keys())
    except ValueError:
        # fix: the skip message read 'ginature check error'
        pytest.skip('signature check error, skip test')
def test_get_neighbours(context):
    """Every neighbour message must be well formed."""
    for message in context.ipr.get_neighbours():
        header = message['header']
        assert header['target'] == 'localhost'
        assert header['type'] == RTM_NEWNEIGH
        # the address family must match the destination address
        destination = message.get_attr('NDA_DST')
        assert message['family'] == get_address_family(destination)
        # the link layer address must look like aa:bb:cc:dd:ee:ff
        lladdr = message.get_attr('NDA_LLADDR')
        assert isinstance(lladdr, str)
        assert lladdr.count(':') == 5
@pytest.mark.skipif(
    sys.version_info < (3, 8), reason='unsupported Python version'
)
@pytest.mark.parametrize('session', nox_sessions, indirect=True)
def test_options_call(session):
    """Every ``options(...)`` call in a nox session must be well formed.

    The session source is parsed with ``ast``; a plain call to
    ``options`` may appear only in sessions decorated with
    ``add_session_config`` and must be called with exactly two
    arguments: a literal and the name ``config``.
    """
    # walk the AST tree
    for node in ast.walk(ast.parse(inspect.getsource(session.src_func))):
        # filter only plain function calls, no attributes
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            # lookup call `options(...)`
            if node.func.id == 'options':
                # only for decorated by `add_session_config`
                assert session.has_user_config
                # check the arguments
                assert len(node.args) == 2
                assert isinstance(node.args[0], ast.Constant)
                assert isinstance(node.args[1], ast.Name)
                assert node.args[1].id == 'config'
def test_static_version_file(files):
    """The VERSION file must contain one well-formed version string.

    Accepted: MAJOR.MINOR.PATCH with an optional .postN or .rcN
    suffix.  Any component may have multiple digits: the original
    pattern limited major and minor to a single digit, so a future
    release like 0.10.0 would have spuriously failed the check.
    """
    assert re.match(
        r'^[0-9]+\.[0-9]+\.[0-9]+(\.post[0-9]+|\.rc[0-9]+)?$',
        files['VERSION'].getvalue().strip(),
    )
maxaddr=1020) for i in range(1020): ap.alloc() with pytest.raises(KeyError): ap.alloc() def test_reverse(): ap = AddrPool(minaddr=1, maxaddr=1024, reverse=True) for i in range(512): assert ap.alloc() > ap.alloc() def test_free(): ap = AddrPool(minaddr=1, maxaddr=1024) f = ap.alloc() ap.free(f) def test_free_fail(): ap = AddrPool(minaddr=1, maxaddr=1024) with pytest.raises(KeyError): ap.free(0) def test_free_reverse_fail(): ap = AddrPool(minaddr=1, maxaddr=1024, reverse=True) with pytest.raises(KeyError): ap.free(0) def test_locate(): ap = AddrPool() f = ap.alloc() base1, bit1, is_allocated1 = ap.locate(f) base2, bit2, is_allocated2 = ap.locate(f + 1) assert base1 == base2 assert bit2 == bit1 + 1 assert is_allocated1 assert not is_allocated2 assert ap.allocated == 1 def test_setaddr_allocated(): ap = AddrPool() f = ap.alloc() base, bit, is_allocated = ap.locate(f + 1) assert not is_allocated assert ap.allocated == 1 ap.setaddr(f + 1, 'allocated') base, bit, is_allocated = ap.locate(f + 1) assert is_allocated assert ap.allocated == 2 ap.free(f + 1) base, bit, is_allocated = ap.locate(f + 1) assert not is_allocated assert ap.allocated == 1 def test_setaddr_free(): ap = AddrPool() f = ap.alloc() base, bit, is_allocated = ap.locate(f + 1) assert not is_allocated assert ap.allocated == 1 ap.setaddr(f + 1, 'free') base, bit, is_allocated = ap.locate(f + 1) assert not is_allocated assert ap.allocated == 1 ap.setaddr(f, 'free') base, bit, is_allocated = ap.locate(f) assert not is_allocated assert ap.allocated == 0 with pytest.raises(KeyError): ap.free(f) pyroute2-0.7.11/tests/test_unit/test_buffer.py000066400000000000000000000035441455030217500214430ustar00rootroot00000000000000import pytest from pyroute2.netlink.buffer import Buffer, Page buffer_settings = ( 'mode,size,page_size', (('internal', 10485760, 32768), ('shared', 10485760, 32768)), ) @pytest.mark.parametrize(*buffer_settings) def test_create_buffer(mode, size, page_size): try: buffer = Buffer(mode, size, 
page_size) except ModuleNotFoundError: pytest.skip(f'buffer mode "{mode}" not supported') assert buffer.mode == mode assert buffer.size == size assert buffer.page_size == page_size minimal_index = 0 maximal_index = size // page_size assert len(buffer.directory) == maximal_index for index, page in buffer.directory.items(): assert minimal_index <= index <= maximal_index assert isinstance(page, Page) assert page.offset == index * page_size assert page.is_free is True buffer.close() @pytest.mark.parametrize(*buffer_settings) def test_use_all_pages(mode, size, page_size): try: buffer = Buffer(mode, size, page_size) except ModuleNotFoundError: pytest.skip(f'buffer mode "{mode}" not supported') maximal_index = size // page_size marker = 0x05 for _ in range(maximal_index): page = buffer.get_free_page() assert not page.is_free page.view[0] = marker assert page.view[0] == marker assert buffer.view[page.offset] == marker assert buffer.buf[page.offset] == marker marker += 1 if marker == 0xFF: marker = 0x05 with pytest.raises(MemoryError): buffer.get_free_page() buffer.close() @pytest.mark.parametrize(*buffer_settings) def test_context_manager(mode, size, page_size): try: with Buffer(mode, size, page_size) as buffer: assert buffer.mode == mode except ModuleNotFoundError: pytest.skip(f'buffer mode "{mode}" not supported') pyroute2-0.7.11/tests/test_unit/test_common.py000066400000000000000000000015731455030217500214620ustar00rootroot00000000000000from pyroute2.common import dqn2int, hexdump, hexload, uifname, uuid32 def test_hexdump(): binary = b'abcdef5678' dump1 = hexdump(binary) dump2 = hexdump(binary, length=6) assert len(dump1) == 29 assert len(dump2) == 17 assert dump1[2] == dump1[-3] == dump2[2] == dump2[-3] == ':' assert hexload(dump1) == binary assert hexload(dump2) == binary[:6] def test_uuid32(): uA = uuid32() uB = uuid32() prime = __builtins__.get('long', int) assert isinstance(uA, prime) assert isinstance(uB, prime) assert uA != uB assert uA < 0x100000000 assert uB < 
0x100000000 def test_dqn2int(): assert dqn2int('255.255.255.0') == 24 assert dqn2int('255.240.0.0') == 12 assert dqn2int('255.0.0.0') == 8 def test_uifname(): nA = uifname() nB = uifname() assert nA != nB assert int(nA[2:], 16) != int(nB[2:], 16) pyroute2-0.7.11/tests/test_unit/test_config.py000066400000000000000000000006461455030217500214370ustar00rootroot00000000000000from pyroute2 import config def test_kernel_version(): versions = { '1.2.3-test01': [1, 2, 3], '1.2.3.test01': [1, 2, 3], '10.1.12': [10, 1, 12], 'test.10.12': [], '2.10.test01': [2, 10], '5.16.5-200.fc35.x86_64': [5, 16, 5], '5.15.15.debug': [5, 15, 15], } for key, value in versions.items(): assert config.parse_kernel_version(key) == value pyroute2-0.7.11/tests/test_unit/test_entry_points/000077500000000000000000000000001455030217500223475ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_unit/test_entry_points/test_basic.py000066400000000000000000000005731455030217500250460ustar00rootroot00000000000000from pyroute2 import IPRoute from pyroute2 import NetlinkError as E1 from pyroute2.netlink import NetlinkError as E2 from pyroute2.netlink.exceptions import NetlinkError as E3 def test_exceptions(): assert E1 == E2 == E3 with IPRoute() as ipr: for e in (E1, E2, E3): try: ipr.get_links(-1) except e: pass pyroute2-0.7.11/tests/test_unit/test_iproute_match/000077500000000000000000000000001455030217500224555ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_unit/test_iproute_match/links.dump000066400000000000000000001422731455030217500244750ustar00rootroot00000000000000[ { "family": 0, "__align": [], "ifi_type": 772, "index": 1, "flags": 65609, "change": 0, "attrs": [ [ "IFLA_IFNAME", "lo" ], [ "IFLA_TXQLEN", 1000 ], [ "IFLA_OPERSTATE", "UNKNOWN" ], [ "IFLA_LINKMODE", 0 ], [ "IFLA_MTU", 65536 ], [ "IFLA_MIN_MTU", 0 ], [ "IFLA_MAX_MTU", 0 ], [ "IFLA_GROUP", 0 ], [ "IFLA_PROMISCUITY", 0 ], [ "IFLA_NUM_TX_QUEUES", 1 ], [ "IFLA_GSO_MAX_SEGS", 65535 ], [ "IFLA_GSO_MAX_SIZE", 65536 ], [ 
"IFLA_GRO_MAX_SIZE", 65536 ], [ "IFLA_NUM_RX_QUEUES", 1 ], [ "IFLA_CARRIER", 1 ], [ "IFLA_QDISC", "noqueue" ], [ "IFLA_CARRIER_CHANGES", 0 ], [ "IFLA_CARRIER_UP_COUNT", 0 ], [ "IFLA_CARRIER_DOWN_COUNT", 0 ], [ "IFLA_PROTO_DOWN", 0 ], [ "IFLA_MAP", { "mem_start": 0, "mem_end": 0, "base_addr": 0, "irq": 0, "dma": 0, "port": 0 } ], [ "IFLA_ADDRESS", "00:00:00:00:00:00" ], [ "IFLA_BROADCAST", "00:00:00:00:00:00" ], [ "IFLA_STATS64", { "rx_packets": 120, "tx_packets": 120, "rx_bytes": 11110, "tx_bytes": 11110, "rx_errors": 0, "tx_errors": 0, "rx_dropped": 0, "tx_dropped": 0, "multicast": 0, "collisions": 0, "rx_length_errors": 0, "rx_over_errors": 0, "rx_crc_errors": 0, "rx_frame_errors": 0, "rx_fifo_errors": 0, "rx_missed_errors": 0, "tx_aborted_errors": 0, "tx_carrier_errors": 0, "tx_fifo_errors": 0, "tx_heartbeat_errors": 0, "tx_window_errors": 0, "rx_compressed": 0, "tx_compressed": 0 } ], [ "IFLA_STATS", { "rx_packets": 120, "tx_packets": 120, "rx_bytes": 11110, "tx_bytes": 11110, "rx_errors": 0, "tx_errors": 0, "rx_dropped": 0, "tx_dropped": 0, "multicast": 0, "collisions": 0, "rx_length_errors": 0, "rx_over_errors": 0, "rx_crc_errors": 0, "rx_frame_errors": 0, "rx_fifo_errors": 0, "rx_missed_errors": 0, "tx_aborted_errors": 0, "tx_carrier_errors": 0, "tx_fifo_errors": 0, "tx_heartbeat_errors": 0, "tx_window_errors": 0, "rx_compressed": 0, "tx_compressed": 0 } ], [ "IFLA_XDP", { "attrs": [ [ "IFLA_XDP_ATTACHED", null ] ] } ], [ "IFLA_AF_SPEC", { "attrs": [ [ "UNKNOWN", { "header": { "length": 12, "type": 45 } } ], [ "AF_INET", { "dummy": 65672, "forwarding": 1, "mc_forwarding": 0, "proxy_arp": 0, "accept_redirects": 1, "secure_redirects": 1, "send_redirects": 1, "shared_media": 1, "rp_filter": 2, "accept_source_route": 0, "bootp_relay": 0, "log_martians": 0, "tag": 0, "arpfilter": 0, "medium_id": 0, "noxfrm": 1, "nopolicy": 1, "force_igmp_version": 0, "arp_announce": 0, "arp_ignore": 0, "promote_secondaries": 1, "arp_accept": 0, "arp_notify": 0, "accept_local": 0, 
"src_vmark": 0, "proxy_arp_pvlan": 0, "route_localnet": 0, "igmpv2_unsolicited_report_interval": 10000, "igmpv3_unsolicited_report_interval": 1000 } ], [ "AF_INET6", { "attrs": [ [ "IFLA_INET6_FLAGS", 2147483648 ], [ "IFLA_INET6_CACHEINFO", { "max_reasm_len": 65535, "tstamp": 155, "reachable_time": 43862, "retrans_time": 1000 } ], [ "IFLA_INET6_CONF", { "forwarding": 0, "hop_limit": 64, "mtu": 65536, "accept_ra": 1, "accept_redirects": 1, "autoconf": 1, "dad_transmits": 1, "router_solicitations": 4294967295, "router_solicitation_interval": 4000, "router_solicitation_delay": 1000, "use_tempaddr": 4294967295, "temp_valid_lft": 604800, "temp_preferred_lft": 86400, "regen_max_retry": 3, "max_desync_factor": 600, "max_addresses": 16, "force_mld_version": 0, "accept_ra_defrtr": 1, "accept_ra_pinfo": 1, "accept_ra_rtr_pref": 1, "router_probe_interval": 60000, "accept_ra_rt_info_max_plen": 0, "proxy_ndp": 0, "optimistic_dad": 0, "accept_source_route": 0, "mc_forwarding": 0, "disable_ipv6": 0, "accept_dad": 4294967295, "force_tllao": 0, "ndisc_notify": 0 } ], [ "IFLA_INET6_STATS", { "num": 37, "inpkts": 97, "inoctets": 8657, "indelivers": 97, "outforwdatagrams": 0, "outpkts": 97, "outoctets": 8657, "inhdrerrors": 0, "intoobigerrors": 0, "innoroutes": 0, "inaddrerrors": 0, "inunknownprotos": 0, "intruncatedpkts": 0, "indiscards": 0, "outdiscards": 0, "outnoroutes": 0, "reasmtimeout": 0, "reasmreqds": 0, "reasmoks": 0, "reasmfails": 0, "fragoks": 0, "fragfails": 0, "fragcreates": 0, "inmcastpkts": 0, "outmcastpkts": 2, "inbcastpkts": 0, "outbcastpkts": 0, "inmcastoctets": 0, "outmcastoctets": 152, "inbcastoctets": 0, "outbcastoctets": 0, "csumerrors": 0, "noectpkts": 97, "ect1pkts": 0, "ect0pkts": 0, "cepkts": 0 } ], [ "IFLA_INET6_ICMP6STATS", { "num": 6, "inmsgs": 2, "inerrors": 0, "outmsgs": 2, "outerrors": 0, "csumerrors": 0 } ], [ "IFLA_INET6_TOKEN", "::" ], [ "IFLA_INET6_ADDR_GEN_MODE", 0 ] ] } ] ] } ] ], "header": { "length": 1364, "type": 16, "flags": 2, 
"sequence_number": 258, "pid": 7728, "error": null, "target": "localhost", "stats": [ 0, 0, 0 ] }, "state": "up", "event": "RTM_NEWLINK" }, { "family": 0, "__align": [], "ifi_type": 1, "index": 2, "flags": 69699, "change": 0, "attrs": [ [ "IFLA_IFNAME", "wl0" ], [ "IFLA_TXQLEN", 1000 ], [ "IFLA_OPERSTATE", "UP" ], [ "IFLA_LINKMODE", 1 ], [ "IFLA_MTU", 1500 ], [ "IFLA_MIN_MTU", 256 ], [ "IFLA_MAX_MTU", 2304 ], [ "IFLA_GROUP", 0 ], [ "IFLA_PROMISCUITY", 0 ], [ "IFLA_NUM_TX_QUEUES", 1 ], [ "IFLA_GSO_MAX_SEGS", 65535 ], [ "IFLA_GSO_MAX_SIZE", 65536 ], [ "IFLA_GRO_MAX_SIZE", 65536 ], [ "IFLA_NUM_RX_QUEUES", 1 ], [ "IFLA_CARRIER", 1 ], [ "IFLA_QDISC", "noqueue" ], [ "IFLA_CARRIER_CHANGES", 2 ], [ "IFLA_CARRIER_UP_COUNT", 1 ], [ "IFLA_CARRIER_DOWN_COUNT", 1 ], [ "IFLA_PROTO_DOWN", 0 ], [ "IFLA_MAP", { "mem_start": 0, "mem_end": 0, "base_addr": 0, "irq": 0, "dma": 0, "port": 0 } ], [ "IFLA_ADDRESS", "18:56:80:11:ff:a3" ], [ "IFLA_BROADCAST", "ff:ff:ff:ff:ff:ff" ], [ "IFLA_STATS64", { "rx_packets": 835511, "tx_packets": 463567, "rx_bytes": 908228126, "tx_bytes": 181325506, "rx_errors": 0, "tx_errors": 0, "rx_dropped": 0, "tx_dropped": 0, "multicast": 0, "collisions": 0, "rx_length_errors": 0, "rx_over_errors": 0, "rx_crc_errors": 0, "rx_frame_errors": 0, "rx_fifo_errors": 0, "rx_missed_errors": 0, "tx_aborted_errors": 0, "tx_carrier_errors": 0, "tx_fifo_errors": 0, "tx_heartbeat_errors": 0, "tx_window_errors": 0, "rx_compressed": 0, "tx_compressed": 0 } ], [ "IFLA_STATS", { "rx_packets": 835511, "tx_packets": 463567, "rx_bytes": 908228126, "tx_bytes": 181325506, "rx_errors": 0, "tx_errors": 0, "rx_dropped": 0, "tx_dropped": 0, "multicast": 0, "collisions": 0, "rx_length_errors": 0, "rx_over_errors": 0, "rx_crc_errors": 0, "rx_frame_errors": 0, "rx_fifo_errors": 0, "rx_missed_errors": 0, "tx_aborted_errors": 0, "tx_carrier_errors": 0, "tx_fifo_errors": 0, "tx_heartbeat_errors": 0, "tx_window_errors": 0, "rx_compressed": 0, "tx_compressed": 0 } ], [ "IFLA_XDP", { "attrs": [ [ 
"IFLA_XDP_ATTACHED", null ] ] } ], [ "IFLA_PERM_ADDRESS", "18:56:80:11:ff:a3" ], [ "IFLA_AF_SPEC", { "attrs": [ [ "AF_INET", { "dummy": 65672, "forwarding": 1, "mc_forwarding": 0, "proxy_arp": 0, "accept_redirects": 1, "secure_redirects": 1, "send_redirects": 1, "shared_media": 1, "rp_filter": 2, "accept_source_route": 0, "bootp_relay": 0, "log_martians": 0, "tag": 0, "arpfilter": 0, "medium_id": 0, "noxfrm": 0, "nopolicy": 0, "force_igmp_version": 0, "arp_announce": 0, "arp_ignore": 0, "promote_secondaries": 1, "arp_accept": 0, "arp_notify": 0, "accept_local": 0, "src_vmark": 0, "proxy_arp_pvlan": 0, "route_localnet": 0, "igmpv2_unsolicited_report_interval": 10000, "igmpv3_unsolicited_report_interval": 1000 } ], [ "AF_INET6", { "attrs": [ [ "IFLA_INET6_FLAGS", 2147483648 ], [ "IFLA_INET6_CACHEINFO", { "max_reasm_len": 65535, "tstamp": 1982, "reachable_time": 37034, "retrans_time": 1000 } ], [ "IFLA_INET6_CONF", { "forwarding": 0, "hop_limit": 64, "mtu": 1500, "accept_ra": 0, "accept_redirects": 1, "autoconf": 1, "dad_transmits": 1, "router_solicitations": 4294967295, "router_solicitation_interval": 4000, "router_solicitation_delay": 1000, "use_tempaddr": 0, "temp_valid_lft": 604800, "temp_preferred_lft": 86400, "regen_max_retry": 3, "max_desync_factor": 600, "max_addresses": 16, "force_mld_version": 0, "accept_ra_defrtr": 1, "accept_ra_pinfo": 1, "accept_ra_rtr_pref": 1, "router_probe_interval": 60000, "accept_ra_rt_info_max_plen": 0, "proxy_ndp": 0, "optimistic_dad": 0, "accept_source_route": 0, "mc_forwarding": 0, "disable_ipv6": 0, "accept_dad": 1, "force_tllao": 0, "ndisc_notify": 0 } ], [ "IFLA_INET6_STATS", { "num": 37, "inpkts": 3986, "inoctets": 325147, "indelivers": 3986, "outforwdatagrams": 0, "outpkts": 40, "outoctets": 4306, "inhdrerrors": 0, "intoobigerrors": 0, "innoroutes": 0, "inaddrerrors": 0, "inunknownprotos": 0, "intruncatedpkts": 0, "indiscards": 0, "outdiscards": 0, "outnoroutes": 0, "reasmtimeout": 0, "reasmreqds": 0, "reasmoks": 0, 
"reasmfails": 0, "fragoks": 0, "fragfails": 0, "fragcreates": 0, "inmcastpkts": 3978, "outmcastpkts": 36, "inbcastpkts": 0, "outbcastpkts": 0, "inmcastoctets": 324235, "outmcastoctets": 4034, "inbcastoctets": 0, "outbcastoctets": 0, "csumerrors": 0, "noectpkts": 3986, "ect1pkts": 0, "ect0pkts": 0, "cepkts": 0 } ], [ "IFLA_INET6_ICMP6STATS", { "num": 6, "inmsgs": 19, "inerrors": 0, "outmsgs": 18, "outerrors": 0, "csumerrors": 0 } ], [ "IFLA_INET6_TOKEN", "::" ], [ "IFLA_INET6_ADDR_GEN_MODE", 1 ] ] } ] ] } ], [ "IFLA_PARENT_DEV_NAME", "0000:03:00.0" ], [ "IFLA_PARENT_DEV_BUS_NAME", "pci" ] ], "header": { "length": 1396, "type": 16, "flags": 2, "sequence_number": 258, "pid": 7728, "error": null, "target": "localhost", "stats": [ 0, 0, 0 ] }, "state": "up", "event": "RTM_NEWLINK" }, { "family": 0, "__align": [], "ifi_type": 1, "index": 3, "flags": 4099, "change": 0, "attrs": [ [ "IFLA_IFNAME", "br0" ], [ "IFLA_TXQLEN", 0 ], [ "IFLA_OPERSTATE", "DOWN" ], [ "IFLA_LINKMODE", 0 ], [ "IFLA_MTU", 1500 ], [ "IFLA_MIN_MTU", 68 ], [ "IFLA_MAX_MTU", 65535 ], [ "IFLA_GROUP", 0 ], [ "IFLA_PROMISCUITY", 0 ], [ "IFLA_NUM_TX_QUEUES", 1 ], [ "IFLA_GSO_MAX_SEGS", 65535 ], [ "IFLA_GSO_MAX_SIZE", 65536 ], [ "IFLA_GRO_MAX_SIZE", 65536 ], [ "IFLA_NUM_RX_QUEUES", 1 ], [ "IFLA_CARRIER", 0 ], [ "IFLA_QDISC", "noqueue" ], [ "IFLA_CARRIER_CHANGES", 1 ], [ "IFLA_CARRIER_UP_COUNT", 0 ], [ "IFLA_CARRIER_DOWN_COUNT", 1 ], [ "IFLA_PROTO_DOWN", 0 ], [ "IFLA_MAP", { "mem_start": 0, "mem_end": 0, "base_addr": 0, "irq": 0, "dma": 0, "port": 0 } ], [ "IFLA_ADDRESS", "02:42:f5:f1:32:bc" ], [ "IFLA_BROADCAST", "ff:ff:ff:ff:ff:ff" ], [ "IFLA_STATS64", { "rx_packets": 0, "tx_packets": 0, "rx_bytes": 0, "tx_bytes": 0, "rx_errors": 0, "tx_errors": 0, "rx_dropped": 0, "tx_dropped": 0, "multicast": 0, "collisions": 0, "rx_length_errors": 0, "rx_over_errors": 0, "rx_crc_errors": 0, "rx_frame_errors": 0, "rx_fifo_errors": 0, "rx_missed_errors": 0, "tx_aborted_errors": 0, "tx_carrier_errors": 0, "tx_fifo_errors": 
0, "tx_heartbeat_errors": 0, "tx_window_errors": 0, "rx_compressed": 0, "tx_compressed": 0 } ], [ "IFLA_STATS", { "rx_packets": 0, "tx_packets": 0, "rx_bytes": 0, "tx_bytes": 0, "rx_errors": 0, "tx_errors": 0, "rx_dropped": 0, "tx_dropped": 0, "multicast": 0, "collisions": 0, "rx_length_errors": 0, "rx_over_errors": 0, "rx_crc_errors": 0, "rx_frame_errors": 0, "rx_fifo_errors": 0, "rx_missed_errors": 0, "tx_aborted_errors": 0, "tx_carrier_errors": 0, "tx_fifo_errors": 0, "tx_heartbeat_errors": 0, "tx_window_errors": 0, "rx_compressed": 0, "tx_compressed": 0 } ], [ "IFLA_XDP", { "attrs": [ [ "IFLA_XDP_ATTACHED", null ] ] } ], [ "IFLA_LINKINFO", { "attrs": [ [ "IFLA_INFO_KIND", "bridge" ], [ "IFLA_INFO_DATA", { "attrs": [ [ "IFLA_BR_HELLO_TIMER", 0 ], [ "IFLA_BR_TCN_TIMER", 0 ], [ "IFLA_BR_TOPOLOGY_CHANGE_TIMER", 0 ], [ "IFLA_BR_GC_TIMER", 0 ], [ "IFLA_BR_FORWARD_DELAY", 1500 ], [ "IFLA_BR_HELLO_TIME", 200 ], [ "IFLA_BR_MAX_AGE", 2000 ], [ "IFLA_BR_AGEING_TIME", 30000 ], [ "IFLA_BR_STP_STATE", 0 ], [ "IFLA_BR_PRIORITY", 32768 ], [ "IFLA_BR_VLAN_FILTERING", 0 ], [ "IFLA_BR_GROUP_FWD_MASK", 0 ], [ "IFLA_BR_BRIDGE_ID", { "prio": 128, "addr": "02:42:f5:f1:32:bc" } ], [ "IFLA_BR_ROOT_ID", { "prio": 128, "addr": "02:42:f5:f1:32:bc" } ], [ "IFLA_BR_ROOT_PORT", 0 ], [ "IFLA_BR_ROOT_PATH_COST", 0 ], [ "IFLA_BR_TOPOLOGY_CHANGE", 0 ], [ "IFLA_BR_TOPOLOGY_CHANGE_DETECTED", 0 ], [ "IFLA_BR_GROUP_ADDR", "01:80:c2:00:00:00" ], [ "UNKNOWN", { "header": { "length": 12, "type": 46 } } ], [ "IFLA_BR_VLAN_PROTOCOL", 33024 ], [ "IFLA_BR_VLAN_DEFAULT_PVID", 1 ], [ "IFLA_BR_VLAN_STATS_ENABLED", 0 ], [ "UNKNOWN", { "header": { "length": 5, "type": 45 } } ], [ "IFLA_BR_MCAST_ROUTER", 1 ], [ "IFLA_BR_MCAST_SNOOPING", 1 ], [ "IFLA_BR_MCAST_QUERY_USE_IFADDR", 0 ], [ "IFLA_BR_MCAST_QUERIER", 0 ], [ "IFLA_BR_MCAST_STATS_ENABLED", 0 ], [ "IFLA_BR_MCAST_HASH_ELASTICITY", 16 ], [ "IFLA_BR_MCAST_HASH_MAX", 4096 ], [ "IFLA_BR_MCAST_LAST_MEMBER_CNT", 2 ], [ "IFLA_BR_MCAST_STARTUP_QUERY_CNT", 2 ], [ 
"IFLA_BR_MCAST_IGMP_VERSION", 2 ], [ "IFLA_BR_MCAST_MLD_VERSION", 1 ], [ "IFLA_BR_MCAST_LAST_MEMBER_INTVL", 100 ], [ "IFLA_BR_MCAST_MEMBERSHIP_INTVL", 26000 ], [ "IFLA_BR_MCAST_QUERIER_INTVL", 25500 ], [ "IFLA_BR_MCAST_QUERY_INTVL", 12500 ], [ "IFLA_BR_MCAST_QUERY_RESPONSE_INTVL", 1000 ], [ "IFLA_BR_MCAST_STARTUP_QUERY_INTVL", 3125 ], [ "IFLA_BR_NF_CALL_IPTABLES", 0 ], [ "IFLA_BR_NF_CALL_IP6TABLES", 0 ], [ "IFLA_BR_NF_CALL_ARPTABLES", 0 ] ] } ] ] } ], [ "IFLA_AF_SPEC", { "attrs": [ [ "AF_INET", { "dummy": 65672, "forwarding": 1, "mc_forwarding": 0, "proxy_arp": 0, "accept_redirects": 1, "secure_redirects": 1, "send_redirects": 1, "shared_media": 1, "rp_filter": 2, "accept_source_route": 0, "bootp_relay": 0, "log_martians": 0, "tag": 0, "arpfilter": 0, "medium_id": 0, "noxfrm": 0, "nopolicy": 0, "force_igmp_version": 0, "arp_announce": 0, "arp_ignore": 0, "promote_secondaries": 1, "arp_accept": 0, "arp_notify": 0, "accept_local": 0, "src_vmark": 0, "proxy_arp_pvlan": 0, "route_localnet": 0, "igmpv2_unsolicited_report_interval": 10000, "igmpv3_unsolicited_report_interval": 1000 } ], [ "AF_INET6", { "attrs": [ [ "IFLA_INET6_FLAGS", 0 ], [ "IFLA_INET6_CACHEINFO", { "max_reasm_len": 65535, "tstamp": 2068, "reachable_time": 25102, "retrans_time": 1000 } ], [ "IFLA_INET6_CONF", { "forwarding": 0, "hop_limit": 64, "mtu": 1500, "accept_ra": 0, "accept_redirects": 1, "autoconf": 1, "dad_transmits": 1, "router_solicitations": 4294967295, "router_solicitation_interval": 4000, "router_solicitation_delay": 1000, "use_tempaddr": 0, "temp_valid_lft": 604800, "temp_preferred_lft": 86400, "regen_max_retry": 3, "max_desync_factor": 600, "max_addresses": 16, "force_mld_version": 0, "accept_ra_defrtr": 1, "accept_ra_pinfo": 1, "accept_ra_rtr_pref": 1, "router_probe_interval": 60000, "accept_ra_rt_info_max_plen": 0, "proxy_ndp": 0, "optimistic_dad": 0, "accept_source_route": 0, "mc_forwarding": 0, "disable_ipv6": 0, "accept_dad": 1, "force_tllao": 0, "ndisc_notify": 0 } ], [ 
"IFLA_INET6_STATS", { "num": 37, "inpkts": 0, "inoctets": 0, "indelivers": 0, "outforwdatagrams": 0, "outpkts": 0, "outoctets": 0, "inhdrerrors": 0, "intoobigerrors": 0, "innoroutes": 0, "inaddrerrors": 0, "inunknownprotos": 0, "intruncatedpkts": 0, "indiscards": 0, "outdiscards": 0, "outnoroutes": 0, "reasmtimeout": 0, "reasmreqds": 0, "reasmoks": 0, "reasmfails": 0, "fragoks": 0, "fragfails": 0, "fragcreates": 0, "inmcastpkts": 0, "outmcastpkts": 0, "inbcastpkts": 0, "outbcastpkts": 0, "inmcastoctets": 0, "outmcastoctets": 0, "inbcastoctets": 0, "outbcastoctets": 0, "csumerrors": 0, "noectpkts": 0, "ect1pkts": 0, "ect0pkts": 0, "cepkts": 0 } ], [ "IFLA_INET6_ICMP6STATS", { "num": 6, "inmsgs": 0, "inerrors": 0, "outmsgs": 0, "outerrors": 0, "csumerrors": 0 } ], [ "IFLA_INET6_TOKEN", "::" ], [ "IFLA_INET6_ADDR_GEN_MODE", 0 ] ] } ] ] } ] ], "header": { "length": 1784, "type": 16, "flags": 2, "sequence_number": 258, "pid": 7728, "error": null, "target": "localhost", "stats": [ 0, 0, 0 ] }, "state": "up", "event": "RTM_NEWLINK" } ] pyroute2-0.7.11/tests/test_unit/test_iproute_match/test_match.py000066400000000000000000000041771455030217500251730ustar00rootroot00000000000000import json import pytest from pyroute2 import IPRoute from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg from pyroute2.requests.link import LinkFieldFilter, LinkIPRouteFilter from pyroute2.requests.main import RequestProcessor with open('test_unit/test_iproute_match/links.dump', 'r') as f: ifinfmsg_sample = [ifinfmsg().load(x) for x in json.load(f)] for msg in ifinfmsg_sample: msg.reset() msg.encode() msg.decode() @pytest.fixture def ipr(): with IPRoute() as iproute: yield iproute @pytest.mark.parametrize( 'spec,query,result', ( ({'ifname': 'lo'}, ('stats64', 'rx_packets'), 120), ({'ifname': 'lo'}, ('af_spec', 'af_inet', 'forwarding'), 1), ({'ifname': 'wl0'}, ('num_rx_queues',), 1), ({'ifname': 'wl0'}, ('qdisc',), 'noqueue'), ({'ifname': 'wl0'}, ('stats64', 'rx_packets'), 835511), ( {'ifname': 
'wl0'}, ('af_spec', 'af_inet6', 'inet6_flags'), 2147483648, ), ( {'ifname': 'wl0'}, ('af_spec', 'af_inet6', 'inet6_conf', 'temp_preferred_lft'), 86400, ), ({'parent_dev_name': '0000:03:00.0'}, ('ifname',), 'wl0'), ({'kind': 'bridge', 'br_forward_delay': 1500}, ('ifname',), 'br0'), ( {'ifname': 'br0'}, ('linkinfo', 'data', 'br_group_addr'), '01:80:c2:00:00:00', ), ), ids=[ 'lo:stats64/rx_packets', 'lo:af_spec/af_inet/forwarding', 'wl0:num_rx_queues', 'wl0:qdisc', 'wl0:stats64/rx_packets', 'wl0:af_spec/af_inet6/inet6_flags', 'wl0:af_spec/af_inet6/inet6_conf/temp_preferred_lft', 'parent_dev_name(...) => wl0', 'br_forward_delay(...) => br0', 'br0:linkinfo/data/br_group_addr', ], ) def test_get_leaf(ipr, spec, query, result): spec = RequestProcessor(context=spec, prime=spec) spec.apply_filter(LinkFieldFilter()) spec.apply_filter(LinkIPRouteFilter('dump')) spec.finalize() msg = ipr.filter_messages(spec, ifinfmsg_sample) assert len(msg) == 1 assert msg[0].get_nested(*query) == result pyroute2-0.7.11/tests/test_unit/test_nlmsg/000077500000000000000000000000001455030217500207325ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_unit/test_nlmsg/addrmsg_ipv4.dump000066400000000000000000000024341455030217500242070ustar00rootroot00000000000000# pyroute2 hex dump sample # # lo: 127.0.0.1/8 4c:00:00:00:14:00:02:00:02:01:00:00:c5:4e:00:00:02:08:80:fe:01:00:00:00:08:00:01:00:7f:00:00:01:08:00:02:00:7f:00:00:01:07:00:03:00:6c:6f:00:00:08:00:08:00:80:00:00:00:14:00:06:00:ff:ff:ff:ff:ff:ff:ff:ff:05:01:00:00:05:01:00:00 # parsed data should match messages below #: application/json [ { "attrs": [ [ "IFA_ADDRESS", "127.0.0.1" ], [ "IFA_LOCAL", "127.0.0.1" ], [ "IFA_LABEL", "lo" ], [ "IFA_FLAGS", 128 ], [ "IFA_CACHEINFO", { "cstamp": 261, "ifa_preferred": 4294967295, "ifa_valid": 4294967295, "tstamp": 261 } ] ], "event": "RTM_NEWADDR", "family": 2, "flags": 128, "header": { "error": null, "flags": 2, "length": 76, "pid": 20165, "sequence_number": 258, "type": 20 }, "index": 1, 
"prefixlen": 8, "scope": 254 } ] pyroute2-0.7.11/tests/test_unit/test_nlmsg/gre_01.dump000066400000000000000000000102771455030217500227050ustar00rootroot00000000000000# strace hex dump sample # # ip link add mgre0 type gre local 192.168.122.1 remote 192.168.122.60 ttl 16 # # nlmsg header \x84\x00\x00\x00 # length \x10\x00 # type \x05\x06 # flags \x49\x61\x03\x55 # sequence number \x00\x00\x00\x00 # pid # RTNL header \x00\x00 # ifi_family \x00\x00 # ifi_type \x00\x00\x00\x00 # ifi_index \x00\x00\x00\x00 # ifi_flags \x00\x00\x00\x00 # ifi_change # NLA chain \x0a\x00 # len \x03\x00 # type \x6d\x67\x72\x65\x30\x00\x00\x00 \x58\x00 # len \x12\x00 # type \x08\x00 \x01\x00 \x67\x72\x65\x00 \x4c\x00 \x02\x00 \x08\x00 \x04\x00 \x00\x00\x00\x00 \x08\x00 \x05\x00 \x00\x00\x00\x00 \x06\x00 \x02\x00 \x00\x00\x00\x00 \x06\x00 \x03\x00 \x00\x00\x00\x00 \x08\x00 \x06\x00 \xc0\xa8\x7a\x01 \x08\x00 \x07\x00 \xc0\xa8\x7a\x3c \x05\x00 \x0a\x00 \x01\x00\x00\x00 \x05\x00 \x08\x00 \x10\x00\x00\x00 \x05\x00 \x09\x00 \x00\x00\x00\x00 # the response packet \x24\x00\x00\x00\x02\x00\x00\x00\x49\x61\x03\x55\xf5\x2f\x00\x00\x00\x00\x00\x00\x84\x00\x00\x00\x10\x00\x05\x06\x49\x61\x03\x55\x00\x00\x00\x00 # parsed data should match primes below #: application/json [ { "attrs": [ [ "IFLA_IFNAME", "mgre0" ], [ "IFLA_LINKINFO", { "attrs": [ [ "IFLA_INFO_KIND", "gre" ], [ "IFLA_INFO_DATA", { "attrs": [ [ "IFLA_GRE_IKEY", 0 ], [ "IFLA_GRE_OKEY", 0 ], [ "IFLA_GRE_IFLAGS", 0 ], [ "IFLA_GRE_OFLAGS", 0 ], [ "IFLA_GRE_LOCAL", "192.168.122.1" ], [ "IFLA_GRE_REMOTE", "192.168.122.60" ], [ "IFLA_GRE_PMTUDISC", 1 ], [ "IFLA_GRE_TTL", 16 ], [ "IFLA_GRE_TOS", 0 ] ] } ] ] } ] ], "change": 0, "event": "RTM_NEWLINK", "family": 0, "flags": 0, "header": { "error": null, "flags": 1541, "length": 132, "pid": 0, "sequence_number": 1426284873, "type": 16 }, "ifi_type": 0, "index": 0, "state": "down" }, { "event": "NLMSG_ERROR", "header": { "error": null, "flags": 0, "length": 36, "pid": 12277, "sequence_number": 
1426284873, "type": 2 } } ] pyroute2-0.7.11/tests/test_unit/test_nlmsg/iw_info_rsp.dump000066400000000000000000000025631455030217500241450ustar00rootroot00000000000000\x58\x00\x00\x00\x1b\x00\x00\x00\x32\xfa\xdd\x54\xf2\x7b\x00\x2e\x07\x01\x00\x00\x08\x00\x03\x00\x03\x00\x00\x00\x09\x00\x04\x00\x77\x6c\x6f\x31\x00\x00\x00\x00\x08\x00\x01\x00\x00\x00\x00\x00\x08\x00\x05\x00\x02\x00\x00\x00\x0c\x00\x99\x00\x01\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x06\x00\xa4\x4e\x31\x43\x1c\x7d\x00\x00\x08\x00\x2e\x00\x05\x00\x00\x00 # parsed data should match primes below #: application/json [ { "attrs": [ [ "NL80211_ATTR_IFINDEX", 3 ], [ "NL80211_ATTR_IFNAME", "wlo1" ], [ "NL80211_ATTR_WIPHY", 0 ], [ "NL80211_ATTR_IFTYPE", 2 ], [ "NL80211_ATTR_WDEV", 1 ], [ "NL80211_ATTR_MAC", "a4:4e:31:43:1c:7d" ], [ "NL80211_ATTR_GENERATION", 5 ] ], "cmd": 7, "event": "NL80211_CMD_NEW_INTERFACE", "header": { "error": null, "flags": 0, "length": 88, "pid": 771783666, "sequence_number": 1423833650, "type": 27 }, "reserved": 0, "version": 1 } ] pyroute2-0.7.11/tests/test_unit/test_nlmsg/iw_scan_rsp.dump000066400000000000000000001002161455030217500241300ustar00rootroot00000000000000# Bug-Url: https://github.com/svinota/pyroute2/pull/182 # Bug-Url: https://github.com/svinota/pyroute2/issues/183 # sample 1 d8:01:00:00 1c:00:02:00 03:01:00:00 60:02:00:00 22:01:00:00 08:00:2e:00 07:00:00:00 08:00:03:00 0b:00:00:00 0c:00:99:00 01:00:00:00 00:00:00:00 a8:01:2f:00 0a:00:01:00 14:22:db:00 b6:65:00:00 0c:00:03:00 8d:5c:63:ca 0f:00:00:00 a8:00:06:00 00:0c:52:6f 77:61:6e:27 73:20:44:65 73:6b:01:08 8c:12:98:24 b0:48:60:6c 03:01:24:05 04:01:02:00 00:30:14:01 00:00:0f:ac 04:01:00:00 0f:ac:04:01 00:00:0f:ac 02:0c:00:2d 1a:ef:11:1b ff:ff:00:00 00:00:00:00 00:00:00:00 01:00:00:00 00:00:00:00 00:00:00:3d 16:24:05:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:7f 08:04:00:00 00:00:00:00 40:bf:0c:b2 00:80:33:fa ff:00:00:fa ff:00:00:c0 05:01:2a:00 fc:ff:dd:18 00:50:f2:02 01:01:00:00 03:a4:00:00 27:a4:00:00 
42:43:5e:00 62:32:2f:00 0c:00:0d:00 8d:5c:63:ca 0f:00:00:00 a8:00:0b:00 00:0c:52:6f 77:61:6e:27 73:20:44:65 73:6b:01:08 8c:12:98:24 b0:48:60:6c 03:01:24:05 04:01:02:00 00:30:14:01 00:00:0f:ac 04:01:00:00 0f:ac:04:01 00:00:0f:ac 02:0c:00:2d 1a:ef:11:1b ff:ff:00:00 00:00:00:00 00:00:00:00 01:00:00:00 00:00:00:00 00:00:00:3d 16:24:05:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:7f 08:04:00:00 00:00:00:00 40:bf:0c:b2 00:80:33:fa ff:00:00:fa ff:00:00:c0 05:01:2a:00 fc:ff:dd:18 00:50:f2:02 01:01:00:00 03:a4:00:00 27:a4:00:00 42:43:5e:00 62:32:2f:00 06:00:04:00 64:00:00:00 06:00:05:00 31:00:00:00 08:00:02:00 3c:14:00:00 08:00:0c:00 00:00:00:00 08:00:0a:00 ea:01:00:00 08:00:07:00 34:ef:ff:ff # sample 2 d0:01:00:00 1c:00:02:00 03:01:00:00 60:02:00:00 22:01:00:00 08:00:2e:00 07:00:00:00 08:00:03:00 0b:00:00:00 0c:00:99:00 01:00:00:00 00:00:00:00 a0:01:2f:00 0a:00:01:00 14:22:db:00 06:a7:00:00 0c:00:03:00 61:17:62:da 0f:00:00:00 a2:00:06:00 00:00:01:08 8c:12:98:24 b0:48:60:6c 03:01:24:05 04:01:02:00 00:2d:1a:ef 11:1b:ff:ff 00:00:00:00 00:00:00:00 00:00:01:00 00:00:00:00 00:00:00:00 00:3d:16:24 05:04:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:7f:08:04 00:00:00:00 00:00:40:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:1a:00:90 4c:04:08:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:18:00:50 f2:02:01:01 00:00:03:a4 00:00:27:a4 00:00:42:43 5e:00:62:32 2f:00:00:00 0c:00:0d:00 61:17:62:da 0f:00:00:00 a2:00:0b:00 00:00:01:08 8c:12:98:24 b0:48:60:6c 03:01:24:05 04:01:02:00 00:2d:1a:ef 11:1b:ff:ff 00:00:00:00 00:00:00:00 00:00:01:00 00:00:00:00 00:00:00:00 00:3d:16:24 05:04:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:7f:08:04 00:00:00:00 00:00:40:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:1a:00:90 4c:04:08:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:18:00:50 f2:02:01:01 00:00:03:a4 00:00:27:a4 00:00:42:43 5e:00:62:32 2f:00:00:00 06:00:04:00 64:00:00:00 
06:00:05:00 01:00:00:00 08:00:02:00 3c:14:00:00 08:00:0c:00 00:00:00:00 08:00:0a:00 e0:01:00:00 08:00:07:00 1c:f3:ff:ff # sample 3 e0:01:00:00 1c:00:02:00 03:01:00:00 60:02:00:00 22:01:00:00 08:00:2e:00 07:00:00:00 08:00:03:00 0b:00:00:00 0c:00:99:00 01:00:00:00 00:00:00:00 b0:01:2f:00 0a:00:01:00 14:22:db:00 da:05:00:00 0c:00:03:00 df:31:9c:c2 01:00:00:00 aa:00:06:00 00:0e:48:6f 6f:6b:20:27 65:6d:20:48 6f:72:6e:73 01:08:8c:12 98:24:b0:48 60:6c:03:01 24:05:04:01 02:00:00:30 14:01:00:00 0f:ac:04:01 00:00:0f:ac 04:01:00:00 0f:ac:02:0c 00:2d:1a:ef 11:1b:ff:ff 00:00:00:00 00:00:00:00 00:00:01:00 00:00:00:00 00:00:00:00 00:3d:16:24 05:04:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:7f:08:04 00:00:00:00 00:00:40:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:18:00:50 f2:02:01:01 00:00:03:a4 00:00:27:a4 00:00:42:43 5e:00:62:32 2f:00:00:00 0c:00:0d:00 df:31:9c:c2 01:00:00:00 aa:00:0b:00 00:0e:48:6f 6f:6b:20:27 65:6d:20:48 6f:72:6e:73 01:08:8c:12 98:24:b0:48 60:6c:03:01 24:05:04:01 02:00:00:30 14:01:00:00 0f:ac:04:01 00:00:0f:ac 04:01:00:00 0f:ac:02:0c 00:2d:1a:ef 11:1b:ff:ff 00:00:00:00 00:00:00:00 00:00:01:00 00:00:00:00 00:00:00:00 00:3d:16:24 05:04:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:7f:08:04 00:00:00:00 00:00:40:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:18:00:50 f2:02:01:01 00:00:03:a4 00:00:27:a4 00:00:42:43 5e:00:62:32 2f:00:00:00 06:00:04:00 64:00:00:00 06:00:05:00 11:00:00:00 08:00:02:00 3c:14:00:00 08:00:0c:00 00:00:00:00 08:00:0a:00 d6:01:00:00 08:00:07:00 94:f8:ff:ff # sample 4 e0:01:00:00 1c:00:02:00 03:01:00:00 60:02:00:00 22:01:00:00 08:00:2e:00 07:00:00:00 08:00:03:00 0b:00:00:00 0c:00:99:00 01:00:00:00 00:00:00:00 b0:01:2f:00 0a:00:01:00 14:22:db:00 d7:25:00:00 0c:00:03:00 1d:be:f7:67 01:00:00:00 aa:00:06:00 00:0e:48:6f 6f:6b:20:27 65:6d:20:48 6f:72:6e:73 01:08:8c:12 98:24:b0:48 60:6c:03:01 24:05:04:00 02:00:00:30 14:01:00:00 0f:ac:04:01 00:00:0f:ac 04:01:00:00 0f:ac:02:0c 
00:2d:1a:ef 11:1b:ff:ff 00:00:00:00 00:00:00:00 00:00:01:00 00:00:00:00 00:00:00:00 00:3d:16:24 05:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:7f:08:04 00:00:00:00 00:00:40:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:18:00:50 f2:02:01:01 00:00:03:a4 00:00:27:a4 00:00:42:43 5e:00:62:32 2f:00:00:00 0c:00:0d:00 1d:be:f7:67 01:00:00:00 aa:00:0b:00 00:0e:48:6f 6f:6b:20:27 65:6d:20:48 6f:72:6e:73 01:08:8c:12 98:24:b0:48 60:6c:03:01 24:05:04:00 02:00:00:30 14:01:00:00 0f:ac:04:01 00:00:0f:ac 04:01:00:00 0f:ac:02:0c 00:2d:1a:ef 11:1b:ff:ff 00:00:00:00 00:00:00:00 00:00:01:00 00:00:00:00 00:00:00:00 00:3d:16:24 05:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:00:00:00 00:7f:08:04 00:00:00:00 00:00:40:bf 0c:b2:00:80 33:fa:ff:00 00:fa:ff:00 00:c0:05:01 2a:00:fc:ff dd:18:00:50 f2:02:01:01 00:00:03:a4 00:00:27:a4 00:00:42:43 5e:00:62:32 2f:00:00:00 06:00:04:00 64:00:00:00 06:00:05:00 31:00:00:00 08:00:02:00 3c:14:00:00 08:00:0c:00 00:00:00:00 08:00:0a:00 cc:01:00:00 08:00:07:00 10:f5:ff:ff # parsed data should match primes below #: application/x-python-code ( { 'cmd': 34, 'version': 1, 'reserved': 0, 'attrs': [ ('NL80211_ATTR_GENERATION', 7), ('NL80211_ATTR_IFINDEX', 11), ('NL80211_ATTR_WDEV', 1), ( 'NL80211_ATTR_BSS', { 'attrs': [ ('NL80211_BSS_BSSID', '14:22:db:00:b6:65'), ( 'NL80211_BSS_TSF', { 'VALUE': 67820018829, 'TIME': datetime.timedelta( seconds=67820, microseconds=18829 ), }, ), ( 'NL80211_BSS_INFORMATION_ELEMENTS', { 'SSID': b"Rowan's Desk", 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 1 DTIM Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'RSN': { 'version': 1, 'group_cipher': 'CCMP', 'pairwise_cipher': ['CCMP'], 'auth_suites': ['PSK'], 'capabilities': [ '16-PTKSA-RC', '1-GTKSA-RC', ], 'pmkid_ids': None, 'group_mgmt_cipher_suite': None, }, 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 
'RIFS': 0, 'NON_GF_PRESENT': 0, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00" ], }, ), ('NL80211_BSS_BEACON_TSF', 67820018829), ( 'NL80211_BSS_BEACON_IES', { 'SSID': b"Rowan's Desk", 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 1 DTIM Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'RSN': { 'version': 1, 'group_cipher': 'CCMP', 'pairwise_cipher': ['CCMP'], 'auth_suites': ['PSK'], 'capabilities': [ '16-PTKSA-RC', '1-GTKSA-RC', ], 'pmkid_ids': None, 'group_mgmt_cipher_suite': None, }, 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 'RIFS': 0, 'NON_GF_PRESENT': 0, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00" ], }, ), ('NL80211_BSS_BEACON_INTERVAL', 100), ( 'NL80211_BSS_CAPABILITY', { 'VALUE': 49, 'CAPABILITIES': 'ESS Privacy ShortPreamble', }, ), ('NL80211_BSS_FREQUENCY', 5180), ('NL80211_BSS_CHAN_WIDTH', 0), ('NL80211_BSS_SEEN_MS_AGO', 490), ( 'NL80211_BSS_SIGNAL_MBM', { 'VALUE': -4300, 'SIGNAL_STRENGTH': { 'VALUE': -43.0, 'UNITS': 'dBm', }, }, ), ] }, ), ], 'header': { 'length': 472, 'type': 28, 'flags': 2, 'sequence_number': 259, 'pid': 608, 'error': None, }, 'event': 'NL80211_CMD_NEW_SCAN_RESULTS', }, { 'cmd': 34, 'version': 1, 'reserved': 0, 'attrs': [ ('NL80211_ATTR_GENERATION', 7), 
('NL80211_ATTR_IFINDEX', 11), ('NL80211_ATTR_WDEV', 1), ( 'NL80211_ATTR_BSS', { 'attrs': [ ('NL80211_BSS_BSSID', '14:22:db:00:06:a7'), ( 'NL80211_BSS_TSF', { 'VALUE': 68088371041, 'TIME': datetime.timedelta( seconds=68088, microseconds=371041 ), }, ), ( 'NL80211_BSS_INFORMATION_ELEMENTS', { 'SSID': b'', 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 1 DTIM Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 'RIFS': 0, 'NON_GF_PRESENT': 1, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b'\x00\x90L\x04\x08\xbf\x0c\xb2\x00\x803\xfa\xff\x00\x00\xfa\xff\x00\x00\xc0\x05\x01*\x00\xfc\xff', b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00", ], }, ), ('NL80211_BSS_BEACON_TSF', 68088371041), ( 'NL80211_BSS_BEACON_IES', { 'SSID': b'', 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 1 DTIM Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 'RIFS': 0, 'NON_GF_PRESENT': 1, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b'\x00\x90L\x04\x08\xbf\x0c\xb2\x00\x803\xfa\xff\x00\x00\xfa\xff\x00\x00\xc0\x05\x01*\x00\xfc\xff', b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00", ], }, ), 
('NL80211_BSS_BEACON_INTERVAL', 100), ( 'NL80211_BSS_CAPABILITY', {'VALUE': 1, 'CAPABILITIES': 'ESS'}, ), ('NL80211_BSS_FREQUENCY', 5180), ('NL80211_BSS_CHAN_WIDTH', 0), ('NL80211_BSS_SEEN_MS_AGO', 480), ( 'NL80211_BSS_SIGNAL_MBM', { 'VALUE': -3300, 'SIGNAL_STRENGTH': { 'VALUE': -33.0, 'UNITS': 'dBm', }, }, ), ] }, ), ], 'header': { 'length': 464, 'type': 28, 'flags': 2, 'sequence_number': 259, 'pid': 608, 'error': None, }, 'event': 'NL80211_CMD_NEW_SCAN_RESULTS', }, { 'cmd': 34, 'version': 1, 'reserved': 0, 'attrs': [ ('NL80211_ATTR_GENERATION', 7), ('NL80211_ATTR_IFINDEX', 11), ('NL80211_ATTR_WDEV', 1), ( 'NL80211_ATTR_BSS', { 'attrs': [ ('NL80211_BSS_BSSID', '14:22:db:00:da:05'), ( 'NL80211_BSS_TSF', { 'VALUE': 7559983583, 'TIME': datetime.timedelta( seconds=7559, microseconds=983583 ), }, ), ( 'NL80211_BSS_INFORMATION_ELEMENTS', { 'SSID': b"Hook 'em Horns", 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 1 DTIM Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'RSN': { 'version': 1, 'group_cipher': 'CCMP', 'pairwise_cipher': ['CCMP'], 'auth_suites': ['PSK'], 'capabilities': [ '16-PTKSA-RC', '1-GTKSA-RC', ], 'pmkid_ids': None, 'group_mgmt_cipher_suite': None, }, 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 'RIFS': 0, 'NON_GF_PRESENT': 1, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00" ], }, ), ('NL80211_BSS_BEACON_TSF', 7559983583), ( 'NL80211_BSS_BEACON_IES', { 'SSID': b"Hook 'em Horns", 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 1 DTIM 
Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'RSN': { 'version': 1, 'group_cipher': 'CCMP', 'pairwise_cipher': ['CCMP'], 'auth_suites': ['PSK'], 'capabilities': [ '16-PTKSA-RC', '1-GTKSA-RC', ], 'pmkid_ids': None, 'group_mgmt_cipher_suite': None, }, 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 'RIFS': 0, 'NON_GF_PRESENT': 1, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00" ], }, ), ('NL80211_BSS_BEACON_INTERVAL', 100), ( 'NL80211_BSS_CAPABILITY', {'VALUE': 17, 'CAPABILITIES': 'ESS Privacy'}, ), ('NL80211_BSS_FREQUENCY', 5180), ('NL80211_BSS_CHAN_WIDTH', 0), ('NL80211_BSS_SEEN_MS_AGO', 470), ( 'NL80211_BSS_SIGNAL_MBM', { 'VALUE': -1900, 'SIGNAL_STRENGTH': { 'VALUE': -19.0, 'UNITS': 'dBm', }, }, ), ] }, ), ], 'header': { 'length': 480, 'type': 28, 'flags': 2, 'sequence_number': 259, 'pid': 608, 'error': None, }, 'event': 'NL80211_CMD_NEW_SCAN_RESULTS', }, { 'cmd': 34, 'version': 1, 'reserved': 0, 'attrs': [ ('NL80211_ATTR_GENERATION', 7), ('NL80211_ATTR_IFINDEX', 11), ('NL80211_ATTR_WDEV', 1), ( 'NL80211_ATTR_BSS', { 'attrs': [ ('NL80211_BSS_BSSID', '14:22:db:00:d7:25'), ( 'NL80211_BSS_TSF', { 'VALUE': 6039256605, 'TIME': datetime.timedelta( seconds=6039, microseconds=256605 ), }, ), ( 'NL80211_BSS_INFORMATION_ELEMENTS', { 'SSID': b"Hook 'em Horns", 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 0 DTIM Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'RSN': { 'version': 1, 'group_cipher': 'CCMP', 'pairwise_cipher': ['CCMP'], 'auth_suites': ['PSK'], 'capabilities': [ '16-PTKSA-RC', '1-GTKSA-RC', ], 'pmkid_ids': None, 
'group_mgmt_cipher_suite': None, }, 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 'RIFS': 0, 'NON_GF_PRESENT': 0, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00" ], }, ), ('NL80211_BSS_BEACON_TSF', 6039256605), ( 'NL80211_BSS_BEACON_IES', { 'SSID': b"Hook 'em Horns", 'SUPPORTED_RATES': '6.0* 9.0 12.0* 18.0 24.0* 36.0 48.0 54.0 ', 'CHANNEL': 36, 'TRAFFIC INDICATION MAP': 'DTIM Count 0 DTIM Period 2 Bitmap Control 0x0 Bitmap[0] 0x0', 'RSN': { 'version': 1, 'group_cipher': 'CCMP', 'pairwise_cipher': ['CCMP'], 'auth_suites': ['PSK'], 'capabilities': [ '16-PTKSA-RC', '1-GTKSA-RC', ], 'pmkid_ids': None, 'group_mgmt_cipher_suite': None, }, 'HT_OPERATION': { 'PRIMARY_CHANNEL': 36, 'SECONDARY_CHANNEL': 1, 'CHANNEL_WIDTH': '20 or 40 MHz', 'HT_PROTECTION': 'no', 'RIFS': 0, 'NON_GF_PRESENT': 0, 'OBSS_NON_GF_PRESENT': 0, 'DUAL_BEACON': 0, 'DUAL_CTS_PROTECTION': 0, 'STBC_BEACON': 0, 'L_SIG_TXOP_PROT': 0, 'PCO_ACTIVE': 0, 'PCO_PHASE': 0, }, 'VHT_OPERATION': { 'CENTER_FREQ_SEG_1': 42, 'CENTER_FREQ_SEG_2': 42, 'VHT_BASIC_MCS_SET': (255, 252), 'CHANNEL_WIDTH': '80 MHz', }, 'VENDOR': [ b"\x00P\xf2\x02\x01\x01\x00\x00\x03\xa4\x00\x00'\xa4\x00\x00BC^\x00b2/\x00" ], }, ), ('NL80211_BSS_BEACON_INTERVAL', 100), ( 'NL80211_BSS_CAPABILITY', { 'VALUE': 49, 'CAPABILITIES': 'ESS Privacy ShortPreamble', }, ), ('NL80211_BSS_FREQUENCY', 5180), ('NL80211_BSS_CHAN_WIDTH', 0), ('NL80211_BSS_SEEN_MS_AGO', 460), ( 'NL80211_BSS_SIGNAL_MBM', { 'VALUE': -2800, 'SIGNAL_STRENGTH': { 'VALUE': -28.0, 'UNITS': 'dBm', }, }, ), ] }, ), ], 'header': { 'length': 480, 'type': 28, 'flags': 2, 'sequence_number': 259, 'pid': 608, 
'error': None, }, 'event': 'NL80211_CMD_NEW_SCAN_RESULTS', }, ) pyroute2-0.7.11/tests/test_unit/test_nlmsg/test_attr.py000066400000000000000000000013221455030217500233130ustar00rootroot00000000000000import pytest from pyroute2.netlink import nlmsg prime = { 'attrs': ( ('A', 2), ('A', 3), ('A', 4), ('B', {'attrs': (('C', 5), ('D', {'attrs': (('E', 6), ('F', 7))}))}), ) } @pytest.fixture def msg(): msg = nlmsg() msg.setvalue(prime) yield msg def test_get_attr(msg): assert msg.get_attr('A') == 2 assert msg.get_attr('C') is None def test_get_attrs(msg): assert msg.get_attrs('A') == [2, 3, 4] assert msg.get_attrs('C') == [] def test_get_nested(msg): assert msg.get_nested('B', 'D', 'E') == 6 assert msg.get_nested('B', 'D', 'F') == 7 assert msg.get_nested('B', 'D', 'G') is None assert msg.get_nested('C', 'D', 'E') is None pyroute2-0.7.11/tests/test_unit/test_nlmsg/test_map_adapter.py000066400000000000000000000034331455030217500246230ustar00rootroot00000000000000import pytest from pyroute2.netlink import NlaMapAdapter, NlaSpec, nlmsg_atoms, rtnl from pyroute2.netlink.rtnl.ifaddrmsg import ifaddrmsg from pyroute2.netlink.rtnl.marshal import MarshalRtnl sample_data_ipaddr = ( b'L\x00\x00\x00\x14\x00\x02\x00\xff\x00\x00\x00\xfb\x8d\x00\x00\x02\x08' b'\x80\xfe\x01\x00\x00\x00\x08\x00\x01\x00\x7f\x00\x00\x01\x08\x00\x02\x00' b'\x7f\x00\x00\x01\x07\x00\x03\x00lo\x00\x00\x08\x00\x08\x00\x80\x00\x00' b'\x00\x14\x00\x06\x00\xff\xff\xff\xff\xff\xff\xff\xffQ\x00\x00\x00Q\x00' b'\x00\x00' ) class ifaddrmsg_default_decode(ifaddrmsg): # same function will be used both for decode and encode nla_map = NlaMapAdapter( lambda x: NlaSpec(nlmsg_atoms.hex, x, f'IFA_NLA_{x}') ) class ifaddrmsg_dict_decode(ifaddrmsg): # define separate decode / encode adapters nla_map = { 'decode': NlaMapAdapter( lambda x: NlaSpec(nlmsg_atoms.hex, x, f'IFA_NLA_{x}') ), 'encode': None, } @pytest.mark.parametrize( 'nlmsg_class,data', ( (ifaddrmsg_default_decode, sample_data_ipaddr), (ifaddrmsg_dict_decode, 
sample_data_ipaddr), ), ids=['default_decode', 'dict_decode'], ) def test_decode_adapter(nlmsg_class, data): marshal = MarshalRtnl() marshal.msg_map[rtnl.RTM_NEWADDR] = nlmsg_class msgs = tuple(marshal.parse(data)) msg = msgs[0] assert len(msgs) == 1 assert msg.get_attr('IFA_NLA_1') == '7f:00:00:01' # IFA_ADDRESS assert msg.get_attr('IFA_NLA_2') == '7f:00:00:01' # IFA_LOCAL assert msg.get_attr('IFA_NLA_3') == '6c:6f:00' # IFA_LABEL assert ( msg.get_attr('IFA_NLA_6') # IFA_CACHEINFO == 'ff:ff:ff:ff:ff:ff:ff:ff:51:00:00:00:51:00:00:00' ) assert msg.get_attr('IFA_NLA_8') == '80:00:00:00' # IFA_FLAGS pyroute2-0.7.11/tests/test_unit/test_nlmsg/test_marshal.py000066400000000000000000000100721455030217500237720ustar00rootroot00000000000000# required by iw_scan_rsp.dump import datetime # noqa: F401 import json import struct import pytest from pyroute2.common import load_dump from pyroute2.netlink import NLMSG_ERROR from pyroute2.netlink.nl80211 import MarshalNl80211 from pyroute2.netlink.rtnl import RTM_NEWADDR, RTM_NEWLINK from pyroute2.netlink.rtnl.iprsocket import MarshalRtnl def load_sample(sample): with open(sample, 'r') as buf: meta = {} data = load_dump(buf, meta) if 'application/json' in meta: messages = json.loads(meta['application/json']) elif 'application/x-python-code' in meta: messages = eval(meta['application/x-python-code']) else: raise KeyError('sample messages not found') return messages, data def run_using_marshal(sample, marshal): messages, data = load_sample(sample) parsed = tuple(marshal.parse(data)) assert len(parsed) == len(messages) for parsed_msg, sample_value in zip(parsed, messages): sample_msg = type(parsed_msg)() sample_msg.setvalue(sample_value) assert sample_msg == parsed_msg @pytest.mark.parametrize( 'sample,marshal', ( ('test_unit/test_nlmsg/addrmsg_ipv4.dump', MarshalRtnl()), ('test_unit/test_nlmsg/gre_01.dump', MarshalRtnl()), ('test_unit/test_nlmsg/iw_info_rsp.dump', MarshalNl80211()), ('test_unit/test_nlmsg/iw_scan_rsp.dump', 
MarshalNl80211()), ), ) def test_marshal(sample, marshal): return run_using_marshal(sample, marshal) @pytest.mark.parametrize( 'sample,marshal', ( ('test_unit/test_nlmsg/addrmsg_ipv4.dump', MarshalRtnl()), ('test_unit/test_nlmsg/gre_01.dump', MarshalRtnl()), ), ) def test_custom_key(sample, marshal): # the header: # # uint32 length # uint16 type # ... # # e.g.: # 4c:00:00:00:14:00:... # # this test uses: marshal.key_format = 'I' # 4 bytes LE as the key marshal.key_offset = 2 # but with offset 2 marshal.key_mask = 0xFFFF0000 # ignore 2 lower bytes: # # example 1: # offset 2 -> 00:00:14:00 # format I -> 0x140000 # & mask -> 0x140000 # # example 2: # offset 2 -> 01:02:14:00 # format I -> 0x140201 # & mask -> 0x140000 # # fix msg map to use new keys: for key, value in tuple(marshal.msg_map.items()): marshal.msg_map[key << 16] = value # # ok, now should run return run_using_marshal(sample, marshal) @pytest.mark.parametrize( 'sample,marshal', ( ('test_unit/test_nlmsg/addrmsg_ipv4.dump', MarshalRtnl()), ('test_unit/test_nlmsg/gre_01.dump', MarshalRtnl()), ), ) def test_custom_key_fail(sample, marshal): # same as above, but don't fix the map -> must fail marshal.key_format = 'I' marshal.key_offset = 2 marshal.key_mask = 0xFFFF0000 with pytest.raises(AssertionError): return run_using_marshal(sample, marshal) def custom_parser(data, offset, length): return dict( header=dict( zip( ('type', 'flags', 'sequence_number'), struct.unpack_from('HHI', data, offset + 4), ), error=None, ) ) @pytest.mark.parametrize( 'sample,parser,parser_id,msg_seq,msg_type', ( ( 'test_unit/test_nlmsg/addrmsg_ipv4.dump', lambda a, b, c: dict(custom_parser(a, b, c), parser='addr'), 'addr', 258, (RTM_NEWADDR,), ), ( 'test_unit/test_nlmsg/gre_01.dump', lambda a, b, c: dict(custom_parser(a, b, c), parser='link'), 'link', 1426284873, (RTM_NEWLINK, NLMSG_ERROR), ), ), ids=('custom_addr_parser', 'custom_link_parser'), ) def test_custom_parser(sample, parser, parser_id, msg_seq, msg_type): marshal = 
MarshalRtnl() marshal.msg_map = {} marshal.seq_map = {msg_seq: parser} messages, data = load_sample(sample) for msg in marshal.parse(data): assert msg['parser'] == parser_id assert msg['header']['type'] in msg_type pyroute2-0.7.11/tests/test_unit/test_requests/000077500000000000000000000000001455030217500214655ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_unit/test_requests/common.py000066400000000000000000000005671455030217500233370ustar00rootroot00000000000000from pyroute2.requests.main import RequestProcessor class Request(dict): pass class Result(dict): pass def run_test(config, spec, result): processor = RequestProcessor(context=spec, prime=spec) for fspec in config['filters']: processor.apply_filter(fspec['class'](*fspec['argv'])) processor.finalize() assert Result(processor) == result pyroute2-0.7.11/tests/test_unit/test_requests/test_address.py000066400000000000000000000144371455030217500245340ustar00rootroot00000000000000from socket import AF_INET, AF_INET6 import pytest from common import Request, Result, run_test from pyroute2.requests.address import AddressFieldFilter, AddressIPRouteFilter config = { 'filters': ( {'class': AddressFieldFilter, 'argv': []}, {'class': AddressIPRouteFilter, 'argv': ['add']}, ) } ## # # broadcast tests: bool # @pytest.mark.parametrize( 'spec,result', ( ( Request( { 'index': 1, 'address': '10.0.0.1', 'prefixlen': 24, 'broadcast': True, } ), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'prefixlen': 24, 'broadcast': '10.0.0.255', 'family': AF_INET, } ), ), ( Request( { 'index': 1, 'address': '10.0.0.1', 'prefixlen': 24, 'broadcast': False, } ), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'prefixlen': 24, 'family': AF_INET, } ), ), ( Request( { 'index': 1, 'address': '10.0.0.1', 'prefixlen': 28, 'broadcast': '10.0.0.15', } ), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'prefixlen': 28, 'broadcast': '10.0.0.15', 'family': AF_INET, } ), ), ), 
ids=('bool-true', 'bool-false', 'ipv4'), ) def test_add_broadcast(spec, result): return run_test(config, spec, result) ## # # index format tests: int, list, tuple # result = Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'prefixlen': 24, 'family': AF_INET, } ) @pytest.mark.parametrize( 'spec,result', ( ( Request({'index': 1, 'address': '10.0.0.1', 'prefixlen': 24}), result, ), ( Request({'index': [1], 'address': '10.0.0.1', 'prefixlen': 24}), result, ), ( Request({'index': (1,), 'address': '10.0.0.1', 'prefixlen': 24}), result, ), ), ids=('int', 'list', 'tuple'), ) def test_index(spec, result): return run_test(config, spec, result) @pytest.mark.parametrize( 'spec,result', ( ( Request({'index': 1, 'address': '10.0.0.1'}), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'family': AF_INET, 'prefixlen': 32, } ), ), ( Request({'index': 1, 'address': '10.0.0.1', 'prefixlen': 16}), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'family': AF_INET, 'prefixlen': 16, } ), ), ( Request({'index': 1, 'address': '10.0.0.1/24'}), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'family': AF_INET, 'prefixlen': 24, } ), ), ( Request( {'index': 1, 'address': '10.0.0.1', 'prefixlen': '255.0.0.0'} ), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'family': AF_INET, 'prefixlen': 8, } ), ), ( Request({'index': 1, 'address': '10.0.0.1/255.255.255.240'}), Result( { 'index': 1, 'address': '10.0.0.1', 'local': '10.0.0.1', 'family': AF_INET, 'prefixlen': 28, } ), ), ( Request({'index': 1, 'address': 'fc00::1'}), Result( { 'index': 1, 'address': 'fc00::1', 'family': AF_INET6, 'prefixlen': 128, } ), ), ( Request({'index': 1, 'address': 'fc00::1', 'prefixlen': 64}), Result( { 'index': 1, 'address': 'fc00::1', 'family': AF_INET6, 'prefixlen': 64, } ), ), ( Request({'index': 1, 'address': 'fc00::1/48'}), Result( { 'index': 1, 'address': 'fc00::1', 'family': AF_INET6, 'prefixlen': 48, } ), ), ( Request( { 
'index': 1, 'address': 'fc00:0000:0000:0000:0000:0000:0000:0001/32', } ), Result( { 'index': 1, 'address': 'fc00::1', 'family': AF_INET6, 'prefixlen': 32, } ), ), ), ids=( 'ipv4-default', 'ipv4-prefixlen', 'ipv4-split', 'ipv4-prefixlen-dqn', 'ipv4-split-dqn', 'ipv6-default', 'ipv6-prefixlen', 'ipv6-split', 'ipv6-compressed', ), ) def test_family_and_prefix(spec, result): return run_test(config, spec, result) pyroute2-0.7.11/tests/test_unit/test_requests/test_link.py000066400000000000000000000047231455030217500240410ustar00rootroot00000000000000import pytest from common import Request, Result, run_test from pyroute2.requests.link import LinkFieldFilter, LinkIPRouteFilter config_add = { 'filters': ( {'class': LinkFieldFilter, 'argv': []}, {'class': LinkIPRouteFilter, 'argv': ['add']}, ) } config_dump = { 'filters': ( {'class': LinkFieldFilter, 'argv': []}, {'class': LinkIPRouteFilter, 'argv': ['dump']}, ) } result_add = Result( { 'index': 1, 'change': 0, 'flags': 0, 'IFLA_LINKINFO': {'attrs': [['IFLA_INFO_KIND', 'dummy']]}, } ) result_dump = Result({'index': 1, ('linkinfo', 'kind'): 'dummy'}) @pytest.mark.parametrize( 'config,spec,result', ( (config_add, Request({'index': 1, 'kind': 'dummy'}), result_add), (config_add, Request({'index': [1], 'kind': 'dummy'}), result_add), (config_add, Request({'index': (1,), 'kind': 'dummy'}), result_add), (config_dump, Request({'index': 1, 'kind': 'dummy'}), result_dump), (config_dump, Request({'index': [1], 'kind': 'dummy'}), result_dump), (config_dump, Request({'index': (1,), 'kind': 'dummy'}), result_dump), ), ids=[ 'int-add', 'list-add', 'tuple-add', 'int-dump', 'list-dump', 'tuple-dump', ], ) def test_index(config, spec, result): return run_test(config, spec, result) @pytest.mark.parametrize( 'spec,result', ( ( Request({'kind': 'bridge', 'br_stp_state': 1}), Result( { ('linkinfo', 'kind'): 'bridge', ('linkinfo', 'data', 'br_stp_state'): 1, } ), ), ( Request({'kind': 'bond', 'bond_primary': 1}), Result( { ('linkinfo', 'kind'): 
'bond', ('linkinfo', 'data', 'bond_primary'): 1, } ), ), ( Request({'kind': 'vxlan', 'vxlan_id': 1}), Result( { ('linkinfo', 'kind'): 'vxlan', ('linkinfo', 'data', 'vxlan_id'): 1, } ), ), ( Request({'kind': 'fake', 'fake_attr': 1}), Result({('linkinfo', 'kind'): 'fake', 'fake_attr': 1}), ), ), ids=['bridge', 'bond', 'vxlan', 'fake'], ) def test_dump_specific(spec, result): return run_test(config_dump, spec, result) pyroute2-0.7.11/tests/test_unit/test_requests/test_neighbour.py000066400000000000000000000056201455030217500250630ustar00rootroot00000000000000from socket import AF_INET, AF_INET6 import pytest from common import Request, Result, run_test from pyroute2.netlink.rtnl.ndmsg import NUD_FAILED, NUD_PERMANENT from pyroute2.requests.neighbour import ( NeighbourFieldFilter, NeighbourIPRouteFilter, ) config = { 'filters': ( {'class': NeighbourFieldFilter, 'argv': []}, {'class': NeighbourIPRouteFilter, 'argv': ['add']}, ) } result = Result({'ifindex': 1, 'family': AF_INET, 'state': NUD_PERMANENT}) @pytest.mark.parametrize( 'spec,result', ( (Request({'index': 1}), result), (Request({'index': [1]}), result), (Request({'index': (1,)}), result), (Request({'ifindex': 1}), result), (Request({'ifindex': [1]}), result), (Request({'ifindex': (1,)}), result), ), ids=['ix-int', 'ix-list', 'ix-tuple', 'ifx-int', 'ifx-list', 'ifx-tuple'], ) def test_index(spec, result): return run_test(config, spec, result) @pytest.mark.parametrize( 'spec,result', ( ( Request({'ifindex': 1, 'dst': '10.0.0.1'}), Result( { 'ifindex': 1, 'dst': '10.0.0.1', 'family': AF_INET, 'state': NUD_PERMANENT, } ), ), ( Request({'ifindex': 1, 'dst': 'fc00::1'}), Result( { 'ifindex': 1, 'dst': 'fc00::1', 'family': AF_INET6, 'state': NUD_PERMANENT, } ), ), ), ids=['ipv4', 'ipv6'], ) def test_family(spec, result): return run_test(config, spec, result) @pytest.mark.parametrize( 'spec,result', ( ( Request({'ifindex': 1, 'state': 'permanent'}), Result({'ifindex': 1, 'state': NUD_PERMANENT, 'family': AF_INET}), ), 
( Request({'ifindex': 1, 'state': 'failed'}), Result({'ifindex': 1, 'state': NUD_FAILED, 'family': AF_INET}), ), ( Request({'ifindex': 1, 'nud': 'permanent'}), Result({'ifindex': 1, 'state': NUD_PERMANENT, 'family': AF_INET}), ), ( Request({'ifindex': 1, 'nud': 'failed'}), Result({'ifindex': 1, 'state': NUD_FAILED, 'family': AF_INET}), ), ( Request({'ifindex': 1, 'nud': NUD_PERMANENT}), Result({'ifindex': 1, 'state': NUD_PERMANENT, 'family': AF_INET}), ), ( Request({'ifindex': 1, 'nud': NUD_FAILED}), Result({'ifindex': 1, 'state': NUD_FAILED, 'family': AF_INET}), ), ), ids=[ 'str-permanent', 'str-failed', 'nud-permanent', 'nud-failed', 'const-permanent', 'const-failed', ], ) def test_state(spec, result): return run_test(config, spec, result) pyroute2-0.7.11/tests/test_unit/test_requests/test_route.py000066400000000000000000000075421455030217500242440ustar00rootroot00000000000000from socket import AF_INET, AF_INET6 import pytest from common import Request, Result, run_test from pyroute2.requests.route import RouteFieldFilter, RouteIPRouteFilter config = { 'filters': ( {'class': RouteFieldFilter, 'argv': []}, {'class': RouteIPRouteFilter, 'argv': ['add']}, ) } result = Result({'oif': 1, 'iif': 2}) @pytest.mark.parametrize( 'spec,result', ( (Request({'oif': 1, 'iif': 2}), result), (Request({'oif': [1], 'iif': [2]}), result), (Request({'oif': (1,), 'iif': (2,)}), result), ), ids=['int', 'list', 'tuple'], ) def test_index(spec, result): return run_test(config, spec, result) result_dst_ipv4 = Result( { 'dst': '10.0.0.0', 'dst_len': 24, 'family': AF_INET, 'gateway': '10.1.0.1', } ) result_dst_ipv6 = Result( { 'dst': 'fc01:1100::', 'dst_len': 48, 'family': AF_INET6, 'gateway': 'fc00::1', } ) @pytest.mark.parametrize( 'spec,result', ( ( Request({'dst': 'default', 'gateway': '10.0.0.1'}), Result({'gateway': '10.0.0.1', 'family': AF_INET}), ), ( Request({'dst': 'default', 'gateway': 'fc00::1'}), Result({'gateway': 'fc00::1', 'family': AF_INET6}), ), ( Request({'dst': 
'10.0.0.0/24', 'gateway': '10.1.0.1'}), result_dst_ipv4, ), ( Request({'dst': '10.0.0.0/255.255.255.0', 'gateway': '10.1.0.1'}), result_dst_ipv4, ), ( Request({'dst': '10.0.0.0', 'dst_len': 24, 'gateway': '10.1.0.1'}), result_dst_ipv4, ), ( Request({'dst': 'fc01:1100::/48', 'gateway': 'fc00::1'}), result_dst_ipv6, ), ( Request( {'dst': 'fc01:1100::', 'dst_len': 48, 'gateway': 'fc00::1'} ), result_dst_ipv6, ), ), ids=[ 'default-ipv4', 'default-ipv6', 'split-ipv4-int', 'split-ipv4-dqn', 'explicit-ipv4-int', 'split-ipv6-int', 'explicit-ipv6-int', ], ) def test_dst(spec, result): return run_test(config, spec, result) @pytest.mark.parametrize( 'spec,result', ( ( Request({'dst': 'fd12::3', 'dst_len': 128, 'via': ''}), Result({'dst': 'fd12::3', 'dst_len': 128, 'family': AF_INET6}), ), ( Request({'dst': 'fd12::3/128', 'via': ''}), Result({'dst': 'fd12::3', 'dst_len': 128, 'family': AF_INET6}), ), ( Request({'dst': 'fd12::3', 'dst_len': 128, 'newdst': ''}), Result({'dst': 'fd12::3', 'dst_len': 128, 'family': AF_INET6}), ), ( Request({'dst': 'fd12::3/128', 'newdst': ''}), Result({'dst': 'fd12::3', 'dst_len': 128, 'family': AF_INET6}), ), ( Request({'dst': '10.0.0.0', 'dst_len': 24, 'via': ''}), Result({'dst': '10.0.0.0', 'dst_len': 24, 'family': AF_INET}), ), ( Request({'dst': '10.0.0.0/24', 'via': ''}), Result({'dst': '10.0.0.0', 'dst_len': 24, 'family': AF_INET}), ), ( Request({'dst': '10.0.0.0', 'dst_len': 24, 'newdst': ''}), Result({'dst': '10.0.0.0', 'dst_len': 24, 'family': AF_INET}), ), ( Request({'dst': '10.0.0.0/24', 'newdst': ''}), Result({'dst': '10.0.0.0', 'dst_len': 24, 'family': AF_INET}), ), ), ids=[ 'explicit-ipv6-via', 'split-ipv6-via', 'explicit-ipv6-newdst', 'split-ipv6-newdst', 'explicit-ipv4-via', 'split-ipv4-via', 'explicit-ipv4-newdst', 'split-ipv4-newdst', ], ) def test_empty_target(spec, result): return run_test(config, spec, result) 
pyroute2-0.7.11/tests/test_windows/000077500000000000000000000000001455030217500172665ustar00rootroot00000000000000pyroute2-0.7.11/tests/test_windows/test_ipr.py000066400000000000000000000006011455030217500214660ustar00rootroot00000000000000import pytest from pyroute2 import IPRoute @pytest.fixture def ipr(): with IPRoute() as iproute: yield iproute @pytest.mark.parametrize('variant', ('links', 'addr', 'neighbours', 'routes')) def test_list(ipr, variant): for msg in getattr(ipr, f'get_{variant}')(): assert msg['header']['target'] == 'localhost' assert msg['header']['type'] % 2 == 0 pyroute2-0.7.11/tests/utils.py000066400000000000000000000147341455030217500162600ustar00rootroot00000000000000import errno import os import platform import pwd import re import stat import subprocess import sys import uuid from socket import AF_INET, AF_INET6 import netaddr import pytest from pyroute2 import config from pyroute2.iproute.linux import IPRoute try: import httplib except ImportError: import http.client as httplib dtcd_uuid = str(uuid.uuid4()) # check the dtcd try: cx = httplib.HTTPConnection('localhost:7623') cx.request('GET', '/v1/network/') cx.getresponse() has_dtcd = True except: has_dtcd = False supernet = { AF_INET: netaddr.IPNetwork('172.16.0.0/12'), AF_INET6: netaddr.IPNetwork('fdb3:84e5:4ff4::/48'), } network_pool = { AF_INET: list(supernet[AF_INET].subnet(24)), AF_INET6: list(supernet[AF_INET6].subnet(64)), } allocations = {} family_url = {AF_INET: 'ipv4', AF_INET6: 'ipv6'} def allocate_network(family=AF_INET): global dtcd_uuid global network_pool global allocations network = None try: cx = httplib.HTTPConnection('localhost:7623') cx.request( 'POST', '/v1/network/%s/' % family_url[family], body=dtcd_uuid ) resp = cx.getresponse() if resp.status == 200: network = netaddr.IPNetwork(resp.read().decode('utf-8')) cx.close() except Exception: pass if network is None: network = network_pool[family].pop() allocations[network] = True return network def free_network(network, 
family=AF_INET): global network_pool global allocations if network in allocations: allocations.pop(network) network_pool[family].append(network) else: cx = httplib.HTTPConnection('localhost:7623') cx.request( 'DELETE', '/v1/network/%s/' % family_url[family], body=str(network) ) cx.getresponse() cx.close() def conflict_arch(arch): if platform.machine().find(arch) >= 0: pytest.skip('conflict with architecture %s' % (arch)) def kernel_version_ge(major, minor): # True if running kernel is >= X.Y if config.kernel[0] > major: return True if config.kernel[0] < major: return False if minor and config.kernel[1] < minor: return False return True def require_kernel(major, minor=None): if not kernel_version_ge(major, minor): pytest.skip('incompatible kernel version') def require_python(target): if sys.version_info[0] != target: pytest.skip('test requires Python %i' % target) def require_8021q(): try: os.stat('/proc/net/vlan/config') except OSError as e: # errno 2 'No such file or directory' if e.errno == 2: pytest.skip('missing 8021q support, or module is not loaded') raise def require_bridge(): with IPRoute() as ip: try: ip.link('add', ifname='test_req', kind='bridge') except Exception: pytest.skip('can not create ') idx = ip.link_lookup(ifname='test_req') if not idx: pytest.skip('can not create ') ip.link('del', index=idx) def require_bond(): with IPRoute() as ip: try: ip.link('add', ifname='test_req', kind='bond') except Exception: pytest.skip('can not create ') idx = ip.link_lookup(ifname='test_req') if not idx: pytest.skip('can not create ') ip.link('del', index=idx) def require_user(user): if bool(os.environ.get('PYROUTE2_TESTS_RO', False)): pytest.skip('read-only tests requested') if pwd.getpwuid(os.getuid()).pw_name != user: pytest.skip('required user %s' % (user)) def require_executable(name): try: with open(os.devnull, 'w') as fnull: subprocess.check_call(['which', name], stdout=fnull, stderr=fnull) except Exception: pytest.skip('required %s not found' % (name)) def 
remove_link(name): if os.getuid() != 0: return with open(os.devnull, 'w') as fnull: subprocess.call( ['ip', 'link', 'del', 'dev', name], stdout=fnull, stderr=fnull ) while True: links = get_ip_link() if name not in links: break def create_link(name, kind): if os.getuid() != 0: return subprocess.call(['ip', 'link', 'add', 'dev', name, 'type', kind]) for i in range(20): links = get_ip_link() if name in links: return raise Exception("interface not created") def _check_output(*argv): # we can not use check_output, as it does not exist in 2.6 process = subprocess.Popen(argv, stdout=subprocess.PIPE) ret = process.communicate() return ret[0].decode('utf-8').split('\n') def grep(command, pattern=None): out = _check_output(*command.split()) ret = [] reg = re.compile(pattern) for string in out: if reg.search(string): ret.append(string) return ret def get_ip_addr(interface=None): argv = ['ip', '-o', 'ad'] if interface: argv.extend(['li', 'dev', interface]) out = _check_output(*argv) ret = [] for string in out: fields = string.split() if len(fields) >= 5 and fields[2][:4] == 'inet': ret.append(fields[3]) return ret def get_ip_brd(interface=None): argv = ['ip', '-o', 'ad'] if interface: argv.extend(['li', 'dev', interface]) out = _check_output(*argv) ret = [] for string in out: fields = string.split() if len(fields) >= 5 and fields[4] == 'brd': ret.append(fields[5]) return ret def get_ip_link(): ret = [] out = _check_output('ip', '-o', 'li') for string in out: fields = string.split() if len(fields) >= 2: ret.append(fields[1][:-1].split('@')[0]) return ret def get_ip_default_routes(): ret = [] out = _check_output('ip', '-4', 'ro') for string in out: if 'default' in string: ret.append(string) return ret def get_ip_rules(proto='-4'): ret = [] out = _check_output('ip', proto, 'rule', 'show') for string in out: if len(string): ret.append(string) return ret def count_socket_fds(): pid_fd = '/proc/%s/fd' % os.getpid() sockets = 0 for fd in os.listdir(pid_fd): try: if 
stat.S_ISSOCK(os.stat(os.path.join(pid_fd, fd)).st_mode): sockets += 1 except OSError as e: if e.errno != errno.ENOENT: raise return sockets pyroute2-0.7.11/util/000077500000000000000000000000001455030217500143505ustar00rootroot00000000000000pyroute2-0.7.11/util/aafigure_mapper.py000066400000000000000000000011741455030217500200540ustar00rootroot00000000000000import contextlib import io import sys ret = io.StringIO() with contextlib.ExitStack() as ctx: map_file = ctx.enter_context(open(sys.argv[1], 'r')) img_file = ctx.enter_context(open(sys.argv[2], 'r')) mapping = { key.strip(): value.strip() for (key, value) in [x.split('|') for x in map_file.readlines()] } for line in img_file.readlines(): if 'a href' not in line: for key, value in mapping.items(): line = line.replace(key, f' {key}') ret.write(line) with open(sys.argv[2], 'w') as img_file: img_file.write(ret.getvalue()) pyroute2-0.7.11/util/aafigure_mapper.sh000077500000000000000000000001751455030217500200410ustar00rootroot00000000000000#!/usr/bin/env bash find docs \ -name 'aafig-*svg' \ -exec python util/aafigure_mapper.py docs/aafigure.map '{}' \; pyroute2-0.7.11/util/find_python.sh000077500000000000000000000033021455030217500172260ustar00rootroot00000000000000#!/usr/bin/env bash # # Utility to find Python # function list_pythons() { # # List all python binaries/shims/links in a directory $1 # ls -1 $1/python* 2>/dev/null | grep -E 'python[0-9.]+$' } function check_valid_python() { # # Return "$VERSION $1" for $1 if it returns a valid version string # and has the required modules: ensurepip # # Note on versions: X.Y.Z... => XY, e.g.: # 3.6.10 -> 36 # 3.10.1b1 -> 310 # # This is required to sort versions correctly. The last version # byte is ignored. # for MODULE in ensurepip sqlite3; do $1 -c "import $MODULE" >/dev/null 2>&1 || return done VERSION=$( $1 -V 2>/dev/null |\ grep -E '^Python [0-9a-z.]+$' |\ sed 's/Python \([3-9]\.[0-9]\+\).*$/\1/;s/\.//' ) if [ ! 
-z "$VERSION" ]; then echo $VERSION $1 fi } function list_valid_pythons() { # # Filter only valid Pythons in a directory $1, ignoring pyenv shims # not pointing to an installed Python binary. # for PYTHON in $( list_pythons $1 ); do PYTHON=$( check_valid_python $PYTHON ) if [ ! -z "$PYTHON" ]; then echo $PYTHON fi done } function iterate_path() { # # Iterate dirs in the $PATH variable, sorting Python versions # within each directory. # for DIR in $( echo $PATH | sed 's/:/ /g' ); do list_valid_pythons $DIR | sort -r -n done } # # Take the first available Python with the highest version, respecting # the $PATH variable. # # If operating in a venv, it will return the venv Python, despite the # higher version may be available in the system directories. # iterate_path | head -1 | cut -d \ -f 2 pyroute2-0.7.11/util/imports_dict.awk000066400000000000000000000004151455030217500175540ustar00rootroot00000000000000/^[[:space:]]+pr2modules/ { next } /^[[:space:]]+[[:alpha:]]/ { deps[$1][key]++ } /^[[:alpha:]]+/ { key = gensub(":", "", "g", $1) } END { for (i in deps) { print(i); for (k in deps[i]) { print("\t"k); }; }; } pyroute2-0.7.11/util/make_lab_templates.py000077500000000000000000000023071455030217500205400ustar00rootroot00000000000000#!/usr/bin/env python import pathlib import sys from docutils.core import publish_parts from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader('lab/_templates/')) # js template template = env.get_template('conf.js') with open('lab/_static/conf.js', 'w') as f: f.write(template.render(distfile=sys.argv[1])) print('created lab/_static/conf.js') # html template template = env.get_template('form_template.html') root = pathlib.Path('examples/lab') for example in root.iterdir(): if not example.is_dir(): continue readme = publish_parts( example.joinpath('README.rst').read_text(), writer_name='html' )['html_body'] setup = example.joinpath('setup.py').read_text() task = example.joinpath('task.py').read_text() check 
= '' with example.joinpath('check.py').open('r') as f: for line in f.readlines(): if 'import' not in line: check += line name = example.name with open(f'lab/{name}.html', 'w') as f: f.write( template.render( readme=readme, setup=setup, task=task, check=check, name=name ) ) print(f'created lab/{name}.html') pyroute2-0.7.11/util/update_version.py000066400000000000000000000030241455030217500177500ustar00rootroot00000000000000#!/usr/bin/env python import subprocess from pathlib import Path version_module = "pyroute2/config/version.py" version_output_file = "VERSION" version_input_file = "VERSION" def get_project_version(): """ Get the project version 1. fetch version from git 2. if not available, fallback to the version file in the repo """ version = None try: git_top_level = Path( subprocess.check_output( ("git", "rev-parse", "--show-toplevel"), stderr=subprocess.DEVNULL, ) .decode("utf-8") .strip() ) pyroute2_top_level = Path(__file__).parent.parent.absolute() # Only retrieve the git description from the pyroute2 directory if git_top_level == pyroute2_top_level: version = subprocess.check_output( ("git", "describe"), stderr=subprocess.DEVNULL ).decode("utf-8") except (FileNotFoundError, subprocess.CalledProcessError): pass if version is None: with open(version_input_file, "r") as f: version = f.read() version = version.strip().split("-") if len(version) > 1: version = "{version[0]}.post{version[1]}".format(**locals()) else: version = version[0] return version if __name__ == "__main__": version = get_project_version() with open(version_module, "w") as f: f.write('__version__ = "%s"\n' % version) with open(version_output_file, "w") as f: f.write("%s\n" % version)