././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9623048 networking_bagpipe-22.0.0/0000775000175000017500000000000000000000000015474 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/.coveragerc0000664000175000017500000000016100000000000017613 0ustar00zuulzuul00000000000000[run] branch = True source = networking_bagpipe omit = networking_bagpipe/tests/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/.gitmodules0000664000175000017500000000000000000000000017637 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/.mailmap0000664000175000017500000000013000000000000017107 0ustar00zuulzuul00000000000000# Format is: # # ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/.pre-commit-config.yaml0000664000175000017500000000255500000000000021764 0ustar00zuulzuul00000000000000--- default_language_version: # force all unspecified python hooks to run python3 python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.5.0 hooks: - id: trailing-whitespace - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' - id: check-byte-order-marker - id: check-executables-have-shebangs - id: check-merge-conflict - id: debug-statements - id: check-yaml - repo: https://github.com/lucas-c/pre-commit-hooks rev: v1.5.4 hooks: - id: remove-tabs exclude: '.*\.(svg)$' - repo: https://opendev.org/openstack/hacking rev: 6.1.0 hooks: - id: hacking additional_dependencies: ['neutron-lib'] exclude: '^(doc|releasenotes|tools)/.*$' - repo: local hooks: - id: flake8 name: flake8 
additional_dependencies: - hacking>=6.1.0,<6.2.0 - neutron-lib language: python entry: flake8 files: '^.*\.py$' exclude: '^(doc|releasenotes|tools)/.*$' # todo(slaweq): enable pylint check once all issues in the current code will # be solved # - id: pylint # name: pylint # entry: pylint # files: ^networking_bagpipe/ # language: system # types: [python] # args: ['--rcfile=.pylintrc', '--output-format=colorized'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/.pylintrc0000664000175000017500000000523000000000000017341 0ustar00zuulzuul00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. ignore=.git,tests [MESSAGES CONTROL] # NOTE(gus): This is a long list. A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise locally-disabled, # "E" Error for important programming issues (likely bugs) access-member-before-definition, no-member, no-method-argument, no-self-argument, possibly-used-before-assignment, # "W" Warnings for stylistic problems or minor programming issues abstract-method, arguments-differ, attribute-defined-outside-init, bad-indentation, broad-except, dangerous-default-value, expression-not-assigned, fixme, global-statement, not-callable, protected-access, redefined-builtin, redefined-outer-name, signature-differs, super-init-not-called, unpacking-non-sequence, unused-argument, unused-import, unused-variable, # "C" Coding convention violations invalid-name, len-as-condition, missing-docstring, superfluous-parens, # "R" Refactor recommendations duplicate-code, inconsistent-return-statements, no-else-return, too-few-public-methods, too-many-ancestors, 
too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-public-methods, too-many-return-statements, too-many-statements [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use oslo_serialization.jsonutils json [REPORTS] # Tells whether to display a full report or only the messages reports=no ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/.stestr.conf0000664000175000017500000000012000000000000017736 0ustar00zuulzuul00000000000000[DEFAULT] test_path=${OS_TEST_PATH:-./networking_bagpipe/tests/unit} top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/.zuul.yaml0000664000175000017500000001177200000000000017445 0ustar00zuulzuul00000000000000- project: templates: - openstack-python3-jobs-neutron - publish-openstack-docs-pti - release-notes-jobs-python3 - check-requirements - periodic-stable-jobs-neutron check: jobs: - 
openstack-tox-pep8: required-projects: - openstack/horizon - openstack/networking-bgpvpn - openstack/networking-sfc - openstack-tox-docs: required-projects: - openstack/horizon - openstack/networking-bgpvpn - openstack/networking-sfc - openstack-tox-py39: required-projects: &bagpipe_required_projects - openstack/neutron - openstack/horizon - openstack/networking-bgpvpn - openstack/networking-sfc - openstack-tox-py312: # from openstack-python3-jobs template required-projects: *bagpipe_required_projects - openstack-tox-cover: required-projects: *bagpipe_required_projects - neutron-tempest-plugin-bgpvpn-bagpipe: irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ - networking-bagpipe-dsvm-fullstack: voting: false - networking-bagpipe-tempest gate: jobs: - openstack-tox-pep8: required-projects: - openstack/horizon - openstack/networking-bgpvpn - openstack/networking-sfc - openstack-tox-docs: required-projects: - openstack/horizon - openstack/networking-bgpvpn - openstack/networking-sfc - openstack-tox-py39: required-projects: *bagpipe_required_projects - openstack-tox-py312: required-projects: *bagpipe_required_projects - neutron-tempest-plugin-bgpvpn-bagpipe: irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ - networking-bagpipe-tempest periodic-stable: jobs: - openstack-tox-docs: required-projects: *bagpipe_required_projects - openstack-tox-py39: required-projects: *bagpipe_required_projects periodic-weekly: jobs: - openstack-tox-py312: # from openstack-python3-jobs template required-projects: *bagpipe_required_projects - openstack-tox-py311: required-projects: *bagpipe_required_projects - openstack-tox-py312-with-oslo-master: required-projects: *bagpipe_required_projects - neutron-tempest-plugin-bgpvpn-bagpipe - networking-bagpipe-tempest experimental: jobs: - openstack-tox-py312-with-oslo-master: required-projects: *bagpipe_required_projects - job: name: networking-bagpipe-tempest parent: tempest-full-py3 nodeset: openstack-single-node-jammy 
timeout: 10800 required-projects: - openstack/tempest - openstack/neutron - openstack/networking-bagpipe vars: devstack_services: # Disable OVN services ovn-controller: false ovn-northd: false ovs-vswitchd: false ovsdb-server: false q-ovn-metadata-agent: false # Enable Neutron services that are not used by OVN br-ex-tcpdump: true br-int-flows: true q-agt: true q-dhcp: true q-l3: true q-meta: true q-metering: true devstack_localrc: Q_AGENT: openvswitch Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch Q_ML2_TENANT_NETWORK_TYPE: vxlan devstack_local_conf: post-config: /$NEUTRON_CORE_PLUGIN_CONF: ml2: tenant_network_types: vxlan,vlan tempest_concurrency: 4 # TODO(lajoskatona): On Ubuntu Focal some volume attach tests are # failing. When https://bugs.launchpad.net/nova/+bug/1882521 is solved # this list can be removed. tempest_exclude_regex: "\ (^tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest)|\ (^tempest.api.compute.servers.test_server_rescue.ServerRescueNegativeTestJSON)|\ (^tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest)|\ (^tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached)|\ (^tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON.test_rescued_vm_detach_volume)|\ (^tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume)" irrelevant-files: - ^(test-|)requirements.txt$ - ^setup.cfg$ - job: name: networking-bagpipe-dsvm-fullstack parent: neutron-fullstack timeout: 7800 required-projects: - openstack/neutron - openstack/networking-bgpvpn - openstack/networking-sfc vars: tox_envlist: dsvm-fullstack project_name: networking-bagpipe ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/AUTHORS0000664000175000017500000000503300000000000016545 
0ustar00zuulzuul00000000000000Akihiro Motoki Alex Katz Andreas Jaeger Ann Kamyshnikova Armando Migliaccio Bernard Cafarelli Boden R Brian Haley Cao Xuan Hoang Corey Bryant Corinne SAINT JALME Doug Hellmann Ekaterina Chernova Előd Illés Flavio Percoco Ghanshyam Mann Henry Gessau Henry Gessau Hong Hui Xiao Ian Wienand Ihar Hrachyshka Jeremy Stanley Le Hou Lucian Petrut Luke Hinds Mathieu Rohon Monty Taylor Nate Johnston Ngo Quoc Cuong Omar Sanhaji OpenStack Release Bot Pavlo Shchelokovskyy Reedip Rodolfo Alonso Hernandez Sean McGinnis Slawek Kaplonski SongmingYan Swapnil Kulkarni (coolsvap) Takashi Kajinami The Gitter Badger Thomas Morin Thomas Morin Thomas Morin Tony Xu Tuan Do Anh Valentin Chassignol Vieri <15050873171@163.com> Vu Cong Tuan Xingyu Pan Yannick Thomas YuehuiLei YuehuiLei blue55 chenxing elajkat gugug janonymous junbo likui mathieu-rohon melissaml pengyuesheng qinchunhua shanyunfan33 tmmorin venkatamahesh ythomas1 zhangboye zhouguowei ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/CONTRIBUTING.rst0000664000175000017500000000103300000000000020132 0ustar00zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: https://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/networking-bagpipe ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/ChangeLog0000664000175000017500000012661500000000000017261 0ustar00zuulzuul00000000000000CHANGES ======= 22.0.0 ------ * Drop of\_interface option * Drop remaining code related to linux bridge driver * Uncap psutil * Drop removed tables for linux bridge plugin * CFG: add help text for OVS dataplane driver cfg options * Remove unused Babel * Remove LinuxBridge related code * pep8: Issue on Noble (U. 24.04) with pylint * tests: test\_db\_base\_plugin\_v2.py was moved to common * reno: Update master for unmaintained/2023.1 * pyupgrade changes for Python3.9+ * Remove translation sections from setup.cfg * Drop unused horizon from requirements * Bump actual minimum python version * Treat items of [sfc\_bagpipe] rtnn as Integer * Fix inconsistent type of hash\_method\_param * Remove unnecessary main function * Use common eventlet helper from neutron * Remove workaround for eventlet < 0.27.0 * Update jobs based on testing runtime for 2025.1 * [sqlalchemy-20] Remove CI job "networking-bagpipe-openstack-tox-py310-with-sqlalchemy-main" * Update master for stable/2024.2 21.0.0 ------ * Doc: remove sphinxcontrib-\*diag from doc dependencies * Add pyproject.toml to support pip 23.1 * Add os\_ken\_app to EVPN OVSDataplaneDriver * Add pre-commit configuration * Remove executable from python files which don't really needs it * Fix trailing whitespaces and replace tabs with 4 spaces * do not use str(url) to stringify a URL for subsequent use * reno: Update master for unmaintained/zed * rest\_attach: encode POST body in UTF-8 * Update jobs based on testing runtime for 2024.2 * Update master for stable/2024.1 20.0.1 ------ * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * 
Remove the "new\_facade" parameter that is no longer needed * Bump hacking * tox: Drop envdir * reno: Update master for unmaintained/yoga * coveragerc: Remove reference to non-existant path * Add python3.10 & 3.11 support in testing runtime * Drop b1 from min versions in requirements.txt * py311: add required projects to py311 job and add it to weekly * Update master for stable/2023.2 19.0.0 ------ * Unit tests: replace the deprecated assertDictContainsSubset call * [sqlalchemy-20] Add the remaining context wrappers * SQLAlchemy: add context wrappers to SFC driver * Fix bindep.txt for python 3.11 job(Debian Bookworm) * CI: Change focal nodeset to jammy * [sqlalchemy-20]: remove subtransactions=True * Fix issues due to rcent RBAC changes and removal of neutron.debug * CI: Add periodic weekly job with sqlalchemy main * Implement "brctl" and "bridge" using oslo.privsep * Update master for stable/2023.1 18.0.0 ------ * Change IPDB to NDB * Tox4: add allowlist\_externals where necessary * CI: Add openstack-tox-py39-with-oslo-master to periodic weekly queue * Add required projects where necessary * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed 17.0.0 ------ * Adopt to latest VlanManager and oslo.db changes * Fix .join() issue for bytes type * CI: Add required\_projects to weekly py39 job * Fix imports to align with neutron\_lib constants changes * Remove "distutils" library * py310: Add required-projects list to py310 job * Tests: fix requirements for unit tests * Py3: Import urllib properly * Update python testing as per zed cycle teting runtime * Drop lower-constraints.txt and its testing * CI: add required projects for unit test jobs * Add weekly jobs * Add Python3 zed unit tests * Update master for stable/yoga 16.0.0 ------ * Update python testing classifier * Use TOX\_CONSTRAINTS\_FILE * remove unicode from code * Add Python3 yoga unit tests * Update master for stable/xena 15.0.0 ------ * Changed minversion in tox to 
3.18.0 * Follow pyroute2 changes * Explicitly set job networking-bagpipe-tempest to ML2/OVS * Switch testing to Xena testing runtime * setup.cfg: Replace dashes with underscores * Update master for stable/wallaby * Update master for stable/wallaby 14.0.0 ------ * Set secure mode for mpls bridge * Switch to new rolevar for run-temepst role * Implement "modprobe" using oslo.privsep * Add privsep boilerplate for bagpipe * Add Python3 wallaby unit tests * Update master for stable/victoria 13.0.0 ------ * Change bagpipe jobs to focal * Remove the unused coding style modules * Switch from unittest2 compat methods to Python 3.x methods * Fix pep8 job * Switch to hacking 3.0.1 * Drop py37 testing * Remove usage of six library * Switch to newer openstackdocstheme and reno versions * Add requirements.txt to docs deps * Monkey patch original current\_thread \_active * Remove the dependency on the "mock" package * Bump default tox env from py37 to py38 * Add py38 package metadata * Add Python3 victoria unit tests * Update master for stable/ussuri 12.0.0 ------ * Fix lower-constraints tox target * Cleanup py27 support * Switch fullstack job to Zuulv3 syntax * Remove bagpipe-bgp's deprecated action CLI option * Make releasenotes generation working again * Fix gate failure 12.0.0.0b1 ---------- * Remove references for unittest2 * Drop python 2 support and testing * Update and replace http with https for doc links * Fix the url errors * use object common\_types from neutron-lib * bagpipe-bgp: cleanly ignore RTC route of unsupported type * bagpipe-bgp: fix a broken looking-glass path * use callback payloads for AGENT OVS\_RESTARTED events * Update the constraints url * Force copy of dict items in "for" loop * Fix TypeError when calling join on bytes sequence * Update master for stable/train * PDF documentation build 11.0.0 ------ * Fix fullstack job * Ensure to python3 as basepython in all tox envs * Migrate bagpipe legacy tempest jobs to zuulv3 and py3 * Upgrade pylint to a 
version that works with python3 * Blacklist sphinx 2.1.0 (autodoc bug) * Add Python 3 Train unit tests * Add local bindep.txt * use AuthenticIPNetwork from neutron-lib * Update lower-constraints to valid with recent changes in Neutron * Switch to native openflow implementation * Use opendev repository * OpenDev Migration Patch * Dropping the py35 testing * Replace openstack.org git:// URLs with https:// * Update master for stable/stein * Remove unnecessary dependencies from requirements.txt * bagpipe-bgp: fix IPVPN OVS driver in patch port case * [Fullstack] Don't compile ovs kernel module before tests 10.0.0 ------ * add python 3.7 unit test job * fix lower constraints and add required projects * Consume networking-sfc stein b1 * stop using common db mixin * stop using common db mixin methods * use neutron-lib for model\_query * agent extension: smoother handling of bagpipe-bgp unavailability * Change openstack-dev to openstack-discuss * Update min tox version to 2.0 * Remove openstack-tox-py35-with-neutron-lib-master * Increment versioning with pbr instruction * Remove extra publish-openstack-python-branch-tarball job * build universal wheels * use common rpc and exceptions from neutron-lib * opt in for neutron-lib consumption patches * add local tox targets for pep8 and py3 * Update Zuul configuration * bgpvpn: override NORMAL action in ACCEPTED\_EGRESS\_TRAFFIC\_NORMAL\_TABLE * add python 3.6 unit test job * switch documentation job to new PTI * adjust requirements * import zuul job settings from project-config * Update neutron import * Update reno for stable/rocky 9.0.0 ----- * update requirements for neutron-lib 1.18.0 * bagpipe-bgp: Use bitwise matches for port range 9.0.0.0b3 --------- * add release notes for IPVPN OVS driver refactoring * Correct "coverage package name" in tox.ini * Updating required neutron version * bagpipe-bgp: Add arp\_responder in config files * Add release notes link in README * Optimize the link address in docs * switch to stestr 
and requirements updates * bagpipe-bgp: Use Neutron ovs\_lib for MPLS OVS * sfc: FlowSpec routes readvertisement update * cleanup setup.cfg (remove CLI for obsoleted ML2 specific agent) * bagpipe-bgp: Update object lifecycle manager * bagpipe-bgp: allow looking glass from a host wo config 9.0.0.0b2 --------- * Fix README setup.py check error * bagpipe-bgp: Update identifier allocator * bagpipe-bgp: Update object lifecycle managers * load neutron objects using neutron-lib * dashboard: use new neutronclient * fix BGPVPN OVO unit test * sfc: update for FlowSpec routes readvertisement * doc: typo fixes * bagpipe-bgp: Common root helper config copy * make bagpipe-bgp port easily changed * devstack: support non-legacy neutron * bagpipe-bgp: minor looking-glass simplification * bagpipe-bgp: handle vif\_plug/unplug with no IP as wildcard * bagpipe-bgp: ID allocator to reuse an id as late as possible * bagpipe-bgp: API error reporting and logging improvements * bagpipe-bgp: strictly avoid having same VNI in two VPN instances * sfc: Move configuration parameters * sfc: Correctly handle BaGPipePortHops OVO in agent 9.0.0.0b1 --------- * bagpipe ML2: reuse network VNI * bagpipe ml2: properly handle deleted/down/non-existing ports * bagpipe-bgp: Add identifier allocator * bagpipe-bgp: EVPN linuxbridge driver, avoid except when bridge is gone * bagpipe-bgp: EVPN linuxbridge driver, catch unplug exception * bagpipe-bgp: logging cleanups * bagpipe-bgp: fix broken logging when IP prefix not provided * bagpipe-bgp: Add object lifecycle manager * devstack: use neutron-bagpipe-\* for service names * bagpipe-bgp: compare\_ecmp and default LOCAL\_PREF * bagpipe-bgp: IPVPN MPLS OVS driver, silently ignore re-removal * Trivial typo fix * bagpipe-bgp: make it optional to provide an IP for a port * bagpipe-bgp: Add FlowSpec route cleanup assist * Use ALIAS instead of LABEL * Updated from global requirements * fix typos in documentation * add lower-constraints job * Updated from global 
requirements * Avoid tox\_install.sh * use LOG.exception instead of traceback * Updated from global requirements * Move neutron to requirements.txt * Updated from global requirements * Updated from global requirements * sfc: update bagpipe driver entry point * Update links in README * bagpipe-bgp: update doc and conf template * do not use tools/tox\_install.sh for docs * bagpipe-bgp: Add common dataplane\_utils module * use common agent topics from neutron-lib * bagpipe-bgp: Add FlowSpec routes readvertisement * bagpipe-bgp: Always remove readvertised route * documentation update related to Queens features * bagpipe ml2: agent extension, do not modify device\_details on callbacks * release note for improved fallback mechanism * bgpvpn: take admin\_state\_up into account * bagpipe-bgp: Correct default route handling * Provide missing release notes * Add missing i18n \_ import * bgpvpn: improve redirect/fallback mechanism * bagpipe-l2 agent extension must only support VXLAN * bagpipe-bgp: delay VPN instance startup until after first vif\_plugged * bagpipe-bgp: fix an E-VPN OVS unit test * bagpipe-bgp: EVPN OVS driver, rootwrap config fix * Update reno for stable/queens 8.0.0 ----- * bagpipe-bgp: EVPN OVS driver fixes * devstack: prepare sudoers and rootwrap setup * Use new facade for OVO objects * bagpipe-bgp: fix dummy dataplane drivers * bagpipe-bgp: OVS support for EVPN * bagpipe-bgp: safeguard against instance id > 2\*\*32 * SFC agent extension: Update unit tests * bgpvpn: support for port association 'bgpvpn' routes * bgpvpn: use directional per-port attachments for prefix routes * bagpipe-bgp: support directionnal attachments * SFC agent extension: Use bulk\_pull RPC * SFC: Files tree reorganization * SFC: Agent extension fixes * bagpipe-bgp: VPNInstance and VRF improvements * bgpvpn: provide 'descriptions' in agent extension * bagpipe-bgp: support VPN instance and attachment descriptions * bagpipe-bgp: improve update of export RTs * Updated from global 
requirements * requirements.txt hints for deps managed in tools/tox\_install.sh * bgpvpn: improve OVOs * Updated from global requirements * Fix SFC agent bug when calling do\_port\_plug\_refresh * bgpvpn: agent extension, do not skip processing when RTs are empty * Add SFC support * bagpipe-bgp: fix localpref comparison * bgpvpn: agent extension, support API-defined VNI * bagpipe ML2: remove the route\_target type driver 8.0.0.0b3 --------- * bgpvpn: agent extension support for Port advertise\_fixed\_ips * bagpipe-bgp: ensure instance id uniqueness * bgpvpn: agent extension support for local\_pref * minor logging fix * bgpvpn: agent extension, fix port association delete * bagpipe-bgp: EVPN/VXLAN interop fix * Updated from global requirements * bagpipe-bgp: fix worker cleanup * bgpvpn: add 'local\_pref' to OVO BGPVPN object * bagpipe-bgp: allow updating local\_pref * bagpipe-bgp: expose local\_pref in looking-glass * bagpipe-bgp: properly withdraw route on detach * agent extensions: support multiple detaches at the same time * bgpvpn: agent extension, fix port association 'routes' update * adjustments for, and upgrade to, ExaBGP 4.0.4 * bgpvpn: add 'local\_pref' to OVO port routes * bagpipe-bgp: take LOCAL\_PREF into account in route comparison * agent extensions: fix conditional detach for multiple attachments * bagpipe-bgp: allow to set the BGP local\_pref * agent extensions: remove 'static\_routes' special casing * Updated from global requirements * Updated from global requirements * Updated from global requirements * BGPVPN: add support for Port Associations, base agent code * bgpvpn OVO: remove BGPVPNAssociations OVO object * Add Port Associations to BGPVPN OVO definitions * BGPVPN OVO: use bulk\_pull and remove BGPVPNAssociations * bgpvpn agent extension, support for OVO-based push/pull RPCs * BGPVPN OVO connected networks fix * Adding BGPVPN OVO objects and tests * Updated from global requirements * Add \_port\_data to agent extensions unit tests base class 
8.0.0.0b2 --------- * bagpipe-bgp: IPVPN OVS driver, advanced options * Updated from global requirements * bagpipe-bgp, EVPN: clear the proxy ARP entry only if needed * Always send import\_rt/export\_rt on bagpipe-bgp API * Modify ARP responder flow match * Remove setting of version/release from releasenotes * Updated from global requirements * bagpipe-bgp: IPVPN, fix concurrency issue on import RT update * Migration of Agent Extension to Neutron-lib * Modify ARP responder behavior from configuration * Disable ARP proxy from VXLAN interface for gateway * Updated from global requirements * Add ARP responder per VRF 8.0.0.0b1 --------- * EVPN/VXLAN linux: fix unplug * bagpipe-bgp: add cleanup assist * BGPVPN L2/EVPN: use same VNI as the associated network * bagpipe: E-VPN route withdraw issue * bgpvpn agent extension: fix disassociate after update * bgpvpn agent extension: fix import order * bagpipe-bgp: EVPN/VXLAN linux driver, fix state cleanup * bagpipe-bgp: avoid duplicate Route Targets * Move service specific code to agent extensions * bagpipe-bgp: IPVPN OVS, make ARP resolution fail clean * Updated from global requirements * Don't check RT parameters on detach * devstack: create GOPATH/bin before installing gobgp * devstack: install gobgp from binary release * bagpipe-bgp: fix race in import RTs update * Updated from global requirements * agent extensions: improve options types * oslo config generator and doc improvements * bagpipe-bgp: fix dataplane updates on some route updates * bagpipe-bgp: improve TrackerWorker and RTM testing * Updated from global requirements * devstack: fix pseudo b-bgp-looking-glass service * IPVPN OVS driver: longest match lookups * L2/EVPN: add fullstack tests * Updated from global requirements * Various minor logging improvements * cosmetic: avoid a few lambdas * Updated from global requirements * Fix post gate hook to accommodate for new os-testr * Update import for ml2 config * Updated from global requirements * Updated from 
global requirements * devstack gate hook for fullstack job, fix IPV4\_ADDRS\_SAFE\_TO\_USE * devstack: stop using screen * Upgrade pyroute2 to solve bug 1710216 * Make BaGPipe BGP agent singleton * L2/EVPN: log ERROR when used with OVS * Add agent constants module * fullstack: do not log gobgpd * Support static destination prefix classification * Drop MANIFEST.in - it's not needed by pbr * EVPN/VXLAN linuxbridge driver: use IANA allocated port * gate fullstack hook: have tox and zuul-cloner inherit all environment * Updated from global requirements * Updated from global requirements * bagpipe-bgp: linux IPVPN driver, log adjustement * Updated from global requirements * Update reno for stable/pike * bagpipe-bgp: support for OVS standard MPLS/GRE * add a release note for linuxbridge BGPVPN support * bagpipe-bgp: remove tracker\_worker verbosity * doc: remove module index * bagpipe-bgp: fix bug 1705477 for both exabgp 4.0.1 and 4.0.2 * bagpipe-bgp: workaround exabgp issue #690 * bagpipe-bgp translate\_api\_internal fix 7.0.0.0rc1 ---------- * bagpipe-bgp: avoid exabgp debug info, even in debug mode * bagpipe-bgp: OVS, avoid use of two patch ports * Revert "bagpipe-bgp: ipvpn 'linux' driver, fix ip route flush" * bagpipe-bgp: clean reraise on plug 7.0.0.0b3 --------- * Updated from global requirements * Add auto-generated config reference * Automatically generate configuration files * Fix patched\_format parameter error * Update the documentation link for doc migration * Updated from global requirements * Updated from global requirements * Use rootwrap mode for root user * Add pecan requirement * Enable H904 * doc: cleanup HTML rendering with openstackdocstheme * Rearrange existing documentation to fit the new standard layout * Switch from oslosphinx to openstackdocstheme * Turn on warning-is-error in doc build * Speed up tox\_install.sh * revert temp gate breakage workaround * Updated from global requirements * declare exabgp dependency in requirements.txt * Revert 
"bagpipe-bgp: ipvpn 'linux' driver, fix ip route flush" * Depend on pyroute2 0.4.17 * Remove usage of parameter enforce\_type * Updated from global requirements * bagpipe-bgp: support for exabgp 4.0.1 * bagpipe-bgp: move kernel loading to \_\_init\_\_ * bagpipe-bgp: better doc for driver init/cleanup workflow * Allow bgpvpn agent extension to work with linuxbridge * bagpipe-bgp: ipvpn 'linux' driver, fix ip route flush * Enable some off-by-default checks * fullstack: add scenario for linuxbridge IPVPN driver * bagpipe-bgp: be less verbose in debug mode * Updated from global requirements * bagpipe-bgp: cleanups in dataplane drivers init * Updated from global requirements * fullstack: add a bare MPLS scenario * bagpipe-bgp, bare MPLS: ARP trigger on specific interface * bagpipe-bgp: IPVPN OVS driver fix for bare MPLS * doc: improve information on installation, about pip and versions * Updated from global requirements * Updated from global requirements 7.0.0.0b2 --------- * pylint fixes * Updated from global requirements * devstack: pin exabgp version * bagpipe-bgp: minor improvement to InterfaceAddress * Updated from global requirements * unit test improvement: use deepcopy * bagpipe-bgp: make gateway\_ip API param optional for EVPN * bagpipe-bgp: move API parameters checks and conversions * Updated from global requirements * Updated from global requirements * Updated from global requirements * use pypi exabgp instead of master * fullstack: fix bagpipe-l2 test * Fix pushing malformed RT to bagpipe-bgp * Add fullstack tests framework * Agent code refactoring * Updated from global requirements * bagpipe-bgp: properly cleanup vpn\_instances on shutdown * agent: ARP responder cleanups * bagpipe-bgp: specify a forced VNI value to use for a VPN instance * add pylint checks * bagpipe-l2: mech\_driver imports constants from neutron-lib * bagpipe-bgp: dataplane driver cleanup * bagpipe-bgp: cleanup obsolete dataplane driver code * Updated from global requirements * devstack: 
set bagpipe-bgp rootwrap files even wo. bagpipe-bgp service * devstack: fix gobgp installation * Updated from global requirements * use MechanismDriver from neutron-lib * use neutron-lib constants rather than plugin constants * bagpipe-bgp: local\_address can be given as an interface address * bagpipe-bgp: minor opt parsing improvements * Add placeholders for gate pre/post hooks * consume neutron-lib callbacks * fix launchpad URL in README * Updated from global requirements * Updated from global requirements 7.0.0.0b1 --------- * bagpipe-bgp: follow exabgp peer down change * bagpipe-bgp, minor: revert bogus rename * Updated from global requirements * move bagpipe\_bgpvpn extension from -bgpvpn to -bagpipe * bagpipe ml2: don't wait for DHCP provisioning block to clear * Updated from global requirements * Updated from global requirements * remove log translations * use LocalVlanManager instead of OVSDB to lookup local vlan * bagpipe-bgp: exit 0 on shutdown * bagpipe-bgp: make BGP port configurable * bagpipe-bgp: log config on startup * Updated from global requirements * Agent common config move * Updated from global requirements * bagpipe-bgp: drop support for bagpipe-bgp --log-file * bagpipe-bgp: fix version info hook * devstack jobs cleanups: VERBOSE=True is now ok * Updated from global requirements * bagpipe-bgp merge: remaining cleanups * bagpipe-bgp merge: release note and doc update * bagpipe-bgp merge: update requirements * bagpipe-bgp: remove stale files * bagpipe-bgp: use short driver names * configure devstack to use merged bagpipe-bgp * bagpipe-bgp: IPVPN ovs driver, fix fallback * bagpipe-bgp sample conf: use newer host/port * bagpipe-bgp: looking-glass fixes * Cleanup delete\_flow call * remove deprecated use of get\_session() * remove deprecated use of get\_session() * bagpipe-bgp: MPLS OVS driver GRE tunnel interface fix * Fix API instantiation * use oslo\_config fixture * exabgp: install master branch of official repo * merge bagpipe\_bgp (s|ex)amples 
* merge bagpipe\_bgp etc files * merge bagpipe\_bgp entry points * enable bagpipe\_bgp unit tests * move bagpipe\_bgp:bagpipe.bgp to networking\_bagpipe.bagpipe\_bgp * add exabgp dependency * Use neutron-lib's context module * Updated from global requirements * move bagpipe\_bgp documentation in main doc dir * Switch to neutron-lib for parse\_mappings * Update test requirement * Updated from global requirements * Updated from global requirements * prepare move to openstack networking-bagpipe * Switch to neutron-lib for parse\_mappings * Remove capability to daemonize and python-daemon dependency * Avoid oslo\_log warning in init scripts * IPVPN MPLS OVS driver: fix an interface initialization issue * Stop using deprecated json module, use oslo\_serialization instead * Use only module imports * Clean-up obsolete licence-related text * Remove leftover traces from etc/bagpipe-bgp/log.conf * Simplify catchall log handler attachment * Use neutron-lib's context module * Update reno for stable/ocata * Updated from global requirements 6.0.0 ----- * Add py35 to classifier and remove a py34 environment from tox * refactor to use oslo.log * Prepare for using standard python tests * Avoid a cleanup failure if VXLAN tunnel does not exist * Restore br\_tun/br\_mpls flows on OVS restart * Cleanup delete\_flow call * Use neutron-lib portbindings api-def * Use neutron-lib provider net api-def * Updated from global requirements * remove README.exabgp * fix another minor rst formatting issue in README * setup.cfg should point to README.rst * fix rst formating nit in README.rst * devstack plugin: handle installation of gobgp and go language * Fix CLI option * Fix daemon stop * Refactor to use pecan instead of Botlle * Sort load balancing endpoints list based on lb\_consistent\_hash\_order * Additional test example with LB service VMs * Use MPLS bridge MAC address * Add possibility to configure GRE tunnel name in IPVPN OVS driver * Port markdown formatted README to reStructuredText * 
Updated from global requirements * Updated from global requirements * pep8 cleanups * Daemon/RESTAPI cleanup * instantiate VPNManager from RESTAPI * make VPNManager a singleton * make BGP Manager a singleton * Adopt oslo.config for configuration parsing * Add models sync with migration and functional tests * Updated from global requirements * Follow ml2 plugin openvswitch driver config * MPLS OVS IPVPN dataplane driver: use a register instead of a per-VRF patch port * Add HEAD files for contract and expand branches * Really call driver.initialize() after driver.reset\_state() * examples: hide netns cleanup messages * Fix the problem of the file mode * VPNInstance: don't unsubscribe during stop() * MPLS OVS dataplane: simplify OVS rule cleanup * MPLS OVS IPVPN dataplane driver: use a fixed gateway MAC * Fixes in OVS IP VPN dataplane driver * Show team and repo badges on README * Use patch-port:vlan i.o. linux interface as linuxif * Follow model\_base move * Use the use of --mac with --detach --port netns * Change passing session to context in segments db functions * Make ostestr\_compat\_shim.sh executable * Reorganise/clarify/enrich documentation * Use os-testr instead of testr * devstack gate fixes for bagpipe ML2 * tests: import exceptions from neutron\_lib * Use temporary directory for neutron install * Remove devstack/agent file * Updated from global requirements * devstack: refactor how ML2 agent and parameters are configured * Delete python bytecode before every test run * Updated from global requirements * Add reno to test-requirements.txt * Add reno configuration and tox target * devstack tempest job adjustments * Updated from global requirements * Remove last vestiges of oslo-incubator * bagpipe ml2 devtack job config fixes * devstack: configure all parameters via plugin.sh * Updated from global requirements * Add devstack gate hook and rcfile * Updated from global requirements * python3: use six for iteritems * Improve cover target * Fix tox cover target 
* bagpipe-bgp submodule sync * Updated from global requirements 5.0.0 ----- * Router fallback feature * devstack fix for bagpipe ml2 * devstack: fix to load bagpipe l2 agent extension * Code cleanups and simplification in run\_command * Code factorization in run\_command method * Remove python 3 from setup.cfg classifiers * Avoid useless bagpipe-rest-attach warnings * pep8/pylint cleanups * VPNManager has to run rootwrap or shell commands depending on common config * legacy run\_command method now supports rootwrap * Separate dataplane drivers configuration and init * VRF fallback feature * examples: send 3 pings rather than 12 * rest-attach: cleanup interface from OVS before recreating * Support for newer iproute * Simpler access to common config from drivers * Skip rootwrap/sudo if already root * Add common section and oslo.rootwrap helper parameters in configuration * Run some more rootwrapped commands in shell mode * Add possibility to run rootwrapped command in shell * Adapt VPNInstanceDataplane.\_run\_command method for rootwrap * Use oslo.rootwrap to run command as root * Update homepage with developer documentation page * Modify use of assertTrue(A in B) * Import DB model base from neutron\_lib * Load dataplane drivers with stevedore instead of custom code * devstack: properly set the l2 agent extension * Update flake8 ignore list * Updated from global requirements * VPNManager refactoring, solving concurrency issues * Improve route table manager warning on same route * Fix bug in duplicate consumption of instance ids * pep8 adjustements * Bye-bye camelCase! 
* MPLS OVS IPVPN driver: use a different table to resubmit after hash * MPLS OVS IPVPN driver, now requires OVS >= 2.5 * Have base dataplane driver claim support for MPLS encap * minor layout improvement in example output * MPLS OVS IPVPN driver: fail cleanly if MAC can't be resolved * Major code cleanup (pep8, pylint, others) * Cleanup looking glass code * Use upper constraints for test jobs * Update deprecated i18n an neutron const imports * Updated from global requirements * Test example for service chain load balancing * Script to setup cross routing per interface * IPVPN: adaptations for LB/CHSO and per-endpoint RDs * Driver adaptations for LB and CHSO * VPNInstance adaptations for CHSO and per-endpoint RD allocations * Add consistent hash sort order parameter to API and VPNManager * Add an allocator for Route Distinguishers * Updated from global requirements * Enable DeprecationWarning in test environments * Updated from global requirements * Add Python 3.5 classifier and venv * Remove discover from test-requirements * TrackerWorker: more cleanups and readability improvements * TrackerWorker: better use of dicts * RouteTableManager: better use dicts for more compact code, and other minor improvements * dummy dataplane driver: don't fail at init * Add test dependency on testresources * Updated from global requirements * Create veth interfaces with max possible MTU * MPLS Linux dataplane improvements * rest-attach tool: device name fixes * check that linux device name does not exceed max length * example tests: use shorter netns names * example tests * bagpipe-impex cleanups * Add next\_hop to route information in looking glass * BGP state machine rework/cleanup * minor source cleanups * Updated from global requirements * IPVPN MPLS Linux driver: minor updates * Updated from global requirements * Move from neutron.i18n to oslo.i18n * Update README * Up-to-date info on new MPLS Linux dataplane driver for IPVPN * Recover from failed connection attempts * IPVPN 
driver for linux kernel MPLS stack 4.0.0 ----- * ml2 agent extension: fix bagpipe-bgp client instantiation * Add a sample config for GoBGP * devstack plugin: fix xtrace state restoration * devstack plugin: only log to console * Improved README.rst * Updated from global requirements * Update bagpipe-bgp submodule reference * Fix devstack xtrace restoration * More tweak to troubleshoot Idb4fdad0da7bed5eda0c302bc79c5f05d50b44d7 * More info on restoring xtrace.. * Enable xtrace for devstack plugin * devstack plugin: properly restore xtrace * Excluding bagpipe-bgp submodule from pep8 tests * BGPVPN OVS: defer gw ARP redirection until we really need it * Fix exception message formating * Agent: better error management for OVSDB errors * Update bagpipe-bgp submodule reference * Patch for compatibility with ExaBGP * Updated to test bidirectional service chain * Patch for compatibility with ExaBGP * Add methods to generate FlowEvent and convert RouteTarget object to "asn:nn" string * Modify some methods to extract informations from advertise or withdraw route call Add some attract traffic tests to validate advertised/withdrawn FlowSpec and default routes handled in the correct VPN instance * Add some VPNManager tests * Return VPNInstance object instead of redirect port number * Add missing \_\_hash\_\_ method to TrafficClassifier class * Remove unused method * Advertise and withdraw default/prefix route methods renamed * Advertise default route to redirect VRF instead of one route per prefix in FlowSpec case (Route per prefix already advertised to redirected VRF) * Handle multiple classifiers (one per prefix) for each redirect route target * Register to redirect instance only on creation Return redirect instance instead of redirect port on traffic redirection * looking glass: improve error logging * properly unquote looking-glass path elements * Disable flow\_ip family, keep only flow\_vpn * devstack plugin: do not override BAGPIPE\_DIR if already set * Adding a 
SERVICE\_HOST as a default for BAGPIPE\_BGP\_PEER * impex2dot: support multiple servers and attract traffic * devstack: die on 'enable\_plugin bagpipe-bgp' * Modify method names and mechanism to handle traffic redirection to an injection instance based on BGP FlowSpec rules Add redirected instances list to properly handle injection instance stop * Change looking-glass to display attract traffic informations into re-advertise details (Attract traffic depending on re-advertise) * Fix bracket missing in devstack/plugin.sh * E-VPN VXLAN: avoid fdb issues at unplug * More robust bagpipe-bgp submodule hook * Fix devstack directory for bagpipe-bgp submodule * Override BAGPIPE\_DIR before sourcing bagpipe-bgp plugin.sh * Update bagpipe-bgp submodule reference * Init bagpipe-bgp submodule in devstack plugin * Integrate bagpipe-bgp as a submodule * Cleanup following refactoring into agent extensions * Follow neutron's master * Fix RPC destination server * Avoid "ifconfig|grep HWaddr" to find the MAC of a device * EVPN/VXLAN: add static fdb entry for local ports * Fix looking glass client * Simplify code going through Extended Communities * Looking glass improvements * routes not imported should never be installed in dataplane * Also apply RTRecord test before readvertise as flowspec * Updated from global requirements * Log message minor improvements * Properly populate route RTs, do not include RTRecord * Fix eq/hash for FlowSpec NLRI * Log message fix for advertised encapsulation issues * Accept routes with no encap specified in bare MPLS mode * Updated from global requirements * Fix port attach/detach notifications * Correct bug when unplugging local port which has been plugged with multiple MAC (and IP) addresses * Make attract\_traffic redirect RTs a list * minor improvement of chain-traffic-redirect * IPVPN OVS: use VXLAN resubmit if VXLAN is enabled * Cleanup of netns to make example script more reliable * Fix missing encap attributes on re-advertised routes * log 
message fix * impex2dot: dot syntax fix * fix readvertised route exposure via looking glass * fix readvertised route exposure via looking glass * expose flowspec-vpn routes in looking glass * fix previous commit for multiple IPs per MAC * do not readvertise non-IPVPN routes * Don't error if E-VPN route has no IP * Correctly handle plug/unplug for one MAC with multiples IPs * minor impex2dot bugfix * Stop redirect VPN instance when empty and traffic indirected from it * Subscribe/unsubscribe to/from FlowSpec routes when missing * Correctly handle plug/unplug of same MAC address with multiples IP addresses * Updated from global requirements * RTRecord is now upstreamed * RTRecord is now upstreamed * Setup ExaBGP environment even if no peers defined in configuration to avoid errors in single host case * Setup ExaBGP environment even if no peers defined in configuration to avoid errors in single host case * Add route redistribution graphing tool * allow config file to ommit API parameters * Add route redistribution graphing tool * allow config file to ommit API parameters * implement RTRecord * update description of examples/chain-traffic-redirect * implement RTRecord * README update on RR options * Modification due to afi/safi removed from RouteEntry constructor (see commit 4c5b97dbb82a28e462803e04c5d1a59a66daa807) * Fix port detach message not send when a port is migrated * Add mechanism driver unit tests * Disable VXLAN on Linux Bridge to fix tests * Route targets list now passed through API * Add attract traffic parameter details to looking glass * Add method to generate Route Distinguisher from InstanceId Modify NLRI Flow construction * Route Distinguisher from InstanceId generated in VPNInstance class * fix unittests after flowspec additions * Adds wrappers around ExaBGP Flowspec * First release of BGP FlowSpec handling based on 5-tuple classifier for traffic redirection * Updated from global requirements * py26/py33 are no longer supported by Infra's CI * remove 
python 2.6 trove classifier * OSV MPLS: avoid broadcast with secure mode * Route targets list now passed through API * Add attract traffic parameter details to looking glass * Add unit tests for agent and RT type driver * Add some verification methods * Modify agent: handle multiple port attach sources * in devstack, bind to 0.0.0.0 by default * control api\_host from devstack local.conf * update openstack projects URLs * API server: do not bind to 0.0.0.0 * tox to also use stable/liberty with zuul cloner * Have tox use neutron stable/liberty branch * Update to follow LinuxBridge agent modifications * Add method to generate Route Distinguisher from InstanceId Modify NLRI Flow construction * Route Distinguisher from InstanceId generated in VPNInstance class * follow project rename * Alembic update/cleanup * remove afi/safi from RouteEntry constructor * fix unittests after flowspec additions * Adds wrappers around ExaBGP Flowspec * Change repositories from stackforge to openstack * First release of BGP FlowSpec handling based on 5-tuple classifier for traffic redirection * Update .gitreview for new namespace * Updated from global requirements * Have log.conf also control exabgp code loglevel * Updated from global requirements * update requirements * logger method has moved to oslo * requirements: fix missing prefix in git repo spec * right destination dir for conf files * update requirement to point to exabgp git repo * missing files in previous commit following exabgp refactoring * follow change of EVPN route creation method * Precision that tested setup is with the DKMS OVS kernel module * Simplify RouteTableManager callbacks for first/last subscriber * Adapt to ExaBGP NLRI changes * Update requirements to require Exabgp >=3.9 * Adapt ExaBGPPeerWorker to ExaBGP internal changes * Change ignore-errors to ignore\_errors * Updated from global requirements * update info on OVS version dependency for MPLS dataplane driver * RPC renames * PEP8 Fixes * oslo.config is now 
oslo\_config * devstack plugin fix * Updated from global requirements * remove leftover backward compat devstack code * devstack: don't init OVS MPLS bridge unless IP VPN driver is OVS MPLS * follow exabgp change in NextHop object * E-VPN VXLAN: add the ability to specify local address * E-VPN/VXLAN: fix for proper cleanup of VXLAN interfaces * E-VPN/VXLAN: make kernel version check a warning * update README (stable/kilo or master branch) * update README.rst (devstack to point to stable/kilo branch) * Follow Neutron changes * update pbr requirement * Add eventlet monkeypatching to the agent * README/devstack: proper VXLAN E-VPN driver * fix parsing of vxlan\_dstport config parameter * fix indentation for proper rst formating * fix space in EVPN driver specification * allow to specify VXLAN dstport through config file * update requirements * convert vxlan port to int at init time * Added Gitter badge * correct wrong comment formatting in bgp.conf.template * README.rst update for bagpipe-bgp devstack plugin * devstack plugin: do not default to enabling real dataplanes * IPVPN OVS dataplane: more robust code to find port numbers * update reference to grempls ovs patch * devstack: do not run update-db on a compute node * upgrade version * Setup MPLS bridge only if OVS agent * now depend on external upstream exabgp * better document OVS IPVPN driver vxlan\_encap option * OVS IPVPN driver fix: only cleanup VXLAN state if VXLAN encap enabled * Script to test traffic between 2 E-VPNs through an IP-VPN * Create devstack plugin * VRF: bugfix wrong re-advertised route on vifPlugged/vifUnplugged * update VRF unit test for proper re-advertisement on vif(Un)Plugged * add more log decorators to base objects * OVS driver: expose VXLAN tunnel in looking glass * IP VPN OVS dataplane driver: support VXLAN encap for testing * use IANA port for VXLAN * follow upstream exabgp changes on FSM * examples: test number of routes after cleanup * need to overwrite nlri.action at event creation 
time * simplification, no need to set NLRI action * update requirements: nothing against python-daemon > 2.0, 2.0.5 actually works * follow exabgp upstream changes * comment out tests not relevant and not passing anymore with upstream exabgp * follow exabgp refactoring * add constants for evpn and ipvpn types * looking glass: dataplane instance to driver now uses an href * pep8'tify * cleaner looking glass display of received\_routes * add --recurse/-r to bagpipe-looking-glass * IPVPN: bugfix on re-advertised routes * improved unit-test for VPNInstance re-advertise behavior * untypo'ed * unit-test for VPNInstance re-advertise behavior * port bugfix f385b877572991ce9e78fec91f231cada698c685 to upstream bgp objects * complete a unit test on RouteEntry and RTs * pep8 cleanup * remove in-tree exabgp fork * update requirements for exabgp * New script to test destination-based chaining * MPLS OVS dataplane: Fix bug due to default route reception * port unit test to new exabgp objects * IPVPN: fix readvertise behavior * remove spurious unicode non-breakable spaces * unittest to cover the case fixed by commit 0aae28a71229ef7a3a7f04e528bbdd536554a9f1 * RouteTableManager should not break if a route has no RT * FIXMEs to track a re-advertise and updateRTs issue * work in progress * better tests for sortings extended communities * more tests for base engine objects * updating an attribute requires add/remove * fix updateRouteTargets export comparison * test update of RouteTargets * work in progress * work in progress * work in progress * work in progress * work in progress * work in progress * work in progress * work in progress * factor-out \_wait() calls in test\_route\_table\_manager * more fixes for use of upstream exabgp * log typo * import cleanup * more changes to adapt to upstream ExaBGP * reintegrate past engine refactoring patches * refactor RouteEntry and pushEvent * avoid having to specify RTs in two places * use upstream ExaBGP * install\_bagpipe\_bgp.sh should not 
fail if log dir already exists * pep8 long line fix * E-VPN dataplane driver: document setGatewayPort and gatewayPortDown * update readme for Kilo RC2 * use the correct ovs\_lib place * add missing agent files * add missing devstack plugin file for the agent * more info on which Openstack release to use * more info on BGP RRs * README updates * update README, correct wrong pointer * adjust required Twisted version (used for fake-rr only) * install script will now install a working config file if none exists * update README with pointers to Openstack Neutron plugins/drivers * cleanups * first commit * better .gitignore * BGP worker: use clean exception for shutdown * VPNInstance: raise API Exceptions when relevant * Avoid misleading logs at startup/shutdown * examples: stop bagpipe-bgp after each example to exercice unplugs and shutdown * documentation for examples * fix ommission of readvertise parameter at unplug time * README update: one caveat less (now has the ability to advertise subnets, not only /32) * example directory with ready-to-run examples, including service-chaining * rest\_attach cli tool: support for 'advertise\_subnet' * Support for 'advertise\_subnet' API option * MPLS OVS dataplane: fix to allow plugging a same port multiple times * logging improvements * readvertisement support: IPVPN support * readvertisement support: REST attach cli tool * routeTableManager: warning, not exception, on a duplicate advertisement * readvertisement support: API, VPNManager and VPNInstance changes * minor logging changes * cosmetic pep8 and code layout changes * correct 2 typos * Initial Cookiecutter Commit * Added .gitreview * bagpipe-looking-glass CLI: new options to specify port and URL prefix * update ref to E-VPN specs, base specs are now an RFC and overlay specs and IETF WG document * E-VPN, no reason anymore to have a knob to turn BUM support off * minor: remove useless subscription from BGP peer for IPv4/RTC: * remove unicode no-break spaces present in a 
few places * bagpipe-rest-attach: workaround a case where perl breaks due to wrong locale setup * E-VPN: remove obsolete code for RRs without proper E-VPN support * corrected method signature in Dummy dataplane driver * BGP sessions: improved state machine and error handling * move getBoolean to common.utils * EVPN Inclusive Multiast NLRI encoding: IP field len is in bits not bytes * EVPN PMSI Tunnel Attribute encoding: set Transitive flag * MPLS OVS dataplane: fix bridge name config file parsing regression * Cleanup * Code drop * Code drop * code drop, mostly bugfixes * IP VPN: fix logging bug on route removal * Code drop: improvements, major cleanups and some fixes * fix daemon startup regression in a recent commit * fixes to bagpipe-rest-attach and dataplane cleanup code * etc/init.d/bagpipe-bgp: indent fixes * README.md: fix IP used in examples * really move exabgp/lib/exabgp away (mis-merge) * Add unit test basis, project directory relayout, code and doc cleanups, minor bugfixes * Add E-VPN ExaBGP code and EVI implementation, plus one engine regression fix * Cleanup MPLS OVS driver * initial github commit * Initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/HACKING.rst0000664000175000017500000000031200000000000017266 0ustar00zuulzuul00000000000000======================================== networking-bagpipe Style Commandments ======================================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/LICENSE0000664000175000017500000002363600000000000016513 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9623048 networking_bagpipe-22.0.0/PKG-INFO0000644000175000017500000000453100000000000016572 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: networking-bagpipe Version: 22.0.0 Summary: Mechanism driver for Neutron ML2 plugin using BGP E-VPNs/IP VPNs as a backend Home-page: https://docs.openstack.org/networking-bagpipe/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.9 License-File: LICENSE Requires-Dist: netaddr>=0.7.18 Requires-Dist: neutron-lib>=2.19.0 Requires-Dist: oslo.db>=4.37.0 Requires-Dist: oslo.config>=5.2.0 Requires-Dist: oslo.concurrency>=3.26.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=3.36.0 Requires-Dist: oslo.messaging>=5.29.0 Requires-Dist: oslo.privsep>=2.3.0 Requires-Dist: oslo.rootwrap>=5.8.0 Requires-Dist: oslo.serialization!=2.19.1,>=2.18.0 Requires-Dist: oslo.service!=1.28.1,>=1.24.0 Requires-Dist: oslo.versionedobjects>=1.35.1 Requires-Dist: pyroute2>=0.5.7 Requires-Dist: stevedore>=1.20.0 Requires-Dist: exabgp>=4.0.4 Requires-Dist: pecan>=1.3.2 Requires-Dist: neutron>=23.0.0 Requires-Dist: networking-bgpvpn>=12.0.0 Requires-Dist: networking-sfc>=10.0.0 ================== networking-bagpipe 
================== Driver and agent code to use BaGPipe lightweight implementation of BGP-based VPNs as a backend for Neutron. * Free software: Apache license * Documentation: https://docs.openstack.org/networking-bagpipe/latest/ * Source: https://opendev.org/openstack/networking-bagpipe * Bugs: https://bugs.launchpad.net/networking-bagpipe * Release notes: https://docs.openstack.org/releasenotes/networking-bagpipe/ Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/networking-bagpipe.svg :target: https://governance.openstack.org/tc/reference/tags/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/README.rst0000664000175000017500000000123000000000000017157 0ustar00zuulzuul00000000000000================== networking-bagpipe ================== Driver and agent code to use BaGPipe lightweight implementation of BGP-based VPNs as a backend for Neutron. * Free software: Apache license * Documentation: https://docs.openstack.org/networking-bagpipe/latest/ * Source: https://opendev.org/openstack/networking-bagpipe * Bugs: https://bugs.launchpad.net/networking-bagpipe * Release notes: https://docs.openstack.org/releasenotes/networking-bagpipe/ Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/networking-bagpipe.svg :target: https://governance.openstack.org/tc/reference/tags/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/bindep.txt0000664000175000017500000000075500000000000017505 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed for install and tests; # see https://docs.openstack.org/infra/bindep/ for additional information. 
mysql-client [platform:dpkg !platform:debian] mysql-server [platform:dpkg !platform:debian] mariadb-server [platform:rpm platform:redhat platform:debian] postgresql postgresql-client [platform:dpkg] # cffi (required by oslo.privsep) and PyNaCL (required by paramiko) libffi-dev [platform:dpkg] libffi-devel [platform:rpm] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.8983064 networking_bagpipe-22.0.0/devstack/0000775000175000017500000000000000000000000017300 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/devstack-gate-rc0000664000175000017500000000302000000000000022342 0ustar00zuulzuul00000000000000# This file is hooked from https://git.openstack.org/cgit/openstack-infra/project-config/blob/master/jenkins/jobs/networking-bagpipe.yaml # Not used for functional or fullstack jobs # bagpipe ML2 mech_driver config export DEVSTACK_LOCAL_CONFIG+=$'\n'"ENABLE_BAGPIPE_L2=True" # no need for peers (we don't have a multinode CI yet) export DEVSTACK_LOCAL_CONFIG+=$'\n'"BAGPIPE_BGP_PEERS=-" # uncomment to periodically log the full content of the looking-glass export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_service neutron-bagpipe-bgp-lg" # Restrict Tempest test suite, for now # (partially and shamelessly stolen from networking-ovn/devstack/devstackgaterc) # Begin list of exclusions. 
r="^(?!.*" # exclude the slow tag (part of the default for 'full') r="$r(?:.*\[.*\bslow\b.*\])" # exclude some unrelated stuff to make targeted runs go faster r="$r|(?:tempest\.api\.identity*)" r="$r|(?:tempest\.api\.image*)" r="$r|(?:tempest\.api\.volume*)" r="$r|(?:tempest\.api\.compute\.admin*)" r="$r|(?:tempest\.api\.compute\.images*)" r="$r|(?:tempest\.api\.compute\.keypairs*)" r="$r|(?:tempest\.api\.compute\.certificates*)" r="$r|(?:tempest\.api\.compute\.flavors*)" r="$r|(?:tempest\.api\.compute\.servers*)" r="$r|(?:tempest\.api\.compute\.test_quotas*)" r="$r|(?:tempest\.api\.compute\.test_versions*)" r="$r|(?:tempest\.api\.compute\.volumes*)" # End list of exclusions. r="$r)" # only run tempest.api/scenario/thirdparty tests (part of the default for 'full') r="$r(tempest\.(api|scenario|thirdparty)).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.8983064 networking_bagpipe-22.0.0/devstack/gate-hooks/0000775000175000017500000000000000000000000021341 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/gate-hooks/README0000664000175000017500000000017100000000000022220 0ustar00zuulzuul00000000000000# these are snippets meant for inclusion in a local.conf localrc section # by the load_rc_hooks function of gate_hook.sh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/gate-hooks/bagpipe0000664000175000017500000000013100000000000022666 0ustar00zuulzuul00000000000000enable_plugin networking-bagpipe https://git.openstack.org/openstack/networking-bagpipe ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 
networking_bagpipe-22.0.0/devstack/gate-hooks/bgpvpn0000664000175000017500000000012700000000000022560 0ustar00zuulzuul00000000000000enable_plugin networking-bgpvpn https://git.openstack.org/openstack/networking-bgpvpn ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/gate-hooks/dstat0000664000175000017500000000002500000000000022400 0ustar00zuulzuul00000000000000enable_service dstat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/gate-hooks/go-env0000664000175000017500000000004500000000000022456 0ustar00zuulzuul00000000000000GOPATH=/opt/go PATH=$PATH:$GOPATH/bin././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/gate-hooks/gobgp0000664000175000017500000000002400000000000022356 0ustar00zuulzuul00000000000000enable_service gobgp././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/gate-hooks/stack_base0000664000175000017500000000055000000000000023363 0ustar00zuulzuul00000000000000# set password, otherwise devstack enters interactive mode and fails ADMIN_PASSWORD=secretadmin # don't use screen to start services (needed to disable colorization in # captured service logs) USE_SCREEN=False # start with an empty service list, otherwise devstack will configure several # 'default' services, including rabbitmq and mysql disable_all_services ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/gate_hook.sh0000664000175000017500000000517500000000000021604 0ustar00zuulzuul00000000000000#!/bin/sh GATE_DEST=$BASE/new VENV=${1:-"tempest"} DEVSTACK_PATH=$GATE_DEST/devstack NEUTRON_PATH=$GATE_DEST/neutron 
BAGPIPE_PATH=$GATE_DEST/networking-bagpipe GATE_HOOKS=$NEUTRON_PATH/neutron/tests/contrib/hooks BAGPIPE_GATE_HOOKS=$BAGPIPE_PATH/devstack/gate-hooks LOCAL_CONF=$DEVSTACK_PATH/late-local.conf DSCONF=/tmp/devstack-tools/bin/dsconf # Install devstack-tools used to produce local.conf; we can't rely on # test-requirements.txt because the gate hook is triggered before neutron is # installed sudo -H pip install virtualenv virtualenv /tmp/devstack-tools /tmp/devstack-tools/bin/pip install -U devstack-tools==0.4.0 # Inject config from bagipe hook into localrc function load_bagpipe_rc_hook { local hook="$1" local tmpfile local config tmpfile=$(tempfile) config=$(cat $BAGPIPE_GATE_HOOKS/$hook) echo "[[local|localrc]]" > $tmpfile $DSCONF setlc_raw $tmpfile "$config" $DSCONF merge_lc $LOCAL_CONF $tmpfile rm -f $tmpfile } case $VENV in "functional"|"fullstack") VENV=dsvm-$VENV # The following need to be set before sourcing # configure_for_func_testing. GATE_STACK_USER=stack PROJECT_NAME=networking-bagpipe IS_GATE=True LOCAL_CONF=$DEVSTACK_PATH/local.conf source $DEVSTACK_PATH/functions source $NEUTRON_PATH/devstack/lib/ovs source $NEUTRON_PATH/tools/configure_for_func_testing.sh configure_host_for_func_testing # to be replaced by project config bindep trigger sudo PATH=/usr/sbin:/sbin:$PATH DEBIAN_FRONTEND=noninteractive \ apt-get -q --option "Dpkg::Options::=--force-confold" \ --assume-yes install fping # prepare base environment for ./stack.sh load_bagpipe_rc_hook stack_base # enable monitoring load_bagpipe_rc_hook dstat # have devstack know about our devstack plugin load_bagpipe_rc_hook bagpipe # setup go environement variables in devstack load_bagpipe_rc_hook go-env # create same go environement source $BAGPIPE_GATE_HOOKS/go-env sudo mkdir -p $GOPATH sudo chown -R $STACK_USER:$STACK_USER $GOPATH # install gobgp via our devstack plugin load_bagpipe_rc_hook gobgp # Make the workspace owned by the stack user sudo chown -R $STACK_USER:$STACK_USER $BASE # temporary fix for bug 
1693689 export IPV4_ADDRS_SAFE_TO_USE=${DEVSTACK_GATE_IPV4_ADDRS_SAFE_TO_USE:-${DEVSTACK_GATE_FIXED_RANGE:-10.1.0.0/20}} # deploy devstack as per local.conf cd $DEVSTACK_PATH && sudo -E -H -u $GATE_STACK_USER ./stack.sh ;; "tempest") $GATE_DEST/devstack-gate/devstack-vm-gate.sh ;; *) echo "Unrecognized environment $VENV". exit 1 esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/override-defaults0000664000175000017500000000065700000000000022657 0ustar00zuulzuul00000000000000NETWORKING_BAGPIPE_DIR=$DEST/networking-bagpipe if [[ "$Q_AGENT" =~ "bagpipe-linuxbridge" ]] ; then echo "Using 'Q_AGENT='bagpipe-linuxbridge' in local.conf is deprecated, please replace by NEUTRON_AGENT=linuxbridge" NEUTRON_AGENT=linuxbridge Q_AGENT=linuxbridge fi ENABLE_BAGPIPE_L2=$(trueorfalse False ENABLE_BAGPIPE_L2) if [[ "$ENABLE_BAGPIPE_L2" == "True" ]]; then NEUTRON_AGENT=linuxbridge Q_AGENT=linuxbridge fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/plugin.sh0000664000175000017500000000342600000000000021137 0ustar00zuulzuul00000000000000#!/bin/bash # Save trace setting _XTRACE_NETWORKING_BAGPIPE=$(set +o | grep xtrace) set -o xtrace if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # no-op : elif [[ "$1" == "stack" && "$2" == "install" ]]; then setup_develop $NETWORKING_BAGPIPE_DIR elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then if is_service_enabled neutron-agent || is_service_enabled q-agt ; then if is_neutron_legacy_enabled; then iniset /$Q_PLUGIN_CONF_FILE bagpipe bagpipe_bgp_port $BAGPIPE_SERVICE_PORT else iniset $NEUTRON_CORE_PLUGIN_CONF bagpipe_bgp_port $BAGPIPE_SERVICE_PORT fi fi if [[ "$ENABLE_BAGPIPE_L2" == "True" ]]; then if is_service_enabled neutron-agent || is_service_enabled q-agt ; then echo_summary "Configuring linuxbridge agent for bagpipe" if 
is_neutron_legacy_enabled; then iniset /$Q_PLUGIN_CONF_FILE ml2_bagpipe_extension as_number ${BAGPIPE_RT_ASN:-64512} iniset /$Q_PLUGIN_CONF_FILE vxlan arp_responder True else iniset $NEUTRON_CORE_PLUGIN_CONF ml2_bagpipe_extension as_number ${BAGPIPE_RT_ASN:-64512} iniset $NEUTRON_CORE_PLUGIN_CONF vxlan arp_responder True fi source $NEUTRON_DIR/devstack/lib/l2_agent plugin_agent_add_l2_agent_extension bagpipe configure_l2_agent fi fi fi if [[ "$1" == "unstack" ]]; then rm -f $TOP_DIR/lib/neutron_plugins/${BAGPIPE_L2_AGENT}_agent fi if [[ "$1" == "clean" ]]; then #no-op : fi echo "Running bagpipe-bgp devstack plugin..." source $NETWORKING_BAGPIPE_DIR/devstack/plugin.sh.bagpipe_bgp $1 $2 || die $LINEO "error in bagpipe-bgp plugin.sh ($1 $2)" # Restore trace setting ${_XTRACE_NETWORKING_BAGPIPE} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/plugin.sh.bagpipe_bgp0000664000175000017500000002262000000000000023372 0ustar00zuulzuul00000000000000#!/bin/bash # Save trace setting _XTRACE_BAGPIPE_BGP=$(set +o | grep xtrace) set -o xtrace # Pre-install gobgp requirements function pre_install_gobgp { # Install go language and configure environment variables install_package golang-go export GOPATH=${GOPATH:-$DEST/go} if [[ ! -d $GOPATH ]]; then mkdir -p $GOPATH fi } # Install gobgp function install_gobgp { # we install from binary, because compiling from source requires golang-1.7 which is not in ubuntu xenial mkdir -p $GOPATH/bin cd $GOPATH/bin wget $GOBGP_RELEASE -qO - | tar xz } # Set config files, create data dirs, etc function configure_bagpipe { # Put config files in ``/etc/bagpipe-bgp`` for everyone to find if [[ ! 
-d $BAGPIPE_CONF_DIR ]]; then sudo mkdir -p $BAGPIPE_CONF_DIR fi sudo chown $STACK_USER $BAGPIPE_CONF_DIR # place rootwrap config files # this is done here so that these files are in place, even when # bagpipe-bgp is not configured, specifically for the functional and # fullstack gate jobs BAGPIPE_BGP_RW_CONF=/etc/bagpipe-bgp/rootwrap.conf BAGPIPE_BGP_RW_D=/etc/bagpipe-bgp/rootwrap.d sudo install -o root -g root -m 644 $NETWORKING_BAGPIPE_DIR/etc/bagpipe-bgp/rootwrap.conf $BAGPIPE_BGP_RW_CONF sudo install -d -o root -m 755 $BAGPIPE_BGP_RW_D/ sudo install -o root -m 644 $NETWORKING_BAGPIPE_DIR/etc/bagpipe-bgp/rootwrap.d/* $BAGPIPE_BGP_RW_D/ sudo sed -e "s:^filters_path=.*$:filters_path=$BAGPIPE_BGP_RW_D:" -i $BAGPIPE_BGP_RW_CONF sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $BAGPIPE_BGP_RW_CONF # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $BAGPIPE_BGP_RW_CONF *" ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $BAGPIPE_BGP_RW_CONF" # configure sudo to allow bagpipe-bgp rootwrap config TEMPFILE=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/bagpipe-rootwrap if is_service_enabled neutron-bagpipe-bgp; then # build the config file from scratch create_bagpipe_conf fi } # Create a new bgp.conf file function create_bagpipe_conf { # (Re)create ``bgp.conf`` cp -p $NETWORKING_BAGPIPE_DIR/etc/bagpipe-bgp/bgp.conf.template $BAGPIPE_CONF # build config iniset $BAGPIPE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $BAGPIPE_CONF BGP local_address ${BAGPIPE_HOST_IP:-$HOST_IP} if [[ $BAGPIPE_BGP_PEERS == "-" ]]; then iniset $BAGPIPE_CONF BGP peers "" else iniset $BAGPIPE_CONF BGP peers "$BAGPIPE_BGP_PEERS" fi iniset $BAGPIPE_CONF API host $BAGPIPE_SERVICE_HOST iniset $BAGPIPE_CONF API port 
$BAGPIPE_SERVICE_PORT iniset $BAGPIPE_CONF DATAPLANE_DRIVER_IPVPN dataplane_driver ${BAGPIPE_DATAPLANE_DRIVER_IPVPN:-dummy} iniset $BAGPIPE_CONF DATAPLANE_DRIVER_IPVPN mpls_interface $BAGPIPE_MPLS_IFACE iniset $BAGPIPE_CONF DATAPLANE_DRIVER_IPVPN ovs_bridge $BAGPIPE_MPLS_BR iniset $BAGPIPE_CONF DATAPLANE_DRIVER_EVPN dataplane_driver ${BAGPIPE_DATAPLANE_DRIVER_EVPN:-dummy} iniset $BAGPIPE_CONF DATAPLANE_DRIVER_IPVPN arp_responder ${BAGPIPE_ARP_RESPONDER:-False} iniset $BAGPIPE_CONF DATAPLANE_DRIVER_IPVPN proxy_arp ${BAGPIPE_PROXY_ARP:-False} BAGPIPE_BGP_RW_COMMAND="sudo $NEUTRON_ROOTWRAP $BAGPIPE_BGP_RW_CONF" BAGPIPE_BGP_RW_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $BAGPIPE_BGP_RW_CONF" iniset $BAGPIPE_CONF common root_helper "$BAGPIPE_BGP_RW_COMMAND" iniset $BAGPIPE_CONF common root_helper_daemon "$BAGPIPE_BGP_RW_DAEMON_COMMAND" setup_logging $BAGPIPE_CONF } # Initialize databases, etc. function init_bagpipe { if [[ $BAGPIPE_DATAPLANE_DRIVER_IPVPN == *"MPLSOVSDataplaneDriver"* || $BAGPIPE_DATAPLANE_DRIVER_IPVPN == "ovs" ]]; then init_bagpipe_ovsmpls else echo "IP VPN driver not OVS, let's not init OVS MPLS bridge (driver is '$BAGPIPE_DATAPLANE_DRIVER_IPVPN')" fi } function init_bagpipe_ovsmpls { BAGPIPE_BR_RESET_SCRIPT=$(mktemp /var/tmp/bagpipe-bgp-br-reset.XXXXX) :> $BAGPIPE_BR_RESET_SCRIPT if [ -n "$BAGPIPE_MPLS_IFACE" ]; then cat >> $BAGPIPE_BR_RESET_SCRIPT <> $BAGPIPE_BR_RESET_SCRIPT < ./stestr.subunit $SCRIPTS_DIR/subunit2html ./stestr.subunit testr_results.html gzip -9 ./stestr.subunit gzip -9 ./testr_results.html sudo mv ./*.gz /opt/stack/logs/ fi } function generate_log_index { local xtrace xtrace=$(set +o | grep xtrace) set +o xtrace virtualenv /tmp/os-log-merger /tmp/os-log-merger/bin/pip install -U os-log-merger==1.1.0 files=$(find /opt/stack/logs/$venv-logs -name '*.txt' -o -name '*.log') # -a3 to truncate common path prefix # || true to avoid the whole run failure because of os-log-merger crashes and such # TODO(ihrachys) remove || true when we 
have more trust in os-log-merger contents=$(/tmp/os-log-merger/bin/os-log-merger -a3 $files || true) # don't store DEBUG level messages because they are not very useful, # and are not indexed by logstash anyway echo "$contents" | grep -v DEBUG | sudo tee /opt/stack/logs/$venv-index.txt > /dev/null $xtrace } if [[ "$venv" == *functional* ]] || [[ "$venv" == *fullstack* ]]; then venv=dsvm-$venv owner=stack # source go environment to obtain the same one as the one of the devstack plugin source $PROJECT_DIR/devstack/gate-hooks/go-env # and prepare for sudo using a PATH derived from that sudo_env="PATH=$PATH" # Set owner permissions according to job's requirements. cd $PROJECT_DIR sudo chown -R $owner:stack $PROJECT_DIR # fix iptables so that BGP traffic from simulated computes to the gobgp instance # in th default netns, isn't dropped sudo iptables -I openstack-INPUT -i cnt-+ -j ACCEPT # ditto for traffic on br- interfaces sudo iptables -I openstack-INPUT -i br-+ -j ACCEPT # Run tests echo "Running neutron $venv test suite" set +e sudo -E -H -u $owner $sudo_env tox -ve $venv testr_exit_code=$? 
set -e # move and zip tox logs into log directory sudo mv $PROJECT_DIR/.tox/$venv/log /opt/stack/logs/tox sudo -H -u $owner chmod o+rw -R /opt/stack/logs/tox/ gzip -9 /opt/stack/logs/tox/*.log # Collect and parse results generate_testr_results generate_log_index exit $testr_exit_code fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/pre_test_hook.sh0000664000175000017500000000000000000000000022467 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/settings0000664000175000017500000000123100000000000021060 0ustar00zuulzuul00000000000000# devstack plugin settings file for networking-bagpipe NETWORKING_BAGPIPE_DIR=$DEST/networking-bagpipe # configuration for bagpipe ML2: if [[ "$ENABLE_BAGPIPE_L2" == "True" ]]; then Q_ML2_PLUGIN_TYPE_DRIVERS=flat,vlan,vxlan Q_ML2_PLUGIN_MECHANISM_DRIVERS=linuxbridge,logger # Have bagpipe-bgp use VXLAN driver for E-VPN (unless overridden) BAGPIPE_DATAPLANE_DRIVER_EVPN=${BAGPIPE_DATAPLANE_DRIVER_EVPN:-linux} fi # common to BGPVPN driver, SFC driver and bagpipe ML2: if is_service_enabled neutron-agent || is_service_enabled q-agt ; then enable_service neutron-bagpipe-bgp fi source $NETWORKING_BAGPIPE_DIR/devstack/settings.bagpipe_bgp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/devstack/settings.bagpipe_bgp0000664000175000017500000000110700000000000023320 0ustar00zuulzuul00000000000000BAGPIPE_CONF_DIR=/etc/bagpipe-bgp BAGPIPE_CONF=$BAGPIPE_CONF_DIR/bgp.conf BAGPIPE_SERVICE_HOST=${BAGPIPE_SERVICE_HOST:-0.0.0.0} BAGPIPE_SERVICE_PORT=${BAGPIPE_SERVICE_PORT:-8082} BAGPIPE_BGP_PEERS=${BAGPIPE_BGP_PEERS:-$SERVICE_HOST} BAGPIPE_MPLS_IFACE=${BAGPIPE_MPLS_IFACE:-} BAGPIPE_MPLS_BR=${BAGPIPE_MPLS_BR:-br-mpls} BAGPIPE_INTERNAL_PORT=bagpipe # backward 
compatibility with old-style service name if is_service_enabled b-bgp ; then enable_service neutron-bagpipe-bgp fi GOBGP_RELEASE=https://github.com/osrg/gobgp/releases/download/v1.24/gobgp_1.24_linux_amd64.tar.gz ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.8983064 networking_bagpipe-22.0.0/doc/0000775000175000017500000000000000000000000016241 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/requirements.txt0000664000175000017500000000050100000000000021521 0ustar00zuulzuul00000000000000# NOTE(armax): requirements to the specs repo is kept out of sync # with g-r on purpose, as g-r is just for projects affecting the # integrated gate. Any sync must happen manually as recommended by # the openstack release team. sphinx>=2.0.0,!=2.1.0 # BSD openstackdocstheme>=2.2.1 # Apache-2.0 reno>=3.1.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9023063 networking_bagpipe-22.0.0/doc/source/0000775000175000017500000000000000000000000017541 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9023063 networking_bagpipe-22.0.0/doc/source/_static/0000775000175000017500000000000000000000000021167 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/_static/.placeholder0000664000175000017500000000000000000000000023440 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/conf.py0000664000175000017500000000705700000000000021051 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'openstackdocstheme', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/networking-bagpipe' openstackdocs_pdf_link = True openstackdocs_auto_name = False openstackdocs_bug_project = 'networking-bagpipe' openstackdocs_bug_tag = '' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'networking-bagpipe' copyright = '2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # Example configuration for intersphinx: refer to the Python standard library. 
#intersphinx_mapping = {'http://docs.python.org/': None} # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_static_path = ['_static'] html_theme = 'openstackdocs' # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-%s.tex' % project, '%s Documentation' % project, 'OpenStack Foundation', 'manual'), ] # -- Options for oslo_config.sphinxconfiggen --------------------------------- _config_generator_config_files = [ 'neutron-agent.conf', 'bagpipe-bgp.conf', 'dataplane-evpn-linux-vxlan.conf', 'dataplane-ipvpn-mpls-linux.conf', 'dataplane-ipvpn-mpls-ovs.conf', ] def _get_config_generator_config_definition(conf): config_file_path = '../../etc/oslo-config-generator/%s' % conf # oslo_config.sphinxconfiggen appends '.conf.sample' to the filename, # strip file extentension (.conf or .ini). 
output_file_path = '_static/config_samples/%s' % conf.rsplit('.', 1)[0] return (config_file_path, output_file_path) config_generator_config_file = [ _get_config_generator_config_definition(conf) for conf in _config_generator_config_files ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9023063 networking_bagpipe-22.0.0/doc/source/configuration/0000775000175000017500000000000000000000000022410 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/bagpipe-bgp.rst0000664000175000017500000000040200000000000025313 0ustar00zuulzuul00000000000000================ bagpipe-bgp.conf ================ .. show-options:: :config-file: etc/oslo-config-generator/bagpipe-bgp.conf More dataplane configuration parameters exist depending on the driver: .. toctree:: :glob: :maxdepth: 1 dataplane* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/dataplane-evpn-linux-vxlan.rst0000664000175000017500000000033400000000000030324 0ustar00zuulzuul00000000000000========================================= [DATAPLANE_DRIVER_EVPN] with driver=linux ========================================= .. show-options:: :config-file: etc/oslo-config-generator/dataplane-evpn-linux-vxlan.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/dataplane-ipvpn-mpls-linux.rst0000664000175000017500000000034100000000000030331 0ustar00zuulzuul00000000000000=========================================== [DATAPLANE_DRIVER_IPVPN] with driver=linux =========================================== .. 
show-options:: :config-file: etc/oslo-config-generator/dataplane-ipvpn-mpls-linux.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/dataplane-ipvpn-mpls-ovs.rst0000664000175000017500000000032700000000000030005 0ustar00zuulzuul00000000000000======================================== [DATAPLANE_DRIVER_IPVPN] with driver=ovs ======================================== .. show-options:: :config-file: etc/oslo-config-generator/dataplane-ipvpn-mpls-ovs.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/index.rst0000664000175000017500000000126300000000000024253 0ustar00zuulzuul00000000000000===================== Configuration Options ===================== This section provides a list of all possible options for each configuration file. Configuration Reference ----------------------- networking-bagpipe uses the following configuration files for its various services. .. toctree:: :glob: :maxdepth: 1 neutron neutron-agent bagpipe-bgp Sample Configuration Files -------------------------- The following are sample configuration files for all networking-bagpipe. These are generated from code and reflect the current state of code in the networking-bagpipe repository. .. toctree:: :glob: :maxdepth: 1 samples/neutron-agent samples/bagpipe-bgp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/neutron-agent.rst0000664000175000017500000000032400000000000025727 0ustar00zuulzuul00000000000000==================== Neutron agent config ==================== The following section can be added to Neutron agent configuration. .. 
show-options:: :config-file: etc/oslo-config-generator/neutron-agent.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/neutron.rst0000664000175000017500000000041300000000000024632 0ustar00zuulzuul00000000000000============== Neutron config ============== .. _neutron-sfc-config: SFC --- The following section can be added to Neutron server configuration to parameters related to the sfc driver. .. show-options:: :config-file: etc/oslo-config-generator/neutron-sfc.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9023063 networking_bagpipe-22.0.0/doc/source/configuration/samples/0000775000175000017500000000000000000000000024054 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/samples/bagpipe-bgp.rst0000664000175000017500000000061700000000000026767 0ustar00zuulzuul00000000000000======================= Sample bagpipe-bgp.conf ======================= This sample configuration can also be viewed in `the raw format <../../_static/config_samples/bagpipe-bgp.conf.sample>`_. .. literalinclude:: ../../_static/config_samples/bagpipe-bgp.conf.sample More dataplane configuration parameters exist depending on the driver: .. 
toctree:: :glob: :maxdepth: 1 dataplane* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/samples/dataplane-evpn-linux-vxlan.rst0000664000175000017500000000057100000000000031773 0ustar00zuulzuul00000000000000================================================ Sample [DATAPLANE_DRIVER_EVPN] with driver=linux ================================================ This sample configuration can also be viewed in `the raw format <../../_static/config_samples/dataplane-evpn-linux-vxlan.conf.sample>`_. .. literalinclude:: ../../_static/config_samples/dataplane-evpn-linux-vxlan.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/samples/dataplane-ipvpn-mpls-linux.rst0000664000175000017500000000057400000000000032005 0ustar00zuulzuul00000000000000================================================= Sample [DATAPLANE_DRIVER_IPVPN] with driver=linux ================================================= This sample configuration can also be viewed in `the raw format <../../_static/config_samples/dataplane-ipvpn-mpls-linux.conf.sample>`_. .. literalinclude:: ../../_static/config_samples/dataplane-ipvpn-mpls-linux.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/samples/dataplane-ipvpn-mpls-ovs.rst0000664000175000017500000000056200000000000031452 0ustar00zuulzuul00000000000000=============================================== Sample [DATAPLANE_DRIVER_IPVPN] with driver=ovs =============================================== This sample configuration can also be viewed in `the raw format <../../_static/config_samples/dataplane-ipvpn-mpls-ovs.conf.sample>`_. .. 
literalinclude:: ../../_static/config_samples/dataplane-ipvpn-mpls-ovs.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/configuration/samples/neutron-agent.rst0000664000175000017500000000044000000000000027372 0ustar00zuulzuul00000000000000=========================== Sample Neutron agent config =========================== This sample configuration can also be viewed in `the raw format <../../_static/config_samples/neutron-agent.conf.sample>`_. .. literalinclude:: ../../_static/config_samples/neutron-agent.conf.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9023063 networking_bagpipe-22.0.0/doc/source/contributor/0000775000175000017500000000000000000000000022113 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/contributor/index.rst0000664000175000017500000000015200000000000023752 0ustar00zuulzuul00000000000000============ Development ============ Contributing ------------ .. include:: ../../../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/index.rst0000664000175000017500000000166200000000000021407 0ustar00zuulzuul00000000000000.. networking-bagpipe documentation master file, created by sphinx-quickstart on Tue Jul 9 22:26:36 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. ================================ networking-bagpipe Documentation ================================ Driver and agent code to use BaGPipe lightweight implementation of BGP-based VPNs as a backend for Neutron. 
* Free software: Apache license * Documentation: https://docs.openstack.org/networking-bagpipe/latest/ * Source: http://opendev.org/openstack/networking-bagpipe * Bugs: https://bugs.launchpad.net/networking-bagpipe * Release notes: https://docs.openstack.org/releasenotes/networking-bagpipe/ ---- .. toctree:: :maxdepth: 2 overview user/index install/index configuration/index contributor/index .. only:: html .. rubric:: Indices and tables * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9023063 networking_bagpipe-22.0.0/doc/source/install/0000775000175000017500000000000000000000000021207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/install/index.rst0000664000175000017500000001555400000000000023062 0ustar00zuulzuul00000000000000============ Installation ============ .. _n8g_bagpipe_installation: Networking-bagpipe installation ------------------------------- The details related to how a package should be installed may depend on your environment. If possible, you should rely on packages provided by your Linux and/or OpenStack distribution. If you use ``pip``, follow these steps to install networking-bagpipe: * identify the version of the networking-bagpipe package that matches your Openstack version: * Liberty: most recent of 3.0.x * Mitaka: most recent of 4.0.x * Newton: most recent of 5.0.x * Ocata: most recent of 6.0.x * Pike: most recent of 7.0.x * Queens: most recent of 8.0.x * (see https://releases.openstack.org/index.html) * indicate pip to (a) install precisely this version and (b) take into account Openstack upper constraints on package versions for dependencies (example for Queens): .. 
code-block:: console $ pip install -c https://releases.openstack.org/constraints/upper/queens BaGPipe for Neutron L2 ---------------------- Installation in a devstack test/development environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * install devstack (whether stable/**x** or master) * enable the devstack plugin by adding this to ``local.conf``: * to use branch ``stable/x`` (e.g. `stable/queens`): .. code-block:: ini enable_plugin networking-bagpipe https://git.openstack.org/openstack/networking-bagpipe.git stable/X * to use the development branch: .. code-block:: ini enable_plugin networking-bagpipe https://git.openstack.org/openstack/networking-bagpipe.git master * enable bagpipe ML2 by adding this to ``local.conf``: .. code-block:: ini ENABLE_BAGPIPE_L2=True * for multinode setups, configure :ref:`bagpipe-bgp` on each compute node, i.e. you need each :ref:`bagpipe-bgp` to peer with a BGP Route Reflector: * in ``local.conf``: .. code-block:: ini # IP of your route reflector or BGP router, or fakeRR: BAGPIPE_BGP_PEERS=1.2.3.4 * for two compute nodes, you can use the FakeRR provided in :ref:`bagpipe-bgp` * for more than two compute nodes, you can use GoBGP_ (`sample configuration`_) or a commercial E-VPN implementation (e.g. vendors participating in `EANTC interop testing on E-VPN `_) Deployment ~~~~~~~~~~ On Neutron servers, the following needs to be done, *based on an ML2/openvswitch configuration* as a starting point: * installing ``networking-bagpipe`` python package (see :ref:`n8g_bagpipe_installation`) * in ML2 configuration (``/etc/neutron/plugins/ml2.ini``): * adding the ``bagpipe`` mechanism driver (additionally to the ``openvswitch`` driver which will still handle ``flat`` and ``vlan`` networks) * *before Queens release* (i.e. if networking-bagpipe < 8) use the ``route_target`` type driver as default * result: .. code-block:: ini [ml2] # tenant_network_types = route_target # before queens only! 
mechanism_drivers = openvswitch,bagpipe You need to deploy a BGP Route Reflector, that will distribute BGP VPN routes among compute and network nodes. This route reflector will need to support E-VPN and, optionally, RT Constraints. One option, among others is to use GoBGP_ (`sample configuration`_). On compute node (and network nodes if any) the following needs to be done, *based on and ML2/openvswitch configuration* as a starting point: * installing ``networking-bagpipe`` python package (see :ref:`n8g_bagpipe_installation`) * configuring Neutron OpenvSwitch agent for bagpipe ``/etc/neutron/plugins/ml2.ini``: * enabling ``bagpipe`` agent extension * *before Queens release* (i.e. if networking-bagpipe < 8), disable VXLAN: * configuring the AS number and range to use to allocate BGP Route Targets for tenant networks * result: .. code-block:: ini [agent] extensions = bagpipe [vxlan] # for a release strictly before OpenStack Queens (networking-bagpipe < 8) # enable_vxlan = False [ml2_bagpipe_extension] as_number = 64512 * configuring :ref:`bagpipe-bgp`: * setting ``local_address`` to the compute node address (or the name of one of its interfaces e.g. 'eth0') * adding the Route Reflector IP to ``peers`` * selecting the EVPN dataplane driver corresponding to your agent in (``/etc/bagpipe-bgp/bgp.conf``): * ``ovs`` for the openvswitch agent: .. code-block:: ini [DATAPLANE_DRIVER_EVPN] dataplane_driver = ovs BaGPipe for BGPVPN ------------------ Information on how to use ``bagpipe`` driver for networking-bgpvpn_ is provided in `BGPVPN bagpipe driver documentation`_. BaGPipe for networking-sfc -------------------------- To enable the use of networking-bagpipe driver for networking-sfc, the following needs to be done: * enable ``bagpipe`` driver for the ``networking-sfc`` service plugin, in ``/etc/neutron/neutron.conf`` and configure its parameters (see :ref:`neutron-sfc-config`): .. code-block:: ini [sfc] drivers = bagpipe [sfc_bagpipe] # examples, of course! 
as_number = 64517 rtnn = 10000,30000 * :ref:`bagpipe-bgp` lightweight BGP VPN implementation, configured to use ``ovs`` as dataplane driver for IPVPNs, and ``linux`` as dataplane driver for EVPN (``/etc/bagpipe-bgp/bgp.conf``): .. code-block:: ini [DATAPLANE_DRIVER_IPVPN] dataplane_driver = ovs [DATAPLANE_DRIVER_EVPN] dataplane_driver = linux In a devstack ~~~~~~~~~~~~~ To experiment with sfc driver in a devstack, the following is can be added in your `local.conf` (replace stable/X with stable/queens for e.g. Openstack Queens release) : .. code-block:: ini enable_plugin networking-sfc https://git.openstack.org/openstack/networking-bagpipe.git # enable_plugin networking-sfc https://git.openstack.org/openstack/networking-bagpipe.git stable/X enable_plugin networking-bagpipe https://git.openstack.org/openstack/networking-bagpipe.git # enable_plugin networking-bagpipe https://git.openstack.org/openstack/networking-bagpipe.git stable/X BAGPIPE_DATAPLANE_DRIVER_EVPN=linux BAGPIPE_DATAPLANE_DRIVER_IPVPN=ovs [[post-config|$NEUTRON_CONF]] [sfc] drivers = bagpipe [sfc_bagpipe] as_number = 64517 rtnn = 10000,30000 [[post-config|/$NEUTRON_CORE_PLUGIN_CONF]] [agent] extensions = bagpipe_sfc .. _networking-bgpvpn: http://git.openstack.org/cgit/openstack/networking-bgpvpn .. _GoBGP: http://osrg.github.io/gobgp .. _sample configuration: http://git.openstack.org/cgit/openstack/networking-bagpipe/tree/samples/gobgp.conf .. _BGPVPN bagpipe driver documentation: https://docs.openstack.org/networking-bgpvpn/latest/user/drivers/bagpipe/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/overview.rst0000664000175000017500000000366400000000000022152 0ustar00zuulzuul00000000000000======== Overview ======== BGP-based VPNs rely on extensions to the BGP routing protocol and dataplane isolation (e.g. 
MPLS-over-x, VXLAN) to create multi-site isolated virtual networks over a shared infrastructure, such as BGP/MPLS IPVPNs (RFC4364_) and E-VPN (RFC7432_). They have been heavily used in IP/MPLS WAN backbones since the early 2000's. These BGP VPNs are relevant in the context of Neutron, for two distinct use cases: 1. creating reachability between Neutron ports (typically VMs) and BGP VPNs outside the cloud datacenter (this use case can be relevantindependently of the backend chosen for Neutron) 2. leveraging these BGP VPNs in Neutron's backend, to benefit from the flexibility, robustness and scalability of the underlying technology (as do other existing backends such as OpenContrail, Nuage Networks, or Calico -- although the latter relies on plain, non-VPN, BGP) BaGPipe proposal is to address these two use cases by implementing this protocol stack -- both the BGP routing protocol VPN extensions and the dataplane encapsulation -- in compute nodes or possibly ToR switches, and articulating it with Neutron thanks to drivers and plugins. The networking-bagpipe package includes: * for use case 1: backend code for Neutron's BGPVPN Interconnection service plugin (networking-bgpvpn_) ; only compute node code (agent and BGP) is in networking-bagpipe, the Neutron server-side part, being currently in networking-bgpvpn_ package) * for use case 2: a Neutron ML2 mechanism driver (base Neutron networks), a networking-sfc driver (service chaining) * compute code common to both: agent extensions for Neutron l2 agent (openvswitch-agent) to consolidate and pass information via its REST API to BaGPipe-BGP (a lightweight BGP VPN implementation) .. _networking-bgpvpn: https://github.com/openstack/networking-bgpvpn .. _RFC4364: http://tools.ietf.org/html/rfc4364 .. 
_RFC7432: http://tools.ietf.org/html/rfc7432 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9063063 networking_bagpipe-22.0.0/doc/source/user/0000775000175000017500000000000000000000000020517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/user/applications.rst0000664000175000017500000000313400000000000023740 0ustar00zuulzuul00000000000000Applications ============ ------------------------------ Neutron BGPVPN Interconnection ------------------------------ .. Note:: This application is distinct from the use of BaGPipe to realize Neutron networks with BGP E-VPNs. ``bagpipe`` driver for networking-bgpvpn_ supports both IPVPNs and E-VPNs, but does not rely on ``bagpipe`` ML2 mechanism driver to do so. In this application, ``networking-bagpipe`` aims at proposing a lightweight implementation of the BGPVPN Interconnection service, designed to work with the ML2 ``openvswitch`` mechanism drivers (or as an alternative with the ``bagpipe`` ML2 mechanism driver). When used along with the ``openvswitch`` ML2 mechanism driver, it involves the use of: * ``bagpipe`` driver for the BGPVPN service plugin (in networking-bgpvpn_ package) * ``bagpipe_bgpvpn`` extension for the Neutron compute node agent (in this package) * :ref:`bagpipe-bgp` lightweight BGP VPN implementation (in this package) Example with OVS agent: .. image:: figures/bgpvpn_blockdiag.png ---------------------------------------- Work in progress and future applications ---------------------------------------- Work in progress: * BaGPipe ML2 with openvswitch agent Considered: * networking-l2gw driver leveraging bagpipe-bgp running on a ToR * L3 plugin for inter-subnet distributed routing .. _networking-bgpvpn: https://github.com/openstack/networking-bgpvpn .. 
_BGPVPN documentation: https://docs.openstack.org/networking-bgpvpn/latest/user/drivers/bagpipe/index.html .. _draft-ietf-bess-service-chaining: https://tools.ietf.org/html/draft-ietf-bess-service-chaining ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/user/bagpipe-bgp.rst0000664000175000017500000003745500000000000023444 0ustar00zuulzuul00000000000000.. _bagpipe-bgp: BaGPipe-BGP =========== BaGPipe-BGP is a component of networking-bagpipe, used on compute nodes along the Neutron agent and bagpipe agent extension of this agent. It is a lightweight implementation of BGP VPNs (IP VPNs and E-VPNs), targeting deployments on compute nodes hosting VMs, in particular for Openstack/KVM platforms. The goal of BaGPipe-BGP is *not* to fully implement BGP specifications, but only the subset of specifications required to implement IP VPN VRFs and E-VPN EVIs (`RFC4364 `__ a.k.a RFC2547bis, `RFC7432 `__/`draft-ietf-bess-evpn-overlay `__, and `RFC4684 `__). BaGPipe-BGP is designed to use encapsulations over IP (such as MPLS-over-GRE or VXLAN), and thus does not require the use of LDP. Bare MPLS over Ethernet is also supported and can be used if compute nodes/routers have direct Ethernet connectivity. Typical Use/deployment ---------------------- BaGPipe-BGP has been designed to provide VPN (IP VPN or E-VPN) connectivity to local VMs running on an Openstack compute node. BaGPipe-BGP is typically driven via its HTTP REST interface, by Openstack Neutron agent extensions found in this package. Moreover, BaGPipe-BGP can also be used standalone (in particular for testing purposes), with for instance VMs tap interfaces or with veth interfaces to network namespaces (see `below <#netns-example>`__). 
BGP and Route Reflection ------------------------ If you only want to test how to interconnect one compute node running bagpipe-bgp and an IP/MPLS router, you don't need to setup a BGP Route Reflector. However, using BaGPipe-BGP between compute nodes currently requires setting up a BGP Route Reflector (see :ref:`bgp_implementation` and `Caveats <#caveats>`__). Typically, passive mode will have to be used for BGP peerings. The term "BGP Route Reflector" refers to a BGP implementation that redistributes routes between iBGP peers `RFC4456 `__. When using bagpipe-bgp on more than one compute node, we thus need each instance of BaGPipe-BGP to be configured to peer with at least one route reflector (see `Configuration <#config>`__). We provide a tool that can be used to emulate a route reflector to interconnect **2** BaGPipe-BGP implementations, typically for test purposes (see `Fake RR <#fakerr>`__). For more than 2 compute nodes running BaGPipe-BGP, you will need a real BGP implementation supporting RFC4364 and BGP route reflection (and ideally also RFC4684), different options can be considered: * BGP implementations in other opensource projects would possibly be suitable, but we did not explore these exhaustively: - `GoBGP `__ , see `sample configuration`_ and `GoBGP as a RR for bagpipe-bgp PE implementations, with E-VPN `__ - we have successfully used OpenBSD BGPd as an IP VPN RR for bagpipe-bgp - FRRouting - Quagga * A commercial router from for instance, Alcatel-Lucent, Cisco or Juniper can be used; some of these vendors also provide their OSes as virtual machines .. _bagpipe-bgp-config: Configuration ------------- The bagpipe-bgp config file default location is: ``/etc/bagpipe-bgp/bgp.conf``. It needs to be customized, at least for the following: * ``local_address``: the local address to use for BGP sessions and traffic encapsulation (can also be specified as an interface, e.g. 
"eth0", in which the IPv4 address of this interface will be used) * ``peers``: the list of BGP peers, it depends on the BGP setup that you have chosen (see above `BGP Route Reflection <#bgprr>`__) * dataplane configuration, if you really want packets to get through (see `Dataplane configuration <#dpconfig>`__) Example with two compute nodes and relying on bagpipe fake route reflector: * On compute node A (local\_address=10.0.0.1): - run bagpipe-fakerr - run bagpipe-bgp with peers=127.0.0.1 (compute node A will thus connect to the locally running fake route-reflector) * On compute node B (local\_address=10.0.0.2): - run bagpipe-bgp with peers=10.0.0.1 Dataplane driver configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Note well that the dataplane drivers proposed in the sample config file are *dummy* drivers that will **not** actually drive any dataplane state. To have traffic really forwarded into IP VPNs or E-VPNs, you need to select real dataplane drivers. For instance, you can use the ``ovs`` dataplane driver for IP VPN, and the ``linux`` driver for E-VPN. **Note well** that there are specific constraints or dependencies applying to dataplane drivers for IP VPNs: * the ``ovs`` driver can be used on most recent Linux kernels, but requires an OpenVSwitch with suitable MPLS code (OVS 2.4 to 2.6 was tested); this driver can do bare-MPLS or MPLS-over-GRE (but see `Caveats <#caveats>`__ for MPLS-over-GRE); for bare MPLS, this driver requires the OVS bridge to be associated with an IP address, and that VRF interfaces be plugged into OVS prior to calling BaGPipe-BGP API to attach them * the ``linux`` driver relies on the native MPLS stack of the Linux kernel, it currently requires a kernel 4.4+ and uses the pyroute2 module that allows defining all states via Netlink rather than by executing 'ip' commands For E-VPN, the ``linux`` driver is supported without any particular additional configuration being required, and simply requires a Linux kernel >=3.10 (`linux\_vxlan.py `__). 
Usage ----- BaGPipe-BGP local service ~~~~~~~~~~~~~~~~~~~~~~~~~ If systemd init scripts are installed (see ``samples/systemd``), ``bagpipe-bgp`` is typically started with: ``systemctl start bagpipe-bgp`` It can also be started directly with the ``bagpipe-bgp`` command (``--help`` to see what parameters can be used). By default, it outputs logs on stdin (captured by systemd if run under systemd). BaGPipe Fake BGP Route Reflector ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you choose to use our fake BGP Route Reflector (see `BGP Route Reflection <#bgprr>`__), you can start it whether with the ``bagpipe-fakerr`` command, or if you have startup scripts installed, with ``service bagpipe-fakerr start``. Note that this tool requires the additional installation of the ``twisted`` python package. There isn't anything to configure, logs will be in syslog. This tool is not a BGP implementation and simply plugs together two TCP connections face to face. REST API tool for interface attachments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``bagpipe-rest-attach`` tool allows to exercise the REST API through the command line to attach and detach interfaces from IP VPN VRFs and E-VPN EVIs. See ``bagpipe-rest-attach --help``. IP VPN example with a VM tap interface ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This example assumes that there is a pre-existing tap interface 'tap42'. * on compute node A, plug tap interface tap42, MAC de:ad:00:00:be:ef, IP 11.11.11.1 into an IP VPN VRF with route-target 64512:77: .. code-block:: console bagpipe-rest-attach --attach --port tap42 --mac de:ad:00:00:be:ef --ip 11.11.11.1 --gateway-ip 11.11.11.254 --network-type ipvpn --rt 64512:77 * on compute node B, plug tap interface tap56, MAC ba:d0:00:00:ca:fe, IP 11.11.11.2 into an IP VPN VRF with route-target 64512:77: .. 
code-block:: console bagpipe-rest-attach --attach --port tap56 --mac ba:d0:00:00:ca:fe --ip 11.11.11.2 --gateway-ip 11.11.11.254 --network-type ipvpn --rt 64512:77 Note that this example is a schoolbook example only, but does not actually work unless you try to use one of the two MPLS Linux dataplane drivers. Note also that, assuming that VMs are behind these tap interfaces, these VMs will need to have proper IP configuration. When BaGPipe-BGP is use standalone, no DHCP service is provided, and the IP configuration will have to be static. Another IP VPN example... ^^^^^^^^^^^^^^^^^^^^^^^^^ In this example, the bagpipe-rest-attach tool will build for you a network namespace and a properly configured pair of veth interfaces, and will plug one of the veth to the VRF: * on compute node A, plug a netns interface with IP 12.11.11.1 into a new IP VPN VRF named "test", with route-target 64512:78 .. code-block:: console bagpipe-rest-attach --attach --port netns --ip 12.11.11.1 --network-type ipvpn --vpn-instance-id test --rt 64512:78 * on compute node B, plug a netns interface with IP 12.11.11.2 into a new IP VPN VRF named "test", with route-target 64512:78 .. code-block:: console bagpipe-rest-attach --attach --port netns --ip 12.11.11.2 --network-type ipvpn --vpn-instance-id test --rt 64512:78 For this last example, assuming that you have configured bagpipe-bgp to use the ``ovs`` dataplane driver for IP VPN, you will actually be able to have traffic exchanged between the network namespaces: .. code-block:: console ip netns exec test ping 12.11.11.2 PING 12.11.11.2 (12.11.11.2) 56(84) bytes of data. 
64 bytes from 12.11.11.2: icmp_req=6 ttl=64 time=1.08 ms 64 bytes from 12.11.11.2: icmp_req=7 ttl=64 time=0.652 ms An E-VPN example ^^^^^^^^^^^^^^^^ In this example, similarly as the previous one, the bagpipe-rest-attach tool will build for you a network namespace and a properly configured pair of veth interfaces, and will plug one of the veth to the E-VPN instance: * on compute node A, plug a netns interface with IP 12.11.11.1 into a new E-VPN named "test2", with route-target 64512:79 .. code-block:: console bagpipe-rest-attach --attach --port netns --ip 12.11.11.1 --network-type evpn --vpn-instance-id test2 --rt 64512:79 * on compute node B, plug a netns interface with IP 12.11.11.2 into a new E-VPN named "test2", with route-target 64512:79 .. code-block:: console bagpipe-rest-attach --attach --port netns --ip 12.11.11.2 --network-type evpn --vpn-instance-id test2 --rt 64512:79 For this last example, assuming that you have configured bagpipe-bgp to use the ``linux`` dataplane driver for E-VPN, you will actually be able to have traffic exchanged between the network namespaces: .. code-block:: console ip netns exec test2 ping 12.11.11.2 PING 12.11.11.2 (12.11.11.2) 56(84) bytes of data. 64 bytes from 12.11.11.2: icmp_req=1 ttl=64 time=1.71 ms 64 bytes from 12.11.11.2: icmp_req=2 ttl=64 time=1.06 ms Looking glass ~~~~~~~~~~~~~ The REST API (default port 8082) provide troubleshooting information, in read-only, through the /looking-glass URL. It can be accessed with a browser: e.g. http://10.0.0.1:8082/looking-glass or http://127.0.0.1:8082/looking-glass (a browser extension to nicely display JSON data is recommended). It can also be accessed with the ``bagpipe-looking-glass`` utility: .. code-block:: console # bagpipe-looking-glass bgp: (...) vpns: (...) config: (...) logs: (...) summary: warnings_and_errors: 2 start_time: 2014-06-11 14:52:32 local_routes_count: 1 BGP_established_peers: 0 vpn_instances_count: 1 received_routes_count: 0 .. 
code-block:: console # bagpipe-looking-glass bgp peers * 192.168.122.1 (...) state: Idle .. code-block:: console # bagpipe-looking-glass bgp routes match:IPv4/mpls-vpn,*: * RD:192.168.122.101:1 12.11.11.1/32 MPLS:[129-B]: attributes: next_hop: 192.168.122.101 extended_community: target:64512:78 afi-safi: IPv4/mpls-vpn source: VRF 1 (...) route_targets: * target:64512:78 match:IPv4/rtc,*: * RTC<64512>:target:64512:78: attributes: next_hop: 192.168.122.101 afi-safi: IPv4/rtc source: BGPManager (...) match:L2VPN/evpn,*: - Design overview --------------- The main components of BaGPipe-BGP are: * the engine dispatching events related to BGP routes between workers * a worker for each BGP peers * a VPN manager managing the life-cycle of VRFs, EVIs * a worker for each IP VPN VRF, or E-VPN EVI * a REST API: - to attach/detach interfaces to VRFs and control the parameters for said VRFs - to access internal information useful for troubleshooting (/looking-glass/ URL sub-tree) Publish/Subscribe design ~~~~~~~~~~~~~~~~~~~~~~~~ The engine dispatching events related to BGP routes is designed with a publish/subscribe pattern based on the principles in `RFC4684 `__. Workers (a worker can be a BGP peer or a local worker responsible for an IP VPN VRF) publish BGP VPN routes with specified Route Targets, and subscribe to the Route Targets that they need to receive. The engine takes care of propagating advertisement and withdrawal events between the workers, based on subscriptions and BGP semantics (e.g. no redistribution between BGP peers sessions). Best path selection ~~~~~~~~~~~~~~~~~~~ The core engine does not do any BGP best path selection. For routes received from external BGP peers, best path selection happens in the VRF workers. For routes that local workers advertise, no best path selection is done because two distinct workers will never advertise a route of same BGP NLRI. 
Multi-threading ~~~~~~~~~~~~~~~ For implementation convenience, the design choice was made to use Python native threads and python Queues to manage the API, local workers, and BGP peers workloads: * the engine (RouteTableManager) is running as a single thread * each local VPN worker has its own thread to process route events * each BGP peer worker has two threads to process outgoing route events, and receive socket data, plus a few timers. * VPN port attachment actions are done in the main thread handling initial setup and API calls, these calls are protected by Python locks Non-persistency of VPN and port attachments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The BaGPipe-BGP service, as currently designed, does not persist information on VPNs (VRFs or EVIs) and the ports attached to them. On a restart, the component responsible triggering the attachment of interfaces to VPNs, can detect the restart of the BGP and re-trigger these attachments. .. _bgp_implementation: BGP Implementation ~~~~~~~~~~~~~~~~~~ The BGP protocol implementation reuses BGP code from `ExaBGP `__. BaGPipe-BGP only reuses the low-level classes for message encodings and connection setup. Non-goals for this BGP implementation: * full-fledged BGP implementation * redistribution of routes between BGP peers (hence, no route reflection, no eBGP) * accepting incoming BGP connections * scaling to a number of routes beyond the number of routes required to route traffic in/out of VMs hosted on a compute node running BaGPipe-BGP Dataplanes ~~~~~~~~~~ BaGPipe-BGP was designed to allow for a modular dataplane implementation. For each type of VPN (IP VPN, E-VPN) a dataplane driver is chosen through configuration. A dataplane driver is responsible for setting up forwarding state for incoming and outgoing traffic based on port attachment information and BGP routes. 
(see `Dataplane driver configuration <#dpconfig>`__) Caveats ------- * BGP implementation not written for compliancy - the BaGPipe-BGP service does not listen for incoming BGP connections (using a BGP route reflector is required to interconnect bagpipe-bgp instance together, typically using passive mode for BGP peerings) - the state machine, in particular retry timers is possibly not fully compliant - however, interop testing has been done with a fair amount of implementations * standard MPLS-over-GRE, interoperating with routers, requires OVS >= 2.8 (previous OpenVSwitch releases do MPLS-o-Ethernet-o-GRE and not MPLS-o-GRE) .. _sample configuration: http://git.openstack.org/cgit/openstack/networking-bagpipe/tree/samples/gobgp.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/user/bgpvpn.blockdiag0000664000175000017500000000312700000000000023657 0ustar00zuulzuul00000000000000blockdiag components-bagpipe { span_width = 64; node_height = 100; shadow_style=none; default_shape = roundedbox; group bgpvpn { label="BGPVPN service plugin"; color=red; api[label="BGPVPN API",color="lightyellow"]; db[shape=flowchart.database,label="Neutron DB",color="lightyellow"]; driver[label="bagpipe driver",color="lightyellow"]; api -> driver ; api -> db[folded]; driver -> db[folded]; } group compute { label="compute node"; color=orange; agent[label="OpenVSwitch Agent\n+ Bagpipe BGPVPN\nextension",color=grey,textcolor=darkorange]; vswitch[label="OVS br-int/br-tun",color=lightgrey]; mplsvswitch[label="OVS br-mpls",color="darkorange"]; bgpspeaker[label="bagpipe-bgp",color="darkorange"]; agent -> bgpspeaker[label="REST"]; agent -> vswitch[folded]; vswitch <-> mplsvswitch[label="packets"]; bgpspeaker -> mplsvswitch[folded]; } group routers { color=lightgrey; shape=line; style=dashed; bgppeers[label="BGP Peers\nor Route Reflectors",stacked,color=green]; mplsrouters[label="MPLS routers"]; 
bgppeers -- mplsrouters[style=dotted,folded]; } /*admin_or_tenant [shape=actor,label="admin, tenant"]; admin_or_tenant -> api[color=blue];*/ driver <-> agent [label="RPCs"]; bgpspeaker <-> bgppeers[color=green,label="BGP",textcolor=green]; mplsvswitch <-> mplsrouters[label="MPLS\nor ..",folded]; } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/user/design.rst0000664000175000017500000000250400000000000022523 0ustar00zuulzuul00000000000000Design overview =============== The common design choices underlying bagpipe architecture are: a. on Neutron server, allocate and associate BGP VPN constructs necessary to realize Neutron API abstractions: network, router, service chain, BGP VPN interconnection, etc. b. pass the information about these BGP VPN constructs to the compute node agent via Openstack Neutron message bus (typically, but not necessarily RabbitMQ) c. on compute node, a bagpipe extension of the Neutron agent (OVS) passes the information to the local implementation of BGP VPN extensions (:ref:`bagpipe-bgp`) that will advertise and receive BGP VPN routes and populate the dataplane accordingly d. depending on the use cases, BGP VPN routes are exchanged between compute nodes, between compute nodes and DC gateway IP/MPLS routers, or both ; the strategy to scale this control plane will depend on the deployment context but will typically involve BGP Route Reflectors and the use of the RT Constraints pub/sub mechanism (RFC4684_) e. traffic is exchanged using an overlay encapsulation, with VXLAN as the typical choice for vswitch-to-vswitch, and MPLS-over-GRE or MPLS-over-UDP (future) as the target for vswitch-to-DC-gateway traffic .. image:: figures/overview_blockdiag.png .. 
_RFC4684: http://tools.ietf.org/html/rfc4684 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9063063 networking_bagpipe-22.0.0/doc/source/user/figures/0000775000175000017500000000000000000000000022163 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/user/figures/bgpvpn_blockdiag.png0000664000175000017500000003152400000000000026171 0ustar00zuulzuul00000000000000PNG  IHDR@"}H3IDATxM$YU]n=֌ۋma`0 a;azN{]'N 'k/`xs0|XYZj5nZzʗxdFEEDFFd</O(?MN?j7c}{djef7+hI{Ykdm1SI?gnXcl&^CE_Bc_M\ͨ ki]nU neBiɀ0Bcς0 -aGIKadr=0[1_?'?e,nʮܿORԻoDA0_G T֯DRؒ ;lؖQU0ݸ}TӾVԑQ'BD&į#)jOnqs,ć#\xhhhOQGadJ P11+^ftE'ۏ?Nr>][+hz/Mֳ̱_Q=%@.tFttt#I#$F۰#;(ueG{ Gٰ%;iFH_D,і'/:?>lΫO^e/r\a~D.+͂8:#PAgٙd0Wd@l7]axա#]^-]]u5i2mfeg}sϾi?M7^Ǽn~V9n:Y7߼گ|fZN1畟7? ]fZ9cK?EV$2F \v$]{0>lEա&#/oiryKUWp/0iEGa?чo+7]4}/mgLo畓U}On?jM0iO :8W\)ЅL;5V&2zд@ڨ4j08wzΎ4̒aQ ١ lx?p@7O+Z߾ɪߍ2.'qf' NusꟹN.4h9ؾ~ittt$A8q՚zQWFhEHA" jz?<7q[<eL߼^_8^Dczx}tSٸW?څ@JXM&F}]^/ϿǏDO- r0 R"XײmW~~2\}\BZ1Ee&Uyf_rM/pQ tFv:::O5K:+WjŽFcѾƗ4z>FOoktvK`Ovԑ Mt,k/D^NɭCA1~LrQ[HAwѹ:*tdZSdwmԦ~ozH9W[~5&QdC_?yEz<9EWQG㱻)x2@5~{[[\7Wvr~r^VyEu)Oqe/XemH^fYMOO[z ntF:<Νg{G?T_gzG::z ZC3/=W/kǯ_^/]k|э0v7x2_&_&Ҍ/=˷~,3ʫ{EegiLʴCڇu<~Su_z2q:p7lOk'=]^ҳgw_oO?:;;՞֍  d>uI\&6(sFutt_|}zWQ?+_LǏuppVk p :5a\]]}u]u7u74Wum4ړƭ#juX#Wqw{% B=Qkѥ_|W~WQ'3uup%uCCi% [4tuvvKO/o[jk< I2棏^֣u%I[z?*C Tk\tΓR°5}t.p [ { ɱh«qh@Ң_'ҿuR`릭xʴB&q4dvD?J⢫=~KϟbGT]s`#ݻϮxe"YJ^⢫`Oa4B#Skƣ;==/h5W8X-ԄYa BPouˣ{Q?=2KFif@::U[h%'N4moo('QG=WmM.5ړFAr@ ?F uܒ jn@P C$xܚFi|0{@(iv;x܎@ahvtZwOI6TtI [ (e]? 
B+l ( j/ݨXk@49qW.`:?3f;^g.VO7{O@1If<Ȍ^0~b>Fx~s?~g&H3 @%ߒ$G/>%H' Ю ى~?$v:cǗihT]NFh&f 1UW D9, HXk4׎fm좲[M.K̻wl2nz}Z%dzkVpc슢q kr~2O`3\ި 4E^«(V4tD/?PFj@I']wg]캼}'$kw}s:IJGUE' 1i|%`>'k +ʞn^_ +=Y#lE֐E)P7\*FףK;DKƞG=00CP7%8 "(Ŏ t2#Y#UWdT]|s4x  Q_Yp^Ѷeݦ7` /Uꗵy#}~(?@qOn5pse) ؓeKV=!~$4tӽ^˝o>ތΣ?<(%լ6o:]V{A^}뚷#F [PQdêV u)[u6l՞d{,Zn޼m?`Մ#(4o~z~2Aqzd.ȺG@޶WE`?c ( 8]w56e9E5o2ޗyRyY[V> z|\'n:kyQyXȚ|^Ϙa+ Of-?yD-1ꠟzMZ֟WV?5l}u:k=_t~ѺEV0h0Suo|PuVa^~V]*~5.3m֟Z4=̬ėG`9j@ù$>ͽhޏ+>uܛf~b^O?$˷~Qu]qIzWǕrd5}Fge]yU$6mӼwGW]M0IsITr?ݳ~e+> شMI0ץf^/0нw;@N>8=_HԌ 8w-Xt~VE'1Y(*? f" K4ϡ,EDT~dWY˳WGR<~|Ǐ70%p&W7LmNQMQLh˩[" >=n^O/ۦ~տL=ǒ'?@ )  3_q/ʛN&1e(w'ϫϼ u0{_{W@Ew@vauynd¢O*}z}N.dyi~V"`u.[ys?1: P& <;OrĀ{-S:kҐ Y wֹ_w1i㏿^z[(&2ƪ^:FF6l)u58sڟWM$Eجݽ¢KPfyϻd`u ߄ >O ) W!k@{8,Jw#o=Sv ߾c' e8-NU_{=Yv~֥>g:>47i@nuq ߤu`MuOx>IMw:Ǘx3ck'S58oYvyȼQY̫+}!EGwY eF --ϫO/}w˷|MȰtQxO%UϢz-Ɂ ҟ rңۥOoW|*72k`~_7j*FļaīȺl`zU/rΥY2Zˮ?ѷ\u]Ve_*1{Y/{e/Rf (0. ˖]ud h %mK־ӟ3;`H/6nڗ.~UaчQ뱬yw_=/u 1enXG=oW ׬emQ]l}Oy󖥗~kHܶ{]o~4*=ˮu5OHف筟f^9O~)g筟<- uE^-g^.[f^lB_ٽիWɟe)*޲uV5\Pus^Ee~tdT>Y+&orM.]q#ayܵ}:n-/9o*Ǽ2vmΛWPz3:>j(D@3Fswz^umo~/S>^#5ݳYM'_QTϢ@=ftl9Y wK^w7=or39*!4 j]?_zIYȒ7 =?]`FyW撂~zjq<@$7^N,/Yk{5 <@$ <@$ <@i{X7JoFQSϟF Hx Hx>`И{UW0lՕU{'nUףJA;UWEɼ]u-PvU]*Yk*я<֮^N,'k Q1枬1UW#FF'ॾHL&pd$'o" (B#}Ǹ !YIUL?jQ[_>$M>SlOWm]~.Zc{" 6zZH/6|?rhfq7M ֮_u@_,s(ϗsY|ls(ϗsi$<ԗ {#}yKI׌M~DUWz$ $=<#'@_8== t[KPi"7q )Ƙ)ǂ'ƇH'H 7>|<@<=FOpv@L֠91&d@6k|o|xxV]T ~$o$ >hW]l)1n"2ϳS`8qzz:ٙ2 O# L4?DH'zf> cL8-4 O{?{;O8x{c6 >v@yf%`We=/|9V|9'$7rIgMg.#9G[gXslϏ>KX e@`ZO6hЛM;VG`Mdp^wnhҗ Xk3$hՠvR@dO/n4^ϰwH=HjvӲA Bc3ZG#uB`F(9g^ΊJ= FuE;y޻d?,K dEmeXBY\ dYH PB^?qgYe@nPB^ _fU",@ zOpUTK ̻62OP=$=4ɉtrb)^ 5'D ^*FyO`>$,l.V)gٲk@/ <h>^zg :`"+;;K <@@#nىp4u$ <@$Rk]?e^~リ`3x 5prr2~Z,S^uu,[:MM ݱc۳i^ub`G[uZk\c$Z[qM*, /˒6TouФ?t{7/KֺTWaץ^` %D1Z4ke{r`>99In@svY RTywaޱɁZ].@vy;?pO_4.+m^]emXz|\u=DK${YHK$d\weEuHϛc`EEt!>+i.:Y-JdY&z\t@_<`HO/WtB"Y~*/K(bbpkV]i^pdba<]NzyocsH41$I~Ri=7yUdۼf~zWQEp=嫘w\uMHn r\ʥmMbl( `}H[t2D2VR#ZT  /-(mQM=C&Av.Y_'}]PՍ֑ Y ˮ&4=hIu?LK׿{꺬K  EOWea$15?]c̽i^7)z煉e孳HyY|R2O9o>쪬k˲/z2I/Mޱu3oEزO/_< (w: 
Yq'ٿ/EkmPu%伲yee]5?kdKK pu^ \|u첢 >lYY Ez.Zy_&SПV-,/RnٲԳ'zW벪:(]vٺZt敍"y:{# | &9x.c_WEPWчoV]U);b`whB/?kC|'oYzTAMWIաnb@M$mʖW2`x HxhMz=:;ΟF Hx Hx UW֡p4 @$ <@$ <@i{XO# \G$ nܫ1dl OdKXIuM?j[?ܛ9>h'''UW899}Pu-eHk*zTZc*`)0+ ܶ_  dƼ$/n5쌛}`dNN *?cHd߫&ؖoi: o?_`EyMDKHWj~%HP/M$K݋_?Io0fce ^@XXS q:\@rz &rzq紇qIx`ܽg>01k̦7GjtcBO\lg@l7wIfnNn 8Mw7]k;z z.84cGS,/s욦'fqhn e EKى]mzN~эSV`>oDl/^b 7{n^{P @]4ƽΞf+2n+Pa*y=\%1q[&^@49`CY@ V+T=Q=V %.qm8nCM%vQK2d}mO ZN n[q[G . ZXjOda^`F)&inZZu:#u:#?$ݎk˝?Tp;T7-@59*\?ѹy+Ivtu50@eQC]Νg:::W;0L&Vwѹ:wi?n[+MqK6 GqԓPLJZC.Su:S?. hQul,I Ngs?p/I:8Ν' 4uhٓ:ݡuSݽ:>~su:C8~.tLg:O`ѝ' {lhnGFHRjsTsP&n JgVF tppKIޕ^xhtׇgA̻SR\LO _ >tZrϬ}p.ޕ:/n@u_i^oY 'N'P{&IjGvu, #xe0`2MGӃsugt KcݙP&)RHۉ?ӱ+i upZ3L0i ;R}Iu}xKIK֚5$)ѥS#u:Cu:WtjGֵzl0i"uJ탳%;iIjOM &c :WQIqZ`lnz~aRv4 i6 D q % ?lɆIkG73Q)rOOi5IL(8ӚI(?&%/D @̍?' ݩdv?չaNWYv@.cg]j .O$M `Y@&}KkR  47?ZPu$ <@$ <@$ <@$ <@$ <@m?#ů|IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/user/figures/ml2_blockdiag.png0000664000175000017500000004410100000000000025362 0ustar00zuulzuul00000000000000PNG  IHDRXHIDATxM,}S=/=Hѕd#)p UA sW ۬UYC " bȵdGu]{gɢfjty>^ίJ9Pj $h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0_w Fn rX` /I |A4VWX گ=@[J9&=yMBMu#M)GjaPR4"9WttR_ <[uL.|H)v\i3';O4>?:m2=G#N_iVO `%`gW]WQ׽Aǽ~烰 ~7ODDկfy+}54 F|.QU0jU6`"^zy܋\#ςumeӥ\)O:RoO_q*" ryPI5h^ 1p8xܛK_7~;}?M~v*󬺾-k$j`I G1G?a튰M5u5 F{Qw#w"׽Y5rkm8+kzviv|eǷ/[fY{.EZe}pGY5 0$QTHajT9PBJ -q?F8>GGxnNǓ,"˜8[f8h^.nyl_~`cʑRDUI{{/bwUNőiUW"GQ߉A^ߍQE}3&"/?HM}?peOμ_֞˶==}ٺ`Fo?UHqT;'QEoe_FoETGLJ2`ʯxA<x~xqGGg!X];ʯri0wf|;j_8<}kw7\>`L*8 Eܻ4FO#"ӯf'_p/G1| ?gc G}-mg?{ëy[e]8=OM;{yz4m8(+9Z`wWB|?ONjw訹2E]W3?:MyO/}?_f6̤򫎪1 co$^Od7""6O6Ni5Wy܋zwcsq/',Oݨv"4+,W_O߷筿S׷vca/\R)V4Fw14?Q#}'FV( ۯ[ 'W/8>/^<_/O__wūW{1b4j:ŏ({I?EDo//\yζgmk*R߯c0Ɲ;GxO_8N"6 3=㝨Ń8{q/J8~v/F&NÓ!˒~7ZO^[߲qZ]G{y/;IR_G5FQ 4_WW>~7!~71$_FwU5~Փln ǿrWyo/ǟޏыDzoTmb߬-pup8z'?x{{_eW>?oc }j0Jbehn^^xq7>^|կwQ?S#UU^(vwGqt2""N ޴J|;Y؜Nbn ?ǟޏ_=Q#K:ҿ{ÿo/>^g{Zū:R?jww:o_`%`Il'^ڋ/xj47irYvqDDǫW{qt'' 
wzNld'꣝ڋы=wbj7qwÙw1|5SUGի7='L¯SbE q|xj7wb8`*';wvNii_,Q1>މW1:މ<=QtAꨧjdr|&"/(TX'XDD]GuѨwzpXp? T@ΞYG(*FNUM`t Ųj.?; ۦ ymo]7C- PE xxtW*/X`m/a 0ɢ|^ײہa-Jӯ倛!&7o le{Ua(͢+_p `פQ;Xl0/!b7o_ lيya0+weYu94EUqS@`M@reKp<FU#ԓq)r݋zPx-͐svl1M@`M@t.#""ッÈo1hS{]\e`@Dh24 f_6moe{`%Uh5ik柝hUK9>88< aJEټ˖_6X*.ۮEmmeg[ǢpQ7 l`5?/4?矾b-v)ۼ`gYҞዳyCQw6̶qѶ1-~y7߼˂AUc0 Ժ(WU,]g2mp|rn-m7ɭt[-gMLd*Rz 6Ѣ4ٔ޿2l-X6r~g5郃ӯUwu56n{Ek^V~hE8.+e;57?lvмae!زVݎߏr-'k޴_Us /-[EJ{.}h_ 6/}dV;}Q6 YB/XoFB`Jp[~`jV_ԧ 2 2 :`Y1.)lݳڝwϻ52:f|͛vJnYeUo}/Gn,>S6XU/Tz+S䵶VS~]V;D?ut<ӄ`z1+}g򚿷}iy鿖soBA.pung0m}u :B@W処}sצa7&!X)7#<_:;\~vxvy_4m7m޶[4H0A]V?\+Uv-C!'܋O>vv UZʭy˦5iϫlmZ6BkQU{ET3Eˬevn>э[a_l?\0e_=[Uhڢusj?A]ܜ ;Utl|cWvN7͆W. ?ox^(V) wF_4?/g϶y_uK_g^MN~-E۬2h,$Ji,LeiBy2.Ԁ"Νe!Xߎ{Y GQUH)5ꤔꜯoSp/Fɯߋ?jn-y t<lB(UM}U-{â2>880|O])] y . j^5辶NE/Z"W,-kϻ(@k柝hUȼgg^.βu3-Ve}:^vEaآ烃0=|hQ8tQh3;/ެD[6PjE/ fhvlyfy0(Mͳ(кV cV]OnU eWp AUrMlphnw^U[f-2*Ur^v:(eZբiQ(6VyY4roVp rTLuEΛ8X¬2/ZϢQuo3n[*/=}4Tpkf+fo\v [sS[˨h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&hr_~u7brLte)߯ݎuʏẛF~VW~V;ߺ۱N9* pa(,"rJ:FU0c?cޏҺEU`3rPI)AM7!כ`1 ݊"~M݀MҾl 7gfl,f_.?H#BVg?zkN|9*4pmμΛhZ`3cqs1 ( mp-Ge+|VVו}<]XLYY]W ?`t O9N_* VÙ9sEI Cp X?BN:,+B`@>:W 0`> w8yJ!h‘ bt)jP;1EP4EP4EP4EP4EPzu7@7Gnp`D^wnY~-M@`M@`M@`M@`M@n+0.FCnIztv (~(~@`MATZT A@`5[ (l/EuXy(]S EI݂ӕ* -jДO" nO=yd ڥ#BkJMU] #WɚmzP@ .iQեp6\ `5¯x N-1/tmE?yP Kc`tlF;vmy?v(& nM5ЫLk hT&U`t,"R΂i~)/݀uKQ%ꦜ;,b);t,@}HG4?k j 5; %1i6BEI Xn %- WV?L0:O*aU;ߦKl[tPgR)6l.Zw& ( ( ( ( ( ( ( ( ( ( ( ( ( ( svXV~U2(vN&!XO*9(^ƃh/""x'FiXC%eIU5aw$^>4މ"k;>&< 7UuDݓ=g18xIe`%%`Mgݣ88x?ׯ(^ڋpQ9`9JY)1 ΝxY'q88xG1 ׫[U`x=)GzR5F{14v_7Q^E=D~?@ɚ kzO:0w&$xQMnխ*M<TRh8$_ƽ{Od7""h'Auz (ٓ s ;qx=1DU":6jip14Oo(>h'pNoVnZ<#qdR){icpi_FD[߃K9.lr{C(_hiDD;|GG{qr2q俞"PeRﷻKx=wtOOH ᅩ<=vcg1>ڋd''r^MvxE =;FeTi?`MjN{1kA{{/""˸, `$`g0i;{QT06?Kujՠ0z{/ Z?*NiS5> ݣjpN0`vrB;WQUu DZbzc/^Z}~ tydѓ1`pQiG>6Ѥ:!E\#Qū1E"f6c\SsI߉ipx~lFU1~>8vwj_/蚳*櫎^MIUUuDlK@izuyw|M:OLtU:71I_O5`%`9RJ$t_GW <V6:Q\yIStܫN< j*|~5?}x?s\ ]ۯ/`6k@ҍ=kz^'^J*l~z^if֦ WbX9y}gvi.ާ o;`Y ֧1{2 0OIW%s,v+}9;^87 ,Gk`J `#``ܐUk,2!>n `\b;`P4EP.>.-%]höb+l ϧS?X;` 3 {;=TQ 
^s)H4A`eHk*rNU\M,Ƥӈ~/շrk{3o}ͮk_vefxJ&!VJu~`;G1x,v$qgϢ}w)A[ n5`I6~}7ލ_}9^g<^7_O~w"F7*6{6@}3""u.NUGo$csOc;_yǝOrUM ޞ[ JNbn ?ǟޏ_=Q#zrK:6Ew?yt_l\ #vG1>z>쬯ĺ79G( yZ 6}c}N^~ߍ;1~\W7 s#f)OѻsG;k496T0b̄`y8z8NڍNau vXUQ&W;'p0y@Ȩ~A`M`h~([Ѫ3m2ຨ&E:>t Ųj.?; A- PE xxx*^7f0`lEJeM\E^]ղ0\'5i}ux?KHXuo-W0+weYu94:h0&hXN2?NpVͧkZg)r݋zw 7n\[ ( ( aDD|ַ٘MˮwQ{NM0Nm avaӯ{ʲ׽NM0i3/T- _y6Wvv-nl;-;k:}I`\h^,liOټ󂡋ֿ(vf۸h[ӘזU?߼̛oe1OU,]g2mp|rn`eWq®yfo܄ϴ mM U:}ppx5;j?Xf1mϳsk*}ٺ-E?༔sNn@u#2EE=܋ѫ{qϿx+^×/ߍKj6:\5$<<<ڳmңuuI߉Ibpuᓸ4Go8YTH8"ko\@[TFNWt ( (Fz_ qէ->MiOLߙS(ob;:{"?uY0Hݴ·`HGߌ7 JMV*vּeӚU 6-Wª=veV2ڿJ;7A_܋޺ܞsNn@upMBn$ bD9xJg1:ϟC$[ky}xEqʱox3ïEï}{-Zlr.,|yY${uy*:¨BE.+bq,Z..wQ4oԪ^(?xG7C@1V (ۦ_o㢧:VY*h67C@1^*[o;۸v4}P4EP4}Vt[Ͷ6h&}^( 004yn3a }^i*yK*. ښg/Zf.2oq9-3;HBy΢g(|[4EBξvׄwMPriݍ୤"׽{1zu/N~^WO/>Z;_8O㝯֏c;8wE58T#RoOfiUn̫غQ@`M@`M@`M@`M@`M@`M@`M@`M@`M@`M@`M@`M@`u758<EPEU^`M@'V{P4T}n鯻lh-=]>t\=yd ڥ#B0K_%k~C!0~uǓ'O:s'#( :F@ ( cңQTU0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&h0_w<|0""2^7N(CEP4EP4Ep>|&\Ç}-~SnN ,>'O1ɓKoݴf¨lCos@ 6*"B&7o{EԢ.7/`yaܼu/ .ޖ [ X٠g^*Тli碶wQ[g k5j^Xt8@(%WG<pmVnE\T~EAE>/2EOh]ϯyK'\yO4\o]uo0/~Q\,EP4EP4EP4EP4EP4EP4EP4EP4EP4EP4EP4EP4EP4EP4EP4EP4EtL~ܯ>|f>xݭ'h07)Tu/LQq:B楞kk pn7)M."b8"MC'Ob M)Cl0V5C:?ΧHR V4R?;6NQ7RҺ}}WՑUUG#>(W> $ELRTՑdꨫ:S!ETq0)M)GDG^ Jb|ED"R'xqTQT:AUM+d`@N&Q8R/SL*ROT~Ɠ00z'Q펢w8""~U]E;onj~O'Q Hqj|z,"(LUDw;G?x㣗QD=GW}J֮Kʯjww^FuEw$ @0L¯Q ^=ڋqw"Fuu>rhI#Du~i <MBj<9 $058K9"Փʯ0ݣ bj/ 򨊜[X3P&J1 uTaMI'14/=٫[U`i8$z/cpi'?Ϣ>ډ<Dӛ`V*OH;T{'$,sRZhWEeѧQb|NQo+jmM-)Ӂg^i_F58T!ՑaT*bdTQo_4 ? 
&O`Q_Fe^D58T 'P vT'ȑq0x5y8/&~ս^DNOT!Oͭ&}'ITIFT()gl&Ȫ&}{ս w|M:OLtU:71I_O j02B"r+O/R>Kϯz2Ncz^^'^J*l~B@9)"" { GPً9xAj $h0&h0&h0&h0&h0&h0&h0&h0&h0&h0&hQ7 FIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/doc/source/user/figures/overview_blockdiag.png0000664000175000017500000004305300000000000026543 0ustar00zuulzuul00000000000000PNG  IHDRX#EIDATxO#y-gwfuve[Qֆ >#ܜS?|Z)0 䚓K>|A`Y HVٕvfzfdYfd Y,Vf[or)V6 ( ( ( ( ( (@w ؀l ?bm_} b鼫/7X]+`6M?}r]"9c^r_ MWstDD/m-SO1WGJ)vK`4sG"&4}ە>x}~M]0#$ 'I. \u]E]w{1u1\K_?7w4J錢FAT(*"];^Q'rݍ<.Fl͔dx}~>䟧Y?*ŸOyԑ:Hhr;\"U")Hp؋  b0E]wb4̌TW?wW7~gs>:ni \)Sz΢;^4$Gs] a/Aԃȃu's1um8#ݝK^˧_ueY֞EUuK5oE?) R,ITHAjQB+-HCGn qzz'NN˷˷ b41y6`es?ln]vz[eH)Fbo,Oy>it:K~"FݨQމQ _oE}zxD3~yXzoO~?iW}mx<^֞u*Eꎢ;j$:ϣ{<:Q{1ck x @LFųgٳӧ(NN.B(p;yaft>|y_ηҟɟۃW7G?88{I m<>_btrgcs1xz/G1: 8@_g|?|~w'^Eay_+v8'SwN{tO"OH؅XEcj@'xxs'Ƨx^'')꺚 }u痭_ls:*G78:z=vNәL79jurLWG1|V }.N?y7N?g1xr/oE}yЋ\\yz3{w_Mzϩ~T_t{3n,lÒnKS9RoYGޓ'"NFLNlv0.;18>{_Fg_wƋ1b8l&hӿkߏ]|sޢm[w=m#) ΝiΧ/|?w?bEt:ghryߏ_'x/ߋO߉gwc`<0\ ҿc,Oͻ޲eZ6,\eM)ՑuTAtD8q?w)?qt>jEIWѦ`|xxx߈KO~3~wWLJqza&h}3Y}I| /~/CocگSܽi>N,jL[1xN~_?''?7Nu/LJ1:݋v^Uom_wvmեW離莢ݣ'q_ĝ/4|oc?E9|>&Qk)4^[gwW|r?={+NO1/ X׆}娪:::ayDDϕRםKu'곽|v7Nu/N?goE}ڍz84`L_iDw~~W&WrދWuNUw0Fsp]̕2 !^xqLJ[ٝxbܼΝӈ888/d/#^d˓볽Ob LJ1|V ݉ыGK:jwyʂ 5iSGgwNǧG썋%S" `0`Ћӽ8=Ƌqz80(I37jol'&t&GwōyЋzЋ^ԧ؏^IoPUUG=q8'Hv"׊h͜uQ)F*{ uD ۭb8N*:07sDDc<vvGUԃn&%uUGèUxNa5/Q9SM5G"a@ R8~(wo#g2쿞<9v=\Yu]E5/Oi.&f ]Բs!_|]WҫQɋr~ڝn-[m}^U)3זR7+4m/xtoܒޏA2>olo'K)|4[#BLl;X?fW1Ͷk~ ]2U O5۔/迴5?-:|mlzows 6k~L?hQ\JVX|Rc&?"r,_/9!3^vG3"__S<EG)gOaǑvzhufa%S/(1Hi8,Q,G? 
=V(GK6Px㛒C(L@@YeTZ wtk`oE]K҇3 kήtq3]=&ey~zgîi onO76nѨm(Z-[n(?lu l`d|ޑe[tD?}fOjͳϴB_qĮZv~|@ fQ?Ȋj"hфмsIU?Ͳye]1{DވEcbސ竾f9ղ@Ps2Q2KJpU5rDlLN)"u'C;Gnlİ@P@@\`Kޟ]w/{n۠ms˶h~-~<|2`gng|{c`gn,ul˶՟Yj{ufXϹֿeN^ #`|pԏnڵ}\Ke/ߏE*/ya@+sk˚Ws6`ܛn{_e]˚)9X!@SD>7o/[~v;*혷E N(b`+}x|l!me]!hhH:} ֡iS pٹlu.TE_fP `шyϱҦNK__n o"6Lq1U΍lnE^ղsB}-p-`t`[s[j9N /ߋK}%>v?zӧAzbϑr~M7Mq{)nf$m^rVǮV9Ó{e/,}߬?w8ziz'QUH)Ԧ"ם1|q7~^iVŋxwb0/lv;j#?t+nt0}۷מWM&ls۽m/E$zw_ởƝ/,}o>}iTH(bA~gJ.j-o~̿_ܴSMk_MڋNY.K??ڦq7ݖ6py҄m<湉=-"ZO€yA@1QVR(m/:>{yy]rȼyϻ,ӻwot'["ދiۂyGa^wQެ?׬evn?QHߖ ?smo[.;C@rUn ٔ^|fEEo/o^?\5o7)x~o=/ߔ}7Ereo֟\egۼj/殮sIZU"V=iHeE~0[޶u) U۳!߹s,9RJαtY/ ؖ#7aYmO0m@ބy#,;gBm }eG\Xo}mgn9(&׾#l9Ҟ;OݗGm>n&t[m{y/*Bm_ϮDdʑECmgvKZEo^wgr^{ss/jϦ/řw6߻߂^7\n]іmz,?,pEvfG̾nv- mwvVi6w6~KrՊZ,^2`\vnMsں?{4kG/.1}ey_w՞m_GO6ݖ7_}9k/MW]m,# e.+=jQWi6Oaˊy(M?ۮ{>?=eW6 v6mv\*ۉ\Az^6`&kp#nUG +Uߤˎ7/[7*~Fle͛|lWh*ChukzsыByCg,|Xe>~ׄ%w޿-3oΨ~`>˄l@-+BWyamg8,[唂^4ӟľm}|m6Xzb/wmr ܀%oӼ}[h%h@(M`C3?ˏϮ7ܼڰj{VYh?_m7buϹYovuݮ={뼞0MgnojW]uWeۿsǺGDLg[xO\w;˶sY6~eޟ~ݼ6Ҟy[EE۝X+#s-_ܲګl{>UVF@P@@P@@P@@P@@P@@P@@P wuJol@JGt#6,߁~nt;6)?~`~GnfG雛n96ݎM9Wt+؈`P@D-(OJGNi Q 3>0 H("uQ ӭ.BC?tt7nvZ>\~#BFT3?ͶkrUJi<'Bmgn=蹖h;> /M@{(|.wl;X?˝MoG@)_S^Y]) )rzX{eu|&|%(P? 
ϜGaNy> X <!@?aט 2#2'@7<5J*KX,>>P@@P@@Pyi `#MM"olD~)P@@P@@Pp m4[^,?a@a.+tYq@Ztv[t( (~G0{( (X QG,M7եڮPU[p{Jyܢ7-s:q+Rz^/@n[S ?~x a҃!RR  uImnӃB&E~ImXsQH B_Ǐ (`yžB{,] :>fnl^Ѿl(#3_\6"/*eZE{-^&0oFUύlUqW}?#(fƿ,@@P@@P@@P@@P@@P@@P@@P@@r[~M7t7݀mS&m 7wf*DS N+5"QNT?tSeR~nSxލ tRs򷋣RhVʑ:Re3_UuUUD)|M"*GSG;nwN^èjFMSzoN:*OvEIHU펢Q&gֻZ){uT:0;ԭe3,q I{3Ng zA09nw\lP]sΝy_펢F}NiwF :gQ3nդ(BVst/ED "uGy_ J;o,Ν8:z''#"bo,nFFeinwxqt29;nw܇T0SW{gQEIt^hW{gQG;ez@}yt^FITgQ흍CZ@U'qt<''qpp{1vG4hJ(U5>-j,^>$GG$& ?Uspѻ4'4F{Nzߍ p53WBIUEg,G/csOwiGdXM@3Y_3$޽'eSċ1b8" guq񥞺:zAܹs?w4{GGDZ :zj6'TQD8z˦?ዃȓ6k IuTAtCw>޽'=:jd|J@?pEm ͬyܽ$#"$,NNb0E]Fŕrz888qޓxw>EU 'ׅ|jwѻ$I9<Ϣ>ً3FL@R<TfpL䈨_58 |kή_-o4/.x~fpv|_P&^'}L:/mmJ*pumи}Xl~߹[.u䟧_?*ŸOyԑ:Hhr;\"U"d\y؋zp`?`/A/r݉<\"4ӏ~gW]>xo==8{W/m"b^¿ "΢Dի"u1Qo 5&=9Q7~ԧwbtr×o[QD}70k<]{_ˈ*]/kϺvzH)G΢?>qT/Ft#-u>#h<>_btrgcs1xz/G1: 8` .k-y_?ηg~[ge/?q\uܿ~i&G$G_שHHF@KhUQ'~ _gOލOY ܋[QE"i""?_t{3n,lÒnKCSL 7,zGϣwIgUHӨ:S"()lzO)rDE}z'/~#^qwb80l& W(C~s?7ly~o|o6mg-֢Oaٶ*mҸOԭ {$z;>_IO{YT/"u"4s \Xc09o;qoċ~)^_ċf8սt/bymvKEDҫSUGtG?ܓ8/wq71ퟢw>Ul9ȓs݉l/F'18~+ݍ_݋O[Qv/ KƿҲ7 #u꨺è1:yb3Os2C@hL^'{1|q>{+~ԣ%; ,<=PSRLfѹsQ'{xqTHAh zQz1:݋1<݋<0={q8'Hv"׊h)풛9""1TE ;Q#*A7r= T@ R*Qw(j_昌Jh#.Fd=y\7s:GTQUTSxiҏn hVy9翞-[m}^U)3FZ*GO/?>~vǭI!XooQL3}?lܢQ bQ,[꺅Nc6 0]s>Ha&XE7}iTH("lzDDS?XPz_j_ǛnK5we_ OF|zwnI)~.'-)7_-/[~ *۟]>U~7-`O_eIFxmqL ˊh{Bi&9f_9[l (Fpm۶*oR yziLk|z~Zټݔmq6ڲMwmkhzxP`v;]ml;ڹMݿ NZ+|+?ro?֟}eâ/;`U;o&,}p5)6x)"u'A _܍_/x_ a _?~77N!|}~U]UzoF俸sÓ}~w8|o7!>qtzU$R5H9"\pf|Ϟ"W]p_@P@@P@@P@@P@@P@@P@@P@@P@@n7 6G~VlV~V o6CP(QeqQOnA 1.7 `ӺnlZ~#BFT3?Ͷk Bq h.>mLR,WVWg(9( ( ( ( (@w 6#=t $BGnp@P@@P@@np˗G`3\V+PbQ~G ( 2{( 4Gt7&[p{Jy@\Ï?lCXAD@@C5)bN{y+=̊( HrLFP*@@P@@P@@P@@P@@P@@P@@P@@P˖vub.-7}t~Hiͺӯ6m_u5[/;e\6\BE[nQA>{#*`;`+ߴP -vv?=`vaNvI`G\H- 0 ( |l҂ڿ첃7ps\n^Dt+=nݴflC]Znlё")t;|~g_7/`yaļm/ 2.{nz_BX?F R/ =N;u}u6W#-0bM4t=MJ"-a`ˆ/:2?=eAeCg/2;\E_te@f_]uΛ0/P1nbj{@P@@P@@P@@P@@P@@P@@P@@P@@)?~>Ust/ED "uGy_ )΢{$G/ct<""ȣ|_s6&Gat~ݣѽsYT{g#)yspѻ4O""spӽa'r]Z ̕PRUGtG?ܓ98hܟ MɗӔ#R=>Dݣ{/$/"y*fZ)R[GD8$}{ݣOƧtQΗUH>'QGDD$?d/NhT)gR;/zwDyYj8 tZf>sܖɝl݋ӥGѻ$zo*ϣNhFlZhTGQ :ϣ7Uד 
@Y&j?>qTH`m! GR(RwUv;yԉ)9BhOz~Քj4 YTqDTt )g M!_;^QO(U7%t95_- W` h \Eϓ/Fzrs??/nGP&"m.u driver api -> db[folded] driver -> db[folded] } /* api_user [shape=actor,label="admin, tenant"] api_user -> api[color=blue] */ group computeA { label="compute node" color=orange agent_a[label="Neutron Agent\n+ bagpipe extension",color=grey,textcolor=darkorange] vswitch_a[label="vswitch\n(OVS)",color=lightgrey] bgpspeaker_a[label="bagpipe-bgp",color="darkorange"] agent_a -> bgpspeaker_a[label="REST"] agent_a -> vswitch_a[folded] bgpspeaker_a -> vswitch_a[folded] } group computeB { label="compute node" color=orange agent_b[label="Neutron Agent\n+ bagpipe extension",color=grey,textcolor=darkorange] vswitch_b[label="vswitch\n(OVS)",color=lightgrey] bgpspeaker_b[label="bagpipe-bgp",color="darkorange"] agent_b -> bgpspeaker_b[label="REST"] agent_b -> vswitch_b[folded] bgpspeaker_b -> vswitch_b[folded] } group routers { color=lightgrey shape=line style=none bgppeers[label="BGP Peers\nand/or\nRoute Reflector",stacked,color=green]; } bgpspeaker_a, bgpspeaker_b <-> bgppeers [color=green, label="BGP", textcolor=green] /*vswitch_b <-> vswitch_a [label="VXLAN", folded] vswitch_a, vswitch_b <-> mplsrouters[label="MPLS\n(over-x)"]*/ driver <-> agent_a, agent_b [label="RPCs"]; /*vswitch_a <-> mplsrouters [label="MPLS\n(over-x)"] vswitch_b <-> mplsrouters [label="MPLS\n(over-x)"]*/ }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9063063 networking_bagpipe-22.0.0/etc/0000775000175000017500000000000000000000000016247 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/README.txt0000664000175000017500000000050700000000000017747 0ustar00zuulzuul00000000000000To generate the sample networking-bagpipe configuration files, run the following command from the top level of the networking-bagpipe directory: tox -e genconfig If a 'tox' 
environment is unavailable, then you can run the following script instead to generate the configuration files: ./tools/generate_config_file_samples.sh ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9063063 networking_bagpipe-22.0.0/etc/bagpipe-bgp/0000775000175000017500000000000000000000000020424 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/bagpipe-bgp/bgp.conf.template0000664000175000017500000000576100000000000023666 0ustar00zuulzuul00000000000000[BGP] local_address=eth0 #local_address=192.168.100.177 peers=192.168.0.101 my_as=64512 enable_rtc=True [COMMON] # Root helper and root helper application # root_helper_daemon is designed to use oslo-rootwrap-daemon for commands # that need to be run as root. # If root_helper_daemon is unset, bagpipe-bgp will use 'root_helper' to call # these commands, which defaults to 'sudo', and can be configured to use # oslo.rootwrap. #root_helper = sudo #root_helper_daemon = sudo /usr/local/bin/neutron-rootwrap-daemon /etc/bagpipe-bgp/rootwrap.conf [API] # BGP component API IP address and port host=localhost port=8082 [DATAPLANE_DRIVER_IPVPN] # IP VPN dataplane driver class # Example values: #dataplane_driver = ovs #dataplane_driver = linux dataplane_driver = dummy # OVS bridge to use (defaults to br-int) #ovs_bridge=ovsbr0 # MPLS outgoing interface (for linux and ovs drivers) # # (not specifying an mpls_interface or using the '*gre*' special value # means that the driver will instantiate a tunnel interface and use an # MPLS/GRE encapsulation) # mpls_interface=*gre* mpls_interface=*gre* # GRE tunnel to use (default to mpls_gre) #gre_tunnel=mpls_gre # Uncomment the following to allow the use of standard MPLS-o-GRE in OVS # (rather than MPLS-o-Eth-o-GRE). 
# # gre_tunnel_options="packet_type=legacy_l3" # Support VXLAN encapsulation of IP VPN traffic with the # ovs driver. # # Note well: this is non-standard and aimed at making it easier # to test IP VPN until OVS 2.4 is shipping # # WARNING: this option does *not* co-exist with the EVPN # linux (can't have both OVS and the linux # kernel native VXLAN stack process VXLAN) # #vxlan_encap=True # local IP address (that others will use to send us encapsulated packets, and that # we will use to send) # Note: does not need to be specified if different than the BGP local_address # dataplane_local_address=eth1 # dataplane_local_address=1.2.3.4 # (obsolete:) ovsbr_interfaces_mtu=4000 # for ovs driver, control whether or not the VRF will # reply to ARP requests on the subnet and impersonate the gateway # (defaults to False) #arp_responder=False # for ovs driver, control if VRF will reply to ARP requests for all IP # addresses if enabled, otherwise only for gateway # (defaults to False) #proxy_arp=False [DATAPLANE_DRIVER_EVPN] # EVPN dataplane driver class # Example values: #dataplane_driver = ovs #dataplane_driver = linux dataplane_driver = dummy # The linux driver allows to force the VXLAN destination port IF iproute2 is # at version 3.14 or above (i.e. >= "ss140411" with "ip -V"). 
# # to use standard IANA port for VXLAN: #vxlan_dst_port=4789 # # to interoperate with switches or boxes not having the ability to # use another port than the linux kernel default 8472: #vxlan_dst_port=8472 # local IP address (that others will use to send us encapsulated packets, and that # we will use to send) # Note: does not need to be specified if different than the BGP local_address # dataplane_local_address=eth1 # dataplane_local_address=1.2.3.4 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/bagpipe-bgp/rootwrap.conf0000664000175000017500000000167400000000000023160 0ustar00zuulzuul00000000000000# Configuration for bagpipe-bgp-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/bagpipe-bgp/rootwrap.d # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9063063 networking_bagpipe-22.0.0/etc/bagpipe-bgp/rootwrap.d/0000775000175000017500000000000000000000000022523 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/bagpipe-bgp/rootwrap.d/linux-vxlan.filters0000664000175000017500000000062400000000000026404 0ustar00zuulzuul00000000000000# bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is # expected to control VXLAN Linux Bridge dataplane # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # ip_lib ip: IpFilter, ip, root ip_exec: IpNetnsExecFilter, ip, root # shell (for piped commands) sh: CommandFilter, sh, root././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/bagpipe-bgp/rootwrap.d/mpls-ovs-dataplane.filters0000664000175000017500000000076500000000000027634 0ustar00zuulzuul00000000000000# bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is # expected to control MPLS OpenVSwitch dataplane # # This file should be owned by (and only-writeable by) the root user # format seems to be # cmd-name: filter-name, raw-command, user, args [Filters] # openvswitch ovs-vsctl: CommandFilter, ovs-vsctl, root ovs-ofctl: CommandFilter, ovs-ofctl, root # ip_lib ip: IpFilter, ip, root ip_exec: IpNetnsExecFilter, ip, root # shell (for piped commands) sh: CommandFilter, sh, root././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9103062 networking_bagpipe-22.0.0/etc/oslo-config-generator/0000775000175000017500000000000000000000000022452 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/oslo-config-generator/bagpipe-bgp.conf0000664000175000017500000000043200000000000025475 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/bagpipe-bgp.conf.sample wrap_width = 79 namespace = networking_bagpipe.api namespace = networking_bagpipe.bgp_common namespace = networking_bagpipe.run_command namespace = networking_bagpipe.dataplane.ipvpn namespace = networking_bagpipe.dataplane.evpn ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/oslo-config-generator/dataplane-evpn-linux-vxlan.conf0000664000175000017500000000021600000000000030502 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/dataplane-evpn-linux-vxlan.conf.sample wrap_width = 79 namespace = networking_bagpipe.dataplane.evpn.linux_vxlan ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/oslo-config-generator/dataplane-ipvpn-mpls-linux.conf0000664000175000017500000000021600000000000030511 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/dataplane-ipvpn-mpls-linux.conf.sample wrap_width = 79 namespace = networking_bagpipe.dataplane.ipvpn.mpls_linux ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/oslo-config-generator/dataplane-ipvpn-mpls-ovs.conf0000664000175000017500000000021200000000000030155 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/dataplane-ipvpn-mpls-ovs.conf.sample wrap_width = 79 namespace = networking_bagpipe.dataplane.ipvpn.mpls_ovs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 
networking_bagpipe-22.0.0/etc/oslo-config-generator/neutron-agent.conf0000664000175000017500000000016700000000000026113 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/neutron-agent.ini.sample wrap_width = 79 namespace = networking_bagpipe.bagpipe_bgp_agent ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/etc/oslo-config-generator/neutron-sfc.conf0000664000175000017500000000016000000000000025561 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/neutron-agent.ini.sample wrap_width = 79 namespace = networking_bagpipe.driver.sfc ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9103062 networking_bagpipe-22.0.0/networking_bagpipe/0000775000175000017500000000000000000000000021352 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/__init__.py0000664000175000017500000000121000000000000023455 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version __version__ = pbr.version.VersionInfo( 'networking_bagpipe').version_string() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/_i18n.py0000664000175000017500000000210500000000000022640 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "networking-bagpipe" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" # requires oslo.i18n >=2.1.0 _C = _translators.contextual_form # The plural translation function using the name "_P" # requires oslo.i18n >=2.1.0 _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.914306 networking_bagpipe-22.0.0/networking_bagpipe/agent/0000775000175000017500000000000000000000000022450 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/agent/__init__.py0000664000175000017500000000000000000000000024547 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 
networking_bagpipe-22.0.0/networking_bagpipe/agent/agent_base_info.py0000664000175000017500000001171500000000000026132 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from collections import defaultdict from collections import namedtuple from oslo_log import log as logging LOG = logging.getLogger(__name__) class keydefaultdict(defaultdict): """Inherit defaultdict class to customize default_factory. Override __missing__ method to construct custom object as default_factory passing an argument. 
For example: class C(object): def __init__(self, value): self.value = value d = keydefaultdict(C) d[key] returns C(key) """ # pylint: disable=not-callable def __missing__(self, key): if self.default_factory is None: raise KeyError(key) else: ret = self[key] = getattr(self, 'default_factory')(key) return ret class CommonInfo: def __init__(self, id): self.id = id self._associations = dict() @property def associations(self): return self._associations.values() def add_association(self, association): self._associations[association.id] = association def remove_association(self, association): del self._associations[association.id] def has_association(self, association_id): return association_id in self._associations def get_association(self, association_id): return self._associations.get(association_id) class PortInfo(CommonInfo): def __init__(self, port_id): super().__init__(port_id) self.ip_address = None self.mac_address = None self.network = None self.chain_hops = dict() self.admin_state_up = False def __eq__(self, other): return (isinstance(other, self.__class__) and self.id == other.id) def __hash__(self): return hash(self.id) @property def all_associations(self): return itertools.chain( self.associations, self.network.associations if self.network else []) def has_any_association(self): return any(True for _ in self.all_associations) def add_chain_hop(self, chain_hop): if not chain_hop: return if not all(item in self.chain_hops.items() for item in chain_hop.items()): self.chain_hops.update(chain_hop) def update_admin_state(self, port_data, transition_to_down_hook=None): if port_data['admin_state_up']: self.admin_state_up = True else: previous_admin_state_up = self.admin_state_up if previous_admin_state_up: if callable(transition_to_down_hook): transition_to_down_hook() self.admin_state_up = False else: self.admin_state_up = False LOG.debug("port %s, admin_state_up is now: %s", self.id, self.admin_state_up) def __repr__(self): return "PortInfo({},{})".format(self.id, 
self.admin_state_up) GatewayInfo = namedtuple('GatewayInfo', ['mac', 'ip']) NO_GW_INFO = GatewayInfo(None, None) class NetworkInfo(CommonInfo): def __init__(self, network_id): super().__init__(network_id) self.gateway_info = NO_GW_INFO self.ports = set() self.segmentation_id = None def set_gateway_info(self, gateway_info): self.gateway_info = gateway_info def __repr__(self): return "NetInfo: {} (segmentation id:{}, gw:{}), ".format( self.id, self.segmentation_id, self.gateway_info) class BaseInfoManager: def __init__(self): # Store all ports level network and service informations self.ports_info = keydefaultdict(PortInfo) # Store all networks level network and service informations self.networks_info = keydefaultdict(NetworkInfo) def _get_network_port_infos(self, net_id, port_id): net_info = self.networks_info[net_id] port_info = self.ports_info[port_id] net_info.ports.add(port_info) port_info.network = net_info return net_info, port_info def _remove_network_port_infos(self, net_id, port_id): port_info = self.ports_info.get(port_id) net_info = self.networks_info.get(net_id) if port_info: del self.ports_info[port_id] if net_info: net_info.ports.discard(port_info) if not net_info.ports: del self.networks_info[net_id] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/agent/bagpipe_bgp_agent.py0000664000175000017500000004507400000000000026451 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import httplib2 import json from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_concurrency import lockutils from oslo_serialization import jsonutils from oslo_service import loopingcall from networking_bagpipe._i18n import _ from networking_bagpipe.bagpipe_bgp import constants as bbgp_const from neutron.conf.agent import common as config from neutron_lib import exceptions as n_exc LOG = logging.getLogger(__name__) # Having this at line 231 is apparently not enough, so adding here as well: # pylint: disable=not-callable bagpipe_bgp_opts = [ cfg.IntOpt('ping_interval', default=10, help=_("The number of seconds the bagpipe-bgp client will " "wait between polling for restart detection.")), cfg.PortOpt('bagpipe_bgp_port', default=8082, help=_("bagpipe-bgp REST service IP port.")), ] # these options are for internal use only (fullstack tests), and hence # better kept in a separate table not looked at by oslo gen confi hooks internal_opts = [ cfg.HostAddressOpt('bagpipe_bgp_ip', default='127.0.0.1', help=_("bagpipe-bgp REST service IP address.")), ] cfg.CONF.register_opts(bagpipe_bgp_opts, "BAGPIPE") cfg.CONF.register_opts(internal_opts, "BAGPIPE") config.register_agent_state_opts_helper(cfg.CONF) # don't use bbgp_const.VPN_TYPES here, because here in this module # we sometimes need to iterate the vpn types in a controlled order: # EVPN first on attach, EVPN last on detach VPN_TYPES = [bbgp_const.EVPN, bbgp_const.IPVPN] class BaGPipeBGPException(n_exc.NeutronException): message = "An exception occurred when calling bagpipe-bgp \ REST service: %(reason)s" class SetJSONEncoder(jsonutils.JSONEncoder): # JSON encoder that encodes set like a list, this # allows to store list of RTs as sets and simplify the code # in many places # pylint: disable=method-hidden def default(self, obj): if isinstance(obj, set): 
return list(obj) return json.JSONEncoder.default(self, obj) class HTTPClientBase: """An HTTP client base class""" def __init__(self, host="127.0.0.1", port=8082, client_name="HTTP client base"): """Create a new HTTP client :param host: HTTP server IP address :param port: HTTP server port """ self.host = host self.port = port self.client_name = client_name def do_request(self, method, action, body=None): LOG.debug("bagpipe-bgp client request: %(method)s %(action)s " "[%(body)s]", {'method': method, 'action': action, 'body': str(body)}) if isinstance(body, dict): body = jsonutils.dumps(body, cls=SetJSONEncoder) try: headers = {'User-Agent': self.client_name, "Content-Type": "application/json", "Accept": "application/json"} uri = "http://{}:{}/{}".format(self.host, self.port, action) http = httplib2.Http() response, content = http.request(uri, method, body, headers) LOG.debug("bagpipe-bgp returns [%(status)s:%(content)s]", {'status': str(response.status), 'content': content}) if response.status == 200: if content and len(content) > 1: return jsonutils.loads(content) else: reason = ( "An HTTP operation has failed on bagpipe-bgp." 
) raise BaGPipeBGPException(reason=reason) except OSError as e: reason = "Failed to connect to bagpipe-bgp: %s" % str(e) raise BaGPipeBGPException(reason=reason) def get(self, action): return self.do_request("GET", action) def post(self, action, body=None): return self.do_request("POST", action, body=body) def put(self, action, body=None): return self.do_request("PUT", action, body=body) def delete(self, action): return self.do_request("DELETE", action) def get_default_vpn_instance_id(vpn_type, network_id): return '{}_{}'.format(vpn_type, network_id) class BaGPipeBGPAgent(HTTPClientBase): """Implements a BaGPipe-BGP REST client""" _instance = None # bagpipe-bgp status BAGPIPEBGP_UP = 'UP' BAGPIPEBGP_DOWN = 'DOWN' @classmethod @lockutils.synchronized('bagpipe-bgp-agent') def _create_instance(cls, agent_type): if not cls.has_instance(): cls._instance = cls(agent_type) @classmethod def has_instance(cls): return cls._instance is not None @classmethod def clear_instance(cls): cls._instance = None @classmethod def get_instance(cls, agent_type): # double checked locking if not cls.has_instance(): cls._create_instance(agent_type) else: if cls._instance.agent_type != agent_type: raise Exception("Agent already configured with another type") return cls._instance def __init__(self, agent_type): """Create a new BaGPipe-BGP REST service client. 
:param agent_type: bagpipe-bgp agent type (Linux bridge or OVS) """ super().__init__(cfg.CONF.BAGPIPE.bagpipe_bgp_ip, cfg.CONF.BAGPIPE.bagpipe_bgp_port, agent_type) self.agent_type = agent_type self.ping_interval = cfg.CONF.BAGPIPE.ping_interval self.bagpipe_bgp_status = self.BAGPIPEBGP_DOWN self.seq_num = 0 # Starts a greenthread for bagpipe-bgp status polling self._start_bagpipe_bgp_status_polling(self.ping_interval) # Maps registered service name to callback function used to build the # content of bagpipe-bgp API calls self.build_callbacks = dict() # Maps registered service name to list of ports for which a given # service is enabled self.port_lists = dict() def _check_bagpipe_bgp_status(self): """Trigger refresh on bagpipe-bgp restarts Check if bagpipe-bgp has restarted while sending ping request to detect sequence number change. If a restart is detected, re-send all registered attachments to bagpipe-bgp. """ new_seq_num = self._request_ping() # Check bagpipe-bgp restart if new_seq_num != self.seq_num: if new_seq_num != -1: if self.seq_num != 0: LOG.warning("bagpipe-bgp restart detected...") else: LOG.info("bagpipe-bgp successfully detected") self.seq_num = new_seq_num self.bagpipe_bgp_status = self.BAGPIPEBGP_UP # Re-send all registered ports to bagpipe-bgp ports = set() for port_list in self.port_lists.values(): ports |= port_list if ports: LOG.info("Sending all registered ports to bagpipe-bgp") LOG.debug("Registered ports list: %s", ports) for port_id in ports: self.do_port_plug(port_id) else: LOG.info("No attachment to send to bagpipe-bgp") else: self.bagpipe_bgp_status = self.BAGPIPEBGP_DOWN def _start_bagpipe_bgp_status_polling(self, ping_interval=10): # Start bagpipe-bgp status polling at regular interval status_loop = loopingcall.FixedIntervalLoopingCall( self._check_bagpipe_bgp_status) status_loop.start(interval=ping_interval, initial_delay=ping_interval) # TM: why not zero ? 
def _check_evpn2ipvpn_info(self, vpn_type, network_id, attach_list, attach_info): # Check if plugging an EVPN into an IPVPN if (vpn_type == bbgp_const.IPVPN and bbgp_const.EVPN in attach_list): attach_info['local_port'] = { bbgp_const.EVPN: { 'id': get_default_vpn_instance_id('evpn', network_id) } } def _compile_port_attach_info(self, port_id): # this method returns information for all bagpipe-bgp attachments to # produce for a given port: # { # 'evpn': [ # { # 'vpn_instance_id': # 'ip_address' # .. # 'import_rts': [...] # 'export_rts': [...] # }, # ... # ], # 'ipvpn': [ # { # 'vpn_instance_id': # 'ip_address' # .. # 'import_rts': [...] # 'export_rts': [...] # }, # ... # ] # } # # This structure produces consolidated information across all services # for all Route Target parameters: the import_rt and export_rt # attributes accumulate the RTs of all the service producing # attachments for a given VPN instance (vpn instance id). # # NOTE(tmorin): the code does not do consistency checks for parameters # that would be conflicting between attachments, for instance if # an EVPN attachment would specify a VNI X and another a VNI Y. # For read-only parameters, bagpipe-bgp would be in charge of detecting # an attempt at overwriting the parameters with a different value and # raising an error. # For proper safeguarding, the code here would need to check # consistency across attachments of read-write values. service_attachments_map = {} for service, build_callback in self.build_callbacks.items(): # what was returned by the callbacks before (a dict allowing to # describe one EVPN and one IPVPN attachment): # { # 'network_id': # 'ip_address' # .. # 'evpn': { # 'import_rt': [...] # 'export_rt': [...] # 'static_routes': ... # } # 'ipvpn': { # 'import_rt': [...] # 'export_rt': [...] # 'static_routes': ... 
# } # } # # we expect the callback to return a dict or lists, # following this template: # { # 'network_id': # use to generate vpn_instance_id if # # vpn_instance_id is omitted below # 'evpn': [ # { # 'vpn_instance_id': .. # 'ip_address': .. # ... # 'import_rt': [...] # 'export_rt': [...] # }, # ... # ], # 'ipvpn': [ # ] # } service_attachments_map[service] = build_callback(port_id) LOG.debug("port %s, attach info for %s: %s", port_id, service, service_attachments_map[service]) attach_list = {} # map in which we consolidate the RTs for a given vpn instance # vpn_instance_rts[vpn_instance_id]['import_rt'] = set() # vpn_instance_rts[vpn_instance_id]['export_rt'] = set() vpn_instance_rts = {} for vpn_type in VPN_TYPES: vpn_attachment_list = [] for service in self.build_callbacks.keys(): service_attachments = ( service_attachments_map[service].get(vpn_type) ) if not service_attachments: continue default_vpn_instance_id = get_default_vpn_instance_id( vpn_type, service_attachments_map[service]['network_id']) for service_attachment in service_attachments: vpn_instance_id = service_attachment.setdefault( 'vpn_instance_id', default_vpn_instance_id) service_attachment['vpn_type'] = vpn_type # initialize consolidated RTs for this vpn_instance_id # if this wasn't done yet vpn_instance_rts.setdefault(vpn_instance_id, { bbgp_const.RT_IMPORT: set(), bbgp_const.RT_EXPORT: set() }) for rt_type in (bbgp_const.RT_IMPORT, bbgp_const.RT_EXPORT): # merge this service RTs with the RTs we already had # for this vpn_instance_id orig_rts = set(service_attachment[rt_type]) vpn_instance_rts[vpn_instance_id][rt_type] |= orig_rts # have the RT information for this attachment # point to the consolidated RT list service_attachment[rt_type] = ( vpn_instance_rts[vpn_instance_id][rt_type]) LOG.debug("adding processed attachment: %s", service_attachment) vpn_attachment_list.append(service_attachment) if vpn_attachment_list: attach_list[vpn_type] = vpn_attachment_list LOG.debug("all attachments for 
port %s: %s", port_id, attach_list) return attach_list def _request_ping(self): """Send ping request to bagpipe-bgp to get sequence number""" try: response = self.get('ping') LOG.debug("bagpipe-bgp PING response received with " "sequence number %s", response) return response except Exception as e: LOG.warning(str(e)) return -1 @log_helpers.log_method_call def _send_attach_local_port(self, local_port_details): """Send local port attach request to BaGPipe-BGP if running""" if self.bagpipe_bgp_status is self.BAGPIPEBGP_UP: try: self.post('attach_localport', local_port_details) LOG.debug("Local port has been attached to bagpipe-bgp with " "details %s", local_port_details) except BaGPipeBGPException as e: LOG.error("Can't attach local port on bagpipe-bgp: %s", str(e)) else: LOG.debug("Local port not yet attached to bagpipe-bgp (not up)") @log_helpers.log_method_call def _send_detach_local_port(self, local_port_details): """Send local port detach request to BaGPipe-BGP if running""" if self.bagpipe_bgp_status is self.BAGPIPEBGP_UP: try: self.post('detach_localport', local_port_details) LOG.debug("Local port has been detached from bagpipe-bgp " "with details %s", local_port_details) except BaGPipeBGPException as e: LOG.error("Can't detach local port from bagpipe-bgp: %s", str(e)) raise else: LOG.debug("Local port not yet detached from bagpipe-bgp (not up)") @log_helpers.log_method_call def _send_all_attachments(self, plug_details): # First plug E-VPNs because they could be plugged into IP-VPNs for vpn_type in [t for t in VPN_TYPES if t in plug_details]: for plug_detail in plug_details[vpn_type]: self._send_attach_local_port(plug_detail) @log_helpers.log_method_call def do_port_plug(self, port_id): """Send port attach request to bagpipe-bgp.""" all_plug_details = self._compile_port_attach_info(port_id) self._send_all_attachments(all_plug_details) @log_helpers.log_method_call def do_port_plug_refresh(self, port_id, detach_infos): """Refresh port attach on bagpipe-bgp Send 
port attach and/or detach request to bagpipe-bgp when necessary. detach_infos: { 'network_id': ... 'evpn': { ... } 'ipvpn': { ... } } ] """ self.do_port_plug_refresh_many(port_id, [detach_infos]) @log_helpers.log_method_call def do_port_plug_refresh_many(self, port_id, detach_info_list): """Refresh port attach on bagpipe-bgp Send port attach and/or detach request to bagpipe-bgp when necessary. detach_infos, a list of: { 'network_id': ... 'evpn': { ... } 'ipvpn': { ... } } ] """ plug_details = self._compile_port_attach_info(port_id) for detach_infos in detach_info_list: network_id = detach_infos.pop('network_id') for detach_vpn_type, detach_info in list(detach_infos.items()): detach_info.setdefault( 'vpn_instance_id', get_default_vpn_instance_id(detach_vpn_type, network_id)) detach_info['vpn_type'] = detach_vpn_type # NOTE(tmorin): to be reconsidered self._check_evpn2ipvpn_info(detach_vpn_type, network_id, plug_details, detach_info) if detach_infos: # unplug IPVPN first, then EVPN (hence ::-1 below) for vpn_type in [t for t in VPN_TYPES[::-1] if t in detach_infos]: self._send_detach_local_port(detach_infos[vpn_type]) self._send_all_attachments(plug_details) @lockutils.synchronized('bagpipe-bgp-agent') def register_build_callback(self, service_name, callback): self.build_callbacks[service_name] = callback @lockutils.synchronized('bagpipe-bgp-agent') def register_port_list(self, service_name, port_list): self.port_lists[service_name] = port_list ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.914306 networking_bagpipe-22.0.0/networking_bagpipe/agent/bgpvpn/0000775000175000017500000000000000000000000023744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/agent/bgpvpn/__init__.py0000664000175000017500000000000000000000000026043 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/agent/bgpvpn/agent_extension.py0000664000175000017500000012444300000000000027520 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ L2 Agent extension to support bagpipe networking-bgpvpn driver RPCs in the OpenVSwitch agent """ import collections import copy import itertools import netaddr from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_bagpipe._i18n import _ from networking_bagpipe.agent import agent_base_info from networking_bagpipe.agent import bagpipe_bgp_agent from networking_bagpipe.agent.bgpvpn import constants as bgpvpn_const from networking_bagpipe.bagpipe_bgp import constants as bbgp_const from networking_bagpipe.objects import bgpvpn as objects from neutron.agent.common import ovs_lib from neutron.agent.linux.openvswitch_firewall import firewall \ as ovs_fw from neutron.api.rpc.callbacks.consumer import registry as rpc_registry from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.handlers import resources_rpc from neutron.conf.agent import common as config from neutron.conf.plugins.ml2.drivers import ovs_conf from neutron.plugins.ml2.drivers.openvswitch.agent import 
vlanmanager from neutron_lib.agent import l2_extension from neutron_lib.api.definitions import bgpvpn from neutron_lib.api.definitions import bgpvpn_routes_control as bgpvpn_rc from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib.plugins.ml2 import ovs_constants as ovs_agt_consts LOG = logging.getLogger(__name__) bagpipe_bgpvpn_opts = [ cfg.StrOpt('mpls_bridge', default='br-mpls', help=_("OVS MPLS bridge to use")), ] # these options are for internal use only (fullstack tests), and hence # better kept in a separate table not looked at by oslo gen confi hooks internal_opts = [ cfg.StrOpt('tun_to_mpls_peer_patch_port', default='patch-to-mpls', help=_("OVS Peer patch port in tunnel bridge to MPLS bridge ")), cfg.StrOpt('mpls_to_tun_peer_patch_port', default='patch-from-tun', help=_("OVS Peer patch port in MPLS bridge to tunnel bridge ")), cfg.StrOpt('mpls_to_int_peer_patch_port', default='patch-mpls-to-int', help=_("OVS Peer patch port in MPLS bridge to int bridge ")), cfg.StrOpt('int_to_mpls_peer_patch_port', default='patch-int-from-mpls', help=_("OVS Peer patch port in int bridge to MPLS bridge ")), ] cfg.CONF.register_opts(bagpipe_bgpvpn_opts, "BAGPIPE") cfg.CONF.register_opts(internal_opts, "BAGPIPE") ovs_conf.register_ovs_agent_opts() config.register_agent_state_opts_helper(cfg.CONF) def format_associations_route_targets(assocs): rts = collections.defaultdict(set) for assoc in assocs: rts[bbgp_const.RT_IMPORT] |= set(assoc.bgpvpn.route_targets or ()) rts[bbgp_const.RT_IMPORT] |= set(assoc.bgpvpn.import_targets or ()) rts[bbgp_const.RT_EXPORT] |= set(assoc.bgpvpn.route_targets or ()) rts[bbgp_const.RT_EXPORT] |= set(assoc.bgpvpn.export_targets or ()) return rts def bagpipe_vpn_type(bgpvpn_type): return bgpvpn_const.BGPVPN_2_BAGPIPE[bgpvpn_type] def vpn_instance_id_for_port_assoc_route(port_assoc, route): bbgp_vpn_type = 
bagpipe_vpn_type(port_assoc.bgpvpn.type) if route.type == bgpvpn_rc.BGPVPN_TYPE: return '{}_portassoc_{}_bgpvpn_{}'.format( bbgp_vpn_type, port_assoc.id, route.bgpvpn.id) elif route.type == bgpvpn_rc.PREFIX_TYPE: encoded_prefix = str( route.prefix).replace('.', '_').replace(':', '_').replace('/', '_') return '{}_portassoc_{}_prefix_{}'.format( bbgp_vpn_type, port_assoc.id, encoded_prefix) else: LOG.error("unknown route type: %s", route.type) def advertise_fixed_ip(port_info): return (any([assoc.advertise_fixed_ips for assoc in port_info.associations] ) or any([not isinstance(assoc, objects.BGPVPNPortAssociation) for assoc in port_info.network.associations])) class BagpipeBgpvpnAgentExtension(l2_extension.L2AgentExtension, agent_base_info.BaseInfoManager): def __init__(self): super().__init__() self.ports = set() @log_helpers.log_method_call def consume_api(self, agent_api): self.agent_api = agent_api @log_helpers.log_method_call def initialize(self, connection, driver_type): self.driver_type = driver_type if self._is_ovs_extension(): self.int_br = self.agent_api.request_int_br() self.tun_br = self.agent_api.request_tun_br() if self.tun_br is None: raise Exception("tunneling is not enabled in OVS agent, " "however bagpipe_bgpvpn extensions needs it") if cfg.CONF.SECURITYGROUP.firewall_driver != "openvswitch": LOG.warning('Neutron router fallback supported only with the ' '"openvswitch" firewall driver, or if l3agent is ' 'always deployed on servers distinct from compute ' 'nodes.') self.bagpipe_bgp_agent = ( bagpipe_bgp_agent.BaGPipeBGPAgent.get_instance( n_const.AGENT_TYPE_OVS) ) self._setup_ovs_bridge() self.vlan_manager = vlanmanager.LocalVlanManager() registry.subscribe(self.ovs_restarted, resources.AGENT, events.OVS_RESTARTED) else: raise Exception("driver type not supported: %s", driver_type) self.bagpipe_bgp_agent.register_build_callback( bgpvpn_const.BGPVPN_SERVICE, self.build_bgpvpn_attach_info) # NOTE(tmorin): replace by callback, so that info can be 
derived # from self.ports_info.keys() instead of being duplicated into # self.ports self.bagpipe_bgp_agent.register_port_list(bgpvpn_const.BGPVPN_SERVICE, self.ports) # OVO-based BGPVPN RPCs self._setup_rpc(connection) def _is_ovs_extension(self): return self.driver_type == ovs_agt_consts.EXTENSION_DRIVER_TYPE def _setup_rpc(self, connection): self.rpc_pull_api = resources_rpc.ResourcesPullRpcApi() rpc_registry.register(self.handle_notification_net_assocs, objects.BGPVPNNetAssociation.obj_name()) rpc_registry.register(self.handle_notification_router_assocs, objects.BGPVPNRouterAssociation.obj_name()) rpc_registry.register(self.handle_notification_port_assocs, objects.BGPVPNPortAssociation.obj_name()) endpoints = [resources_rpc.ResourcesPushRpcCallback()] topic_net_assoc = resources_rpc.resource_type_versioned_topic( objects.BGPVPNNetAssociation.obj_name()) topic_router_assoc = resources_rpc.resource_type_versioned_topic( objects.BGPVPNRouterAssociation.obj_name()) topic_port_assoc = resources_rpc.resource_type_versioned_topic( objects.BGPVPNPortAssociation.obj_name()) connection.create_consumer(topic_net_assoc, endpoints, fanout=True) connection.create_consumer(topic_router_assoc, endpoints, fanout=True) connection.create_consumer(topic_port_assoc, endpoints, fanout=True) @log_helpers.log_method_call @lockutils.synchronized('bagpipe-bgpvpn') def handle_port(self, context, data): port_id = data['port_id'] network_id = data['network_id'] if self._ignore_port(context, data): return self.ports.add(port_id) net_info, port_info = ( self._get_network_port_infos(network_id, port_id) ) def delete_hook(): self._delete_port(context, {'port_id': port_info.id}) port_info.update_admin_state(data, delete_hook) if not port_info.admin_state_up: return if data['network_type'] == n_const.TYPE_VXLAN: net_info.segmentation_id = data['segmentation_id'] port_info.mac_address = data['mac_address'] port_info.ip_address = data['fixed_ips'][0]['ip_address'] net_assocs = 
self.rpc_pull_api.bulk_pull( context, objects.BGPVPNNetAssociation.obj_name(), filter_kwargs=dict(network_id=network_id)) # find all the Router Association that are relevant for our network router_assocs = self.rpc_pull_api.bulk_pull( context, objects.BGPVPNRouterAssociation.obj_name(), filter_kwargs=dict(network_id=network_id)) for assoc in itertools.chain(net_assocs, router_assocs): # replug_ports=False because we will call do_port_plug # once for all associations, and only for this port, out of # the loop self._add_association_for_net(network_id, assoc, replug_ports=False) port_assocs = self.rpc_pull_api.bulk_pull( context, objects.BGPVPNPortAssociation.obj_name(), filter_kwargs=dict(port_id=port_id)) for port_assoc in port_assocs: self._add_association_for_port(port_info, port_assoc, replug_port=False) if port_info.has_any_association(): self.bagpipe_bgp_agent.do_port_plug(port_id) @lockutils.synchronized('bagpipe-bgpvpn') def delete_port(self, context, data): self._delete_port(context, data) # un-synchronized version, to be called indirectly from handle_port @log_helpers.log_method_call def _delete_port(self, context, data): port_id = data['port_id'] port_info = self.ports_info.get(port_id) if port_info and port_info.has_any_association(): if not port_info.admin_state_up: LOG.debug("port %s was not admin_state_up, ignoring", port_id) return if len(port_info.network.ports) == 1: # last port on network... 
self._stop_gateway_traffic_redirect(port_info.network, last_port=True) detach_infos = self._build_detach_infos(port_info) # here we clean our cache for this port, # and for its network if this was the last port on the network self._remove_network_port_infos(port_info.network.id, port_id) self.ports.remove(port_id) self.bagpipe_bgp_agent.do_port_plug_refresh_many(port_id, detach_infos) @log_helpers.log_method_call @lockutils.synchronized('bagpipe-bgpvpn') def handle_notification_net_assocs(self, context, resource_type, net_assocs, event_type): for net_assoc in net_assocs: if event_type in (rpc_events.CREATED, rpc_events.UPDATED): self._add_association_for_net(net_assoc.network_id, net_assoc) elif event_type == rpc_events.DELETED: self._remove_association_for_net(net_assoc.network_id, net_assoc) else: LOG.warning("unsupported event: %s", event_type) @log_helpers.log_method_call @lockutils.synchronized('bagpipe-bgpvpn') def handle_notification_router_assocs(self, context, resource_type, router_assocs, event_type): for router_assoc in router_assocs: if event_type in (rpc_events.CREATED, rpc_events.UPDATED): for connected_net in router_assoc.connected_networks: self._add_association_for_net(connected_net['network_id'], router_assoc) elif event_type == rpc_events.DELETED: for connected_net in router_assoc.connected_networks: self._remove_association_for_net( connected_net['network_id'], router_assoc) else: LOG.warning("unsupported event: %s", event_type) @log_helpers.log_method_call def _add_association_for_net(self, network_id, assoc, replug_ports=True): LOG.debug("add association with bgpvpn %s", assoc.bgpvpn) net_info = self.networks_info[network_id] # for now we only support a single IPv4 subnet for subnet in assoc.all_subnets(network_id): gateway_info = agent_base_info.GatewayInfo( subnet['gateway_mac'], subnet['gateway_ip'] ) if subnet['ip_version'] == 4: net_info.set_gateway_info(gateway_info) if assoc.bgpvpn.type == bgpvpn.BGPVPN_L3: 
self._gateway_traffic_redirect(net_info) break if not net_info: LOG.debug("no net_info for network %s, skipping", network_id) return if not net_info.ports: LOG.debug("no port on network %s, skipping", network_id) return net_info.add_association(assoc) if replug_ports: for port_info in net_info.ports: self.bagpipe_bgp_agent.do_port_plug(port_info.id) @log_helpers.log_method_call def _remove_association_for_net(self, network_id, assoc, refresh_plugs=True): net_info = self.networks_info.get(network_id) if not net_info: LOG.debug("no net_info for network %s, skipping", network_id) return if not net_info.ports: LOG.debug("no port on network %s, skipping", network_id) return # is there an association of same BGPVPN type that remains ? remaining = len([1 for a in net_info.associations if a.bgpvpn.type == assoc.bgpvpn.type]) # we need to build port detach_information before we update # net_info.associations: if refresh_plugs and remaining <= 1: detach_infos = {} for port_info in net_info.ports: detach_infos[port_info.id] = ( self._build_detach_infos(port_info, assoc.bgpvpn.type)) net_info.remove_association(assoc) if remaining > 1 and refresh_plugs: LOG.debug("some association of type %s remain, updating all ports", assoc.bgpvpn.type) for port_info in net_info.ports: self.bagpipe_bgp_agent.do_port_plug(port_info.id) else: LOG.debug("no association of type %s remains, detaching it for " "all ports", assoc.bgpvpn.type) if refresh_plugs: for port_info in net_info.ports: self.bagpipe_bgp_agent.do_port_plug_refresh_many( port_info.id, detach_infos[port_info.id]) if assoc.bgpvpn.type == bgpvpn.BGPVPN_L3: self._stop_gateway_traffic_redirect(net_info, last_assoc=True) @log_helpers.log_method_call @lockutils.synchronized('bagpipe-bgpvpn') def handle_notification_port_assocs(self, context, resource_type, port_assocs, event_type): for port_assoc in port_assocs: try: port_info = self.ports_info[port_assoc.port_id] except KeyError: LOG.debug("port %s is not present, skipping", 
port_assoc.port_id) continue if event_type == rpc_events.CREATED: self._add_association_for_port(port_info, port_assoc) elif event_type == rpc_events.UPDATED: self._update_port_association(port_info, port_assoc) elif event_type == rpc_events.DELETED: self._remove_association_for_port(port_info, port_assoc) else: LOG.warning("unsupported event: %s", event_type) @log_helpers.log_method_call def _update_port_association(self, port_info, new_port_assoc): # if we already had this association, do unplugs for prefixes # that were in the association but are not port present anymore # in new_port_assoc assoc_id = new_port_assoc.id if port_info.has_association(assoc_id): old_routes = set(port_info.get_association(assoc_id).routes) new_routes = set(new_port_assoc.routes) LOG.debug("old routes: %s", old_routes) LOG.debug("new routes: %s", new_routes) # update the port association, so that build_bgpvpn_attach_info # finds the updated version port_info.add_association(new_port_assoc) detach_infos = [] for removed_route in old_routes.difference(new_routes): detach_infos.extend( self._build_detach_infos_for_port_assoc(port_info, new_port_assoc, removed_route)) self.bagpipe_bgp_agent.do_port_plug_refresh_many(port_info.id, detach_infos) else: self._add_association_for_port(port_info, new_port_assoc) @log_helpers.log_method_call def _add_association_for_port(self, port_info, assoc, replug_port=True): port_info.add_association(assoc) self._add_association_for_net(port_info.network.id, assoc, replug_ports=False) if replug_port: self.bagpipe_bgp_agent.do_port_plug(port_info.id) @log_helpers.log_method_call def _remove_association_for_port(self, port_info, assoc): detach_infos = self._build_detach_infos_for_port_assoc(port_info, assoc) port_info.remove_association(assoc) self._remove_association_for_net(port_info.network.id, assoc, refresh_plugs=False) self.bagpipe_bgp_agent.do_port_plug_refresh_many(port_info.id, detach_infos) def _ignore_port(self, context, data): if data['port_id'] 
is None: return True if (data['device_owner'].startswith( n_const.DEVICE_OWNER_NETWORK_PREFIX)): LOG.info("Port %s owner is network:*, we'll do nothing", data['port_id']) return True return False def _setup_ovs_bridge(self): '''Setup the MPLS bridge for bagpipe-bgp. Creates MPLS bridge, and links it to the integration and tunnel bridges using patch ports. :param mpls_br: the name of the MPLS bridge. ''' mpls_br = cfg.CONF.BAGPIPE.mpls_bridge self.mpls_br = ovs_lib.OVSBridge(mpls_br) if not self.mpls_br.bridge_exists(mpls_br): LOG.error("Unable to enable MPLS on this agent, MPLS bridge " "%(mpls_br)s doesn't exist. Agent terminated!", {"mpls_br": mpls_br}) exit(1) # set secure mode self.mpls_br.set_secure_mode() # patch ports for traffic from tun bridge to mpls bridge self.patch_tun_to_mpls_ofport = self.tun_br.add_patch_port( cfg.CONF.BAGPIPE.tun_to_mpls_peer_patch_port, cfg.CONF.BAGPIPE.mpls_to_tun_peer_patch_port) self.patch_mpls_to_tun_ofport = self.mpls_br.add_patch_port( cfg.CONF.BAGPIPE.mpls_to_tun_peer_patch_port, cfg.CONF.BAGPIPE.tun_to_mpls_peer_patch_port) # patch ports for traffic from mpls bridge to int bridge self.patch_mpls_to_int_ofport = self.mpls_br.add_patch_port( cfg.CONF.BAGPIPE.mpls_to_int_peer_patch_port, cfg.CONF.BAGPIPE.int_to_mpls_peer_patch_port) self.patch_int_to_mpls_ofport = self.int_br.add_patch_port( cfg.CONF.BAGPIPE.int_to_mpls_peer_patch_port, cfg.CONF.BAGPIPE.mpls_to_int_peer_patch_port) if (int(self.patch_tun_to_mpls_ofport) < 0 or int(self.patch_mpls_to_tun_ofport) < 0 or int(self.patch_int_to_mpls_ofport) < 0 or int(self.patch_mpls_to_int_ofport) < 0): LOG.error("Failed to create OVS patch port. Cannot have " "MPLS enabled on this agent, since this version " "of OVS does not support patch ports. 
" "Agent terminated!") exit(1) self.patch_tun2int = self.tun_br.get_port_ofport( cfg.CONF.OVS.tun_peer_patch_port) # In br-tun, redirect all traffic from VMs towards the gateway # into br-mplsexcept the traffic that already went through br-mpls # and came back to br-tun via the fallback mecanism, this traffic is # identified by the specific FALLBACK_SRC_MAC MAC address # we need to copy the existing br-tun rules to dispatch to UCAST_TO_TUN # and FLOOD_TO_TUN, but only for except_from_src_mac MAC, # and with a priority of 2 self.tun_br.add_flow(table=ovs_agt_consts.PATCH_LV_TO_TUN, priority=2, dl_src=bgpvpn_const.FALLBACK_SRC_MAC, dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", actions=("resubmit(,%s)" % ovs_agt_consts.UCAST_TO_TUN)) self.tun_br.add_flow(table=ovs_agt_consts.PATCH_LV_TO_TUN, priority=2, dl_src=bgpvpn_const.FALLBACK_SRC_MAC, dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", actions=("resubmit(,%s)" % ovs_agt_consts.FLOOD_TO_TUN)) # Redirect traffic from the MPLS bridge to br-int self.tun_br.add_flow(in_port=self.patch_tun_to_mpls_ofport, actions="output:%s" % self.patch_tun2int) # In br-int... 
self.patch_int2tun = self.int_br.get_port_ofport( cfg.CONF.OVS.int_peer_patch_port) # when the OVS firewall driver is used, we can handle the # case where the gateway is directly connected to br-int: # traffic that was already fallback'd is not touched self.int_br.add_flow( table=ovs_agt_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, priority=3, dl_src=bgpvpn_const.FALLBACK_SRC_MAC, actions="NORMAL", ) # the base NORMAL action in this table is at priority 1 # the rules to redirect traffic are setup in gateway_traffic_redirect # with a priority of 2 # OVS1.3 needed for push_vlan in _gateway_traffic_redirect self.int_br.use_at_least_protocol(ovs_agt_consts.OPENFLOW13) def _redirect_br_tun_to_mpls(self, dst_mac, vlan): # then with a priority of 1, we redirect traffic to the dst_mac # address to br-mpls self.tun_br.add_flow( table=ovs_agt_consts.PATCH_LV_TO_TUN, priority=1, in_port=self.patch_tun2int, dl_dst=dst_mac, dl_vlan=vlan, actions="output:%s" % self.patch_tun_to_mpls_ofport ) def _stop_redirect_br_tun_to_mpls(self, vlan): self.tun_br.delete_flows( strict=True, table=ovs_agt_consts.PATCH_LV_TO_TUN, priority=1, in_port=self.patch_tun2int, dl_vlan=vlan ) @log_helpers.log_method_call @lockutils.synchronized('bagpipe-bgpvpn') def ovs_restarted(self, resources, event, trigger, payload=None): self._setup_ovs_bridge() for net_info in self.networks_info.values(): if (net_info.ports and any([assoc.bgpvpn.type == bgpvpn.BGPVPN_L3 for assoc in net_info.associations])): self._gateway_traffic_redirect(net_info) # TODO(tmorin): need to handle restart on bagpipe-bgp side, in the # meantime after an OVS restart, restarting bagpipe-bgp is required @log_helpers.log_method_call def _enable_gw_arp_responder(self, vlan, gateway_ip): # Add ARP responder entry for default gateway in br-tun # We may compete with the ARP responder entry for the real MAC # if the router is on a network node and we are a compute node, # so we must add our rule with a higher priority. 
Using a different # priority also means that arp_responder will not remove our ARP # responding flows and we won't remove theirs. # NOTE(tmorin): consider adding priority to install_arp_responder # and then use it here # (mostly copy-pasted ovs_ofctl....install_arp_responder) actions = ovs_agt_consts.ARP_RESPONDER_ACTIONS % { 'mac': netaddr.EUI(bgpvpn_const.DEFAULT_GATEWAY_MAC, dialect=netaddr.mac_unix), 'ip': netaddr.IPAddress(gateway_ip), } self.tun_br.add_flow(table=ovs_agt_consts.ARP_RESPONDER, priority=2, # see above dl_vlan=vlan, proto='arp', arp_op=0x01, arp_tpa='%s' % gateway_ip, actions=actions) @log_helpers.log_method_call def _disable_gw_arp_responder(self, vlan, gateway_ip): if not gateway_ip: LOG.warning('could not disable gateway ARP responder for vlan %s: ' 'no gateway IP', vlan) return # Remove ARP responder entry for default gateway in br-tun self.tun_br.delete_flows( strict=True, table=ovs_agt_consts.ARP_RESPONDER, priority=2, dl_vlan=vlan, proto='arp', arp_op=0x01, arp_tpa='%s' % gateway_ip) @log_helpers.log_method_call def _gateway_traffic_redirect(self, net_info): if not self._is_ovs_extension(): return LOG.debug("redirecting gw traffic for net %s: %s", net_info, net_info.gateway_info) # similar rules will later be added to redirect the traffic # to each specific router MAC to br-mpls try: vlan = self.vlan_manager.get(net_info.id, net_info.segmentation_id).vlan if net_info.gateway_info.mac: # there is a Neutron router on this network, so we won't # ARP spoof the gateway IP... 
self._disable_gw_arp_responder(vlan, net_info.gateway_info.ip) # but we will redirect traffic toward its MAC to br-mpls self._redirect_br_tun_to_mpls(net_info.gateway_info.mac, vlan) # we keep this one just in case VMs have a stale ARP entry # for the gateway IP: self._redirect_br_tun_to_mpls(bgpvpn_const.DEFAULT_GATEWAY_MAC, vlan) # when the OVS firewall driver is used, we can handle the # case where the gateway is directly connected to br-int: # traffic to the gateway MAC will be sent directly to br-tun # (ensuring that traffic that was already fallback'd is not # touched is done in setup_mpls_br) # (need push vlan because NORMAL will not be used, and hence # won't the vlan tag) flow = dict( table=ovs_agt_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, priority=2, # before NORMAL action reg_net=vlan, dl_dst=net_info.gateway_info.mac, actions="push_vlan:0x8100,mod_vlan_vid:%d,output:%s" % ( vlan, self.patch_int2tun) ) ovs_fw.create_reg_numbers(flow) self.int_br.add_flow(**flow) else: # no Neutron router plugged, so ARP spoofing the # gateway IP is needed if not net_info.gateway_info.ip: LOG.warning("could not enable gw ARP responder for %s", net_info.id) return self._enable_gw_arp_responder(vlan, net_info.gateway_info.ip) self._redirect_br_tun_to_mpls(bgpvpn_const.DEFAULT_GATEWAY_MAC, vlan) except vlanmanager.MappingNotFound: LOG.warning("no VLAN mapping for net %s, no gateway redirection " "in place", net_info.id) @log_helpers.log_method_call def _stop_gateway_traffic_redirect(self, net_info, last_port=False, last_assoc=False): if not self._is_ovs_extension(): return if not net_info: return # if we are unplugging the last_port, we don't need to do # anything if there is no l3vpn for this network if last_port and not any([assoc.bgpvpn.type == bgpvpn.BGPVPN_L3 for assoc in net_info.associations]): return # if we have just removed the last l3vpn association for the network # then we don't need to do anything if there is no port on this network if last_assoc and 
len(net_info.ports) == 0: return try: vlan = self.vlan_manager.get(net_info.id, net_info.segmentation_id).vlan if net_info.gateway_info.ip: self._disable_gw_arp_responder(vlan, net_info.gateway_info.ip) else: LOG.debug('no gw IP for %s, no ARP responder to disable', net_info.id) self._stop_redirect_br_tun_to_mpls(vlan) flow = dict( table=ovs_agt_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, reg_net=vlan, ) ovs_fw.create_reg_numbers(flow) self.int_br.delete_flows(**flow) except vlanmanager.MappingNotFound: LOG.warning("no VLAN mapping for net %s, could not disable gw " "redirection", net_info.id) def _base_attach_info(self, port_info, bbgp_vpn_type): i = { 'mac_address': port_info.mac_address, 'local_port': {} } if self._is_ovs_extension(): vlan = self.vlan_manager.get( port_info.network.id, port_info.network.segmentation_id).vlan i['local_port']['linuxif'] = ( '{}:{}'.format(bgpvpn_const.LINUXIF_PREFIX, vlan)) else: raise Exception("The LinuxBridge driver type not supported!") return i @log_helpers.log_method_call def build_bgpvpn_attach_info(self, port_id): if port_id not in self.ports_info: LOG.debug("no info for port %s", port_id) return {} port_info = self.ports_info[port_id] if not port_info.admin_state_up: LOG.debug("port %s admin-state-up False, no attachment", port_id) return {} attachments = {} for vpn_type in bbgp_const.VPN_TYPES: vpn_type_attachments = self._build_attachments(port_info, vpn_type) if vpn_type_attachments: attachments[vpn_type] = vpn_type_attachments if attachments: attachments['network_id'] = port_info.network.id return attachments def _build_attachments(self, port_info, bbgp_vpn_type): net_info = port_info.network assocs = [assoc for assoc in port_info.all_associations if bagpipe_vpn_type(assoc.bgpvpn.type) == bbgp_vpn_type] # skip if we don't have any association with a BGPVPN # of a type corresponding to bbgp_vpn_type if not assocs: return attach_info = self._base_attach_info(port_info, bbgp_vpn_type) attach_info['gateway_ip'] = 
net_info.gateway_info.ip if self._is_ovs_extension(): # Add OVS VLAN information vlan = self.vlan_manager.get(net_info.id, net_info.segmentation_id).vlan if bbgp_vpn_type == bbgp_const.EVPN: attach_info['local_port']['vlan'] = vlan elif bbgp_vpn_type == bbgp_const.IPVPN: attach_info['local_port'].update({ 'ovs': { 'plugged': True, 'port_number': self.patch_mpls_to_tun_ofport, 'vlan': vlan } }) # Add fallback information if needed as well if net_info.gateway_info.mac: attach_info.update({ 'fallback': { 'dst_mac': net_info.gateway_info.mac, 'src_mac': bgpvpn_const.FALLBACK_SRC_MAC, 'ovs_port_number': self.patch_mpls_to_int_ofport } }) if bbgp_vpn_type == bbgp_const.EVPN: # if the network is a VXLAN network, then reuse same VNI # in bagpipe-bgp vni = net_info.segmentation_id LOG.debug("reusing vni %s for net %s", vni, net_info.id) attach_info['vni'] = vni all_vnis = [ assoc.bgpvpn.vni for assoc in port_info.network.associations if (bagpipe_vpn_type(assoc.bgpvpn.type) == bbgp_vpn_type and assoc.bgpvpn.vni is not None) ] if all_vnis: if len(all_vnis) > 1: LOG.warning("multiple VNIs for port %s, using %d", port_info.id, all_vnis[0]) attach_info['vni'] = all_vnis[0] # use the highest local_pref of all associations all_local_prefs = [assoc.bgpvpn.local_pref for assoc in assocs if assoc.bgpvpn.local_pref is not None] if all_local_prefs: attach_info['local_pref'] = max(all_local_prefs) attach_info['description'] = {'port': port_info.id} # produce attachments attachments = [] # attachment for the port fixed IP # except if all routes of all port associations # have advertise_fixed_ips = False if advertise_fixed_ip(port_info): attachment = copy.deepcopy(attach_info) attachment['ip_address'] = port_info.ip_address attachment['instance_description'] = ( 'BGPVPN {} associations for net {} ({})'.format( bbgp_vpn_type, net_info.id, net_info.gateway_info.ip)) attachment.update(format_associations_route_targets(assocs)) attachments.append(attachment) # produce one attachment per 
prefix route address # this is to allow a prefix to be exported only toward the BGPVPN # of the corresponding association for port_assoc in [ pa for pa in port_info.associations if bagpipe_vpn_type(pa.bgpvpn.type) == bbgp_vpn_type]: for route in port_assoc.routes: attachment = copy.deepcopy(attach_info) attachment['vpn_instance_id'] = ( vpn_instance_id_for_port_assoc_route(port_assoc, route)) attachment['direction'] = 'to-port' attachment.update( format_associations_route_targets([port_assoc])) attachment['local_pref'] = ( route.local_pref or port_assoc.bgpvpn.local_pref) bgpvpn_desc = port_assoc.bgpvpn.name or port_assoc.bgpvpn.id if route.type == bgpvpn_rc.PREFIX_TYPE: attachment['ip_address'] = str(route.prefix) attachment['instance_description'] = ( "BGPVPN Port association: prefix route %s to BGPVPN " "%s via port %s (%s)" % (str(route.prefix), bgpvpn_desc, port_info.id, port_info.ip_address)) if '/' in str(route.prefix): attachment['advertise_subnet'] = True elif route.type == bgpvpn_rc.BGPVPN_TYPE: if not port_assoc.advertise_fixed_ips: LOG.warning("ignoring advertise_fixed_ips False for " "assoc %s, route %s", port_assoc, route) attachment['ip_address'] = port_info.ip_address attachment['instance_description'] = ( "BGPVPN Port association: route leaking from %s into " "%s via port %s (%s)" % (route.bgpvpn.name, bgpvpn_desc, port_info.id, port_info.ip_address)) attachment['readvertise'] = { 'from_rt': (set(route.bgpvpn.route_targets) | set(route.bgpvpn.import_targets)), 'to_rt': (set(port_assoc.bgpvpn.route_targets) | set(port_assoc.bgpvpn.export_targets)) } else: LOG.error("unknown type: %s", route.type) attachments.append(attachment) return attachments @log_helpers.log_method_call def _build_detach_infos_for_port_assoc(self, port_info, assoc, specific_route=None): detach_infos = [] bbgp_vpn_type = bagpipe_vpn_type(assoc.bgpvpn.type) base = { 'network_id': port_info.network.id, bbgp_vpn_type: self._base_attach_info(port_info, bbgp_vpn_type) } routes = 
[specific_route] if specific_route else assoc.routes for route in routes: detach = copy.deepcopy(base) detach[bbgp_vpn_type]['vpn_instance_id'] = ( vpn_instance_id_for_port_assoc_route(assoc, route)) if route.type == bgpvpn_rc.PREFIX_TYPE: detach[bbgp_vpn_type]['ip_address'] = str(route.prefix) elif route.type == bgpvpn_rc.BGPVPN_TYPE: detach[bbgp_vpn_type]['ip_address'] = port_info.ip_address else: LOG.error("unknown type: %s", route.type) detach_infos.append(detach) if specific_route is None: if advertise_fixed_ip(port_info): detach = copy.deepcopy(base) detach[bbgp_vpn_type]['ip_address'] = port_info.ip_address detach_infos.append(detach) return detach_infos @log_helpers.log_method_call def _build_detach_infos(self, port_info, detach_bgpvpn_type=None): detach_infos = [] if not port_info.admin_state_up: LOG.debug("port %s admin-state-up False, no attachment", port_info.id) return {} # if an association type is not provided, then detach for all VPN types # of all the associations relevant for the port bgpvpn_types = set([detach_bgpvpn_type] if detach_bgpvpn_type else [assoc.bgpvpn.type for assoc in port_info.all_associations]) # associations for the network detach_info = {} for assoc_type in bgpvpn_types: if (assoc_type == bgpvpn.BGPVPN_L3 and not advertise_fixed_ip(port_info)): continue bbgp_vpn_type = bagpipe_vpn_type(assoc_type) detach_info.update({ bbgp_vpn_type: self._base_attach_info(port_info, bbgp_vpn_type) }) detach_info[bbgp_vpn_type]['ip_address'] = port_info.ip_address if detach_info: detach_info['network_id'] = port_info.network.id detach_infos.append(detach_info) # port associations for assoc_type in bgpvpn_types: for association in set(port_info.associations): if association.bgpvpn.type == assoc_type: detach_infos.extend( self._build_detach_infos_for_port_assoc(port_info, association)) # detach information for BGPVPN leaks # FIXME: this should only be done this when detaching all endpoints # Need to handle the casse where: # - only one bgpvpn 
leaking route is removed # - one association is removed for port_assoc in port_info.associations: for route in [route for route in port_assoc.routes if route['type'] == bgpvpn_rc.BGPVPN_TYPE]: detach_info = { bbgp_vpn_type: self._base_attach_info(port_info, bbgp_vpn_type) } detach_info[bbgp_vpn_type]['vpn_instance_id'] = ( vpn_instance_id_for_port_assoc_route(port_assoc, route)) detach_info[bbgp_vpn_type]['ip_address'] = port_info.ip_address detach_infos.append(detach_info) LOG.debug("detach_infos: %s", detach_infos) return detach_infos ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/agent/bgpvpn/constants.py0000664000175000017500000000203600000000000026333 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from networking_bagpipe.bagpipe_bgp import constants as bbgp_const from neutron_lib.api.definitions import bgpvpn BGPVPN_SERVICE = 'bgpvpn' DEFAULT_GATEWAY_MAC = "00:00:5e:00:43:64" FALLBACK_SRC_MAC = "00:00:5e:2a:10:00" # Map from BGPVPN service VPN types to bagpipe-bgp VPN types BGPVPN_2_BAGPIPE = {bgpvpn.BGPVPN_L2: bbgp_const.EVPN, bgpvpn.BGPVPN_L3: bbgp_const.IPVPN} LINUXIF_PREFIX = "patch2tun" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/agent/bgpvpn/rpc_client.py0000664000175000017500000000730600000000000026446 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging from neutron_lib.agent import topics from neutron_lib import rpc as n_rpc from oslo_log import log as logging LOG = logging.getLogger(__name__) # until we have a better way to add something in the topic namespace # from a python package external to Neutron... 
topics_BAGPIPE_BGPVPN = "bagpipe-bgpvpn" class BGPVPNAgentNotifyApi: """Base class for BGP VPN Service Plugin notification to agent RPC API.""" def __init__(self, topic=topics.AGENT): self.topic = topic self.topic_bgpvpn_update = topics.get_topic_name(self.topic, topics_BAGPIPE_BGPVPN, topics.UPDATE) target = oslo_messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) # BGP VPN CRUD notifications # -------------------------------------- def _notification_fanout(self, context, method, bgpvpn): LOG.debug('Fanout notify BGP VPN agents at %(topic)s ' 'the message %(method)s with %(bgpvpn)s', {'topic': self.topic_bgpvpn_update, 'method': method, 'bgpvpn': bgpvpn}) cctxt = self.client.prepare(topic=self.topic_bgpvpn_update, fanout=True) cctxt.cast(context, method, bgpvpn=bgpvpn) def create_bgpvpn(self, context, bgpvpn): return self._notification_fanout(context, 'create_bgpvpn', bgpvpn) def update_bgpvpn(self, context, bgpvpn): return self._notification_fanout(context, 'update_bgpvpn', bgpvpn) def delete_bgpvpn(self, context, bgpvpn): return self._notification_fanout(context, 'delete_bgpvpn', bgpvpn) # Port attach/detach on/from BGP VPN notifications # --------------------------------------------------------- def _notification_host(self, context, method, port_bgpvpn_info, host): LOG.debug('Notify BGP VPN agent %(host)s at %(topic)s ' 'the message %(method)s with %(port_bgpvpn_info)s', {'host': host, 'topic': self.topic_bgpvpn_update, 'method': method, 'port_bgpvpn_info': port_bgpvpn_info}) cctxt = self.client.prepare(topic=self.topic_bgpvpn_update, server=host) cctxt.cast(context, method, port_bgpvpn_info=port_bgpvpn_info) def attach_port_on_bgpvpn(self, context, port_bgpvpn_info, host=None): if port_bgpvpn_info: self._notification_host(context, 'attach_port_on_bgpvpn', port_bgpvpn_info, host) def detach_port_from_bgpvpn(self, context, port_bgpvpn_info, host=None): self._notification_host(context, 'detach_port_from_bgpvpn', port_bgpvpn_info, 
host) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.914306 networking_bagpipe-22.0.0/networking_bagpipe/agent/common/0000775000175000017500000000000000000000000023740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/agent/common/__init__.py0000664000175000017500000000000000000000000026037 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.914306 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/0000775000175000017500000000000000000000000023611 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/__init__.py0000664000175000017500000000000000000000000025710 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.914306 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/api/0000775000175000017500000000000000000000000024362 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/api/__init__.py0000664000175000017500000000000000000000000026461 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/api/api.py0000664000175000017500000000344400000000000025512 0ustar00zuulzuul00000000000000# Copyright 2017 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import pecan import socketserver from wsgiref import simple_server ROOT_CTRL = 'networking_bagpipe.bagpipe_bgp.api.controllers.RootController' def setup_app(*args, **kwargs): config = { 'server': { 'port': cfg.CONF.API.port, 'host': cfg.CONF.API.host, }, 'app': { 'root': ROOT_CTRL, } } pecan_config = pecan.configuration.conf_from_dict(config) app = pecan.make_app( pecan_config.app.root, debug=False, force_canonical=False, guess_content_type_from_ext=True ) return app class ThreadedSimpleServer(socketserver.ThreadingMixIn, simple_server.WSGIServer): pass class PecanAPI: def __init__(self): app = setup_app() self.wsgi = simple_server.make_server( cfg.CONF.API.host, cfg.CONF.API.port, app, server_class=ThreadedSimpleServer ) def run(self): self.wsgi.serve_forever() def stop(self): # call stop on RootController self.wsgi.get_app().application.root.stop() def main(): api = PecanAPI() api.run() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/api/config.py0000664000175000017500000000203300000000000026177 0ustar00zuulzuul00000000000000# Copyright 2017 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg DEFAULT_PORT = 8082 common_opts = [ cfg.HostAddressOpt("host", default="127.0.0.1", help="IP address on which the API server should listen", deprecated_name="api_host"), cfg.PortOpt("port", default=DEFAULT_PORT, help="Port on which the API server should listen", deprecated_name="api_port") ] def register_config(): cfg.CONF.register_opts(common_opts, "API") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/api/controllers.py0000664000175000017500000003476000000000000027314 0ustar00zuulzuul00000000000000# Copyright 2017 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging as python_logging import time import uuid from oslo_config import cfg from oslo_log import log as logging import pbr.version import pecan from pecan import request from networking_bagpipe.bagpipe_bgp.common import exceptions as exc from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp.vpn import manager as vpn_manager LOG = logging.getLogger(__name__) LOOKING_GLASS_BASE = "looking-glass" def expose(*args, **kwargs): """Helper function so we don't have to specify json for everything.""" kwargs.setdefault('content_type', 'application/json') kwargs.setdefault('template', 'json') return pecan.expose(*args, **kwargs) def when(index, *args, **kwargs): """Helper function so we don't have to specify json for everything.""" kwargs.setdefault('content_type', 'application/json') kwargs.setdefault('template', 'json') return index.when(*args, **kwargs) class PingController: def __init__(self): # Random generated sequence number self.sequence = int(uuid.uuid4()) @expose(generic=True) def index(self): return self.sequence class VPNManagerController: def __init__(self): self.manager = vpn_manager.VPNManager.get_instance() @staticmethod def stop(): vpn_manager.VPNManager.get_instance().stop() class AttachController(VPNManagerController): """attach_localport parameters: 'vpn_instance_id': external VPN instance identifier (all ports with same vpn_instance_id will be plugged in the same VPN instance 'instance_description': free form description of VPN instance 'description': free form description of attachment 'vpn_type': type of the VPN instance ('ipvpn' or 'evpn') 'import_rt': list of import Route Targets (or comma-separated string) 'export_rt': list of export Route Targets (or comma-separated string) 'gateway_ip': IP address of gateway for this VPN instance 'mac_address': MAC address of endpoint to connect to the VPN instance 'ip_address': IP/mask of endpoint 
to connect to the VPN instance 'advertise_subnet': optional, if set to True then VRF will advertise the whole subnet (defaults to False, readvertise ip_address as a singleton (/32) 'linuxbr': Name of a linux bridge to which the linuxif is already plugged-in (optional) 'vni': VXLAN VNI to use (optional) 'local_pref': BGP LOCAL_PREF for the route to this vif (optional) 'direction': 'to-port' | 'from-port' | 'both' # specify local port traffic direction in VPN instance # (route advertisements are not done with from-port only) # to-port: only forward traffic to the VIF # from-port: only forward traffic to the VIF 'local_port': local port to plug to the VPN instance should be a dict containing any of the following key,value pairs { 'linuxif': 'tap456abc', # name of a linux interface # - if OVS information is provided it # does not have to be an existing # interface # - not needed/not used if 'evpn' plug # is used 'ovs': { # optional # whether or not interface is already plugged into the # OVS bridge: 'plugged': True, # name of a linux interface to be plugged into the OVS # bridge (optional and ignored if port_number is # provided): 'port_name': 'qvo456abc', # OVS port number (optional if 'port_name' provided): 'port_number': '7', # the VLAN id for VM traffic (optional) 'vlan': '42', # optional specification of a distinct port to send # traffic to the VM(only applies if a vlan is # specified) : 'to_vm_port_number' 'to_vm_port_name' }, 'evpn': { # for an ipvpn attachment... 'id': 'xyz' # specifies the vpn_instance_id of an evpn # that will be attached to the ipvpn 'ovs_port_name': 'qvb456abc' # optional, if provided, # and if ovs/port_name is # also provided, then the # interface name will be # assumed as already plugged # into the evpn } } if local_port is not a list, it is assumed to be a name of a linux interface (string) 'readvertise': { # optional, used to re-advertise addresses... 
'from_rt': [list of RTs] # ...received on these RTs 'to_rt': [list of RTs] # ...toward these RTs } 'attract_traffic': { # optional, will result in the generation of FlowSpec # routes, based on the specified classifier, # advertised to the readvertise:to_rt RTs, redirecting # traffic toward the "attract_to_rts" RT using a # redirect-to-VRF action action. # The prefixes for which FlowSpec routes are # advertised are: # - prefixes carrying one of the readvertise:from_rt # RTs, # - prefixes in static_destination_prefixes. # # When this is used, the routes that are advertised to # the readvertise:to_rt route targets are, instead of # the prefixes of the routes carrying an RT in # readvertise:from_rt, the prefix in # static_destination_prefixes if any, or else a # default0.0.0.0 route. These routes are advertised # for each locally attached interface, each time with # a distinct RD 'classifier': { 'sourcePrefix': IP/mask, 'sourcePort': Port number or port range, 'destinationPort': Port number or port range, 'protocol': IP protocol }, 'redirect_rts': [list of RTs] # RTs of generated FlowSpec routes 'attract_to_rts': [list of RTs] # RTs of the redirect-to-VRF action of # the generated FlowSpec routes 'static_destination_prefixes': [list of IP/mask] # When present, # FlowSpec routes will # be generated from # these prefixes (as # destination prefix); # this is done # additionally to # FlowSpec routes # generated from # routes carrying # readvertise:from_rt } 'fallback': # (optional) if provided, on a VRF lookup miss, # the MAC destination address will be # rewritten to this MAC before being # sent back where it came from { 'src_mac': 'aa:bb:cc:dd:ee:ff' # new source MAC 'dst_mac': 'aa:bb:cc:dd:ee:00' # new destination MAC 'ovs_port_name': 'patch_foo' 'ovs_port_number': 4 # (unsupported yet) 'ovs_resubmit': '(,)' # (unsupported yet) } """ @expose(generic=True) def index(self): return {} @when(index, method='PUT') @when(index, method='DELETE') def not_supported(self): 
pecan.abort(405) @when(index, method='POST') def process(self): try: attach_params = request.json except Exception: LOG.error('attach_localport: No local port details received') pecan.abort(400, 'No local port details received') try: LOG.info('Local port attach received: %s', attach_params) self.manager.plug_vif_to_vpn(**attach_params) except exc.APIException as e: LOG.warning('attach_localport: API error: %s', e) pecan.abort(400, "API error: %s" % e) except Exception: LOG.exception('attach_localport: An error occurred during local ' 'port plug to VPN') pecan.abort(500, 'An error occurred during local port plug to VPN') class DetachController(VPNManagerController): @expose(generic=True) def index(self): return {} @when(index, method='PUT') @when(index, method='DELETE') def not_supported(self): pecan.abort(405) @when(index, method='POST') def process(self): try: detach_params = request.json except Exception: LOG.error('detach_localport: No local port details received') pecan.abort(400, 'No local port details received') try: LOG.info('Local port detach received: %s', detach_params) self.manager.unplug_vif_from_vpn(**detach_params) except exc.APIException as e: LOG.warning('detach_localport: API error: %s', e) pecan.abort(400, "API error: %s" % e) except Exception: LOG.exception('detach_localport: An error occurred during local ' 'port unplug from VPN') pecan.abort(500, 'An error occurred during local port unplug from ' 'VPN') class LookingGlassController(VPNManagerController, lg.LookingGlassMixin): def __init__(self): super().__init__() self.start_time = time.time() lg.set_references_root(LOOKING_GLASS_BASE) lg.set_reference_path("BGP_WORKERS", ["bgp", "workers"]) lg.set_reference_path("VPN_INSTANCES", ["vpns", "instances"]) lg.set_reference_path("DATAPLANE_DRIVERS", ["vpns", "dataplane", "drivers"]) self.catchall_lg_log_handler = lg.LookingGlassLogHandler() python_logging.getLogger().addHandler(self.catchall_lg_log_handler) @expose(generic=True) def index(self): 
return {} @when(index, method='GET') def process(self, *url_path_elements): path_prefix = "{}://{}/{}".format( request.scheme, # http request.host, LOOKING_GLASS_BASE, ) try: lg_info = self.get_looking_glass_info(path_prefix, url_path_elements) if lg_info is None: raise lg.NoSuchLookingGlassObject(path_prefix, url_path_elements[0]) return lg_info except lg.NoSuchLookingGlassObject as e: LOG.info('looking_glass: %s', repr(e)) pecan.abort(404, repr(e)) except Exception: LOG.exception('looking_glass: an error occurred') pecan.abort(500, 'Server error') @when(index, method='DELETE') @when(index, method='POST') @when(index, method='PUT') def not_supported(self): pecan.abort(405) # Looking glass hooks ################# def get_lg_map(self): return { "summary": (lg.SUBITEM, self.get_lg_summary), "config": (lg.SUBTREE, self.get_lg_config), "bgp": (lg.DELEGATE, self.manager.bgp_manager), "vpns": (lg.DELEGATE, self.manager), "logs": (lg.SUBTREE, self.get_logs) } def get_lg_config(self, path_prefix): return {section: utils.osloconfig_json_serialize(cfg.CONF[section]) for section in ('COMMON', 'API', 'BGP', 'DATAPLANE_DRIVER_IPVPN', 'DATAPLANE_DRIVER_EVPN') } def get_lg_summary(self): return { "BGP_established_peers": self.manager.bgp_manager.get_established_peers_count(), "route_counts": self.manager.bgp_manager.get_lg_route_counts(), "vpn_instances_count": self.manager.get_vpn_instances_count(), "warnings_and_errors": len(self.catchall_lg_log_handler), "start_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.start_time)), "version": (pbr.version.VersionInfo('networking-bagpipe') .release_string()) } def get_logs(self, path_prefix): return [{'level': record.levelname, 'time': self.catchall_lg_log_handler.formatter.formatTime(record), 'name': record.name, 'message': record.msg} for record in self.catchall_lg_log_handler.get_records()] class RootController: @expose(generic=True) def index(self): return {} @when(index, method='POST') @when(index, method='PUT') 
@when(index, method='DELETE') def not_supported(self): pecan.abort(405) ping = PingController() attach_localport = AttachController() detach_localport = DetachController() def stop(self): VPNManagerController.stop() # there is a '-' in the LOOKING_GLASS_BASE name, so we have to use pecan.route pecan.route(RootController, LOOKING_GLASS_BASE, LookingGlassController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/bgp_daemon.py0000664000175000017500000000727300000000000026267 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging as python_logging import signal import sys from oslo_config import cfg from oslo_log import log as logging import pbr.version from neutron.common import config as n_config # noqa from networking_bagpipe.bagpipe_bgp.api import api from networking_bagpipe.bagpipe_bgp.api import config as api_config from networking_bagpipe.bagpipe_bgp.common import config from networking_bagpipe.bagpipe_bgp.engine import exabgp_peer_worker from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as drivers LOG = logging.getLogger(__name__) def setup_config(): api_config.register_config() config.register() cfg.CONF(args=sys.argv[1:], project='bagpipe-bgp', default_config_files=['/etc/bagpipe-bgp/bgp.conf'], version=('%%(prog)s %s' % pbr.version.VersionInfo('networking-bagpipe') .release_string())) BAGPIPE_BGP_MODULE = "networking_bagpipe.bagpipe_bgp" def setup_logging(): # even in debug mode we don't want to much talk from these extra_log_level_defaults = [ '%s.engine.exabgp_peer_worker.exabgp=INFO' % BAGPIPE_BGP_MODULE, '%s.common.looking_glass=WARNING' % BAGPIPE_BGP_MODULE, '%s.engine.route_table_manager=INFO' % BAGPIPE_BGP_MODULE, 'ovsdbapp.backend.ovs_idl.vlog=INFO', ] logging.set_defaults(default_log_levels=(logging.get_default_log_levels() + extra_log_level_defaults)) logging.setup(cfg.CONF, "bagpipe-bgp") def fix_log_file(): # assist transition from past bagpipe-bgp version which were # using --log-file to specify the location of a file to configure logging if (cfg.CONF.log_file and cfg.CONF.log_file.endswith('.conf')): cfg.CONF.log_file = None return ("now using oslo.log, specifying a log configuration file " "should be done with --log-config-append") def daemon_main(): logging.register_options(cfg.CONF) setup_config() log_file_warn = fix_log_file() setup_logging() if log_file_warn: LOG.warning(log_file_warn) exabgp_peer_worker.setup_exabgp_env() try: LOG.info("Starting bagpipe-bgp...") pecan_api = api.PecanAPI() cfg.CONF.log_opt_values(LOG, logging.INFO) 
def stop(signum, _): LOG.info("Received signal %d, stopping...", signum) pecan_api.stop() LOG.info("All threads now stopped...") sys.exit(0) signal.signal(signal.SIGTERM, stop) signal.signal(signal.SIGINT, stop) pecan_api.run() except Exception: LOG.exception("Error while starting BGP daemon") def cleanup_main(): logging.register_options(cfg.CONF) setup_config() fix_log_file() setup_logging() python_logging.root.name = "[BgpDataplaneCleaner]" for vpn_type, dataplane_driver in ( drivers.instantiate_dataplane_drivers().items()): LOG.info("Cleaning dataplane for %s...", vpn_type) dataplane_driver.reset_state() LOG.info("BGP component dataplanes have been cleaned up.") if __name__ == '__main__': daemon_main() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9183059 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/cli/0000775000175000017500000000000000000000000024360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/cli/__init__.py0000664000175000017500000000000000000000000026457 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/cli/impex2dot.py0000664000175000017500000002363000000000000026651 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import os import urllib.error import urllib.request import optparse from oslo_config import cfg from oslo_serialization import jsonutils from networking_bagpipe.bagpipe_bgp.api import config as api_config LOOKING_GLASS_BASE = "looking-glass" def request(options, server, args): target_url = "http://%s:%d/%s/%s" % (server, options.port, options.prefix, "/".join(args)) try: os.environ['NO_PROXY'] = server os.environ['no_proxy'] = server response = urllib.request.urlopen(target_url) if response.getcode() == 200: return jsonutils.load(response) except urllib.error.HTTPError as e: if e.code == 404: return {} print("Error requesting %s" % target_url) raise except Exception: print("Error requesting %s" % target_url) raise def normalize(string): return (string .replace("target:", "rt:") .replace(":", "_") .replace("-", "_") .replace(".", "_")) RT_TXT_STYLE = 'color="orange",fontcolor="orange"' RT_STYLE = 'color="orange",fontcolor="orange",arrowhead=onormal' RT_STYLE_REDIR_FROM_RT = ('color="orange",fontcolor="orange",style=dashed,' 'arrowhead=diamond,arrowtail=oinv,headclip=false,' 'dir=both') RT_STYLE_REDIR_TO_RT = ('color="orange",fontcolor="orange",style=dashed,' 'arrowhead=onormal,tailclip=false,dir=both,' 'arrowtail=oinv') RT_STYLE_FLOWSPEC = 'color="red",fontcolor="red",style=dotted' RT_STYLE_FLOWSPEC_INTER = ('color="red",fontcolor="red",style=dotted,' 'arrowhead=none,dir=both,arrowtail=oinv') RT_STYLE_FLOWSPEC_ACTION = 'color="red",fontcolor="red",style=dashed' PORT_LINK_STYLE = 'style=dashed,dir=none,color="gray",weight=3' def get_all(options): rts = set() vpns = set() for server in options.servers: for vpn in request(options, server, ["vpns", "instances"]): vpn_id = vpn['id'] type = vpn['name'][:3] vpns.add((server, vpn_id, type)) for rt_i in request(options, server, ["vpns", "instances", vpn_id, "route_targets", "import"]): rts.add(normalize(rt_i)) for rt_e in 
request(options, server, ["vpns", "instances", vpn_id, "route_targets", "export"]): rts.add(normalize(rt_e)) readvertise = request(options, server, ["vpns", "instances", vpn_id, "readvertise"]) if readvertise: rts.add(normalize(readvertise['to'][0])) rts.add(normalize(readvertise['from'][0])) attract = readvertise.get('attract_traffic', None) if attract: rts |= {normalize(rt) for rt in attract['redirect_rts']} return (rts, vpns) def vpn_uid(server, vpn): return "vpn_{}__{}".format(normalize(server), normalize(vpn)) def vpn_short(vpn): if len(vpn) > 11: return vpn[:4] + ".." + vpn[-5:] else: return vpn def main(): api_config.register_config() try: cfg.CONF(args=[], project='bagpipe-impex2dot', default_config_files=['/etc/bagpipe-bgp/bgp.conf']) api_port = cfg.CONF.API.port except cfg.ConfigFilesNotFoundError: api_port = api_config.DEFAULT_PORT usage = """ %prog [options] Example: bagpipe-impex2dot --server s1 --server s2 | dot -Tpdf > impex.pdf """ parser = optparse.OptionParser(usage) parser.add_option( "--server", dest="servers", default=[], action="append", help="IP address of a BaGPipe BGP instances (default: localhost)") parser.add_option( "--port", dest="port", type="int", default=api_port, help="Port of BaGPipe BGP (optional, default: %default)") parser.add_option( "--prefix", dest="prefix", default=LOOKING_GLASS_BASE, help="Looking-glass URL Prefix (optional, default: %default)") (options, _) = parser.parse_args() if len(options.servers) == 0: options.servers = ["localhost"] ports = set() dests = set() print('digraph import_export {') print(' node [fontname="Helvetica"];') print(' edge [fontname="Helvetica"];') print(' nodesep=0.55;') (rts, vpns) = get_all(options) print(' subgraph rts {') print(' rank=same;') for rt in rts: label = rt.upper().replace('_', '\\n') print(' {} [shape="circle",label="{}",{}];'.format( rt, label, RT_TXT_STYLE)) print(' }') print(' subgraph ipvpns {') print(' rank=same;') for (server, vpn, _) in filter(lambda x: x[2] == 'VRF', 
vpns): server_spec = ("\\n[%s]" % server) if server != 'localhost' else '' print(' %s [label="{<0>VRF\\n%s%s|}",' 'shape="record"];' % (vpn_uid(server, vpn), vpn_short(vpn), server_spec)) print(' }') print(' subgraph evpns {') for (server, vpn, _) in filter(lambda x: x[2] == 'EVI', vpns): server_spec = ("\\n[%s]" % server) if server != 'localhost' else '' print(' %s [label="{<0>EVI\\n%s%s}",' 'shape="record"];' % (vpn_uid(server, vpn), vpn_short(vpn), server_spec)) print(' }') for (server, vpn, _) in vpns: print(' /* {}:{} */'.format(server, vpn)) uid = vpn_uid(server, vpn) for rt_i in request(options, server, ["vpns", "instances", vpn, "route_targets", "import"]): print(' {} -> {}:0 [{}];'.format(normalize(rt_i), uid, RT_STYLE)) for rt_e in request(options, server, ["vpns", "instances", vpn, "route_targets", "export"]): print(' {}:0 -> {} [{}];'.format(uid, normalize(rt_e), RT_STYLE)) readvertise = request(options, server, ["vpns", "instances", vpn, "readvertise"]) if readvertise: readv = "%s:readv" % vpn_uid(server, vpn) print(' %s -> %s [label="",%s];' % (normalize(readvertise['from'][0]), readv, RT_STYLE_REDIR_FROM_RT)) print(' %s -> %s [label="",%s];' % (readv, normalize(readvertise['to'][0]), RT_STYLE_REDIR_TO_RT)) attract = readvertise.get('attract_traffic', None) if attract: intermediate = "{}_{}_attract".format(normalize(server), normalize(vpn)) print(' %s [style=invis,height=0,width=0,' 'fixedsize=true,rank=1]' % intermediate) redir_rt = attract['redirect_rts'][0] # readv -> intermediate print(' %s -> %s [label="flow",%s];' % (readv, intermediate, RT_STYLE_FLOWSPEC_INTER)) # intermediate -> flowspec route RT print(' %s -> %s [label="",weight=50,%s];' % (intermediate, normalize(redir_rt), RT_STYLE_FLOWSPEC)) # intermediate -> flowspec route redirect action RT print(' %s -> %s [label="action",%s,arrowhead=none' ',weight=20];' % (intermediate, normalize(readvertise['to'][0]), RT_STYLE_FLOWSPEC_ACTION)) for port in request(options, server, ["vpns", 
"instances", vpn, "ports"]).keys(): print(' {} -> port_{}_{} [{},weight=5];'.format( uid, normalize(server), normalize(port), PORT_LINK_STYLE)) ports.add((server, normalize(port))) # possible link between an E-VPN and an IPVPN ? ipvpn = request(options, server, ["vpns", "instances", vpn, "gateway_port", "ipvpn"]) if ipvpn: ipvpn_id = ipvpn['external_instance_id'] print(' {} -> {} [weight=500];'.format( uid, vpn_uid(server, ipvpn_id))) for (server, port) in ports: print(' port_%s_%s [label="",style=invis,height=0,width=0,' 'fixedsize=true];' % (normalize(server), normalize(port))) if port.startswith('to_'): dest = port.split('_')[1] print(' port_%s_%s -> dest_%s_%s [style=dashed,' 'dir=none,color="gray"' ',weight=5];' % (normalize(server), normalize(port), normalize(server), dest)) dests.add((server, dest)) for (server, dest) in dests: print(' dest_%s_%s [label="%s\\n[%s]",shape="square",' 'color="gray"];' % (normalize(server), normalize(dest), dest, server)) print('}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/cli/looking_glass.py0000775000175000017500000001431400000000000027573 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os from sys import stdout import urllib.error import urllib.parse import urllib.request import optparse from oslo_config import cfg from oslo_serialization import jsonutils from networking_bagpipe.bagpipe_bgp.api import config as api_config BAGPIPE_PORT = 8082 LOOKING_GLASS_BASE = "looking-glass" INDENT_INCREMENT = 2 def pretty_print_recurse(data, indent, recursive_requests, url, already_anew_line=False): """recursively pretty print data key has already been output, this function will print data and finish at a start of line returns True if the key output was spread across multiple lines. """ if isinstance(data, dict): more = False if ("id" in data and "href" in data): stdout.write(data["id"]) del data["id"] if ("href" in data): more = True if more: target_url = data["href"] if recursive_requests: if target_url.startswith(url): response = urllib.request.urlopen(target_url) if response.getcode() == 200: pretty_print_recurse(jsonutils.load(response), indent + INDENT_INCREMENT, recursive_requests, target_url, already_anew_line=False) return True else: stdout.write(" (ERROR %d)", response.getcode()) return False del data["href"] stdout.write(" (...)") already_anew_line = False if len(data) > 0: if not already_anew_line: stdout.write("\n") first_val = True for (key, value) in data.items(): if not first_val or not already_anew_line: stdout.write("%s" % (" " * indent)) if first_val: first_val = False stdout.write("%s: " % key) pretty_print_recurse(value, indent + INDENT_INCREMENT, recursive_requests, url) else: if more: stdout.write("\n") else: stdout.write("-\n") elif isinstance(data, list): if len(data) > 0: if not already_anew_line: stdout.write("\n") already_anew_line = True for value in data: stdout.write("%s* " % (" " * indent)) if isinstance(value, dict) or isinstance(value, list): pretty_print_recurse(value, indent + INDENT_INCREMENT, recursive_requests, url, already_anew_line) else: stdout.write("%s\n" % value) already_anew_line = True else: 
stdout.write("-\n") else: if isinstance(data, str) and "\n" in data: data = data.strip("\n").replace("\n", "\n%s" % (" " * indent)) stdout.write("\n%s" % (" " * indent)) stdout.write("%s\n" % data) return False def main(): api_config.register_config() try: cfg.CONF(args=[], project='bagpipe-looking-glass', default_config_files=['/etc/bagpipe-bgp/bgp.conf']) api_port = cfg.CONF.API.port except cfg.ConfigFilesNotFoundError: api_port = api_config.DEFAULT_PORT usage = """ %prog [--server ] path to object in looking-glass e.g.: %prog vpns instances""" parser = optparse.OptionParser(usage) parser.add_option( "--server", dest="server", default="127.0.0.1", help="IP address of BaGPipe BGP (optional, default: %default)") parser.add_option( "--port", dest="port", type="int", default=api_port, help="Port of BaGPipe BGP (optional, default: %default)") parser.add_option( "--prefix", dest="prefix", default=LOOKING_GLASS_BASE, help="Looking-glass URL Prefix (optional, default: %default)") parser.add_option( "-r", "--recurse", dest="recurse", action="store_true", default=False, help="Recurse down into the whole looking-glass (disabled by default)") (options, args) = parser.parse_args() quoted_args = [urllib.parse.quote(arg) for arg in args] target_url = "http://%s:%d/%s/%s" % (options.server, options.port, options.prefix, "/".join(quoted_args)) try: os.environ['NO_PROXY'] = options.server response = urllib.request.urlopen(target_url) if response.getcode() == 200: data = jsonutils.load(response) if (isinstance(data, dict) and "href" in data): target_url_bis = data["href"] response_bis = urllib.request.urlopen(target_url_bis) if response.getcode() == 200: target_url = target_url_bis data = jsonutils.load(response_bis) pretty_print_recurse(data, 0, options.recurse, target_url, already_anew_line=True) except urllib.error.HTTPError as e: if e.code == 404: print("No such looking glass path: %s\n(%s)" % (" ".join(quoted_args), target_url)) else: print("Error code %d: %s" % (e.getcode(), 
e.read())) return except urllib.error.URLError as e: print("No server at http://%s:%d : %s" % (options.server, options.port, e)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/cli/rest_attach.py0000775000175000017500000004314700000000000027247 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import functools import logging import os import re import sys import urllib.error import urllib.request import netaddr import optparse from oslo_config import cfg from oslo_serialization import jsonutils from networking_bagpipe.bagpipe_bgp.api import config as api_config from networking_bagpipe.bagpipe_bgp.common import net_utils from networking_bagpipe.bagpipe_bgp.common import run_command from networking_bagpipe.bagpipe_bgp import constants as const DEFAULT_VPN_INSTANCE_ID = "bagpipe-test" VPN2NS_INTERFACE_PREFIX = "ns-" NS2VPN_DEFAULT_IFNAME = "tovpn" # Needed so that the OVS bridge kernel interface can hava a high enough MTU DEFAULT_MTU = 9000 log_formatter = logging.Formatter("[%(levelname)-5.5s] %(message)s") log = logging.getLogger() console_handler = logging.StreamHandler() console_handler.setFormatter(log_formatter) log.addHandler(console_handler) log.setLevel(logging.WARNING) run_log_command = functools.partial(run_command.run_command, log, run_as_root=True) def 
create_veth_pair(vpn_interface, ns_interface, ns_name): run_log_command("ip netns exec %s ip link delete %s" % (ns_name, ns_interface), raise_on_error=False, acceptable_return_codes=[0, 1]) # in case the interface was previously attached to OVS, # we need to remove that, or when then interface is re-created # OVS will take it back! run_log_command("ovs-vsctl del-port %s" % vpn_interface, raise_on_error=False, acceptable_return_codes=[0, 1]) run_log_command("ip link delete %s" % vpn_interface, raise_on_error=False, acceptable_return_codes=[0, 1]) run_log_command( "ip link add %s type veth peer name %s netns %s mtu 65535" % (vpn_interface, ns_interface, ns_name), raise_on_error=False) run_log_command("ip link set dev %s up" % vpn_interface) run_log_command("ip link set dev %s mtu %d" % (vpn_interface, DEFAULT_MTU)) run_log_command("ip netns exec %s ip link set dev %s up" % (ns_name, ns_interface)) def get_vpn2ns_if_name(namespace): return (VPN2NS_INTERFACE_PREFIX + namespace)[:const.LINUX_DEV_LEN] def create_special_netns_port(options): print("Will plug local namespace %s into network" % options.netns) # create namespace run_log_command("ip netns add %s" % options.netns, raise_on_error=False) # create veth pair and move one into namespace if options.ovs_vlan: create_veth_pair(options.if2netns, "ns2vpn-raw", options.netns) run_log_command("ip netns exec %s ip link add link ns2vpn-raw " "name %s type vlan id %d" % (options.netns, options.if2vpn, options.ovs_vlan)) run_log_command("ip netns exec %s ip link set %s up" % (options.netns, options.if2vpn)) else: create_veth_pair(options.if2netns, options.if2vpn, options.netns) if options.mac: run_log_command("ip netns exec %s ip link set %s address %s" % (options.netns, options.if2vpn, options.mac)) run_log_command("ip netns exec %s ip addr add %s dev %s" % (options.netns, options.ip, options.if2vpn), raise_on_error=False) run_log_command("ip netns exec %s ip route add default dev %s via %s" % (options.netns, options.if2vpn, 
options.gw_ip), raise_on_error=False) run_log_command("ip netns exec %s ip link set %s mtu 1420" % (options.netns, options.if2vpn), raise_on_error=False) def classifier_callback(option, opt_str, value, parser): if not hasattr(parser.values, 'classifier'): parser.values.classifier = dict() parser.values.classifier.update({option.dest: value}) def main(): api_config.register_config() cfg.CONF(args=[], project='bagpipe-rest-attach', default_config_files=['/etc/bagpipe-bgp/bgp.conf']) usage = "usage: %prog [--attach|--detach] --network-type (ipvpn|evpn) "\ "--port (|netns) --ip [/] [options] (see --help)" parser = optparse.OptionParser(usage) parser.add_option("--attach", dest="operation", action="store_const", const="attach", help="attach local port") parser.add_option("--detach", dest="operation", action="store_const", const="detach", help="detach local port") parser.add_option("--network-type", dest="network_type", help="network type (ipvpn or evpn)", choices=[const.IPVPN, const.EVPN]) parser.add_option("--vpn-instance-id", dest="vpn_instance_id", help="UUID for the network instance " "(default: %default-(ipvpn|evpn))", default=DEFAULT_VPN_INSTANCE_ID) parser.add_option("--port", dest="port", help="local port to attach/detach (use special port " "'netns[:if]' to have an interface to a local network " "namespace attached/detached " "[with 'if' as the name of the interface to the netns]") parser.add_option("--direction", dest="direction", choices=[const.TO_PORT, const.FROM_PORT, const.BOTH], default=const.BOTH, help=("local port direction (to-port|from-port|both) " "in VPN (default: %default)")) parser.add_option("--rt", dest="route_targets", help="route target [default: 64512:0] (can be " "specified multiple times)", default=[], action="append") parser.add_option("--import-rt", dest="import_only_rts", help="import-only route target (can be specified" "multiple times)", default=[], action="append") parser.add_option("--export-rt", dest="export_only_rts", 
help="export-only route target (can be specified" "multiple times)", default=[], action="append") parser.add_option("--ip", dest="ip", help="IP prefix / mask (mask defaults to /24)") parser.add_option("--gateway-ip", dest="gw_ip", help="IP address of network gateway (optional, " "defaults to last IP in range)") parser.add_option("--mac", dest="mac", help="MAC address (required for evpn if port" " is not 'netns')") parser.set_defaults(advertise_subnet=False) parser.add_option("--advertise-singleton", action="store_false", dest="advertise_subnet", help="advertise IP address as a /32 (default)") parser.add_option("--advertise-subnet", action="store_true", dest="advertise_subnet", help="advertise the whole IP subnet") parser.add_option("--ovs-preplug", action="store_true", dest="ovs_preplug", default=False, help="should we prealably plug the port " "into an OVS bridge") parser.add_option("--ovs-bridge", dest="bridge", default="br-int", help="if preplug, specifies which OVS bridge to use" " (default: %default)") parser.add_option("--ovs-vlan", dest="ovs_vlan", type='int', help="if specified, only this VLAN from the OVS " "interface will be attached to the VPN instance " "(optional)") parser.add_option("--netns", dest="netns", help="name of network namespace (optional, for use with" " --port netns)") parser.add_option("--if2vpn", dest="if2vpn", default=NS2VPN_DEFAULT_IFNAME, help="name of interface in netns toward VPN" "defaults to %default " "(optional, for use with --port netns)") parser.add_option("--readv-from-rt", dest="readv_from_rts", help="enables route readvertisement from these RTs," " works in conjunction with --readv-to-rt", default=[], action="append") parser.add_option("--readv-to-rt", dest="readv_to_rts", help="enables route readvertisement to these RTs," " works in conjunction with --readv-from-rt", default=[], action="append") parser.add_option("--redirect-rts", dest="redirect_rts", help="Redirection Route Targets to attract traffic, " "matching the 
traffic classifier, in specified VRF from " "any VRF importing this route target", default=[], action="append") parser.add_option("--source-prefix", dest="sourcePrefix", type="string", help="Traffic classifier source prefix " "filter", action="callback", callback=classifier_callback) parser.add_option("--destination-prefix", dest="destinationPrefix", type="string", help="Traffic classifier destination " "prefix filter", action="callback", callback=classifier_callback) parser.add_option("--source-port", dest="sourcePort", type="string", help="Traffic classifier source port " "number or range filter", action="callback", callback=classifier_callback) parser.add_option("--destination-port", dest="destinationPort", type="string", help="Traffic classifier destination port" " number or range filter", action="callback", callback=classifier_callback) parser.add_option("--protocol", dest="protocol", type="string", help="Traffic classifier IP protocol " "filter", action="callback", callback=classifier_callback) parser.add_option("--attract-to-rt", dest="attract_to_rts", help="enables route advertisement to these RTs," " works in conjunction with " "--static-destination-prefix", default=[], action="append") parser.add_option("--static-destination-prefix", dest="static_dest_prefixes", help="static destination prefix to advertise," " works in conjunction with --attract-to-rts", default=[], action="append") parser.add_option("--lb-consistent-hash-order", dest="lb_consistent_hash_order", default=0, type="int", help="Load Balancing consistent hash sort order") parser.add_option("--vni", dest="vni", default=0, type="int", help="VXLAN VNI to use for this VPN instance (optional)") parser.add_option("--local-pref", dest="local_pref", default=None, type="int", help="BGP LOCAL PREF attribute (optional)") (options, _unused) = parser.parse_args() if not options.operation: parser.error("Need to specify --attach or --detach") if not options.port: parser.error("Need to specify --port ") if 
not options.network_type: parser.error("Need to specify --network-type") if not options.ip: parser.error("Need to specify --ip") if (len(options.route_targets) == 0 and not (options.import_only_rts or options.export_only_rts)): if options.network_type == const.IPVPN: options.route_targets = ["64512:512"] else: options.route_targets = ["64512:513"] import_rts = copy.copy(options.route_targets or []) for rt in options.import_only_rts: import_rts.append(rt) export_rts = copy.copy(options.route_targets or []) for rt in options.export_only_rts: export_rts.append(rt) if not re.match('.*/[0-9]+$', options.ip): options.ip = options.ip + "/24" if not options.gw_ip: net = netaddr.IPNetwork(options.ip) print("using %s as gateway address" % str(net[-2])) options.gw_ip = str(net[-2]) if options.vpn_instance_id == DEFAULT_VPN_INSTANCE_ID: options.vpn_instance_id = "{}-{}".format( options.network_type, options.vpn_instance_id) if options.port.startswith("netns"): if not options.netns: options.netns = options.vpn_instance_id try: (_unused, options.if2netns) = options.port.split(":") except Exception: options.if2netns = get_vpn2ns_if_name(options.netns) if options.operation == "attach": create_special_netns_port(options) options.port = options.if2netns if not options.mac: options.mac = net_utils.get_device_mac(run_log_command, options.if2vpn, options.netns) print("Local port: {} ({})".format(options.port, options.mac)) run_log_command("ip link show %s" % options.port) local_port = {} if options.port[:5] == "evpn:": if (options.network_type == const.IPVPN): print("will plug evpn %s into the IPVPN" % options.port[5:]) local_port['evpn'] = {'id': options.port[5:]} else: raise Exception("Can only plug an evpn into an ipvpn") else: local_port['linuxif'] = options.port # currently our only the MPLS OVS driver for ipvpn requires preplug if (options.ovs_preplug and options.network_type == const.IPVPN): print("pre-plugging {} into {}".format(options.port, options.bridge)) 
run_log_command("ovs-vsctl del-port %s %s" % (options.bridge, options.port), raise_on_error=False) run_log_command("ovs-vsctl add-port %s %s" % (options.bridge, options.port)) local_port['ovs'] = {'port_name': options.port, 'plugged': True} if options.ovs_vlan: local_port['ovs']['vlan'] = options.ovs_vlan if not options.mac: if options.network_type == const.IPVPN: options.mac = "52:54:00:99:99:22" else: parser.error("Need to specify --mac for an EVPN network " "attachment if port is not 'netns'") readvertise = None if options.readv_to_rts: readvertise = {"from_rt": options.readv_from_rts, "to_rt": options.readv_to_rts} attract_traffic = dict() if options.redirect_rts: if options.classifier: attract_traffic.update(dict(redirect_rts=options.redirect_rts, classifier=options.classifier)) else: parser.error("Need to specify --redirect-rt and at least one " "traffic classifier option") if options.attract_to_rts: if options.static_dest_prefixes: attract_traffic.update(dict( to=options.attract_to_rts, static_destination_prefixes=options.static_dest_prefixes )) else: parser.error("Need to specify --attract-to-rt and at least " "one static destination prefix option") data = { "import_rt": import_rts, "export_rt": export_rts, "local_port": local_port, "vpn_instance_id": options.vpn_instance_id, "vpn_type": options.network_type, "gateway_ip": options.gw_ip, "mac_address": options.mac, "ip_address": options.ip, "advertise_subnet": options.advertise_subnet, "readvertise": readvertise, "attract_traffic": attract_traffic, "lb_consistent_hash_order": options.lb_consistent_hash_order, "vni": options.vni } if options.local_pref: data['local_pref'] = options.local_pref if options.direction: data['direction'] = options.direction json_data = jsonutils.dumps(data).encode('utf-8') print("request: %s" % json_data) os.environ['NO_PROXY'] = "127.0.0.1" req = urllib.request.Request( "http://127.0.0.1:%d/%s_localport" % (cfg.CONF.API.port, options.operation), json_data, {'Content-Type': 
'application/json'}) try: response = urllib.request.urlopen(req) response_content = response.read() response.close() print("response: %d %s" % (response.getcode(), response_content)) except urllib.error.HTTPError as e: error_content = e.read() print(" %s" % error_content) sys.exit("error %d, reason: %s" % (e.code, e.reason)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9183059 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/0000775000175000017500000000000000000000000025101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/__init__.py0000664000175000017500000000121400000000000027210 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def plural(x): if len(x) > 1: return "s" else: return "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/config.py0000664000175000017500000000724700000000000026732 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shlex import socket from oslo_config import cfg from oslo_config import types from oslo_privsep import priv_context from pyroute2 import ndb as ndb_module class InterfaceAddress(types.ConfigType): # Option type for a config entry accepting whether an IP address # or an interface from which to derive the IP address # convert from IP version (4 or 6) to family number FAMILY_MAP = { 4: socket.AF_INET, 6: socket.AF_INET6, } def __init__(self, type_name="interface address value", version=4): super().__init__(type_name=type_name) self.version = version self.family = self.FAMILY_MAP[version] self.ip_address = types.IPAddress(version) def __call__(self, value): try: return self.ip_address(value) except ValueError: # pyroute2 call to take the first address of this interface having # the right IP version (family) with ndb_module.main.NDB() as ndb: # pylint: disable=no-member try: interface = ndb.interfaces[value] except KeyError: raise ValueError("interface %s does not exist" % value) # we can't use an iterator if we want to access dictionaries # inside ipaddr for i in range(0, len(interface.ipaddr)): addr = interface.ipaddr[i] if addr['family'] == self.family: return self.ip_address(addr['address']) raise ValueError("no IPv%s address found on interface %s", self.version, value) def _formatter(self, value): address = self(value) return "{}({})".format(address, value) def __repr__(self): return "InterfaceAddress" def __eq__(self, other): return self.__class__ == other.__class__ bgp_opts = [ cfg.Opt('local_address', required=True, type=InterfaceAddress(), help="IP 
address used for BGP peerings"), cfg.ListOpt('peers', default=[], item_type=types.HostAddress(version=4), help="IP addresses of BGP peers"), cfg.IntOpt('my_as', min=1, max=2 ** 16 - 1, required=True, help="Our BGP Autonomous System"), cfg.BoolOpt('enable_rtc', default=True, help="Enable RT Constraint (RFC4684)"), cfg.PortOpt('bgp_port', default=179, help="TCP port of connections to BGP peers") ] def register(): cfg.CONF.register_opts(bgp_opts, "BGP") def set_default_root_helper(): # copy bagpipe-bgp root helper configuration into neutron's config, so # that neutron classes find the right configuration to execute commands cfg.CONF.set_default('root_helper', cfg.CONF.COMMON.root_helper, group="AGENT") cfg.CONF.set_default('root_helper_daemon', cfg.CONF.COMMON.root_helper_daemon, group="AGENT") def setup_privsep(): priv_context.init(root_helper=shlex.split(cfg.CONF.COMMON.root_helper)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/dataplane_utils.py0000664000175000017500000002151500000000000030630 0ustar00zuulzuul00000000000000# Copyright 2018 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import collections from oslo_log import log as logging from networking_bagpipe._i18n import _ from networking_bagpipe.bagpipe_bgp.common import log_decorator from neutron.agent.common import ovs_lib from neutron_lib import exceptions from neutron_lib.plugins.ml2 import ovs_constants as ovs_const LOG = logging.getLogger(__name__) # largely copied from networking_sfc.services.sfc.common.ovs_ext_lib class OVSBridgeWithGroups: def __init__(self, ovs_bridge): self.bridge = ovs_bridge # OpenFlow 1.1 is needed to manipulate groups self.bridge.use_at_least_protocol(ovs_const.OPENFLOW11) # proxy most methods to self.bridge def __getattr__(self, name): return getattr(self.bridge, name) def do_action_groups(self, action, kwargs_list): group_strs = [_build_group_expr_str(kw, action) for kw in kwargs_list] options = ['-'] if action == 'add' or action == 'del': cmd = '%s-groups' % action elif action == 'mod': cmd = '%s-group' % action options.insert(0, '--may-create') elif action == 'insert-buckets' or action == 'remove-buckets': cmd = action else: msg = _("Action is illegal") raise exceptions.InvalidInput(error_message=msg) if action == 'del' and {} in kwargs_list: self.run_ofctl(cmd, []) else: self.run_ofctl(cmd, options, '\n'.join(group_strs)) @log_decorator.log_info def add_group(self, **kwargs): self.do_action_groups('add', [kwargs]) @log_decorator.log_info def mod_group(self, **kwargs): self.do_action_groups('mod', [kwargs]) @log_decorator.log_info def delete_group(self, **kwargs): self.do_action_groups('del', [kwargs]) @log_decorator.log_info def insert_bucket(self, **kwargs): self.do_action_groups('insert-buckets', [kwargs]) @log_decorator.log_info def remove_bucket(self, **kwargs): self.do_action_groups('remove-buckets', [kwargs]) def dump_group_for_id(self, group_id): retval = None group_str = "%d" % group_id group = self.run_ofctl("dump-groups", [group_str]) if group: retval = '\n'.join(item for item in group.splitlines() if ovs_lib.is_a_flow_line(item)) 
return retval def get_bridge_ports(self): port_name_list = self.bridge.get_port_name_list() of_portno_list = list() for port_name in port_name_list: of_portno_list.append(self.bridge.get_port_ofport(port_name)) return of_portno_list def _build_group_expr_str(group_dict, cmd): group_expr_arr = [] buckets = None group_id = None if cmd != 'del': if "group_id" not in group_dict: msg = _("Must specify one group Id on group addition" " or modification") raise exceptions.InvalidInput(error_message=msg) group_id = "group_id=%s" % group_dict.pop('group_id') if cmd != 'remove-buckets': if "buckets" not in group_dict: msg = _("Must specify one or more buckets on group addition/" "modification or buckets insertion/deletion") raise exceptions.InvalidInput(error_message=msg) buckets = "%s" % group_dict.pop('buckets') if group_id: group_expr_arr.append(group_id) for key, value in group_dict.items(): group_expr_arr.append("{}={}".format(key, value)) if buckets: group_expr_arr.append(buckets) return ','.join(group_expr_arr) class OVSExtendedBridge(ovs_lib.OVSBridge): def add_flow_extended(self, flow_matches=None, actions=None): if flow_matches is None: flow_matches = [] if actions is None: actions = [] flow_args = {} for match in flow_matches: flow_args.update(match) if actions: flow_args["actions"] = join_s(*actions) self.add_flow(**flow_args) def delete_flows_extended(self, flow_matches=None): if flow_matches is None: flow_matches = [] flow_args = {} for match in flow_matches: flow_args.update(match) self.delete_flows(**flow_args) def join_s(*args): return ','.join([_f for _f in args if _f]) class ObjectLifecycleManager: def __init__(self): self.objects = dict() self.object_used_for = collections.defaultdict(set) @log_decorator.log_info def is_object_user(self, object_key, user_key): return (object_key in self.objects and user_key in self.object_used_for[object_key]) @abc.abstractmethod def create_object(self, object_key, *args, **kwargs): pass @abc.abstractmethod def 
delete_object(self, object): pass @log_decorator.log_info def get_object(self, object_key, user_key, *args, **kwargs): obj = self.find_object(object_key) if obj is None: obj = self.create_object(object_key, *args, **kwargs) self.objects[object_key] = obj LOG.debug("object for %s: %s", object_key, obj) first = not self.object_used_for[object_key] self.object_used_for[object_key].add(user_key) if first: LOG.debug("%s is first user for %s", user_key, object_key) return (obj, first) @log_decorator.log_info def find_object(self, object_key): obj = self.objects.get(object_key) if obj is not None: LOG.debug("existing object for %s: %s", object_key, obj) return obj @log_decorator.log_info def free_object(self, object_key, user_key): if object_key not in self.object_used_for: LOG.debug("no object to free for %s", object_key) return self.object_used_for[object_key].discard(user_key) last = not self.object_used_for[object_key] if last: obj = self.objects[object_key] LOG.debug("%s was last user for %s, clearing", user_key, object_key) self.delete_object(obj) del self.objects[object_key] del self.object_used_for[object_key] else: LOG.debug("remaining users for object %s: %s", object_key, self.object_used_for[object_key]) return last @log_decorator.log_info def clear_objects(self, filter_method): for object_key, users in list(self.object_used_for.items()): for user in users: if filter_method(object_key, user): self.delete_object(self.objects[object_key]) del self.objects[object_key] del self.object_used_for[object_key] break def infos(self): return self.objects class ObjectLifecycleManagerProxy: def __init__(self, manager, parent_user): self.manager = manager self.parent_user = parent_user def _object_key(self, object_key): return (self.parent_user, object_key) def is_object_user(self, object_key, user_key): return self.manager.is_object_user(self._object_key(object_key), (self.parent_user, user_key)) def get_object(self, object_key, user_key, *args, **kwargs): return 
self.manager.get_object(self._object_key(object_key), (self.parent_user, user_key), *args, **kwargs) def find_object(self, object_key): return self.manager.find_object(self._object_key(object_key)) def free_object(self, object_key, user_key): if user_key: return self.manager.free_object(self._object_key(object_key), (self.parent_user, user_key)) def clear_objects(self, filter_method=lambda obj_key, user_key: True): self.manager.clear_objects( lambda obj_key, user_key: (user_key[0] == self.parent_user and filter_method(obj_key, user_key[1])) ) def infos(self): return self.manager.infos() class SharedObjectLifecycleManagerProxy(ObjectLifecycleManagerProxy): def _object_key(self, object_key): return object_key ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/exceptions.py0000664000175000017500000000423100000000000027634 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RemotePEMACAddressNotFound(Exception): def __init__(self, ip_address): super().__init__( "MAC address for %s could not be found. 
CAUTION:" " Need direct MPLS/Eth connection" % ip_address) class APIException(Exception): pass class VPNNotFound(APIException): def __init__(self, vrf_id): super().__init__("VPN %s could not be found" % vrf_id) class MalformedMACAddress(APIException): def __init__(self, address): super().__init__( "MAC address %s is not valid" % address) class MalformedIPAddress(APIException): def __init__(self, address): super().__init__( "IP address %s is not valid" % address) class OVSBridgeNotFound(APIException): def __init__(self, bridge): super().__init__( "OVS bridge '%s' doesn't exist" % bridge) class OVSBridgePortNotFound(APIException): def __init__(self, interface, bridge): super().__init__( "OVS Port {} doesn't exist on OVS Bridge {}".format(interface, bridge)) class APIMissingParameterException(APIException): def __init__(self, parameter): super().__init__( "Missing parameter: '%s'" % parameter) class APIAlreadyUsedVNI(APIException): def __init__(self, vni): super().__init__( "A VPN instance using vni %d already exists." % vni) class APINotPluggedYet(APIException): def __init__(self, endpoint): super().__init__( "Endpoint {} not plugged yet, can't unplug".format(endpoint)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/log_decorator.py0000664000175000017500000000254200000000000030301 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import functools from oslo_log import log as logging # inspired from neutron.log def log(method, level=logging.DEBUG): """Decorator helping to log method calls.""" @functools.wraps(method) def wrapper(*args, **kwargs): instance = args[0] data = {"class_name": instance.__class__.__name__, "method_name": method.__name__, "args": args[1:], "kwargs": kwargs} if hasattr(args[0], 'log'): logger = args[0].log else: logger = logging.getLogger(method.__module__) logger.log(level, 'method %(class_name)s.%(method_name)s' ' called with %(args)s %(kwargs)s', data) return method(*args, **kwargs) return wrapper def log_info(method): return log(method, logging.INFO) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/looking_glass.py0000664000175000017500000004006000000000000030306 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging as python_logging import re from oslo_log import log as logging from urllib.parse import quote LOG = logging.getLogger(__name__) VALUE = 'VALUE' SUBITEM = 'SUBITEM' SUBTREE = 'SUBTREE' FORWARD = 'FORWARD' COLLECTION = 'COLLECTION' DELEGATE = 'DELEGATE' def _split_lg_path(path_prefix, path): if len(path) == 0: return (None, None, path_prefix) else: return (path[0], path[1:], # pylint: disable=no-member "{}/{}".format(path_prefix, quote(path[0]))) def _get_lg_local_info_recurse(obj, cls, path_prefix): if cls == LookingGlassMixin: return {} result = cls.get_lg_local_info(obj, path_prefix) assert isinstance(result, dict) for base in cls.__bases__: if issubclass(base, LookingGlassMixin): result.update( _get_lg_local_info_recurse(obj, base, path_prefix)) return result def _get_lg_map_recurse(obj, cls): if cls == LookingGlassMixin: return {} result = cls.get_lg_map(obj) for base in cls.__bases__: if issubclass(base, LookingGlassMixin): result.update(_get_lg_map_recurse(obj, base)) else: LOG.debug("not recursing into %s", base) return result def _lookup_path(my_dict, path): '''lookup path in dict''' assert isinstance(path, (list, tuple)) if len(path) == 0: return my_dict # len(path)>0 if not isinstance(my_dict, dict): raise KeyError(path[0]) else: return _lookup_path(my_dict[path[0]], path[1:]) def get_lg_prefixed_path(path_prefix, path_items): fmt = "%s" + ('/%s' * len(path_items)) # pylint: disable=no-member quoted_path_items = [quote(path_item) for path_item in path_items] quoted_path_items.insert(0, path_prefix) return fmt % tuple(quoted_path_items) class LookingGlassMixin: def _get_lg_map(self): # not to be overridden: calls get_lg_map, on each of the super classes # and merge the result in a dict return _get_lg_map_recurse(self, self.__class__) def get_lg_map(self): """Main looking glass hook for LG objects This can be overridden by looking glass objects to indicate looking glass information items for this objects. 
:returns: a dict mapping a to a (,) tuple if lg_map_type is VALUE, then the looking glass information for will be if lg_map_type is SUBITEM, then is supposed to be a function and the looking glass information for will be the result of calling hook() if lg_map_type is SUBTREE, then is supposed to be a function and the looking glass information for will be the result of calling hook(path_prefix), but this information will only be produced if the is queried (not produced if the full object is queried) if lg_map_type is FORWARD, then is supposed to be a looking glass object and the looking glass information for will be the looking glass information for of object if lg_map_type is DELEGATE, then is supposed to be a looking glass object and the looking glass information for will be the full looking glass information for object if lg_map_type is COLLECTION, then is supposed to be a tuple of functions (list_callback,target_callback). list_callback() is expected to return a list of string, each string identifying a looking glass object target_callback(string) is expected to return the looking glass object corresponding to if *self* is directly queried, the information returned is just a list of dict containing "href" values pointing to each object in the collection if a is queried, the information returned is the looking glass information for the object corresponding to """ return {} def _get_lg_local_info(self, path_prefix): # not to be overridden: calls get_lg_local_info, on each of # the super classes and merge the result in a dict return _get_lg_local_info_recurse(self, self.__class__, path_prefix) def get_lg_local_info(self, path_prefix): """Deeper hook for LG objects Can be overridden by looking glass objects instead of get_lg_map :param path_prefix: the URL prefix that was used to reach *self* through the looking glass :returns: a dict that will be serialized as JSON and passed to the looking glass client, either as is, or if a sub path was queried, the dict value 
corresponding to the first item of the path """ return {} def get_looking_glass_info(self, path_prefix="", path=None): """Internal method to build the looking glass information Builds the LG information for *self* based on the looking glass map. *not* to be overridden by looking glass objects """ if path is None: path = [] (first_segment, path_reminder, new_path_prefix) = _split_lg_path(path_prefix, path) lg_map = self._get_lg_map() if first_segment in lg_map: (mapping_type, mapping_target) = lg_map[first_segment] LOG.debug("Delegation for path_item '%s': %s:%s ", first_segment, mapping_type, mapping_target) if mapping_type == VALUE: return mapping_target if mapping_type == FORWARD: LOG.debug( " Forwarded '%s' to target %s...", path, mapping_target) if not isinstance(mapping_target, LookingGlassMixin): LOG.error("Delegation target for '%s' at '%s' does not " "implement LookingGlassMixin!", first_segment, new_path_prefix) raise NoSuchLookingGlassObject(new_path_prefix, first_segment) return mapping_target.get_looking_glass_info(path_prefix, path) if mapping_type == FORWARD: LOG.debug( " Forwarded '%s' to target %s...", path, mapping_target) if not isinstance(mapping_target, LookingGlassMixin): LOG.error("Delegation target for '%s' at '%s' does not " "implement LookingGlassMixin!", first_segment, new_path_prefix) raise NoSuchLookingGlassObject(new_path_prefix, first_segment) return mapping_target.get_looking_glass_info(path_prefix, path) elif mapping_type == DELEGATE: LOG.debug( " Delegated '%s' to delegation target %s ...", path, mapping_target) if not isinstance(mapping_target, LookingGlassMixin): LOG.error("Delegation target for '%s' at '%s' does not " "implement LookingGlassMixin!", first_segment, new_path_prefix) raise NoSuchLookingGlassObject(new_path_prefix, first_segment) return mapping_target.get_looking_glass_info(new_path_prefix, path_reminder) elif mapping_type == SUBITEM: LOG.debug(" Sub-item callback: %s", first_segment) try: return 
_lookup_path(mapping_target(), path_reminder) except KeyError as e: raise NoSuchLookingGlassObject(new_path_prefix, str(e)) elif mapping_type == SUBTREE: LOG.debug(" Subtree callback: %s(...)", first_segment) try: return _lookup_path(mapping_target(new_path_prefix), path_reminder) except KeyError as e: raise NoSuchLookingGlassObject(new_path_prefix, str(e)) elif mapping_type == COLLECTION: LOG.debug(" Collection callback...") (list_callback, target_callback) = mapping_target (second_segment, path_reminder, newer_path_prefix) = \ _split_lg_path(new_path_prefix, path_reminder) if second_segment is None: LOG.debug(" Getting list elements: %s", list_callback) result = [] for x in list_callback(): x["href"] = get_lg_prefixed_path(path_prefix, [first_segment, x["id"]]) result.append(x) return result else: LOG.debug(" Callback -> resolve subItem '%s' with %s " "and follow up get_looking_glass_info(...'%s')", second_segment, target_callback, path_reminder) try: # TODO(tmorin): catch errors target = target_callback(second_segment) if target is None: LOG.error("No delegation target for '%s' at '%s' ", second_segment, new_path_prefix) raise NoSuchLookingGlassObject(new_path_prefix, second_segment) if not isinstance(target, LookingGlassMixin): LOG.error("Delegation target for '%s' at '%s' does" " not implement LookingGlassMixin (%s)!", second_segment, new_path_prefix, type(target)) raise NoSuchLookingGlassObject(new_path_prefix, second_segment) return target.get_looking_glass_info(newer_path_prefix, path_reminder) except KeyError: raise NoSuchLookingGlassObject(new_path_prefix, second_segment) # firt_segment is None or is not in our map # let's build LookingGlassLocalInfo info = self._get_lg_local_info(path_prefix) for (path_item, (mapping_type, mapping_target)) in lg_map.items(): if path_item in info: LOG.warning("overriding '%s', present both in " "LookingGlassLocalInfo and LookingGlassMixin map", path_item) if mapping_type in (FORWARD, DELEGATE, SUBTREE, COLLECTION): 
info[path_item] = {"href": get_lg_prefixed_path(path_prefix, [path_item]) } elif mapping_type == SUBITEM: LOG.debug(" Subitem => callback %s(...)", mapping_target) # TODO(tmorin): catch errors info[path_item] = mapping_target() elif mapping_type == VALUE: info[path_item] = mapping_target else: LOG.warning("LGMap not processed for %s", path_item) if first_segment is None: return info else: try: return _lookup_path(info, path) except KeyError as e: raise NoSuchLookingGlassObject(new_path_prefix, str(e)) LOG.warning("Looking glass did not found a looking-glass object for" " this path...") return None class NoSuchLookingGlassObject(Exception): def __init__(self, path_prefix, path): super().__init__() assert isinstance(path_prefix, str) self.path_prefix = path_prefix assert isinstance(path, str) self.path = path def __repr__(self): return "No such looking glass object: {} at {}".format( self.path, self.path_prefix) # Looking glass reference URLs root = "" references = {} def set_references_root(url_prefix): global root root = url_prefix def set_reference_path(reference, path): references[reference] = path def get_absolute_path(reference, path_prefix, path=None): if path is None: path = [] index = path_prefix.find(root) absolute_base_url = path_prefix[:index + len(root)] return get_lg_prefixed_path(absolute_base_url, references[reference] + path) class LookingGlassLogHandler(python_logging.Handler): """Looking Glass LogHandler storing logs to make them available in LG This log handler simply stores the last messages of importance above . These messages can be retrieved with .get_records(). 
""" def __init__(self, level=logging.WARNING, max_size=100): super().__init__(level) self.records = [] self.max_size = max_size self.setFormatter( python_logging.Formatter('%(asctime)s - %(levelname)s - ' '%(message)s')) def emit(self, record): # expand the log message now and free references to the arguments record.msg = record.getMessage().replace('"', "'") record.args = [] self.records.insert(0, record) del self.records[self.max_size:] def __len__(self): return len(self.records) def get_records(self): return self.records def reset_local_lg_logs(self): del self.records[:] class LookingGlassLocalLogger(LookingGlassMixin): """LookingGlass Mixin making self.log a LookingGlass log catcher For objects subclassing this class, self.log will be a logger derived from based on the existing logging configuration, but with an additional logger using LookingGlassLogHandler. This additional logger is used to make the last records (above WARNING) available through the looking glass """ def __init__(self, append_to_name=""): try: self.lg_log_handler except AttributeError: self.lg_log_handler = LookingGlassLogHandler() name = self.__module__ if append_to_name: name += "." + append_to_name elif hasattr(self, 'instance_id'): # pylint: disable=no-member name += ".%d" % self.instance_id elif hasattr(self, 'name'): # pylint: disable=no-member name += ".%s" % re.sub("[. 
]", "-", self.name).lower() self.log = logging.getLogger(name) self.log.logger.addHandler(self.lg_log_handler) def get_lg_map(self): return {"logs": (SUBTREE, self.get_logs)} def get_logs(self, path_prefix): return [{'level': record.levelname, 'time': self.lg_log_handler.formatter.formatTime(record), 'message': record.msg} for record in self.lg_log_handler.get_records()] def _reset_local_lg_logs(self): self.lg_log_handler.reset_local_lg_logs() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/net_utils.py0000664000175000017500000000252600000000000027466 0ustar00zuulzuul00000000000000# Copyright 2016 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_log import log as logging LOG = logging.getLogger(__name__) def get_device_mac(run_command_fn, dev_name, netns_name=None): """Find device MAC address""" if netns_name: command_prefix = "ip netns exec %s " % netns_name else: command_prefix = "" (output, _) = run_command_fn("%scat /sys/class/net/%s/address" % (command_prefix, dev_name)) return output[0] def set_device_mac(run_command_fn, dev_name, mac_address, netns_name=None): """Set device MAC address""" if netns_name: command_prefix = "ip netns exec %s " % netns_name else: command_prefix = "" run_command_fn("{} ip link set {} address {}".format( command_prefix, dev_name, mac_address), run_as_root=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/run_command.py0000664000175000017500000001211400000000000027754 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import subprocess import threading from oslo_config import cfg from oslo_log import log as logging from oslo_rootwrap import client import shlex common_opts = [ cfg.StrOpt("root_helper", default="sudo", help="Root helper command."), cfg.StrOpt("root_helper_daemon", help="Root helper daemon application to use when possible."), ] cfg.CONF.register_opts(common_opts, "COMMON") class RootwrapDaemonHelper: __client = None __lock = threading.Lock() def __new__(cls): """There is no reason to instantiate this class""" raise NotImplementedError() @classmethod def get_client(cls): with cls.__lock: if cls.__client is None: cls.__client = client.Client( shlex.split(cfg.CONF.COMMON.root_helper_daemon)) return cls.__client def _rootwrap_command(log, command, stdin=None, shell=False): '''Executes 'command' in rootwrap mode. Returns (exit_code, command_output command_error) - command_output is the list of lines output on stdout by the command - command_error is the list of lines error on stderr by the command ''' rootwrap_client = RootwrapDaemonHelper.get_client() if shell: exit_code, output, error = rootwrap_client.execute(["sh", "-c", command], stdin) else: exit_code, output, error = rootwrap_client.execute(command.split(), stdin) return (exit_code, output, error) def _shell_command(log, command, stdin=None): '''Executes 'command' in subshell mode. 
Returns (exit_code, command_output, command_error) - command_output is the list of lines output on stdout by the command - command_error is the list of lines error on stderr by the command ''' process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (output, error) = process.communicate(stdin) exit_code = process.returncode return (exit_code, output, error) def _log_stdx_if(log_fn, output, error): if output: log_fn(" run_command stdout: %s", "\n ".join(output)) if error: log_fn(" run_command stderr: %s", "\n".join(error)) def run_command(log, command, run_as_root=False, raise_on_error=True, acceptable_return_codes=[0], *args, **kwargs): '''Executes 'command' in subshell or rootwrap mode. Returns (command_output, exit_code) - command_output is the list of lines output on stdout by the command Raises an exception based on the following: - will only raise an Exception if raise_on_error optional parameter is True - the exit code is acceptable - exit code is acceptable by default if it is zero - exit code is acceptable if it is in the (optional) acceptable_return_codes list parameter - putting -1 in the acceptable_return_codes list means that *any* exit code is acceptable ''' if run_as_root and cfg.CONF.COMMON.root_helper_daemon: log.debug("Running command in rootwrap mode: %s", command) exit_code, output, error = _rootwrap_command(log, command, *args, **kwargs) else: log.debug("Running command in subshell mode: %s ", command) if run_as_root: command = " ".join([cfg.CONF.COMMON.root_helper, command]) # remove shell from kwargs (uses shell by default) kwargs.pop("shell", False) exit_code, output, error = _shell_command(log, command, *args, **kwargs) output = [line.decode() for line in output.splitlines()] error = [line.decode() for line in error.splitlines()] if log.isEnabledFor(logging.DEBUG): _log_stdx_if(log.debug, output, error) if (exit_code in acceptable_return_codes or -1 in acceptable_return_codes): 
return (output, exit_code) else: message = "Exit code %d when running '%s'" % (exit_code, command) if raise_on_error: log.error(message) _log_stdx_if(log.error, output, error) raise Exception(message) else: log.warning(message) return (output, exit_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/common/utils.py0000664000175000017500000000452200000000000026616 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import collections import re from oslo_concurrency import lockutils from oslo_config import cfg from networking_bagpipe.bagpipe_bgp.engine import exa def synchronized(method): def synchronized_method(self, *arg, **kws): with self.lock: return method(self, *arg, **kws) return synchronized_method oslo_synchronized = lockutils.synchronized_with_prefix('bagpipe-bgp-') def plural(x): if len(x) > 1: return "s" else: return "" def invert_dict_of_sets(d): '''return inverted dict of sets from original dict original dict possibly containing sets of non-unique hashable items ''' new_d = collections.defaultdict(set) for k in d: for v in d[k]: new_d[v].add(k) return new_d camel2underscore_regex = re.compile('(?!^)([A-Z]+)') def dict_camelcase_to_underscore(dictionary): '''copy dict, with translation of keys from FooBar to foo_bar''' return {camel2underscore_regex.sub(r'_\1', key).lower(): value for (key, value) in dictionary.items() } def osloconfig_json_serialize(obj): if (isinstance(obj, cfg.ConfigOpts) or isinstance(obj, cfg.ConfigOpts.GroupAttr)): return {osloconfig_json_serialize(k): osloconfig_json_serialize(v) for k, v in obj.items()} return obj def convert_route_targets(orig_list): assert isinstance(orig_list, list) list_ = [] for rt in orig_list: if rt == '': continue try: asn, nn = rt.split(':') list_.append(exa.RouteTarget(int(asn), int(nn))) except Exception: raise Exception("Malformed route target: '%s'" % rt) return list_ class ClassReprMixin: def __repr__(self): return self.__class__.__name__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/constants.py0000664000175000017500000000212200000000000026174 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IPVPN = "ipvpn" EVPN = "evpn" VPN_TYPES = [EVPN, IPVPN] RT_IMPORT = 'import_rt' RT_EXPORT = 'export_rt' RT_TYPES = [RT_IMPORT, RT_EXPORT] # port directions TO_PORT = 'to-port' FROM_PORT = 'from-port' BOTH = 'both' ALL_DIRECTIONS = (BOTH, TO_PORT, FROM_PORT) def config_group(vpn_type): return "DATAPLANE_DRIVER_%s" % vpn_type.upper() # maximum length for a linux network device name # grep 'define.*IFNAMSIZ' /usr/src/linux/include/uapi/linux/if.h # define IFNAMSIZ 16 # (minus 1 for trailing null) LINUX_DEV_LEN = 15 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9223058 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/0000775000175000017500000000000000000000000025056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/__init__.py0000664000175000017500000002627500000000000027203 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """This module classes related to producing and consuming events related to BGP routes. routes: RouteEntry events: RouteEvent an announcement or a withdrawal of a BGP route workers: Worker * produce events * subscribe to the route table manager to consume events related to certain BGP routes route table manager (singleton) * tracks subscriptions of workers * dispatches events based on subscriptions """ from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.engine import exa LOG = logging.getLogger(__name__) class RouteEntry(lg.LookingGlassMixin): """A route entry describes a BGP route i.e. the association of: * a BGP NLRI of a specific type (e.g. a VPNv4 route like "1.2.3.4:5:192.168.0.5/32") * BGP attributes * the source of the BGP route (e.g. the BGP peer, or the local VPN instance, that advertizes the route) """ def __init__(self, nlri, rts=None, attributes=None, source=None): if attributes is None: attributes = exa.Attributes() assert isinstance(attributes, exa.Attributes) if rts is not None: assert isinstance(rts, list) assert len(rts) == 0 or isinstance(rts[0], exa.RouteTarget) self.source = source self.afi = nlri.afi self.safi = nlri.safi self.nlri = nlri self.attributes = attributes # a list of exa.bgp.message.update.attribute.community. # extended.RouteTargetASN2Number self._route_targets = [] if exa.Attribute.CODE.EXTENDED_COMMUNITY in self.attributes: ecoms = self.attributes[ exa.Attribute.CODE.EXTENDED_COMMUNITY].communities # use type(..) 
because isinstance(rtrecord, RouteTarget) is True self._route_targets = [ecom for ecom in ecoms if isinstance(ecom, exa.RouteTarget)] if rts: ecoms += rts self._route_targets += rts else: if rts: self.attributes.add(exa.ExtendedCommunities(rts)) self._route_targets += rts @property def route_targets(self): return self._route_targets def ecoms(self, filter_=None): '''returns the route extended communities (+ optional filtering)''' if filter_ is None: def filter_real(ecom): return True elif isinstance(filter_, type): def filter_real(ecom): return isinstance(ecom, filter_) else: # filter is a function(ecom) filter_real = filter_ if exa.Attribute.CODE.EXTENDED_COMMUNITY in self.attributes: return list( filter(filter_real, self.attributes[exa.Attribute.CODE.EXTENDED_COMMUNITY] .communities) ) else: return [] @log_decorator.log def set_route_targets(self, route_targets): # first build a list of ecoms without any RT ecoms = self.ecoms(lambda ecom: not isinstance(ecom, exa.RouteTarget)) # then add the right RTs new_ecoms = exa.ExtendedCommunities() new_ecoms.communities += ecoms new_ecoms.communities += route_targets # update self._route_targets = route_targets self.attributes.remove(new_ecoms.ID) self.attributes.add(new_ecoms) @property def nexthop(self): try: return self.nlri.nexthop.top() except AttributeError: try: return self.attributes[exa.Attribute.CODE.NEXT_HOP].top() except KeyError: return None def __eq__(self, other): if other is None: return False assert isinstance(other, RouteEntry) return (self.afi == other.afi and self.safi == other.safi and self.source == other.source and self.nlri == other.nlri and self.attributes.sameValuesAs(other.attributes)) def __hash__(self): return hash((self.afi, self.safi, str(self.source), str(self.nexthop), self.nlri, self.attributes)) def __repr__(self, skip_nexthop=False): from_string = " from:%s" % self.source if self.source else "" nexthop = "" if not skip_nexthop: nexthop = str(self.nexthop) return "[RouteEntry: {}/{} {} 
nh:{} {}{}]".format( self.afi, self.safi, self.nlri, nexthop, self.attributes, from_string) def get_lg_local_info(self, path_prefix): att_dict = {} for attribute in self.attributes.values(): # skip some attributes that we care less about if attribute.ID in (exa.Attribute.CODE.AS_PATH, exa.Attribute.CODE.ORIGIN): continue att_dict[ repr(exa.Attribute.CODE(attribute.ID))] = str(attribute) res = {"afi-safi": "{}/{}".format(self.afi, self.safi), "attributes": att_dict, "next_hop": self.nexthop } if self.source: res["source"] = {"id": self.source.name, "href": lg.get_absolute_path("BGP_WORKERS", path_prefix, [self.source.name]) } if self.safi in [exa.SAFI.mpls_vpn, exa.SAFI.evpn]: res["route_targets"] = [str(rt) for rt in self.route_targets] return { repr(self.nlri): res } class RouteEvent: """Represents an advertisement or withdrawal of a RouteEntry""" # event Types ADVERTISE = 1 WITHDRAW = 2 type2name = {ADVERTISE: "Advertise", WITHDRAW: "Withdraw"} def __init__(self, event_type, route_entry, source=None): assert (event_type == RouteEvent.ADVERTISE or event_type == RouteEvent.WITHDRAW) assert isinstance(route_entry, RouteEntry) self.type = event_type self.route_entry = route_entry if source is not None: self.source = source self.route_entry.source = source else: self.source = route_entry.source assert self.source is not None self.replaced_route = None # this is required to overwrite the action field in an NLRI # in the case where we generate a withdraw from an existing NLRI # on a replaced route # and this spares us the pain of specifying the action # when creating an nlri if event_type == RouteEvent.ADVERTISE: self.route_entry.nlri.action = exa.OUT.ANNOUNCE else: # WITHDRAW self.route_entry.nlri.action = exa.OUT.WITHDRAW @log_decorator.log def set_replaced_route(self, replaced_route): '''set_replaced_route hook for RouteTableManager Called only by RouteTableManager. 
replaced_route should be a RouteEntry ''' assert (isinstance(replaced_route, RouteEntry) or (replaced_route is None)) assert replaced_route != self.route_entry self.replaced_route = replaced_route def __repr__(self): if self.replaced_route: replaces_str = "replaces one route" else: replaces_str = "replaces no route" return "[RouteEvent({}): {} {} {}]".format(replaces_str, RouteEvent.type2name[ self.type], self.route_entry, self.source) class UnsupportedRT(Exception): def __init__(self, rt): self.rt = rt class _SubUnsubCommon: def __init__(self, afi, safi, route_target, worker=None): if not (route_target is None or isinstance(route_target, exa.RouteTarget)): raise UnsupportedRT(route_target) self.afi = afi self.safi = safi self.route_target = route_target self.worker = worker def __repr__(self): by_worker = " by %s" % self.worker.name if self.worker else "" return "{} [{}/{},{}]{}".format(self.__class__.__name__, self.afi or "*", self.safi or "*", self.route_target or "*", by_worker) class Subscription(_SubUnsubCommon): """Represents a Subscription to RouteEvents A subscription specifies the AFI, the SAFI, and the Route Target of the RouteEntry for which the subscriber wants to receive events. 
Any of these (afi, safi or route target) can be replaced by a wildcard: * Subscription.ANY_AFI * Subscription.ANY_SAFI * Subscription.ANY_RT """ ANY_AFI = None ANY_SAFI = None ANY_RT = None def __init__(self, afi, safi, route_target=None, worker=None): _SubUnsubCommon.__init__(self, afi, safi, route_target, worker) class Unsubscription(_SubUnsubCommon): def __init__(self, afi, safi, route_target=None, worker=None): _SubUnsubCommon.__init__(self, afi, safi, route_target, worker) class EventSource(lg.LookingGlassMixin): '''Class for objects that advertise and withdraw routes class needs to have a 'name' attribute ''' def __init__(self, route_table_manager): self.rtm = route_table_manager # private data of RouteTableManager self._rtm_route_entries = set() def get_route_entries(self): return self._rtm_route_entries @log_decorator.log_info def _advertise_route(self, route_entry): LOG.debug("Publish advertise route event") self.rtm.enqueue(RouteEvent(RouteEvent.ADVERTISE, route_entry, self)) @log_decorator.log_info def _withdraw_route(self, route_entry): LOG.debug("Publish withdraw route event") self.rtm.enqueue(RouteEvent(RouteEvent.WITHDRAW, route_entry, self)) def get_lg_map(self): return { "adv_routes": (lg.SUBTREE, self.get_lg_routes) } def get_lg_routes(self, path_prefix): return [route.get_looking_glass_info(path_prefix) for route in self.get_route_entries()] class WorkerCleanupEvent: def __init__(self, worker): self.worker = worker def __repr__(self): return "WorkerCleanupEvent:%s" % (self.worker.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/bgp_manager.py0000664000175000017500000001204000000000000027667 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import reduce from oslo_config import cfg from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import bgp_peer_worker from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import exabgp_peer_worker from networking_bagpipe.bagpipe_bgp.engine import route_table_manager as rtm LOG = logging.getLogger(__name__) # SAFIs for which RFC4684 is effective RTC_SAFIS = (exa.SAFI.mpls_vpn, exa.SAFI.evpn) class Manager(engine.EventSource, lg.LookingGlassMixin, utils.ClassReprMixin): _instance = None def __init__(self): LOG.debug("Instantiating BGPManager") if cfg.CONF.BGP.enable_rtc: first_local_subscriber_callback = self.rtc_advertisement_for_sub last_local_subscriber_callback = self.rtc_withdrawal_for_sub else: first_local_subscriber_callback = None last_local_subscriber_callback = None self.rtm = rtm.RouteTableManager(first_local_subscriber_callback, last_local_subscriber_callback) self.rtm.start() self.peers = {} if cfg.CONF.BGP.peers: for peer_address in cfg.CONF.BGP.peers: LOG.debug("Creating a peer worker for %s", peer_address) peer_worker = exabgp_peer_worker.ExaBGPPeerWorker(self, peer_address) self.peers[peer_address] = peer_worker peer_worker.start() # we need a .name since we'll masquerade as a route_entry source self.name = "BGPManager" 
engine.EventSource.__init__(self, self.rtm) def __repr__(self): return self.__class__.__name__ @log_decorator.log def stop(self): for peer in self.peers.values(): peer.stop() self.rtm.stop() for peer in self.peers.values(): peer.join() self.rtm.join() def get_local_address(self): return cfg.CONF.BGP.local_address @log_decorator.log def rtc_advertisement_for_sub(self, sub): if sub.safi in RTC_SAFIS: event = engine.RouteEvent( engine.RouteEvent.ADVERTISE, self._subscription_2_rtc_route_entry(sub), self) LOG.debug("Based on subscription => synthesized RTC %s", event) self.rtm.enqueue(event) @log_decorator.log def rtc_withdrawal_for_sub(self, sub): if sub.safi in RTC_SAFIS: event = engine.RouteEvent( engine.RouteEvent.WITHDRAW, self._subscription_2_rtc_route_entry(sub), self) LOG.debug("Based on unsubscription => synthesized withdraw" " for RTC %s", event) self.rtm.enqueue(event) def _subscription_2_rtc_route_entry(self, subscription): nlri = exa.RTC.new(exa.AFI.ipv4, exa.SAFI.rtc, cfg.CONF.BGP.my_as, subscription.route_target, exa.IP.create(self.get_local_address())) route_entry = engine.RouteEntry(nlri) return route_entry # Looking Glass Functions ################### def get_lg_map(self): return {"peers": (lg.COLLECTION, (self.get_lg_peer_list, self.get_lg_peer_path_item)), "routes": (lg.FORWARD, self.rtm), "workers": (lg.FORWARD, self.rtm), "route_counts": (lg.SUBITEM, self.get_lg_route_counts)} def get_established_peers_count(self): return reduce(lambda count, peer: count + (isinstance(peer, bgp_peer_worker.BGPPeerWorker) and peer.is_established()), self.peers.values(), 0) def get_lg_peer_list(self): return [{"id": peer.peer_address, "state": peer.fsm.state} for peer in self.peers.values()] def get_lg_peer_path_item(self, path_item): return self.peers[path_item] def get_lg_route_counts(self): return {"local_routes_count": self.rtm.get_local_routes_count(), "received_routes_count": self.rtm.get_received_routes_count()} 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/bgp_peer_worker.py0000664000175000017500000002641000000000000030607 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import random import threading import time from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import worker INIT = "Init-Event" CONNECT_NOW = "Connect-Now" SEND_KEEP_ALIVE = "Send-KeepAlive" KEEP_ALIVE_RECEIVED = "KeepAlive-received" DEFAULT_HOLDTIME = 180 ERROR_RETRY_TIMER = 20 KA_EXPIRY_RETRY_TIMER = 5 CONNECT_RETRY_TIMER = 5 class FSM: '''Represents the state of the BGP Finite State Machine''' Idle = "Idle" Connect = 'Connect' OpenSent = 'OpenSent' OpenConfirm = 'OpenConfirm' Active = 'Active' Established = 'Established' def __init__(self, worker): self.worker = worker self._state = FSM.Idle self._prev_state = None self.all_states = [FSM.Idle, FSM.Connect, FSM.OpenSent, FSM.OpenConfirm, FSM.Active, FSM.Established] self.last_transition_time = time.time() @property def state(self): return self._state @property def previous_state(self): return self._prev_state @state.setter def state(self, state): if state == self._state: return if state in self.all_states: self._prev_state = self._state 
self._state = state self.last_transition_time = time.time() self.worker.log.info( "%s BGP FSM transitioned from '%s' to '%s' state" % (self.worker, self._prev_state, self._state)) else: raise Exception("no such state (%s)" % repr(state)) def __repr__(self): return self._state class StoppedException(Exception): pass class InitiateConnectionException(Exception): pass class OpenWaitTimeout(Exception): pass class ToIdle: def __init__(self, delay): # add 50% random delay to avoid reconnect bursts self.delay = delay * random.uniform(1, 1.5) def __repr__(self): return "ToIdle(%s)" % self.delay class BGPPeerWorker(worker.Worker, threading.Thread, lg.LookingGlassLocalLogger, metaclass=abc.ABCMeta): '''Partially abstract class for a Worker implementing the BGP protocol.''' def __init__(self, bgp_manager, peer_address): # call super threading.Thread.__init__(self) self.setDaemon(True) worker.Worker.__init__(self, bgp_manager, "BGP-%s" % peer_address) self.peer_address = peer_address # its up to subclasses to call _set_hold_time again to set holdtime # based on value advertized by peer self._set_hold_time(DEFAULT_HOLDTIME) # used to stop receive_thread self._stop_loops = threading.Event() # used to track that we've been told to stop: self.should_stop = False self.send_ka_timer = None self.ka_reception_timer = None lg.LookingGlassLocalLogger.__init__(self, self.peer_address.replace(".", "-")) self.fsm = FSM(self) self.log.debug("INIT %s", self.name) self.enqueue(CONNECT_NOW) def stop(self): super().stop() self.should_stop = True self._stop_and_clean() def _set_hold_time(self, holdtime): '''sets the session holdtime holdtime in seconds keepalive expected, or sent, every holdtime/3 second ''' assert holdtime > 30 self.kat_period = int(holdtime / 3.0) self.kat_expiry_time = self.kat_period * 3 # called by _event_queue_processor_loop def _on_event(self, event): self.log.debug("event: %s", event) if event == CONNECT_NOW: self._connect() elif isinstance(event, ToIdle): 
self._to_idle(event.delay) elif isinstance(event, engine.RouteEvent): if self.fsm.state == FSM.Established: self._send(self._update_for_route_event(event)) else: raise Exception("cannot process event in '%s' state" % self.fsm.state) elif event == SEND_KEEP_ALIVE: self._send(self._keep_alive_message_data()) elif event == KEEP_ALIVE_RECEIVED: self.on_keep_alive_received() else: self.log.warning("event not processed: %s", event) def _stopped(self): self.fsm.state = FSM.Idle def _connect(self): self._reset_local_lg_logs() # initiate connection self.log.debug("connecting now") self.fsm.state = FSM.Connect try: self._initiate_connection() except (InitiateConnectionException, OpenWaitTimeout) as e: self.log.warning("%s while initiating connection: %s", e.__class__.__name__, e) self._to_active() return except StoppedException: self.log.info("Thread stopped during connection init") return except Exception as e: self.log.warning("Exception while initiating connection: %s", e) if self.log.isEnabledFor(logging.DEBUG): self.log.exception("") self._to_active() return self._stop_loops.clear() self.init_send_keep_alive_timer() self.init_keep_alive_reception_timer() # spawns a receive thread self.receive_thread = threading.Thread(target=self._receive_loop, name=("%s:receive_loop" % self.name)) self.receive_thread.start() self._to_established() def _to_active(self): self.fsm.state = FSM.Active self._stop_and_clean() self.init_connect_timer(CONNECT_RETRY_TIMER) def _to_established(self): self.fsm.state = FSM.Established def _to_idle(self, delay_before_connect=0): self.fsm.state = FSM.Idle self._stop_and_clean() if delay_before_connect: self.init_connect_timer(delay_before_connect) else: self.enqueue(CONNECT_NOW) def _stop_and_clean(self): self._stop_loops.set() if self.send_ka_timer: self.send_ka_timer.cancel() if self.ka_reception_timer: self.ka_reception_timer.cancel() self._cleanup() def is_established(self): return self.fsm.state == FSM.Established def _receive_loop(self): 
self.log.info("Start receive loop") self._stop_loops.clear() while not self._stop_loops.isSet(): try: loop_result = self._receive_loop_fun() if loop_result == 0: self.log.info("receive_loop_fun returned 0, aborting") break elif loop_result == 2: self.log.warning("receive_loop_fun returned 2 (error), " "aborting receive_loop and reinit'ing") # FIXME: use (Worker.)enqueue_high_priority so that # ToIdle is treated before other events self.enqueue(ToIdle(ERROR_RETRY_TIMER)) break else: # everything went fine pass except Exception as e: self.log.error("Error: %s (=> aborting receive_loop and " "reinitializing)", e) if self.log.isEnabledFor(logging.WARNING): self.log.exception("") # FIXME: use (Worker.)enqueue_high_priority so that # ToIdle is treated before other events self.enqueue(ToIdle(ERROR_RETRY_TIMER)) break self.log.info("End receive loop") # Connect retry timer ##### def init_connect_timer(self, delay): self.log.debug("INIT connect timer (%ds)", delay) self.connect_timer = threading.Timer(delay, self.enqueue, [CONNECT_NOW]) self.connect_timer.name = "%s:connect_timer" % self.name self.connect_timer.start() # Sending keep-alive's ##### def init_send_keep_alive_timer(self): self.log.debug("INIT Send Keepalive timer (%ds)", self.kat_period) self.send_ka_timer = threading.Timer(self.kat_period, self.send_keep_alive_trigger) self.send_ka_timer.name = "%s:send_ka_timer" % self.name self.send_ka_timer.start() def send_keep_alive_trigger(self): self.log.debug("Trigger to send Keepalive") self.enqueue(SEND_KEEP_ALIVE) self.init_send_keep_alive_timer() # Receiving keep-alive's ##### def init_keep_alive_reception_timer(self): self.log.debug( "INIT Keepalive reception timer (%ds)", self.kat_expiry_time) self.ka_reception_timer = threading.Timer( self.kat_expiry_time, self.enqueue, [ToIdle(KA_EXPIRY_RETRY_TIMER)]) self.ka_reception_timer.start() def on_keep_alive_received(self): self.log.debug("Keepalive received") self.ka_reception_timer.cancel() 
self.init_keep_alive_reception_timer() # Abstract methods @abc.abstractmethod def _initiate_connection(self): '''Abstract method to initiate a connection The implementation will initiated the connection to the BGP peer, do the initial BGP handshake (send Open, receive Open, send first KeepAlive, receive first KeepAlive) and track the intermediate FSM states (OpenSent, OpenConfirm). ''' pass @abc.abstractmethod def _receive_loop_fun(self): '''abstract method for internal receive loop Return codes: - 0: we decided to stop based on stop_loops being set - 1: everything went fine, pursue - 2: there was an error ''' pass @abc.abstractmethod def _keep_alive_message_data(self): pass @abc.abstractmethod def _send(self, data): pass @abc.abstractmethod def _update_for_route_event(self, event): pass # Looking glass hooks ### def get_lg_local_info(self, path_prefix): return { "protocol": { "state": self.fsm.state, "previous_state": "(%s)" % self.fsm.previous_state, "hold_time": self.kat_expiry_time, "last_transition_time": time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime(self.fsm.last_transition_time)) } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/exa.py0000664000175000017500000000561500000000000026214 0ustar00zuulzuul00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2017 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# This module is here to allow conciseness in exabgp imports # by other modules # flake8: noqa from exabgp.bgp.message import OUT from exabgp.bgp.message import IN from exabgp.bgp.message.open.asn import ASN from exabgp.bgp.message.update import Attributes from exabgp.bgp.message.update.attribute.attribute import Attribute from exabgp.bgp.message.update.attribute.community.extended.communities \ import ExtendedCommunities from exabgp.bgp.message.update.attribute.community.extended \ import communities as extcoms from exabgp.bgp.message.update.attribute.community.extended \ import ConsistentHashSortOrder from exabgp.bgp.message.update.attribute.community.extended \ import Encapsulation from exabgp.bgp.message.update.attribute.community.extended \ import RouteTarget as RTExtCom from exabgp.bgp.message.update.attribute.community.extended \ import RouteTargetASN2Number as RouteTarget from exabgp.bgp.message.update.attribute.community.extended \ import TrafficRedirect from exabgp.bgp.message.update.attribute.community.extended.rt_record\ import RTRecord from exabgp.bgp.message.update.attribute.localpref import LocalPreference from exabgp.bgp.message.update.attribute.nexthop import NextHop from exabgp.bgp.message.update.attribute.pmsi import PMSI from exabgp.bgp.message.update.attribute.pmsi import PMSIIngressReplication from exabgp.bgp.message.update.nlri.flow import Flow from exabgp.bgp.message.update.nlri import flow from exabgp.bgp.message.update.nlri.ipvpn import IPVPN from exabgp.bgp.message.update.nlri.nlri import NLRI from exabgp.bgp.message.update.nlri.qualifier.esi import ESI from exabgp.bgp.message.update.nlri.qualifier.etag import EthernetTag from exabgp.bgp.message.update.nlri.qualifier.labels import Labels from exabgp.bgp.message.update.nlri.qualifier.mac import MAC from exabgp.bgp.message.update.nlri.qualifier.rd import RouteDistinguisher from exabgp.bgp.message.update.nlri.rtc import RTC from exabgp.bgp.message.update.nlri.evpn.nlri import EVPN from 
exabgp.bgp.message.update.nlri.evpn.mac import MAC as EVPNMAC from exabgp.bgp.message.update.nlri.evpn.multicast import \ Multicast as EVPNMulticast from exabgp.protocol.ip import IP from exabgp.protocol import Protocol from exabgp.reactor.protocol import AFI from exabgp.reactor.protocol import SAFI ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/exabgp_peer_worker.py0000664000175000017500000003551300000000000031311 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import collections import functools import logging as python_logging import select import time from exabgp.bgp import fsm as exa_fsm from exabgp.bgp import message as exa_message from exabgp.bgp.message import open as exa_open from exabgp.bgp import neighbor as exa_neighbor from exabgp import logger as exa_logger from exabgp.protocol import family as exa_family from exabgp import reactor as exa_reactor from exabgp.reactor import peer as exa_peer from oslo_config import cfg from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import bgp_peer_worker from networking_bagpipe.bagpipe_bgp.engine import exa LOG = logging.getLogger(__name__) def setup_exabgp_env(): # initialize/tweak ExaBGP config and log internals from exabgp.configuration.setup import environment environment.application = 'bagpipe-bgp' env = environment.setup(None) # tell exabgp to parse routes: env.log.routes = True # we "tweak" the internals of exabgp Logger, so that (a) it does not break # oslo_log and (b) it logs through oslo_log # decorating the original restart would be better... 
exa_logger.Logger._restart = exa_logger.Logger.restart def decorated_restart(f): @functools.wraps(f) def restart_never_first(self, first): # we don't want exabgp to really ever do its first restart stuff # that resets the root logger handlers return f(self, False) return restart_never_first exa_logger.Logger.restart = decorated_restart( exa_logger.Logger.restart ) exa_logger.Logger._syslog = logging.getLogger(__name__ + ".exabgp").logger # prevent exabgp Logger code from adding or removing handlers for # this logger def noop(handler): pass exa_logger.Logger._syslog.addHandler = noop exa_logger.Logger._syslog.removeHandler = noop def patched_format(self, message, source, level, timestamp=None): if self.short: return message return "%-13s %s" % (source, message) exa_logger.Logger._format = patched_format env.log.enable = True if LOG.logger.getEffectiveLevel(): env.log.level = environment.syslog_value( python_logging.getLevelName(LOG.logger.getEffectiveLevel()) ) else: env.log.level = environment.syslog_value('INFO') env.log.all = True env.log.packets = True # monkey patch exabgp to work around exabgp issue #690 # only relevant for exabgp 4.0.2 # ( https://github.com/Exa-Networks/exabgp/issues/690 ) if hasattr(exa_family.Family, 'size'): exa_family.Family.size[(exa_family.AFI.l2vpn, exa_family.SAFI.evpn)] = ((4,), 0) TRANSLATE_EXABGP_STATE = {exa_fsm.FSM.IDLE: bgp_peer_worker.FSM.Idle, exa_fsm.FSM.ACTIVE: bgp_peer_worker.FSM.Active, exa_fsm.FSM.CONNECT: bgp_peer_worker.FSM.Connect, exa_fsm.FSM.OPENSENT: bgp_peer_worker.FSM.OpenSent, exa_fsm.FSM.OPENCONFIRM: bgp_peer_worker.FSM.OpenConfirm, exa_fsm.FSM.ESTABLISHED: bgp_peer_worker.FSM.Established, } class ExaBGPPeerWorker(bgp_peer_worker.BGPPeerWorker, lg.LookingGlassMixin): enabled_families = [(exa.AFI.ipv4, exa.SAFI.mpls_vpn), # (exa.exa.AFI.ipv6, exa.SAFI.mpls_vpn), (exa.AFI.l2vpn, exa.SAFI.evpn), (exa.AFI.ipv4, exa.SAFI.flow_vpn)] def __init__(self, bgp_manager, peer_address): 
bgp_peer_worker.BGPPeerWorker.__init__(self, bgp_manager, peer_address) self.local_address = cfg.CONF.BGP.local_address self.peer_address = peer_address self.peer = None self.rtc_active = False self._active_families = [] # hooks into BGPPeerWorker state changes def _stop_and_clean(self): super()._stop_and_clean() self._active_families = [] if self.peer is not None: self.log.info("Clearing peer") if self.peer.proto: self.peer.proto.close() self.peer.stop() self.peer = None def _to_established(self): super()._to_established() if self.rtc_active: self.log.debug("RTC active, subscribing to all RTC routes") # subscribe to RTC routes, to be able to propagate them from # internal workers to this peer self._subscribe(exa.AFI.ipv4, exa.SAFI.rtc) else: self.log.debug("RTC inactive, subscribing to all active families") # if we don't use RTC with our peer, then we need to see events for # all routes of all active families, to be able to send them to him for (afi, safi) in self._active_families: self._subscribe(afi, safi) # implementation of BGPPeerWorker abstract methods def _initiate_connection(self): self.log.debug("Initiate ExaBGP connection to %s:%s from %s", self.peer_address, cfg.CONF.BGP.bgp_port, self.local_address) self.rtc_active = False neighbor = exa_neighbor.Neighbor() neighbor.make_rib() neighbor.router_id = exa_open.RouterID(self.local_address) neighbor.local_as = exa.ASN(cfg.CONF.BGP.my_as) # no support for eBGP yet: neighbor.peer_as = exa.ASN(cfg.CONF.BGP.my_as) neighbor.local_address = exa.IP.create(self.local_address) neighbor.md5_ip = exa.IP.create(self.local_address) neighbor.peer_address = exa.IP.create(self.peer_address) neighbor.hold_time = exa_open.HoldTime( bgp_peer_worker.DEFAULT_HOLDTIME) neighbor.connect = cfg.CONF.BGP.bgp_port neighbor.api = collections.defaultdict(list) neighbor.extended_message = False for afi_safi in self.enabled_families: neighbor.add_family(afi_safi) if cfg.CONF.BGP.enable_rtc: # how to test fot this ? 
neighbor.add_family((exa.AFI.ipv4, exa.SAFI.rtc)) self.log.debug("Instantiate ExaBGP Peer") self.peer = exa_peer.Peer(neighbor, None) try: for action in self.peer._establish(): self.fsm.state = TRANSLATE_EXABGP_STATE[ self.peer.fsm.state] if action == exa_peer.ACTION.LATER: time.sleep(2) elif action == exa_peer.ACTION.NOW: time.sleep(0.1) if self.should_stop: self.log.debug("We're closing, raise StoppedException") raise bgp_peer_worker.StoppedException() if action == exa_peer.ACTION.CLOSE: self.log.debug("Socket status is CLOSE, " "raise InitiateConnectionException") raise bgp_peer_worker.InitiateConnectionException( "Socket is closed") except exa_peer.Interrupted: self.log.debug("Connect was interrupted, " "raise InitiateConnectionException") raise bgp_peer_worker.InitiateConnectionException( "Connect was interrupted") except exa_message.Notify as e: self.log.debug("Notify: %s", e) if (e.code, e.subcode) == (1, 1): raise bgp_peer_worker.OpenWaitTimeout(str(e)) else: raise Exception("Notify received: %s" % e) except exa_reactor.network.error.LostConnection: raise # check the capabilities of the session just established... 
self.protocol = self.peer.proto received_open = self.protocol.negotiated.received_open self._set_hold_time(self.protocol.negotiated.holdtime) mp_capabilities = received_open.capabilities.get( exa_open.capability.Capability.CODE.MULTIPROTOCOL, []) # check that our peer advertized at least mpls_vpn and evpn # capabilities self._active_families = [] for (afi, safi) in (self.__class__.enabled_families + [(exa.AFI.ipv4, exa.SAFI.rtc)]): if (afi, safi) not in mp_capabilities: if (((afi, safi) != (exa.AFI.ipv4, exa.SAFI.rtc)) or cfg.CONF.BGP.enable_rtc): self.log.warning("Peer does not advertise (%s,%s) " "capability", afi, safi) else: self.log.info( "Family (%s,%s) successfully negotiated with peer %s", afi, safi, self.peer_address) self._active_families.append((afi, safi)) if len(self._active_families) == 0: self.log.error("No family was negotiated for VPN routes") self.rtc_active = False if cfg.CONF.BGP.enable_rtc: if (exa.AFI.ipv4, exa.SAFI.rtc) in mp_capabilities: self.log.info( "RTC successfully enabled with peer %s", self.peer_address) self.rtc_active = True else: self.log.warning( "enable_rtc True but peer not configured for RTC") def _receive_loop_fun(self): try: select.select([self.protocol.connection.io], [], [], 2) if not self.protocol.connection: raise Exception("lost connection") message = next(self.protocol.read_message()) if message.ID != exa_message.NOP.ID: self.log.debug("protocol read message: %s", message) except exa_message.Notification as e: self.log.error("Notification: %s", e) return 2 except exa_reactor.network.error.LostConnection as e: self.log.warning("Lost connection while waiting for message: %s", e) return 2 except TypeError as e: self.log.error("Error while reading BGP message: %s", e) return 2 except Exception as e: self.log.error("Error while reading BGP message: %s", e) raise if message.ID == exa_message.NOP.ID: return 1 if message.ID == exa_message.Update.ID: if self.fsm.state != bgp_peer_worker.FSM.Established: raise Exception("Update 
received but not in Established state") # more below elif message.ID == exa_message.KeepAlive.ID: self.enqueue(bgp_peer_worker.KEEP_ALIVE_RECEIVED) self.log.debug("Received message: %s", message) else: self.log.warning("Received unexpected message: %s", message) if isinstance(message, exa_message.Update): if message.nlris: for nlri in message.nlris: if nlri.action == exa_message.IN.ANNOUNCED: action = engine.RouteEvent.ADVERTISE elif nlri.action == exa_message.IN.WITHDRAWN: action = engine.RouteEvent.WITHDRAW else: raise Exception("should not be reached (action:%s)", nlri.action) self._process_received_route(action, nlri, message.attributes) return 1 def _process_received_route(self, action, nlri, attributes): self.log.info("Received route: %s, %s", nlri, attributes) route_entry = engine.RouteEntry(nlri, None, attributes) if action == exa_message.IN.ANNOUNCED: self._advertise_route(route_entry) elif action == exa_message.IN.WITHDRAWN: self._withdraw_route(route_entry) else: raise Exception("unsupported action ??? (%s)" % action) # TODO(tmmorin): move RTC code out-of the peer-specific code if (nlri.afi, nlri.safi) == (exa.AFI.ipv4, exa.SAFI.rtc): self.log.info("Received an RTC route") if nlri.rt is None: self.log.info("Received RTC is a wildcard") # the semantic of RTC routes does not distinguish between AFI/SAFIs # if our peer subscribed to a Route Target, it means that we needs # to send him all routes of any AFI/SAFI carrying this RouteTarget. for (afi, safi) in self._active_families: if (afi, safi) != (exa.AFI.ipv4, exa.SAFI.rtc): if action == exa_message.IN.ANNOUNCED: self._subscribe(afi, safi, nlri.rt) elif action == exa_message.IN.WITHDRAWN: self._unsubscribe(afi, safi, nlri.rt) else: raise Exception("unsupported action ??? 
(%s)" % action) def _send(self, data): # (error if state not the right one for sending updates) self.log.debug("Sending %d bytes on socket to peer %s", len(data), self.peer_address) try: for _unused in self.protocol.connection.writer(data): pass except Exception: self.log.exception("Was not able to send data") def _keep_alive_message_data(self): return exa_message.KeepAlive().message() def _update_for_route_event(self, event): try: r = exa_message.Update([event.route_entry.nlri], event.route_entry.attributes) return b''.join(r.messages(self.protocol.negotiated)) except Exception: self.log.exception("Exception while generating message for " "route %s", r) return b'' # Looking Glass ############### def get_lg_local_info(self, path_prefix): return { "peeringAddresses": {"peer_address": self.peer_address, "local_address": self.local_address}, "as_info": {"local": cfg.CONF.BGP.my_as, "peer": cfg.CONF.BGP.my_as}, "rtc": {"active": self.rtc_active, "enabled": cfg.CONF.BGP.enable_rtc}, "active_families": [repr(f) for f in self._active_families], } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/flowspec.py0000664000175000017500000000237500000000000027261 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from networking_bagpipe.bagpipe_bgp.engine import exa @exa.NLRI.register(exa.AFI.ipv4, exa.SAFI.flow_vpn, force=True) @exa.NLRI.register(exa.AFI.ipv6, exa.SAFI.flow_vpn, force=True) class Flow(exa.Flow): '''ExaBGP Flow wrapper This wraps an ExaBGP Flow so that __eq__ and __hash__ meet the criteria for RouteTableManager (in particular, not look at actions and nexthop) ''' def __eq__(self, other): return self.pack() == other.pack() def __hash__(self): return hash(self.pack()) def __repr__(self): return str(self) def FlowRouteFactory(afi, rd): flow_route = Flow(afi, safi=exa.SAFI.flow_vpn) flow_route.rd = rd return flow_route ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/ipvpn.py0000664000175000017500000000262200000000000026566 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from networking_bagpipe.bagpipe_bgp.engine import exa def prefix_to_packed_ip_mask(prefix): ip_string, mask = prefix.split("/") return (exa.IP.pton(ip_string), int(mask)) @exa.NLRI.register(exa.AFI.ipv4, exa.SAFI.mpls_vpn, force=True) @exa.NLRI.register(exa.AFI.ipv6, exa.SAFI.mpls_vpn, force=True) class IPVPN(exa.IPVPN): # two NLRIs with same RD and prefix, but different labels need to # be equal and have the same hash def __eq__(self, other): return self.rd == other.rd and self.cidr == other.cidr def __hash__(self): return hash((self.rd, self.cidr._packed)) def IPVPNRouteFactory(afi, prefix, label, rd, nexthop): packed_prefix, mask = prefix_to_packed_ip_mask(prefix) return IPVPN.new(afi, exa.SAFI.mpls_vpn, packed_prefix, mask, exa.Labels([label], True), rd, nexthop) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/route_table_manager.py0000664000175000017500000005507700000000000031445 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import collections from functools import reduce import queue import threading from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import bgp_peer_worker from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import worker as worker_m LOG = logging.getLogger(__name__) def cmp(a, b): return (a > b) - (b > a) class Match: def __init__(self, afi, safi, route_target): assert route_target is None or isinstance(route_target, exa.RouteTarget) self.afi = afi self.safi = safi self.route_target = route_target def __hash__(self): # FIXME, could use a tuple, but RT not yet hashable # return hash((self.afi, self.safi, self.route_target)) return hash(str(self)) def __repr__(self): return "match:%s" % str(self) def __str__(self): return "{}/{},{}".format(self.afi or "*", self.safi or "*", self.route_target or "*") def __cmp__(self, other): assert isinstance(other, Match) self_afi = self.afi or engine.Subscription.ANY_AFI self_safi = self.safi or engine.Subscription.ANY_SAFI self_rt = self.route_target or exa.RouteTarget(0, 0) other_afi = other.afi or engine.Subscription.ANY_AFI other_safi = other.safi or engine.Subscription.ANY_SAFI other_rt = other.route_target or exa.RouteTarget(0, 0) val = cmp((self_afi, self_safi, str(self_rt)), (other_afi, other_safi, str(other_rt))) return val def __lt__(self, other): return self.__cmp__(other) == -1 def __eq__(self, other): return self.__cmp__(other) == 0 MATCH_ANY = Match(engine.Subscription.ANY_AFI, engine.Subscription.ANY_SAFI, engine.Subscription.ANY_RT) def matches_for(afi, safi, route_targets): # generate all possible match entries for this afi/safi # and these routetargets, with all possible wildcards # # There are 4*(n+1) 
possible Match object (for n route_targets) for _afi in (engine.Subscription.ANY_AFI, afi): for _safi in (engine.Subscription.ANY_SAFI, safi): yield Match(_afi, _safi, None) if route_targets is not None: for rt in route_targets: yield Match(_afi, _safi, rt) class WorkersAndEntries: def __init__(self): self.workers = set() self.entries = set() self.n_local_workers = 0 def __repr__(self): return "workers: {}\nentries: {}".format(self.workers, self.entries) def add_worker(self, worker): '''returns True iif first local worker''' self.workers.add(worker) if not isinstance(worker, bgp_peer_worker.BGPPeerWorker): self.n_local_workers += 1 return self.n_local_workers == 1 def del_worker(self, worker): '''returns True iif last local worker''' self.workers.remove(worker) if not isinstance(worker, bgp_peer_worker.BGPPeerWorker): self.n_local_workers -= 1 return self.n_local_workers == 0 def is_empty(self): return (len(self.workers) == 0 and len(self.entries) == 0) STOP_EVENT = "STOP_EVENT" def test_should_dispatch(route_event, target_worker): '''tests if route should be dispatched to a worker returns a (boolean,string) tuple the string contains the reason why the route_event should not be dispatched to target_worker ''' if route_event.source == target_worker: return (False, "not dispatching an update back to its source") elif (isinstance(route_event.source, bgp_peer_worker.BGPPeerWorker) and isinstance(target_worker, bgp_peer_worker.BGPPeerWorker)): return (False, "do not dispatch a route between BGP peers") else: return (True, "") class RouteTableManager(threading.Thread, lg.LookingGlassMixin, utils.ClassReprMixin): """Singleton class dispatching route events between workers Events relates to BGP routes that are announced or withdrawn by workers. Workers subscribe to events by indicating AFI, SAFI, Route Targets in which they are interested. There can be workers responsible for handling services locally (e.g. a VPN instance) and workers that are BGP peers. 
Though sources of routes are typically Workers, they don't need to be; any class can source a route. """ def __init__(self, first_local_subscriber_cb, last_local_subscriber_cb): threading.Thread.__init__(self, name="RouteTableManager") self.setDaemon(True) # keys are Matches, values are WorkersAndEntries objects: self._match_2_workers_entries = ( collections.defaultdict(WorkersAndEntries)) # workers known to us # name -> worker dict self._workers = {} # keys are (source,nlri) tuples, values are Entry objects: self._source_nlri_2_entry = {} self.first_local_subscriber_callback = first_local_subscriber_cb self.last_local_subscriber_callback = last_local_subscriber_cb self._queue = queue.Queue() @log_decorator.log_info def stop(self): self.enqueue(STOP_EVENT) def run(self): while True: LOG.debug("RouteTableManager waiting on queue") event = self._queue.get() if event == STOP_EVENT: LOG.info("STOP_EVENT => breaking main loop") break else: self._on_event(event) LOG.debug("RouteTableManager queue size: %d", self._queue.qsize()) LOG.info("Out of main loop") @log_decorator.log_info def _on_event(self, event): try: if event.__class__ == engine.RouteEvent: self._receive_route_event(event) elif event.__class__ == engine.Subscription: self._worker_subscribes(event) elif event.__class__ == engine.Unsubscription: self._worker_unsubscribes(event) elif event.__class__ == engine.WorkerCleanupEvent: self._worker_cleanup(event.worker) else: raise Exception("unknown event: %s", event) except Exception: LOG.exception("Exception during processing of event %s", event) def enqueue(self, event): self._queue.put(event) def _check_match_2_workers_and_entries_cleanup(self, match): wa = self._match_2_workers_entries.get(match) if wa is None: LOG.warning("why are we here ?") # nothing to cleanup return else: if wa.is_empty(): del self._match_2_workers_entries[match] def callback_first_local_subscriber(self, sub): if self.first_local_subscriber_callback: LOG.debug("first local subscriber 
callback for %s ...", sub) self.first_local_subscriber_callback(sub) def callback_last_local_subscriber(self, sub): if self.last_local_subscriber_callback: LOG.debug("last local subscriber callback for %s ...", sub) self.last_local_subscriber_callback(sub) @log_decorator.log_info def _worker_subscribes(self, sub): assert isinstance(sub.worker, worker_m.Worker) worker = sub.worker self._workers[worker.name] = worker match = Match(sub.afi, sub.safi, sub.route_target) wa = self._match_2_workers_entries[match] # update match2worker if wa.add_worker(worker): self.callback_first_local_subscriber(sub) LOG.debug("match2workers: %s", wa.workers) # re-synthesize events for entry in wa.entries: LOG.debug("Found an entry for this match: %s", entry) event = engine.RouteEvent(engine.RouteEvent.ADVERTISE, entry) (dispatch, reason) = test_should_dispatch(event, worker) if dispatch: # check if the entry carries a route_target to which the worker # was already subscribed for rt in entry.route_targets: if Match(entry.afi, entry.safi, rt) in worker._rtm_matches: (dispatch, reason) = ( False, "worker already had a subscription for this route") break if dispatch: LOG.info("Dispatching re-synthesized event for %s", entry) worker.enqueue(event) else: LOG.info("%s => not dispatching re-synthesized event for %s", reason, entry) # update worker matches worker._rtm_matches.add(match) # self._dump_state() def _worker_unsubscribes(self, sub): assert isinstance(sub.worker, worker_m.Worker) worker = sub.worker # self._dump_state() match = Match(sub.afi, sub.safi, sub.route_target) # update worker matches try: worker._rtm_matches.remove(match) except KeyError: LOG.warning("worker %s unsubs' from %s but this match was" " not tracked for this worker (should not happen," " this is a bug)", worker, match) # synthesize withdraw events wa = self._match_2_workers_entries.get(match) if wa: for entry in wa.entries: intersect = set(matches_for(entry.afi, entry.safi, entry.route_targets) 
).intersection(worker._rtm_matches) if len(intersect) > 0: LOG.debug("Will not synthesize withdraw event for %s, " "because worker subscribed to %s", entry, intersect) else: LOG.debug("Found an entry for this match: %s", entry) event = engine.RouteEvent(engine.RouteEvent.WITHDRAW, entry) (dispatch, reason) = test_should_dispatch(event, worker) if dispatch: LOG.info("Dispatching re-synthesized event for %s", entry) worker.enqueue(event) else: LOG.info("%s => not dispatching re-synthesized event" " for %s", reason, entry) # update _match_2_workers_entries try: if wa.del_worker(worker): LOG.debug("see if need to callback on last local worker") self.callback_last_local_subscriber(sub) self._check_match_2_workers_and_entries_cleanup(match) except KeyError: LOG.warning("worker %s unsubscribed from %s but was not" " subscribed yet", worker, match) else: # wa is None LOG.warning("worker %s unsubscribed from %s but we had no such" " subscription yet", worker, match) if len(worker._rtm_matches) == 0: self._workers.pop(worker.name, None) # self._dump_state() @log_decorator.log def _propagate_route_event(self, route_event, except_workers=None): '''Propagate route_event to workers subscribed to the route based on subscribed RTs/wildcards, and excluding the workers in except_workers. Returns the list of workers to which the event was propagated. 
''' re = route_event.route_entry if except_workers is None: except_workers = [] target_workers = set() for match in matches_for(re.afi, re.safi, re.route_targets): LOG.debug("Finding interested workers for match %s", match) interested_workers = self._match_2_workers_entries[match].workers LOG.debug(" Workers interested in this match: %s", interested_workers) for worker in interested_workers: (dispatch, reason) = test_should_dispatch(route_event, worker) if dispatch: if worker not in except_workers: LOG.debug("Will dispatch event to %s: %s", worker, route_event) target_workers.add(worker) else: LOG.debug("Decided not to dispatch to %s, based on" " except_workers: %s", worker, route_event) else: LOG.debug("Decided not to dispatch to %s: %s (%s)", worker, reason, route_event) for worker in target_workers: LOG.info("Dispatching event to %s: %s", worker, route_event) worker.enqueue(route_event) return target_workers @log_decorator.log_info def _receive_route_event(self, route_event): entry = route_event.route_entry LOG.debug("Try to find an entry from same worker with same nlri") replaced_entry = self._source_nlri_2_entry.get((entry.source, entry.nlri)) LOG.debug(" Result: %s", replaced_entry) # replaced_entry should be non-empty for a withdraw if (replaced_entry is None and route_event.type == engine.RouteEvent.WITHDRAW): LOG.warning("WITHDRAW but found no route that we could remove: %s", route_event.route_entry) return # Propagate events to interested workers... 
if route_event.type == engine.RouteEvent.ADVERTISE: if replaced_entry == route_event.route_entry: LOG.warning("Ignoring, the route advertized is the same as the" " one previously advertized by the source %s: %s", route_event.source, route_event.route_entry) return # propagate event to interested worker # and include the info on the route are replaced by this # route, if any route_event.set_replaced_route(replaced_entry) workers_already_notified = self._propagate_route_event(route_event) else: # WITHDRAW workers_already_notified = None # Synthesize and dispatch a withdraw event for the route entry that # was withdrawn or replaced, except, in the case of a replaced route, # to workers that had the ADVERTISE event if replaced_entry is not None: LOG.debug("Synthesizing a withdraw event for replaced route %s", replaced_entry) removal_event = engine.RouteEvent(engine.RouteEvent.WITHDRAW, replaced_entry, route_event.source) self._propagate_route_event(removal_event, workers_already_notified) # Update match2entries for the replaced_route for match in matches_for(replaced_entry.afi, replaced_entry.safi, replaced_entry.route_targets): wa = self._match_2_workers_entries.get(match, None) if wa is None: LOG.error("Trying to remove a route from a match, but" " match %s not found - not supposed to happen" " (route: %s)", match, replaced_entry) else: wa.entries.discard(replaced_entry) self._check_match_2_workers_and_entries_cleanup(match) # update the route entries for this source replaced_entry.source._rtm_route_entries.discard(replaced_entry) if route_event.type == engine.RouteEvent.ADVERTISE: # Update match2entries and source2entries for the newly # advertized route for match in matches_for(entry.afi, entry.safi, entry.route_targets): self._match_2_workers_entries[match].entries.add(entry) entry.source._rtm_route_entries.add(entry) # Update _source_nlri_2_entry self._source_nlri_2_entry[(entry.source, entry.nlri)] = entry else: # WITHDRAW # Update source2entries 
entry.source._rtm_route_entries.discard(entry) # Update _source_nlri_2_entry try: del self._source_nlri_2_entry[(entry.source, entry.nlri)] except KeyError: LOG.error("BUG: withdraw, but nothing could be removed in " "_source_nlri_2_entry") # self._dump_state() @log_decorator.log_info def _worker_cleanup(self, worker): '''Cleanup the subscriptions and routes advertised by a worker Consider all routes announced by this worker as withdrawn. Consider this worker unsubscribed from all of its current subscriptions. ''' assert isinstance(worker, worker_m.Worker) # synthesize withdraw events for all routes from this worker LOG.info(" Preparing to withdraw %d routes that were advertised " "by worker", len(worker._rtm_route_entries)) for entry in worker._rtm_route_entries: LOG.info(" Enqueue event to Withdraw route %s", entry) self.enqueue(engine.RouteEvent(engine.RouteEvent.WITHDRAW, entry)) # remove worker from all of its subscriptions for match in worker._rtm_matches: wa = self._match_2_workers_entries[match] if wa.del_worker(worker): self.callback_last_local_subscriber( engine.Subscription(match.afi, match.safi, match.route_target, worker)) self._check_match_2_workers_and_entries_cleanup(match) worker._rtm_matches.clear() # self._dump_state() def _dump_state(self): if not LOG.isEnabledFor(logging.DEBUG): return dump = [] dump.append("~~~ Worker -> Matches ~~~") for worker in self._workers.values(): dump.append(" %s" % worker) matches = list(worker._rtm_matches) matches.sort() for match in matches: dump.append(" %s" % match) match_2_worker_dump = [] match_2_entries_dump = [] matches = list(self._match_2_workers_entries.keys()) matches.sort() for match in matches: match_2_worker_dump.append(" %s" % match) match_2_entries_dump.append(" %s" % match) wa = self._match_2_workers_entries.get(match) if wa: for worker in wa.workers: match_2_worker_dump.append(" %s" % worker) for re in wa.entries: match_2_entries_dump.append(" %s" % re) dump.append("\n~~~ Match -> Workers 
~~~\n%s\n" % "\n".join(match_2_worker_dump)) dump.append("~~~ Match -> Entries ~~~\n%s\n" % "\n".join(match_2_entries_dump)) dump.append("~~~ (source,nlri) -> entries ~~~") for ((source, nlri), entry) in self._source_nlri_2_entry.items(): dump.append(" ({}, {}): {}".format(source, nlri, entry)) LOG.debug("RouteTableManager data dump:\n\n%s\n", "\n".join(dump)) # Looking Glass ##### def get_lg_map(self): return {"workers": (lg.COLLECTION, (self.get_lg_worker_list, self.get_lg_worker_from_path_item) ), "routes": (lg.SUBTREE, self.get_lg_routes)} def get_lg_routes(self, path_prefix): result = {} match_IPVPN = Match(exa.AFI.ipv4, exa.SAFI.mpls_vpn, engine.Subscription.ANY_RT) match_EVPN = Match(exa.AFI.l2vpn, exa.SAFI.evpn, engine.Subscription.ANY_RT) match_RTC = Match(exa.AFI.ipv4, exa.SAFI.rtc, engine.Subscription.ANY_RT) match_FlowSpecVPN = Match(exa.AFI.ipv4, exa.SAFI.flow_vpn, engine.Subscription.ANY_RT) for match in [match_IPVPN, match_EVPN, match_RTC, match_FlowSpecVPN]: match_result = [] wa = self._match_2_workers_entries.get(match) if wa is not None: for entry in wa.entries: match_result.append( entry.get_looking_glass_info(path_prefix)) result[str(match)] = match_result return result def get_lg_worker_list(self): return [{"id": worker.name} for worker in self._workers.values()] def get_lg_worker_from_path_item(self, path_item): return self._workers.get(path_item, None) def get_all_routes_but_rtc(self): return [re for re in self._match_2_workers_entries[MATCH_ANY].entries if (re.afi, re.safi) != (exa.AFI.ipv4, exa.SAFI.rtc) ] def get_local_routes_count(self): return reduce( lambda count, entry: count + (not isinstance(entry.source, bgp_peer_worker.BGPPeerWorker)), self.get_all_routes_but_rtc(), 0) def get_received_routes_count(self): return reduce( lambda count, entry: count + isinstance(entry.source, bgp_peer_worker.BGPPeerWorker), self.get_all_routes_but_rtc(), 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/tracker_worker.py0000664000175000017500000005061300000000000030461 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import socket from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import worker # Explanations on FilteredRoute # We need to call self.best_route_removed for a route that was # implicitly withdrawn, except if, in best_routes, there is # a route that is the same with reference to what the # best_route_removed callback will do: # - if there is a route with same nexthop, same encap, same label # (other attributes make the route different) # => no call to best_route_removed # - if there is a route with same nexthop, but a different label or # a different encap # => need to call best_route_removed # # Similarly, when multiple best routes exist for an entry # (ECMP case), we don't want to call new_best_route multiple times # uselessly for instance for two routes that would be 'same' wrt # the actions done by the new_best_route callback. So we will do a # call to self.new_best_route... 
*only* if the # new_route is different from all current best routes. # # FilteredRoute is the class we use to identify a route # that would be the 'same', based on the above. For instance # FilteredRoute removes information on the .source and # only BGP attributes that are used by callbacks. keep_attributes_default = [exa.Attribute.CODE.NEXT_HOP, exa.Attribute.CODE.PMSI_TUNNEL, exa.Attribute.CODE.MED, exa.Attribute.CODE.EXTENDED_COMMUNITY, exa.Attribute.CODE.LOCAL_PREF] class FilteredRoute(engine.RouteEntry): def __init__(self, re, keep_attributes=None): if keep_attributes is None: keep_attributes = keep_attributes_default attributes = exa.Attributes() for (attribute_id, attribute) in re.attributes.items(): if attribute_id in keep_attributes: attributes.add(attribute) super().__init__(re.nlri, None, attributes) def equivalent_route_in_routes(function, route, routes): reference = function(route) for r in routes: if function(r) == reference: return True return False # a compare_route callback has the following signature: # # def compare_route(self, route_a, route_b): # """ # should return: # - an int>0 if route_a is better than route_b # - an int<0 if route_b is better than route_a # - else 0 # """ # TODO(tmorin): both comparison should MAC Mobility if present def compare_ecmp(worker, route_a, route_b): # makes a comparison based on LOCAL_PREF # if route has no LOCAL_PREF, use default value (100) as defined in exabgp l_a = route_a.attributes.get(exa.Attribute.CODE.LOCAL_PREF) l_a = l_a.localpref if l_a else 100 l_b = route_b.attributes.get(exa.Attribute.CODE.LOCAL_PREF) l_b = l_b.localpref if l_b else 100 return (l_a > l_b) - (l_b > l_a) def compare_no_ecmp(worker, route_a, route_b): '''comparison operator without ECMP This compares the two routes in a consistent fashion, but two routes will never be considered of equal cost. The comparison is 'salted' so that two distinct VRFs (e.g. 
on two distinct bagpipe-bgp instances will not necessarily elect the same route as the best one). ''' worker.log.trace("compare_no_ecmp used") base_comparison = compare_ecmp(worker, route_a, route_b) if base_comparison: return base_comparison salt = socket.gethostname() + worker.name hash_a = hash(salt + repr(route_a)) hash_b = hash(salt + repr(route_b)) cmp_hashes = (hash_a > hash_b) - (hash_b > hash_a) if cmp_hashes: return cmp_hashes return (route_a > route_b) - (route_b > route_a) class TrackerWorker(worker.Worker, lg.LookingGlassLocalLogger, metaclass=abc.ABCMeta): def __init__(self, bgp_manager, worker_name, compare_routes=compare_no_ecmp): worker.Worker.__init__(self, bgp_manager, worker_name) lg.LookingGlassLocalLogger.__init__(self) # dict: entry -> list of routes: self.tracked_entry_2_routes = dict() # dict: entry -> set of best_routes: self.tracked_entry_2_best_routes = dict() self._compare_routes = compare_routes @log_decorator.log def _on_event(self, event): new_route = event.route_entry filtered_new_route = FilteredRoute(new_route) entry = self.route_to_tracked_entry(new_route) if entry is None: self.log.debug("Route not mapped to a tracked entry, ignoring: %s", new_route) return self.log.trace("tracked_entry for this route: %s (type: %s)", TrackerWorker._display_entry(entry), type(entry)) self._dump_state() all_routes = self.tracked_entry_2_routes.setdefault(entry, []) self.log.debug("We currently have %d route%s for this entry", len(all_routes), utils.plural(all_routes)) if event.type == engine.RouteEvent.ADVERTISE: withdrawn_best_routes = [] best_routes = self.tracked_entry_2_best_routes.get(entry) if best_routes is None: self.log.trace("We had no route for this entry (%s)") self.tracked_entry_2_best_routes[entry] = {new_route} best_routes = set() self._call_new_best_route(entry, filtered_new_route) else: if event.replaced_route is not None: self.log.trace("Will remove replaced route from all_routes" " and best_routes: %s", event.replaced_route) 
try: all_routes.remove(event.replaced_route) except ValueError: # we did not have any route for this entry self.log.error("replaced_route is an entry for which " "we had no route ??? (bug ?)") if event.replaced_route in best_routes: self.log.trace( "Removing replaced_route from best_routes") best_routes.remove(event.replaced_route) withdrawn_best_routes.append(event.replaced_route) else: self.log.trace("replaced_route is not in best_routes") self.log.trace("best_routes: %s", best_routes) else: self.log.trace("No replaced route to remove") call_new_best_route_4_all = False if len(best_routes) == 0: self.log.trace("All best routes have been replaced") self._recompute_best_routes(all_routes, best_routes) if best_routes: current_best = next(iter(best_routes)) self.log.trace("We'll need to call new_best_route for " "all our new best routes") call_new_best_route_4_all = True else: current_best = None call_new_best_route_4_all = False else: # (if there is more than one route in the best routes, we # take the first one) current_best = next(iter(best_routes)) self.log.trace("Current best route: %s", current_best) if new_route == current_best: self.log.info("New route is a route we already had, " "nothing to do.") # nothing to do return # let's find if we need to update our best routes if current_best: route_comparison = self._compare_routes(self, new_route, current_best) else: route_comparison = 1 self.log.trace("route_comparison: %d", route_comparison) if route_comparison > 0: # new_route is a strictly better route than any current # one, discard all the current best routes self.log.trace("Replacing all best routes with new one") withdrawn_best_routes.extend(best_routes.copy()) best_routes.clear() best_routes.add(new_route) self._call_new_best_route(entry, filtered_new_route) call_new_best_route_4_all = False elif route_comparison == 0: # new_route is as good as the current ones self.log.trace("Adding new_route to best_routes...") if call_new_best_route_4_all: 
self._call_new_best_route_for_routes(entry, best_routes) if not equivalent_route_in_routes(FilteredRoute, new_route, best_routes): best_routes.add(new_route) self.log.trace("Calling self.new_best_route since we " "yet had no such route in best routes") self._call_new_best_route(entry, filtered_new_route) else: best_routes.add(new_route) self.log.trace("Not calling new_best_route since we " "had received a similar route already") else: self.log.trace("The route is no better than current " "best ones") if call_new_best_route_4_all: self._call_new_best_route_for_routes(entry, best_routes) self.log.trace("Considering implicitly withdrawn best routes") for r in withdrawn_best_routes: self._selective_best_route_removed(entry, r, best_routes, False) # add the route to the list of routes for this entry self.log.trace("Adding route to all_routes for this entry") all_routes.append(new_route) else: # RouteEvent.WITHDRAW withdrawn_route = new_route self.log.trace("Removing route from all_routes for this entry") # let's update known routes for this entry try: all_routes.remove(withdrawn_route) except ValueError: # we did not have any route for this entry self.log.error("Withdraw received for an entry for which we " "had no route ??? (not supposed to happen)") # let's now update best routes best_routes = self.tracked_entry_2_best_routes.get(entry) if best_routes is None: # we did not have any route for this entry self.log.error("Withdraw received for an entry for which we " "had no route: not supposed to happen!") return if withdrawn_route in best_routes: self.log.trace("The event received is about a route which " "is among the best routes for this entry") # remove the route from best_routes best_routes.remove(withdrawn_route) withdrawn_route_is_last = False if len(best_routes) == 0: # we don't have any best route left... 
self._recompute_best_routes(all_routes, best_routes) if len(best_routes) > 0: self._call_new_best_route_for_routes(entry, best_routes) else: self.log.trace("Cleanup all_routes and best_routes") withdrawn_route_is_last = True del self.tracked_entry_2_best_routes[entry] del self.tracked_entry_2_routes[entry] self._selective_best_route_removed(entry, withdrawn_route, best_routes, withdrawn_route_is_last) else: self.log.trace("The event received is not related to any " "of the best routes for this entry") # no need to update our best route list pass self.log.info("We now have %d route%s for this entry.", len(all_routes), utils.plural(all_routes)) self._dump_state() def _recompute_best_routes(self, all_routes, best_routes): '''internal method to update best routes update best_routes to contain the best routes from all_routes, based on _compare_routes ''' new_best_routes = [] for route in all_routes: if len(new_best_routes) == 0: new_best_routes = [route] continue comparison = self._compare_routes(self, route, new_best_routes[0]) if comparison > 0: new_best_routes = [route] elif comparison == 0: new_best_routes.append(route) best_routes.clear() best_routes.update(new_best_routes) self.log.trace("Recomputed new best routes: %s", best_routes) @log_decorator.log def _selective_best_route_removed(self, entry, withdrawn_route, filtered_best_routes, withdrawn_route_is_last): if not equivalent_route_in_routes(FilteredRoute, withdrawn_route, filtered_best_routes): self._call_best_route_removed(entry, FilteredRoute(withdrawn_route), withdrawn_route_is_last) else: self.log.trace("No need to call best_route_removed: %s", FilteredRoute(withdrawn_route)) def equivalent_route_in_best_routes(self, route, function): # This method checks if there is in the best routes for the same # tracked entry as 'route', at least one route r for which function(r) # is equal to function(route) return equivalent_route_in_routes( function, route, self.tracked_entry_2_best_routes.get( 
self.route_to_tracked_entry(route), [] ) ) def synthesize_withdraw_all(self, afi, safi): for tracked_entry, routes in list(self.tracked_entry_2_routes.items()): self.log.trace("Synthesizing withdraws for all routes of %s with " "AFI(%s)/SAFI(%s)", tracked_entry, afi, safi) for route in routes: if (route.nlri.afi, route.nlri.safi) == (afi, safi): self._on_event( engine.RouteEvent(engine.RouteEvent.WITHDRAW, route) ) @log_decorator.log def _call_new_best_route_for_routes(self, entry, routes): routes_no_dups = {FilteredRoute(r) for r in routes} self.log.debug(" After filtering duplicates: %s", routes_no_dups) for route in routes_no_dups: self._call_new_best_route(entry, route) @log_decorator.log def _call_new_best_route(self, entry, new_route): try: self.new_best_route(entry, new_route) except Exception as e: self.log.error("Exception in .new_best_route: %s", e) if self.log.isEnabledFor(logging.WARNING): self.log.exception("") @log_decorator.log def _call_best_route_removed(self, entry, old_route, last): try: self.best_route_removed(entry, old_route, last) except Exception as e: self.log.error("Exception in .best_route_removed: %s", e) if self.log.isEnabledFor(logging.WARNING): self.log.exception("") # Callbacks for subclasses ######################## @abc.abstractmethod def route_to_tracked_entry(self, route): """Hook to control mapping from a route to a tracked entry This method is how the subclass maps a BGP route into an object that the TrackerWorker code will track. For instance, a VPN VRF is expected to keep track of IP prefixes; hence the route2tracked_entry code for a VRF could return the IP prefix in the VPNv4 route. The result will be that the TrackerWorker code will keep track, for a each prefix, of all the routes and of the best routes. 
""" pass @abc.abstractmethod def new_best_route(self, entry, new_route): '''New Best Route hook A new best route has been advertized for this tracked entry ''' pass @abc.abstractmethod def best_route_removed(self, entry, old_route, last): '''Best Route removed hook A route that was a best route for this tracked entry has been removed. The 'last' flag indicates if this was the last route for this tracked entry. ''' pass # Debug support methods ######### def _dump_state(self): if self.log.isEnabledFor(logging.TRACE): self.log.trace("--- tracked_entry_2_routes ---") for (entry, routes) in self.tracked_entry_2_routes.items(): self.log.trace( " Entry: %s", TrackerWorker._display_entry(entry)) for route in routes: self.log.trace(" Route: %s", route) self.log.trace("--- tracked_entry_2_best_routes ---") for (entry, best_routes) in \ self.tracked_entry_2_best_routes.items(): self.log.trace(" Entry: %s", TrackerWorker._display_entry(entry)) for route in best_routes: self.log.trace(" Route: %s", route) self.log.trace("--- ---") @staticmethod def _display_entry(entry): if (isinstance(entry, tuple) and len(entry) > 0 and isinstance(entry[0], type)): return repr(tuple([entry[0].__name__] + list(entry[1:]))) else: return repr(entry) # Looking glass ########### def get_lg_map(self): return {"received_routes": (lg.SUBTREE, self.get_lg_all_routes), "best_routes": (lg.SUBTREE, self.get_lg_best_routes)} def get_lg_all_routes(self, path_prefix): return self._get_lg_routes(path_prefix, self.tracked_entry_2_routes) def get_lg_best_routes(self, path_prefix): return self._get_lg_routes(path_prefix, self.tracked_entry_2_best_routes) def _get_lg_routes(self, path_prefix, route_dict): routes = {} for entry in route_dict.keys(): entry_repr = self._display_entry(entry) routes[entry_repr] = [route.get_looking_glass_info(path_prefix) for route in route_dict[entry]] return routes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 
networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/engine/worker.py0000664000175000017500000001114100000000000026737 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import queue import threading from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp import engine LOG = logging.getLogger(__name__) STOP_EVENT = "STOP_EVENT" class Worker(engine.EventSource, lg.LookingGlassMixin): """Base class for objects that interact with the route table manager These objects will: * use _subscribe(...) and _unsubscribe(...) to subscribe to routing events * will specialize _on_event(event) to react to received events They also inherit from EventSource to publish events """ def __init__(self, bgp_manager, worker_name): self.bgp_manager = bgp_manager self.rtm = bgp_manager.rtm self._queue = queue.Queue() self._please_stop = threading.Event() self.name = worker_name assert self.name is not None engine.EventSource.__init__(self, self.rtm) # private data for RouteTableManager self._rtm_matches = set() self._was_stopped = False LOG.debug("Instantiated %s worker", self.name) def stop(self): """Stop this worker. Set the _please_stop internal event to stop the event processor loop and indicate to the route table manager that this worker is stopped. Then call _stopped() to let a subclass implement any further work. 
""" if self._was_stopped: LOG.debug("not running, nothing to do to stop") return LOG.info("Stop worker %s", self) self.stop_event_loop() self._cleanup() self._stopped() self._was_stopped = True def stop_event_loop(self): self._please_stop.set() self.enqueue(STOP_EVENT) def _cleanup(self): self.rtm.enqueue(engine.WorkerCleanupEvent(self)) def _stopped(self): """Hook for subclasses to react when Worker is stopped (NoOp in base Worker class) """ def _event_queue_processor_loop(self): """Main loop where the worker consumes events.""" while not self._please_stop.isSet(): event = self._dequeue() if event == STOP_EVENT: LOG.debug("Stop event, breaking queue processor loop") self._please_stop.set() break try: self._on_event(event) except Exception: LOG.exception("Exception raised on subclass._on_event: %s", event) def run(self): self._event_queue_processor_loop() def _on_event(self, event): """Method implemented by subclasses to react to routing events.""" LOG.debug("Worker %s _on_event: %s", self.name, event) raise NotImplementedError def _dequeue(self): return self._queue.get() def enqueue(self, event): # TODO(tmmorin): replace Queue by a PriorityQueue and use a higher # priority for ReInit event self._queue.put(event) def _subscribe(self, afi, safi, rt=None): try: subobj = engine.Subscription(afi, safi, rt, self) LOG.info("Subscribe: %s ", subobj) self.rtm.enqueue(subobj) except engine.UnsupportedRT as e: LOG.debug("unsupported RT, ignoring (%s)", e.rt) def _unsubscribe(self, afi, safi, rt=None): try: subobj = engine.Unsubscription(afi, safi, rt, self) LOG.info("Unsubscribe: %s ", subobj) self.rtm.enqueue(subobj) except engine.UnsupportedRT as e: LOG.debug("unsupported RT, ignoring (%s)", e.rt) def get_subscriptions(self): return sorted(self._rtm_matches) def __repr__(self): return "%s" % (self.name) # Looking glass ### def get_lg_local_info(self, path_prefix): return { "name": self.name, "internals": { "event queue length": self._queue.qsize(), "subscriptions": 
[repr(sub) for sub in self.get_subscriptions()], } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/fakerr.py0000664000175000017500000001064500000000000025443 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fake BGP Route Reflector It is a dumb program connecting two TCP sockets together. It will buffer what is sent by the first TCP client, and send it to the second one when it arrives. It can be used to act as a super crude route reflector between two bagpipe-bgp instances. But it won't support more than 2 ! """ import threading # fakerr is provided as a helper for manuals tests, but we don't want to draw # twisted as a dependency for the whole package so it is not installed in # test venvs, so we need that to keep pylint happy: # pylint: disable=import-error from twisted.application import internet from twisted.application import service from twisted.internet import endpoints from twisted.internet import protocol from twisted.internet import reactor from twisted.protocols import basic # pylint: disable=no-member class FakeRR(basic.LineReceiver): delimiter = chr(255) * 16 def __init__(self): self.lock = threading.Lock() def connectionMade(self): with self.lock: if self.factory.clients == 2: print("Have already 2 peers, not accepting %s, " "reseting everyone !" 
% self.transport.getPeer().host) for client in self.factory.clients: client.transport.loseConnection() raise Exception("Have already 2 peers, not accepting more !" " (%s)" % self.transport.getPeer().host) print("Got new peer: %s" % self.transport.getPeer().host) self.factory.clients.append(self) if len(self.factory.clients) == 2: # we are the second client print("%s is second peer, sending buffered data..." % self.transport.getPeer().host) for olddata in self.factory.buffer: print(" sending buffered data to peer %s (%d bytes)" % (self.transport.getPeer().host, len(olddata))) self.transport.write(olddata) self.factory.buffer = [] self.factory.ready = True print("now ready") else: print("%s is first peer, will buffer data until second peer " "arrives..." % self.transport.getPeer().host) self.factory.ready = False self.factory.buffer = [] def connectionLost(self, reason=protocol.Protocol.connectionLost): print("Lost peer %s" % self.transport.getPeer().host) try: self.factory.clients.remove(self) except Exception: pass for c in self.factory.clients: if c != self: c.transport.loseConnection() try: self.factory.clients.remove(c) except Exception: pass self.factory.ready = False self.factory.buffer = [] def dataReceived(self, data): if self.factory.ready: for c in self.factory.clients: if c != self: c.transport.write(data) else: print("buffering received data (%d bytes)" % len(data)) if self.factory.buffer is None: print("??? 
not ready, but no self.factory.buffer...??") self.factory.buffer = [] self.factory.buffer.append(data) factory = protocol.ServerFactory() factory.protocol = FakeRR factory.clients = [] factory.ready = False factory.buffer = [] # this application definition allows the use with twisted: # # echo "from networking_bagpipe.bagpipe_bgp.fakerr import application" \ # | twistd -y /dev/stdin application = service.Application("fakerr") internet.TCPServer(179, factory).setServiceParent(application) def main(): endpoints.serverFromString(reactor, "tcp:179").listen(factory) reactor.run() if __name__ == '__main__': main() __all__ = ['main'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9223058 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/0000775000175000017500000000000000000000000024414 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/__init__.py0000664000175000017500000000000000000000000026513 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/dataplane_drivers.py0000664000175000017500000003271100000000000030461 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc from oslo_config import cfg from oslo_log import log as logging from oslo_utils import versionutils import stevedore from networking_bagpipe.bagpipe_bgp.common import config from networking_bagpipe.bagpipe_bgp.common import exceptions as exc from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import run_command from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import constants from networking_bagpipe.bagpipe_bgp.engine import exa LOG = logging.getLogger(__name__) # NOTE(tmorin): have dataplane_local_address default to # cfg.CONF.BGP.local_address does not work (import order issue) # TODO(tmorin): list possible values for dataplane_driver, # see what neutron-db-manage does dataplane_common_opts = [ cfg.Opt("dataplane_local_address", type=config.InterfaceAddress(), help=("IP address to use as next-hop in our route " "advertisements, will be used to send us " "VPN traffic")), cfg.StrOpt("dataplane_driver", default="dummy", help="Dataplane driver.") ] for vpn_type in constants.VPN_TYPES: cfg.CONF.register_opts(dataplane_common_opts, constants.config_group(vpn_type)) def register_driver_opts(vpn_type, driver_opts): cfg.CONF.register_opts(driver_opts, constants.config_group(vpn_type)) # prefix for setuptools entry points for dataplane drivers DATAPLANE_DRIVER_ENTRY_POINT_PFX = "bagpipe.dataplane" def instantiate_dataplane_drivers(): LOG.debug("Building dataplane drivers...") if 'DATAPLANE_DRIVER' in cfg.CONF: LOG.warning("Config file is obsolete, should have a " "DATAPLANE_DRIVER_IPVPN section instead of" " DATAPLANE_DRIVER") drivers = {} for vpn_type in constants.VPN_TYPES: dp_config = cfg.CONF.get(constants.config_group(vpn_type)) driver_name = dp_config.dataplane_driver LOG.debug("Instantiating dataplane driver for %s, with %s", vpn_type, driver_name) try: driver_class = 
stevedore.driver.DriverManager( namespace='{}.{}'.format(DATAPLANE_DRIVER_ENTRY_POINT_PFX, vpn_type), name=driver_name, ).driver drivers[vpn_type] = driver_class() except Exception: LOG.exception("Error while instantiating dataplane driver for " "%s with %s", vpn_type, driver_name) raise return drivers class DataplaneDriver(lg.LookingGlassLocalLogger, utils.ClassReprMixin, metaclass=abc.ABCMeta): '''Dataplane driver The initialisation workflow is the following: - on startup, the selected dataplane driver is loaded (its __init__ is called) - on the first attachment of an interface that matches the driver's type the following happens: * the driver reset_state() method is called * the driver initialize() method is called The cleanup workflow (what happens when bagpipe-bgp-cleanup is called) is the following: - the dataplane driver is loaded (its __init__ is called) - reset_state() method is called As a consequence, whether some init action should go in __init__ or initialize depends on what reset_state does: - __init__ should not do things that reset_state will revert - initialize can do things that reset_state will revert ''' type = None dataplane_instance_class = object # has to be overridden by subclasses encaps = [exa.Encapsulation(exa.Encapsulation.Type.DEFAULT), exa.Encapsulation(exa.Encapsulation.Type.MPLS)] ecmp_support = False required_kernel = None driver_opts = [] @log_decorator.log def __init__(self): lg.LookingGlassLocalLogger.__init__(self) cfg.CONF.register_opts(self.driver_opts, constants.config_group(self.type)) self.config = cfg.CONF.get(constants.config_group(self.type)) assert issubclass(self.dataplane_instance_class, VPNInstanceDataplane) self.local_address = self.config.get("dataplane_local_address") if self.local_address is None: self.local_address = cfg.CONF.BGP.local_address self.log.info("Will use %s as local_address", self.local_address) # Linux kernel version check o = self._run_command("uname -r") self.kernel_release = 
o[0][0].split("-")[0] if self.required_kernel: kernel_release_version = versionutils.convert_version_to_tuple( self.kernel_release) required_kernel_version = versionutils.convert_version_to_tuple( self.required_kernel) if kernel_release_version < required_kernel_version: self.log.warning("%s requires at least Linux kernel %s" " (you are running %s)", self.__class__.__name__, self.required_kernel, self.kernel_release) # Flag to trigger cleanup all dataplane states on first call to # vif_plugged self.first_init = True @abc.abstractmethod def reset_state(self): '''dataplane cleanup hook (abstract) This method is called: - right before the work on the first attachment begins (initialize() is called afterwards) - by bagpipe-bgp-cleanup ''' pass def initialize(self): '''dataplane initialization hook This is called after reset_state (which, e.g. cleans up the stuff possibly left-out by a previous run). Things that are reverted by reset_state() should go here. ''' pass @log_decorator.log_info def initialize_dataplane_instance(self, instance_id, external_instance_id, gateway_ip, mask, instance_label, **kwargs): '''per-VPN dataplane instanciation returns a VPNInstanceDataplane subclass after calling reset_state on the dataplane driver, if this is the first call to initialize_dataplane_instance ''' if self.first_init: self.log.info("First VPN instance init, reinitializing dataplane" " state") try: self.reset_state() except Exception as e: self.log.error("Exception while resetting state: %s", e) try: self.initialize() except Exception as e: self.log.error("Exception while initializing dataplane" " state: %s", e) raise self.first_init = False else: self.log.debug("(not reinitializing dataplane state)") return self.dataplane_instance_class(self, instance_id, external_instance_id, gateway_ip, mask, instance_label, **kwargs) def get_local_address(self): return self.local_address def supported_encaps(self): return self.__class__.encaps def needs_cleanup_assist(self): '''Control 
per-route cleanup events A dataplane driver not able to cleanup all the states for a given VPN instance can return True here to receive artifical dataplane removal calls, such as remove_dataplane_for_remote_endpoint, for each state previously setup ''' return False def validate_directions(self, direction): # by default, assume a driver only supports plugging in both directions # if a driver does not support forwarding only the traffic to the port, # (and hence omit forwarding traffic from the port), it should raise an # exception if directions is TO_PORT # if a driver does not support forwarding only the traffic from the # port (and hence omit forwarding traffic to the port), it should raise # an exception if directions is FROM_PORT if (direction is not None) and (direction != constants.BOTH): self.log.warning("Unsupported direction: %s", direction) raise exc.APIException("Unsupported direction: %s" % direction) def _run_command(self, command, run_as_root=False, *args, **kwargs): return run_command.run_command(self.log, command, run_as_root, *args, **kwargs) def get_lg_map(self): encaps = [] for encap in self.supported_encaps(): encaps.append(repr(encap)) return { "name": (lg.VALUE, self.__class__.__name__), "local_address": (lg.VALUE, self.local_address), "supported_encaps": (lg.VALUE, encaps), "config": (lg.VALUE, utils.osloconfig_json_serialize(self.config)), "kernel_release": (lg.VALUE, self.kernel_release) } class VPNInstanceDataplane(lg.LookingGlassLocalLogger, metaclass=abc.ABCMeta): @log_decorator.log_info def __init__(self, dataplane_driver, instance_id, external_instance_id, gateway_ip, network_plen, instance_label=None, **kwargs): lg.LookingGlassLocalLogger.__init__(self, repr(instance_id)) self.driver = dataplane_driver self.config = dataplane_driver.config self.instance_id = instance_id self.external_instance_id = external_instance_id self.gateway_ip = gateway_ip self.network_plen = network_plen self.instance_label = instance_label @abc.abstractmethod 
def cleanup(self): pass @abc.abstractmethod def vif_plugged(self, mac_address, ip_address_prefix, localport, label, direction): pass @abc.abstractmethod def vif_unplugged(self, mac_address, ip_address_prefix, localport, label, direction, last_endpoint=True): pass def update_fallback(self, fallback): if fallback is not None: self.log.warning("fallback specified (%s) but not supported by " "driver, ignoring", fallback) @abc.abstractmethod def setup_dataplane_for_remote_endpoint(self, prefix, remote_pe, label, nlri, encaps, lb_consistent_hash_order=0): pass @abc.abstractmethod def remove_dataplane_for_remote_endpoint(self, prefix, remote_pe, label, nlri, encaps, lb_consistent_hash_order=0): pass def needs_cleanup_assist(self): '''Control per-route cleanup events A dataplane driver not able to cleanup all the states for a given VPN instance can return True here to receive artifical dataplane removal calls, such as remove_dataplane_for_remote_endpoint, for each state previously setup ''' return self.driver.needs_cleanup_assist() def _run_command(self, command, run_as_root=False, *args, **kwargs): return run_command.run_command(self.log, command, run_as_root, *args, **kwargs) def __repr__(self): return "%s<%d>" % (self.__class__.__name__, self.instance_id) # Looking glass info #### def get_lg_local_info(self, path_prefix): driver = {"id": self.driver.type, "href": lg.get_absolute_path( "DATAPLANE_DRIVERS", path_prefix, [self.driver.type])} return { "driver": driver, } class DummyVPNInstanceDataplane(VPNInstanceDataplane): @log_decorator.log def __init__(self, *args, **kwargs): VPNInstanceDataplane.__init__(self, *args) @log_decorator.log def vif_plugged(self, *args, **kwargs): pass @log_decorator.log def vif_unplugged(self, *args, **kwargs): pass @log_decorator.log def update_fallback(self, *args, **kwargs): pass @log_decorator.log def setup_dataplane_for_remote_endpoint(self, *args, **kwargs): pass @log_decorator.log def remove_dataplane_for_remote_endpoint(self, *args, 
**kwargs): pass @log_decorator.log def cleanup(self, *args, **kwargs): pass class DummyDataplaneDriver(DataplaneDriver): dataplane_instance_class = DummyVPNInstanceDataplane def __init__(self, *args): DataplaneDriver.__init__(self, *args) self.log.warning("Dummy dataplane driver, won't do anything useful") @log_decorator.log_info def initialize(self, *args, **kwargs): pass @log_decorator.log_info def reset_state(self, *args, **kwargs): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9223058 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/evpn/0000775000175000017500000000000000000000000025364 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/evpn/__init__.py0000664000175000017500000002774300000000000027512 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import constants from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers from networking_bagpipe.bagpipe_bgp.vpn import vpn_instance class VPNInstanceDataplane(dp_drivers.VPNInstanceDataplane, metaclass=abc.ABCMeta): @abc.abstractmethod def add_dataplane_for_bum_endpoint(self, remote_pe, dpid, nlri, encaps): pass @abc.abstractmethod def remove_dataplane_for_bum_endpoint(self, remote_pe, dpid, nlri): pass @abc.abstractmethod def set_gateway_port(self, linuxif, gateway_ip): '''Set the IP gateway port Used to determine a port to which traffic at the destination of the IP gateway should be sent. This is used to plug an EVI into an IP VPN VRF. ''' pass @abc.abstractmethod def gateway_port_down(self, linuxif): '''Unset the IP gateway port Used to revert the action done when set_gateway_port was called. Relevant only when an EVI had been plugged into an IP VPN VRF. 
''' pass class DummyVPNInstanceDataplane(dp_drivers.DummyVPNInstanceDataplane, VPNInstanceDataplane): '''Dummy, do-nothing dataplane driver''' @log_decorator.log_info def add_dataplane_for_bum_endpoint(self, *args, **kwargs): pass @log_decorator.log_info def remove_dataplane_for_bum_endpoint(self, *args, **kwargs): pass @log_decorator.log_info def set_gateway_port(self, *args, **kwargs): pass @log_decorator.log_info def gateway_port_down(self, *args, **kwargs): pass class DummyDataplaneDriver(dp_drivers.DummyDataplaneDriver): type = constants.EVPN dataplane_instance_class = DummyVPNInstanceDataplane encaps = [exa.Encapsulation(exa.Encapsulation.Type.VXLAN)] def __init__(self, *args, **kwargs): dp_drivers.DummyDataplaneDriver.__init__(self, *args, **kwargs) class EVI(vpn_instance.VPNInstance, lg.LookingGlassMixin): '''Implementation an E-VPN MAC-VRF instance (EVI) based on RFC7432 and draft-ietf-bess-evpn-overlay. ''' type = constants.EVPN afi = exa.AFI.l2vpn safi = exa.SAFI.evpn @log_decorator.log def __init__(self, *args, **kwargs): vpn_instance.VPNInstance.__init__(self, *args, **kwargs) self.gw_port = None encaps = self.dp_driver.supported_encaps() if (exa.Encapsulation(exa.Encapsulation.Type.VXLAN) in encaps and any([ exa.Encapsulation(exa.Encapsulation.Type.MPLS) in encaps, exa.Encapsulation(exa.Encapsulation.Type.GRE) in encaps, exa.Encapsulation(exa.Encapsulation.Type.MPLS_UDP) in encaps ])): raise Exception("The dataplane can't support both an MPLS encap " "and a VXLAN encapsulation") # Advertise route to receive multi-destination traffic self.log.info("Generating BGP route for broadcast/multicast traffic") nlri = exa.EVPNMulticast( self.instance_rd, exa.EthernetTag(), exa.IP.create(self.bgp_manager.get_local_address()), None, exa.IP.create(self.bgp_manager.get_local_address())) attributes = exa.Attributes() attributes.add(self._gen_encap_extended_communities()) # add PMSI Tunnel Attribute route attributes.add( 
exa.PMSIIngressReplication(self.dp_driver.get_local_address(), raw_label=self.instance_label)) self.multicast_route_entry = engine.RouteEntry(nlri, self.export_rts, attributes) self._advertise_route(self.multicast_route_entry) def _vxlan_dp_driver(self): return (exa.Encapsulation(exa.Encapsulation.Type.VXLAN) in self.dp_driver.supported_encaps()) def generate_vif_bgp_route(self, mac_address, ip_prefix, plen, label, rd): # Generate BGP route and advertise it... if ip_prefix: assert plen == 32 if self._vxlan_dp_driver(): mpls_label_field = exa.Labels([], raw_labels=[self.instance_label]) else: mpls_label_field = exa.Labels([self.instance_label]) # label parameter ignored, we need to use instance label nlri = exa.EVPNMAC( rd, exa.ESI(), exa.EthernetTag(), exa.MAC(mac_address), 6 * 8, mpls_label_field, exa.IP.create(ip_prefix) if ip_prefix else None, None, exa.IP.create(self.dp_driver.get_local_address())) return engine.RouteEntry(nlri) @log_decorator.log def set_gateway_port(self, linuxif, ipvpn): self.dataplane.set_gateway_port(linuxif, ipvpn.gateway_ip) self.gw_port = (linuxif, ipvpn) @log_decorator.log def gateway_port_down(self, linuxif): self.dataplane.gateway_port_down(linuxif) self.gw_port = None def has_gateway_port(self): return (self.gw_port is not None) def _interpret_nlri_mpls_field(self, label_field): # interpret the MPLS field of an EVPN MAroute, based on # whether we assume this is a VXLAN VNI or not if self._vxlan_dp_driver(): return label_field.raw_labels[0] else: return label_field.labels[0] def _interpret_pta_label_field(self, pmsi_tunnel): # interpret the MPLS field of a PMSI tunnel attribute, based on # whether we assume this is a VXLAN VNI or not if self._vxlan_dp_driver(): return pmsi_tunnel.raw_label else: return pmsi_tunnel.label # TrackerWorker callbacks for BGP route updates ########################## def route_to_tracked_entry(self, route): if isinstance(route.nlri, exa.EVPNMAC): return (exa.EVPNMAC, route.nlri.mac) elif 
isinstance(route.nlri, exa.EVPNMulticast): return (exa.EVPNMulticast, (route.nlri.ip, route.nlri.rd)) elif isinstance(route.nlri, exa.EVPN): self.log.warning("Received EVPN route of unsupported subtype: %s", route.nlri.CODE) return None else: raise Exception("EVI %d should not receive routes of type %s" % (self.instance_id, type(route.nlri))) @utils.synchronized @log_decorator.log def new_best_route(self, entry, new_route): (entry_class, info) = entry encaps = self._check_encaps(new_route) if not encaps: return if entry_class == exa.EVPNMAC: prefix = info remote_pe = new_route.nexthop dataplane_id = self._interpret_nlri_mpls_field( new_route.nlri.label) self.dataplane.setup_dataplane_for_remote_endpoint( prefix, remote_pe, dataplane_id, new_route.nlri, encaps) elif entry_class == exa.EVPNMulticast: remote_endpoint = info # check that the route is actually carrying an PMSITunnel of type # ingress replication pmsi_tunnel = new_route.attributes.get(exa.PMSI.ID) if not isinstance(pmsi_tunnel, exa.PMSIIngressReplication): self.log.warning("Received PMSITunnel of unsupported type: %s", type(pmsi_tunnel)) else: remote_endpoint = pmsi_tunnel.ip dataplane_id = self._interpret_pta_label_field(pmsi_tunnel) self.log.info("Setting up dataplane for new ingress " "replication destination %s", remote_endpoint) self.dataplane.add_dataplane_for_bum_endpoint( remote_endpoint, dataplane_id, new_route.nlri, encaps) else: self.log.warning("unsupported entry_class: %s", entry_class.__name__) @utils.synchronized @log_decorator.log def best_route_removed(self, entry, old_route, last): (entry_class, info) = entry if entry_class == exa.EVPNMAC: if self._skip_route_removal(last): self.log.debug("Skipping removal of non-last route because " "dataplane does not want it") return def ip_dpid_from_route(route): return (route.nexthop, self._interpret_nlri_mpls_field(route.nlri.label)) if self.equivalent_route_in_best_routes(old_route, ip_dpid_from_route): self.log.debug("Route for same dataplane is 
still in best " "routes, skipping removal") return prefix = info remote_pe, dataplane_id = ip_dpid_from_route(old_route) self.dataplane.remove_dataplane_for_remote_endpoint( prefix, remote_pe, dataplane_id, old_route.nlri) elif entry_class == exa.EVPNMulticast: remote_endpoint = info def ip_dpid_from_route(route): pmsi_tunnel = route.attributes.get(exa.PMSI.ID) remote_endpoint = pmsi_tunnel.ip dataplane_id = self._interpret_pta_label_field(pmsi_tunnel) return (remote_endpoint, dataplane_id) # check that the route is actually carrying an PMSITunnel of type # ingress replication pmsi_tunnel = old_route.attributes.get(exa.PMSI.ID) if not isinstance(pmsi_tunnel, exa.PMSIIngressReplication): self.log.warning("PMSITunnel of suppressed route is of" " unsupported type") return if self.equivalent_route_in_best_routes(old_route, ip_dpid_from_route): self.log.debug("Route for same dataplane is still in best " "routes, skipping removal") return remote_endpoint, dataplane_id = ip_dpid_from_route(old_route) self.log.info("Cleaning up dataplane for ingress replication " "destination %s", remote_endpoint) self.dataplane.remove_dataplane_for_bum_endpoint( remote_endpoint, dataplane_id, old_route.nlri) else: self.log.warning("unsupported entry_class: %s", entry_class.__name__) # Looking Glass #### def get_lg_local_info(self, path_prefix): if not self.gw_port: return {"gateway_port": None} else: (linuxif, ipvpn) = self.gw_port return {"gateway_port": { "interface": repr(linuxif), "ipvpn": {"href": lg.get_absolute_path( "VPN_INSTANCES", path_prefix, [ipvpn.external_instance_id]), "id": ipvpn.name, "external_instance_id": ipvpn.external_instance_id }, }} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/evpn/linux_vxlan.py0000664000175000017500000003036300000000000030312 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, 
Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from pyroute2 import ndb as ndb_mod # pylint: disable=no-name-in-module from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp import constants as consts from networking_bagpipe.bagpipe_bgp.vpn import evpn from networking_bagpipe.privileged import privileged_utils BRIDGE_NAME_PREFIX = "evpn---" VXLAN_INTERFACE_PREFIX = "vxlan--" LOG = logging.getLogger(__name__) class LinuxVXLANEVIDataplane(evpn.VPNInstanceDataplane): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if 'linuxbr' in kwargs: self.bridge_name = kwargs.get('linuxbr') else: self.bridge_name = ( BRIDGE_NAME_PREFIX + self.external_instance_id)[:consts.LINUX_DEV_LEN] if not self._interface_exists(self.bridge_name): self.log.debug("Starting bridge %s", self.bridge_name) # Create bridge privileged_utils.brctl('addbr %s' % self.bridge_name) privileged_utils.brctl('setfd %s 0' % self.bridge_name) privileged_utils.brctl('stp %s off' % self.bridge_name) self._run_command("ip link set %s up" % self.bridge_name, run_as_root=True) self.log.debug("Bridge %s created", self.bridge_name) self._create_and_plug_vxlan_if() self.log.debug("VXLAN interface %s plugged on bridge %s", self.vxlan_if_name, self.bridge_name) self._cleaning_up = False @log_decorator.log_info def cleanup(self): self.log.info("Cleaning EVI bridge and VXLAN interface %s", self.bridge_name) self._cleaning_up = True # removing the vxlan interface removes our 
routes, # but if we don't remove the vxlan if (if it was reused) then # cleanup will not happen, which is why we use cleanup assist # (see needs cleanup assist below) self._cleanup_vxlan_if() # Delete only EVPN Bridge (Created by dataplane driver) if BRIDGE_NAME_PREFIX in self.bridge_name: self._run_command("ip link set %s down" % self.bridge_name, run_as_root=True, raise_on_error=False) privileged_utils.brctl('delbr %s' % self.bridge_name, check_exit=False) def needs_cleanup_assist(self): # If we reused a vxlan interface we won't cleanup fdb entries # in cleanup(), so we need to have remove_dataplane_for_x # be called for reach state via cleanup assist return VXLAN_INTERFACE_PREFIX not in self.vxlan_if_name def _create_and_plug_vxlan_if(self): # if a VXLAN interface, with the VNI we want to use, is already plugged # in the bridge, we want to reuse it with ndb_mod.main.NDB() as ndb: # pylint: disable=no-member for port_id in ndb.interfaces[self.bridge_name].ports: port = ndb.interfaces[port_id] # pylint: disable=no-member if (port['kind'] == "vxlan" and port['vxlan_id'] == self.instance_label): self.log.info("reuse vxlan interface %s for VXLAN VNI %s", port['ifname'], self.instance_label) self.vxlan_if_name = port['ifname'] return self.vxlan_if_name = (VXLAN_INTERFACE_PREFIX + self.external_instance_id)[:consts.LINUX_DEV_LEN] self.log.debug("Creating and plugging VXLAN interface %s", self.vxlan_if_name) if self._interface_exists(self.vxlan_if_name): self._remove_vxlan_if() dst_port_spec = "" if self.driver.config.vxlan_dst_port: dst_port_spec = ("dstport %d" % self.driver.config.vxlan_dst_port) # Create VXLAN interface self._run_command( "ip link add %s type vxlan id %d local %s nolearning proxy %s" % (self.vxlan_if_name, self.instance_label, self.driver.get_local_address(), dst_port_spec), run_as_root=True ) self._run_command("ip link set %s up" % self.vxlan_if_name, run_as_root=True) # Plug VXLAN interface into bridge privileged_utils.brctl('addif {} 
{}'.format(self.bridge_name, self.vxlan_if_name)) def _cleanup_vxlan_if(self): if VXLAN_INTERFACE_PREFIX not in self.vxlan_if_name: self.log.debug("we reused the VXLAN interface, don't cleanup") return if self._is_vxlan_if_on_bridge(): # Unplug VXLAN interface from Linux bridge self._unplug_from_bridge(self.vxlan_if_name) self._remove_vxlan_if() def _remove_vxlan_if(self): if not VXLAN_INTERFACE_PREFIX not in self.vxlan_if_name: self.log.debug("we reused the VXLAN interface, don't remove") return # Remove VXLAN interface self._run_command("ip link set %s down" % self.vxlan_if_name, run_as_root=True) self._run_command("ip link del %s" % self.vxlan_if_name, run_as_root=True) def _is_if_on_bridge(self, ifname): with ndb_mod.main.NDB() as ndb: try: # pylint: disable=no-member for port_id in ndb.interfaces[self.bridge_name].ports: port = ndb.interfaces[port_id] # pylint: disable=no-member if port.ifname == ifname: return True except KeyError: return False return False def _is_vxlan_if_on_bridge(self): return self._is_if_on_bridge(self.vxlan_if_name) def _interface_exists(self, interface): """Check if interface exists.""" (_, exit_code) = self._run_command("ip link show dev %s" % interface, raise_on_error=False, acceptable_return_codes=[-1]) return (exit_code == 0) def _unplug_from_bridge(self, interface): if self._interface_exists(self.bridge_name): privileged_utils.brctl('delif {} {}'.format(self.bridge_name, interface), check_exit=[0, 1]) def set_gateway_port(self, linuxif, gw_ip): privileged_utils.brctl('addif {} {}'.format(self.bridge_name, linuxif), check_exit=False) self._fdb_dump() def gateway_port_down(self, linuxif): privileged_utils.brctl('delif {} {}'.format(self.bridge_name, linuxif), check_exit=False) # TODO(tmorin): need to cleanup bridge fdb and ip neigh ? 
def set_bridge_name(self, linuxbr): self.bridge_name = linuxbr @log_decorator.log_info def vif_plugged(self, mac_address, ip_address, localport, dpid, direction): # Plug localport only if bridge was created by us if BRIDGE_NAME_PREFIX in self.bridge_name: self.log.debug("Plugging localport %s into EVPN bridge %s", localport['linuxif'], self.bridge_name) privileged_utils.brctl( 'addif {} {}'.format(self.bridge_name, localport['linuxif']), check_exit=False) privileged_utils.bridge( 'fdb replace {} dev {}'.format(mac_address, localport['linuxif'])) self._fdb_dump() @log_decorator.log_info def vif_unplugged(self, mac_address, ip_address, localport, dpid, direction, last_endpoint=True): # remove local fdb entry, but only if tap interface is still here if self._is_if_on_bridge(localport['linuxif']): privileged_utils.bridge( 'fdb delete {} dev {}'.format(mac_address, localport['linuxif'])) # unplug localport only if bridge was created by us if BRIDGE_NAME_PREFIX in self.bridge_name: self.log.debug("Unplugging localport %s from EVPN bridge %s", localport['linuxif'], self.bridge_name) self._unplug_from_bridge(localport['linuxif']) self._fdb_dump() @log_decorator.log def setup_dataplane_for_remote_endpoint(self, prefix, remote_pe, dpid, nlri, encaps): if self._cleaning_up: self.log.debug("setup_dataplane_for_remote_endpoint: instance" " cleaning up, do nothing") return mac = prefix ip = nlri.ip vni = dpid # populate bridge forwarding db privileged_utils.bridge( 'fdb replace %s dev %s dst %s vni %s' % (mac, self.vxlan_if_name, remote_pe, vni)) # populate ARP cache if ip is not None: self._run_command("ip neighbor replace %s lladdr %s dev %s nud " "permanent" % (ip, mac, self.vxlan_if_name), run_as_root=True) else: self.log.trace("No IP in E-VPN route, no ARP proxy for %s" % mac) self._fdb_dump() @log_decorator.log def remove_dataplane_for_remote_endpoint(self, prefix, remote_pe, dpid, nlri): if self._cleaning_up: self.log.debug("setup_dataplane_for_remote_endpoint: instance" 
" cleaning up, do nothing") return mac = prefix ip = nlri.ip vni = dpid self._fdb_dump() # clear ARP proxy if ip is not None: self._run_command("ip neighbor del %s lladdr %s dev %s nud " "permanent" % (ip, mac, self.vxlan_if_name), run_as_root=True) privileged_utils.bridge( 'fdb del %s dev %s dst %s vni %s' % (mac, self.vxlan_if_name, remote_pe, vni)) self._fdb_dump() @log_decorator.log def add_dataplane_for_bum_endpoint(self, remote_pe, dpid, nlri, encaps): if self._cleaning_up: self.log.debug("setup_dataplane_for_remote_endpoint: instance" " cleaning up, do nothing") return vni = dpid # 00:00:00:00:00 usable as default since kernel commit # 58e4c767046a35f11a55af6ce946054ddf4a8580 (2013-06-25) privileged_utils.bridge( 'fdb append 00:00:00:00:00:00 dev %s dst %s vni %s' % (self.vxlan_if_name, remote_pe, vni)) self._fdb_dump() @log_decorator.log def remove_dataplane_for_bum_endpoint(self, remote_pe, dpid, nlri): if self._cleaning_up: self.log.debug("setup_dataplane_for_remote_endpoint: instance" " cleaning up, do nothing") return vni = dpid self._fdb_dump() privileged_utils.bridge( 'fdb delete 00:00:00:00:00:00 dev %s dst %s vni %s' % (self.vxlan_if_name, remote_pe, vni)) self._fdb_dump() def _fdb_dump(self): if self.log.isEnabledFor(logging.DEBUG): self.log.debug("bridge fdb dump: %s", self._run_command( "fdb show br %s" % self.bridge_name, acceptable_return_codes=[0, 255], run_as_root=True)[0]) # Looking glass #### def get_lg_local_info(self, path_prefix): return { "linux_bridge": self.bridge_name, "vxlan_if": self.vxlan_if_name } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/evpn/ovs.py0000664000175000017500000002245000000000000026550 0ustar00zuulzuul00000000000000# Copyright 2018 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import config from networking_bagpipe.bagpipe_bgp.common import dataplane_utils from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp import constants as consts from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers from networking_bagpipe.bagpipe_bgp.vpn import evpn from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import \ br_tun from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent from neutron_lib import constants as n_consts from neutron_lib.plugins.ml2 import ovs_constants as ovs_const LOG = logging.getLogger(__name__) FLOOD = "flood" FLOW_PRIORITY = 5 class OVSEVIDataplane(evpn.VPNInstanceDataplane): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.bridge = self.driver.bridge # OpenFlow 1.3 is needed for mod_vlan_vid self.bridge.use_at_least_protocol(ovs_const.OPENFLOW13) self.tunnel_mgr = dataplane_utils.SharedObjectLifecycleManagerProxy( self.driver.tunnel_mgr, self.instance_id ) self.flooding_ports = set() self.vlan = None self.local_ip = self.driver.get_local_address() def cleanup(self): self.bridge.delete_flows(strict=True, table=ovs_const.FLOOD_TO_TUN, priority=FLOW_PRIORITY, dl_vlan=self.vlan) self.bridge.delete_group(group_id=self.vlan) @log_decorator.log_info def vif_plugged(self, mac_address, ip_address_prefix, localport, label, direction): if 'vlan' not in 
localport: raise Exception("missing localport['vlan'] parameter") if self.vlan and localport['vlan'] != self.vlan: raise Exception("inconsistent vlan") else: self.vlan = localport['vlan'] # map traffic to this EVI VNI to the right table, similarly as in # OVSTunnelBridge.provision_local_vlan self.bridge.add_flow(table=ovs_const.VXLAN_TUN_TO_LV, priority=FLOW_PRIORITY, tun_id=self.instance_label, actions=("push_vlan:0x8100,mod_vlan_vid:%d," "resubmit(,%s)" % (self.vlan, ovs_const.LEARN_FROM_TUN))) @log_decorator.log_info def vif_unplugged(self, mac_address, ip_address_prefix, localport, label, direction, last_endpoint=True): self.log.debug("nothing to do on unplug") def _local_vni_actions(self, vni): # "load:0->NXM_OF_IN_PORT[]" allows the packets coming from br-int # via patch-port to go back to br-int via the same port return "load:0->NXM_OF_IN_PORT[],set_tunnel:%d,resubmit(,%s)" % ( vni, ovs_const.VXLAN_TUN_TO_LV) @log_decorator.log_info def setup_dataplane_for_remote_endpoint(self, prefix, remote_pe, vni, nlri, encaps): mac = prefix ip = nlri.ip # what is done here is similar as # OVSTunnelBridge.install_unicast_to_tun, but with local delivery to # table VXLAN_TUN_TO_LV for routes advertized locally if remote_pe == self.local_ip: actions = self._local_vni_actions(vni) else: port, _ = self.tunnel_mgr.get_object(remote_pe, (vni, mac)) actions = "set_tunnel:%d,output:%s" % (vni, port) self.bridge.add_flow(table=ovs_const.UCAST_TO_TUN, priority=FLOW_PRIORITY, dl_vlan=self.vlan, dl_dst=mac, actions="strip_vlan,%s" % actions) # add ARP responder if ip: self.bridge.install_arp_responder(self.vlan, str(ip), str(mac)) @log_decorator.log def remove_dataplane_for_remote_endpoint(self, prefix, remote_pe, vni, nlri): mac = prefix ip = nlri.ip self.bridge.delete_unicast_to_tun(self.vlan, mac) if remote_pe != self.local_ip: self.tunnel_mgr.free_object(remote_pe, (vni, mac)) # cleanup ARP responder if ip: self.bridge.delete_arp_responder(self.vlan, str(ip)) 
@log_decorator.log_info def add_dataplane_for_bum_endpoint(self, remote_pe, vni, nlri, encaps): if remote_pe == self.local_ip: port = "local" else: port, _ = self.tunnel_mgr.get_object(remote_pe, (vni, FLOOD)) self.flooding_ports.add((port, vni)) self._update_flooding_buckets() @log_decorator.log_info def remove_dataplane_for_bum_endpoint(self, remote_pe, vni, nlri): if remote_pe == self.local_ip: port = "local" else: port = self.tunnel_mgr.find_object(remote_pe) if port: self.flooding_ports.remove((port, vni)) self._update_flooding_buckets() if remote_pe != self.local_ip: self.tunnel_mgr.free_object(remote_pe, (vni, FLOOD)) def _update_flooding_buckets(self): buckets = [] for port, vni in self.flooding_ports: if port == "local": buckets.append("bucket=strip_vlan,%s" % self._local_vni_actions(vni)) else: buckets.append("bucket=strip_vlan,set_tunnel:%d,output:%s" % (vni, port)) self.bridge.mod_group(group_id=self.vlan, type='all', buckets=','.join(buckets)) self.bridge.add_flow(table=ovs_const.FLOOD_TO_TUN, priority=FLOW_PRIORITY, dl_vlan=self.vlan, actions="group:%d" % self.vlan) self.log.debug("buckets: %s", buckets) def set_gateway_port(self, linuxif, gateway_ip): # nothing to do, because we make the assumption that the # IPVPN driver is 'ovs' as well, and setup in conjunction # with Neutron OVS BGPVPN extension which does the plugging # between L2 and L3 pass def gateway_port_down(self, linuxif): pass # Looking glass #### def get_lg_local_info(self, path_prefix): return { "vlan": self.vlan, "flooding-ports": [{"port": str(port), "vni": vni} for port, vni in self.flooding_ports] } class TunnelManager(dataplane_utils.ObjectLifecycleManager): def __init__(self, bridge, local_ip): super().__init__() self.bridge = bridge self.local_ip = local_ip @log_decorator.log_info def create_object(self, remote_ip, *args, **kwargs): port_name = ovs_neutron_agent.OVSNeutronAgent.get_tunnel_name( n_consts.TYPE_VXLAN, self.local_ip, remote_ip) tunnel = 
self.bridge.add_tunnel_port(port_name, remote_ip, self.local_ip, n_consts.TYPE_VXLAN) self.bridge.setup_tunnel_port(n_consts.TYPE_VXLAN, tunnel) LOG.debug("tunnel for %s: %s (%s)", remote_ip, port_name, tunnel) return tunnel @log_decorator.log_info def delete_object(self, tunnel): self.bridge.delete_port(tunnel) class OVSDataplaneDriver(dp_drivers.DataplaneDriver): dataplane_instance_class = OVSEVIDataplane type = consts.EVPN ecmp_support = False encaps = [exa.Encapsulation(exa.Encapsulation.Type.VXLAN)] driver_opts = [ cfg.StrOpt("ovs_bridge", default="br-tun", help=("Name of the OVS bridge to use, this has to be the " "same as the tunneling bridge of the Neutron OVS " "agent, usually br-tun")), ] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) config.set_default_root_helper() self.bridge = dataplane_utils.OVSBridgeWithGroups( br_tun.OVSTunnelBridge(self.config.ovs_bridge, os_ken_app=self) ) self.tunnel_mgr = TunnelManager(self.bridge, self.get_local_address()) def needs_cleanup_assist(self): return True def reset_state(self): # cleanup is taken care of by OVS Neutron Agent pass # Looking glass #### def get_lg_local_info(self, path_prefix): return { "tunnels": self.tunnel_mgr.infos(), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/identifier_allocators.py0000664000175000017500000000672700000000000031347 0ustar00zuulzuul00000000000000# Copyright 2018 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import itertools import threading from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp.engine import exa from neutron_lib import exceptions LOG = logging.getLogger(__name__) class MaxIDReached(exceptions.NeutronException): message = "Could not allocate identifier, maximum (%(max)d) was reached" class IDAllocator(lg.LookingGlassMixin): MIN = 0 MAX = 2 ** 32 - 1 # no id > MAX will be allocated def __init__(self): self.allocated_ids = dict() self.released_ids = list() self.current_id = self.MIN self.lock = threading.Lock() def _allocate(self, id, description, update_current=False): self.allocated_ids[id] = description # Update current_id to the next free id if update_current and id == self.current_id: for next_id in itertools.count(self.current_id + 1): if next_id not in self.allocated_ids: self.current_id = next_id break LOG.debug("Allocated id %d for '%s'", id, description) return id @utils.synchronized def get_new_id(self, description=None, hint_value=None): if hint_value is not None and hint_value > self.MAX: LOG.warning("Allocator hint value cannot be beyond MAX") if hint_value is not None and hint_value not in self.allocated_ids: return self._allocate(hint_value, description, update_current=True) elif self.current_id > self.MAX: if len(self.released_ids) > 0: # FIFO (pop the id that was released the earliest) return self._allocate(self.released_ids.pop(0), description) else: raise MaxIDReached(max=self.MAX) else: return self._allocate(self.current_id, description, update_current=True) @utils.synchronized def release(self, id): if id in self.allocated_ids: LOG.debug("Released id %d ('%s')", id, self.allocated_ids[id]) del self.allocated_ids[id] self.released_ids.append(id) else: raise Exception("Asked to release a 
non-allocated id: %d" % id) def get_lg_local_info(self, prefix): return self.allocated_ids class RDAllocator(IDAllocator): MAX = 2 ** 16 - 1 def __init__(self, prefix): super().__init__() self.prefix = prefix def get_new_rd(self, description): new_id = self.get_new_id(description) return exa.RouteDistinguisher.fromElements(self.prefix, new_id) def release(self, rd): super().release(int(str(rd).split(':')[1])) class LabelAllocator(IDAllocator): MIN = 16 MAX = 2 ** 20 - 1 def get_new_label(self, description): return self.get_new_id(description) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9223058 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/ipvpn/0000775000175000017500000000000000000000000025550 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/ipvpn/__init__.py0000664000175000017500000005216300000000000027670 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import itertools from networking_bagpipe.bagpipe_bgp.common import exceptions as exc from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import constants from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import flowspec from networking_bagpipe.bagpipe_bgp.engine import ipvpn as ipvpn_routes from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers from networking_bagpipe.bagpipe_bgp.vpn import vpn_instance DEFAULT_ADDR_PREFIX = '0.0.0.0/0' class VPNInstanceDataplane(dp_drivers.VPNInstanceDataplane, metaclass=abc.ABCMeta): @abc.abstractmethod def add_dataplane_for_traffic_classifier(self, classifier, redirect_to_instance_id): pass @abc.abstractmethod def remove_dataplane_for_traffic_classifier(self, classifier): pass class DummyVPNInstanceDataplane(dp_drivers.DummyVPNInstanceDataplane, VPNInstanceDataplane): def add_dataplane_for_traffic_classifier(self, *args, **kwargs): raise Exception("not implemented") def remove_dataplane_for_traffic_classifier(self, *args, **kwargs): raise Exception("not implemented") class DummyDataplaneDriver(dp_drivers.DummyDataplaneDriver): type = constants.IPVPN dataplane_instance_class = DummyVPNInstanceDataplane class VRF(vpn_instance.VPNInstance, lg.LookingGlassMixin): # component managing a VRF: # - calling a driver to instantiate the dataplane # - registering to receive routes for the needed route targets # - calling the driver to setup/update/remove routes in the dataplane # - cleanup: calling the driver, unregistering for BGP routes type = constants.IPVPN afi = exa.AFI.ipv4 safi = exa.SAFI.mpls_vpn @log_decorator.log def __init__(self, *args, **kwargs): vpn_instance.VPNInstance.__init__(self, *args, **kwargs) self.readvertised = set() 
def needs_cleanup_assist(self, afi, safi): if (afi, safi) == (self.afi, exa.SAFI.flow_vpn): return True else: return self.dataplane.needs_cleanup_assist() @classmethod def validate_convert_params(cls, params, also_mandatory=()): return super().validate_convert_params( params, also_mandatory=tuple(set(also_mandatory) | {'ip_address'})) @classmethod def validate_convert_attach_params(cls, params): super().validate_convert_attach_params(params) if 'gateway_ip' not in params: raise exc.APIMissingParameterException('gateway_ip') def _nlri_from(self, prefix, label, rd): assert rd is not None return ipvpn_routes.IPVPNRouteFactory( self.afi, prefix, label, rd, self.dp_driver.get_local_address()) def generate_vif_bgp_route(self, mac_address, ip_prefix, plen, label, rd): # Generate BGP route and advertise it... nlri = self._nlri_from("{}/{}".format(ip_prefix, plen), label, rd) return engine.RouteEntry(nlri) def _get_local_labels(self): for port_data in self.mac_2_localport_data.values(): yield port_data['label'] def _get_route_params_for_endpoint(self, endpoint): port_data = self.mac_2_localport_data[endpoint[0]] label = port_data['label'] lb_consistent_hash_order = port_data[ 'lb_consistent_hash_order'] rd = self.endpoint_2_rd[endpoint] return (label, rd, lb_consistent_hash_order) def _imported(self, route): return len(set(route.route_targets).intersection(set(self.import_rts)) ) > 0 def _to_readvertise(self, route): rt_records = route.ecoms(exa.RTRecord) self.log.debug("RTRecords: %s (readvertise_to_rts:%s)", rt_records, self.readvertise_to_rts) readvertise_targets_as_records = [exa.RTRecord.from_rt(rt) for rt in self.readvertise_to_rts] if self.attract_traffic: readvertise_targets_as_records += [exa.RTRecord.from_rt(rt) for rt in self.attract_rts] if set(readvertise_targets_as_records).intersection(set(rt_records)): self.log.debug("not to re-advertise because one of the readvertise" " or attract-redirect RTs is in RTRecords: %s", set(readvertise_targets_as_records) 
.intersection(set(rt_records))) return False return len(set(route.route_targets).intersection( set(self.readvertise_from_rts) )) > 0 def _gen_rtrecords_extended_community(self, ecoms): # new RTRecord = original RTRecord (if any) + orig RTs converted into # RTRecord attributes orig_rtrecords = ecoms(exa.RTRecord) rts = ecoms(exa.RTExtCom) add_rtrecords = [exa.RTRecord.from_rt(rt) for rt in rts] return list(set(orig_rtrecords) | set(add_rtrecords)) def _route_for_readvertisement(self, route, endpoint): label, rd, lb_consistent_hash_order = ( self._get_route_params_for_endpoint(endpoint) ) self.log.debug("Prefix %s (re-)advertisement with label %s and route " "distinguisher %s", route.nlri.cidr.prefix(), label, rd) nlri = self._nlri_from(route.nlri.cidr.prefix(), label, rd) attributes = exa.Attributes() ecoms = self._gen_encap_extended_communities() ecoms.communities += ( self._gen_rtrecords_extended_community(route.ecoms) ) ecoms.communities.append( exa.ConsistentHashSortOrder(lb_consistent_hash_order)) attributes.add(ecoms) entry = engine.RouteEntry(nlri, self.readvertise_to_rts, attributes) self.log.debug("RouteEntry for (re-)advertisement: %s", entry) return entry def _default_route_for_advertisement(self, endpoint): label, rd, lb_consistent_hash_order = ( self._get_route_params_for_endpoint(endpoint) ) self.log.debug("Default route (re-)advertisement with label %s and " "route distinguisher %s", label, rd) nlri = self._nlri_from(DEFAULT_ADDR_PREFIX, label, rd) attributes = exa.Attributes() ecoms = self._gen_encap_extended_communities() ecoms.communities.append( exa.ConsistentHashSortOrder(lb_consistent_hash_order)) attributes.add(ecoms) entry = engine.RouteEntry(nlri, self.readvertise_to_rts, attributes) self.log.debug("RouteEntry for default prefix advertisement: %s", entry) return entry @log_decorator.log def _routes_for_attract_static_dest_prefixes(self, endpoint): if not self.attract_static_dest_prefixes: return label, rd, _ = 
self._get_route_params_for_endpoint(endpoint) for prefix in self.attract_static_dest_prefixes: nlri = self._nlri_from(prefix, label, rd) entry = engine.RouteEntry(nlri, self.readvertise_to_rts) self.log.debug("RouteEntry for attract static destination prefix: " "%s", entry) yield entry @log_decorator.log def _route_for_redirect_prefix(self, prefix): prefix_classifier = utils.dict_camelcase_to_underscore( self.attract_classifier) prefix_classifier['destination_prefix'] = prefix traffic_classifier = vpn_instance.TrafficClassifier( **prefix_classifier) self.log.debug("Advertising prefix %s for redirection based on " "traffic classifier %s", prefix, traffic_classifier) rules = traffic_classifier.map_traffic_classifier_2_redirect_rules() return self.synthesize_redirect_bgp_route(rules) def _redirect_route_for_readvertisement(self, route): # Create a FlowSpec NLRI with distinct RD and a copy of rules from # FlowSpec route to readvertise nlri = flowspec.FlowRouteFactory(self.afi, self.instance_rd) nlri.rules = route.nlri.rules attributes = exa.Attributes() ecoms = exa.ExtendedCommunities() ecoms.communities += ( self._gen_rtrecords_extended_community(route.ecoms) ) assert len(self.attract_rts) == 1 rt = self.attract_rts[0] ecoms.communities.append( exa.TrafficRedirect(exa.ASN(int(rt.asn)), int(rt.number)) ) attributes.add(ecoms) entry = engine.RouteEntry(nlri, self.readvertise_to_rts, attributes) self.log.debug("RouteEntry for redirect (re-)advertisement: %s", entry) return entry @log_decorator.log def _readvertise(self, route): nlri = route.nlri self.log.debug("Start re-advertising %s from VRF", nlri) if self.attract_traffic: # Start advertising default route only when the first route to # readvertise appears if not self.readvertised: for endpoint in self.all_endpoints(): self.log.debug("Start advertising default route from VRF " "%d to redirection VRF", self.instance_id) self._advertise_route( self._default_route_for_advertisement(endpoint) ) if isinstance(nlri, 
flowspec.Flow): # Readvertise FlowSpec route self._advertise_route( self._redirect_route_for_readvertisement(route) ) else: # Advertise FlowSpec route for prefix self._advertise_route( self._route_for_redirect_prefix(nlri.cidr.prefix()) ) else: for endpoint in self.all_endpoints(): self.log.debug("Start re-advertising %s from endpoint %s", nlri.cidr.prefix(), endpoint) self._advertise_route( self._route_for_readvertisement(route, endpoint) ) self.readvertised.add(route) @log_decorator.log def _readvertise_stop(self, route, last): nlri = route.nlri if last: self.log.debug("Stop re-advertising %s from VRF", nlri) if self.attract_traffic: # Stop advertising default route only if the withdrawn route is # the last of the routes to readvertise if len(self.readvertised) == 1: for endpoint in self.all_endpoints(): self.log.debug("Stop advertising default route from " "VRF to redirection VRF") self._withdraw_route( self._default_route_for_advertisement(endpoint) ) if isinstance(nlri, flowspec.Flow): # Withdraw readvertised FlowSpec route self._withdraw_route( self._redirect_route_for_readvertisement(route) ) else: # Withdraw FlowSpec route for prefix self._withdraw_route( self._route_for_redirect_prefix(nlri.cidr.prefix()) ) else: for endpoint in self.all_endpoints(): self.log.debug("Stop re-advertising %s from endpoint %s", nlri.cidr.prefix(), endpoint) route_entry = ( self._route_for_readvertisement(route, endpoint) ) self._withdraw_route(route_entry) self.readvertised.remove(route) @log_decorator.log_info def vif_plugged(self, mac_address, ip_address_prefix, localport, advertise_subnet=False, lb_consistent_hash_order=0, local_pref=None, **kwargs): super().vif_plugged(mac_address, ip_address_prefix, localport, advertise_subnet, lb_consistent_hash_order, local_pref, **kwargs) if vpn_instance.forward_to_port(kwargs.get('direction')): endpoint = (mac_address, ip_address_prefix) for route in itertools.chain( self.readvertised, 
self._routes_for_attract_static_dest_prefixes(endpoint)): self.log.debug("Re-advertising %s with this port as next hop", route.nlri) if self.attract_traffic: self._advertise_route( self._default_route_for_advertisement(endpoint) ) if self.has_only_one_endpoint(): self._advertise_route(self._route_for_redirect_prefix( route.nlri.cidr.prefix())) else: self._advertise_route( self._route_for_readvertisement(route, endpoint) ) @log_decorator.log_info def vif_unplugged(self, mac_address, ip_address_prefix): endpoint = (mac_address, ip_address_prefix) direction = self.endpoint_2_direction[endpoint] if vpn_instance.forward_to_port(direction): for route in itertools.chain( self.readvertised, self._routes_for_attract_static_dest_prefixes(endpoint)): self.log.debug("Stop re-advertising %s", route.nlri) if self.attract_traffic: self._withdraw_route( self._default_route_for_advertisement(endpoint) ) if self.has_only_one_endpoint(): self._withdraw_route(self._route_for_redirect_prefix( route.nlri.cidr.prefix())) else: self._withdraw_route( self._route_for_readvertisement(route, endpoint) ) super().vif_unplugged(mac_address, ip_address_prefix) # Callbacks for BGP route updates (TrackerWorker) ######################## def route_to_tracked_entry(self, route): if isinstance(route.nlri, ipvpn_routes.IPVPN): return route.nlri.cidr.prefix() elif isinstance(route.nlri, flowspec.Flow): return (flowspec.Flow, route.nlri._rules()) else: self.log.error("We should not receive routes of type %s", type(route.nlri)) return None @utils.synchronized @log_decorator.log def new_best_route(self, entry, new_route): if self.readvertise: # check if this is a route we need to re-advertise self.log.debug("route RTs: %s", new_route.route_targets) self.log.debug("readv from RTs: %s", self.readvertise_from_rts) if self._to_readvertise(new_route): self.log.debug("Need to re-advertise %s", entry) self._readvertise(new_route) if not self._imported(new_route): self.log.debug("No need to setup dataplane for:%s", 
entry) return if isinstance(new_route.nlri, flowspec.Flow): if len(new_route.ecoms(exa.TrafficRedirect)) == 1: traffic_redirect = new_route.ecoms(exa.TrafficRedirect) redirect_rt = "{}:{}".format(traffic_redirect[0].asn, traffic_redirect[0].target) self.start_redirect_traffic(redirect_rt, new_route.nlri.rules) else: self.log.warning("FlowSpec action or multiple traffic " "redirect actions not supported: %s", new_route.ecoms()) else: prefix = entry encaps = self._check_encaps(new_route) if not encaps: return assert len(new_route.nlri.labels.labels) == 1 lb_consistent_hash_order = 0 if new_route.ecoms(exa.ConsistentHashSortOrder): lb_consistent_hash_order = new_route.ecoms( exa.ConsistentHashSortOrder)[0].order self.dataplane.setup_dataplane_for_remote_endpoint( prefix, new_route.nexthop, new_route.nlri.labels.labels[0], new_route.nlri, encaps, lb_consistent_hash_order) @utils.synchronized @log_decorator.log def best_route_removed(self, entry, old_route, last): if self.readvertise: # check if this is a route we were re-advertising if self._to_readvertise(old_route): self.log.debug("Need to stop re-advertising %s", entry) self._readvertise_stop(old_route, last) if isinstance(old_route.nlri, flowspec.Flow): if self._imported(old_route): if len(old_route.ecoms(exa.TrafficRedirect)) == 1: if last: traffic_redirect = old_route.ecoms( exa.TrafficRedirect) redirect_rt = "{}:{}".format( traffic_redirect[0].asn, traffic_redirect[0].target) self.stop_redirect_traffic(redirect_rt, old_route.nlri.rules) else: self.log.warning("FlowSpec action or multiple traffic " "redirect actions not supported: %s", old_route.ecoms()) else: prefix = entry # NOTE(tmorin): On new best routes, we only trigger dataplane # update events after checking with self._imported(...) that the # route was imported (and not a route that we receive because the # VRF should readvertise ir). 
On best_route_removed, we can't do # that because we could end up in a situation where: # - initially import_rts contains RT X # - we receive a route for RT X and install dataplane state # - the import_rts list is later updated and RT X is not anymore # part of the imported RTs, and the VRF unsubscribes from RT X # - we receive the best_route_removed callbacks corresponding to # the unsubscribe, but since the route is for no RT that is in # import_rts, we don't update the dataplane # The result would be to fail to remove dataplane state for this # route, so we're better not optimizing this case and remove # dataplane state, including possibly for routes that we did # not install in it. if self._skip_route_removal(last): self.log.debug("Skipping removal of non-last route because " "dataplane does not want it") return # if we still have a route with same dataplane properties in # best routes, then we don't want to clear the dataplane entry if self.equivalent_route_in_best_routes( old_route, lambda r: (r.nexthop, r.nlri.labels.labels[0])): self.log.debug("Route for same dataplane is still in best " "routes, skipping removal") return encaps = self._check_encaps(old_route) if not encaps: return assert len(old_route.nlri.labels.labels) == 1 lb_consistent_hash_order = 0 if old_route.ecoms(exa.ConsistentHashSortOrder): lb_consistent_hash_order = old_route.ecoms( exa.ConsistentHashSortOrder)[0].order self.dataplane.remove_dataplane_for_remote_endpoint( prefix, old_route.nexthop, old_route.nlri.labels.labels[0], old_route.nlri, encaps, lb_consistent_hash_order) # Looking glass ### def get_lg_map(self): return { "readvertised": (lg.SUBTREE, self.get_lg_readvertised_routes), } def get_lg_readvertised_routes(self, path_prefix): return [route.get_lg_local_info(path_prefix) for route in self.readvertised] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 
networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/ipvpn/mpls_linux_dataplane.py0000664000175000017500000003540700000000000032336 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import socket import netaddr from oslo_config import cfg from oslo_serialization import jsonutils import pyroute2 from pyroute2 import common as pr_common # pylint: disable=no-name-in-module from pyroute2 import netlink # pylint: disable=no-name-in-module from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp import constants as consts from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers from networking_bagpipe.privileged import privileged_utils ipr = pyroute2.IPRoute() # pylint: disable=no-member VRF_INTERFACE_PREFIX = "bvrf-" RT_TABLE_BASE = 1000 RT_PROT_BAGPIPE = 19 # NOTE(tmorin): can this be removed ? (is jsonutils to_primitive() enough?) 
def json_set_default(obj): if isinstance(obj, set): return list(obj) raise TypeError def sysctl(sysctl_path, val): privileged_utils.sysctl(knob='.'.join(sysctl_path,), value=val) def proxy_arp(ifname, enable): sysctl(['net', 'ipv4', 'conf', ifname, 'proxy_arp'], int(enable)) sysctl(['net', 'ipv4', 'conf', ifname, 'proxy_arp_pvlan'], int(enable)) class MPLSLinuxVRFDataplane(dp_drivers.VPNInstanceDataplane): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # FIXME: maybe not thread safe ? self.ip = self.driver.ip self.vrf_if = ("%s%d" % (VRF_INTERFACE_PREFIX, self.instance_id))[:consts.LINUX_DEV_LEN] self.rt_table = RT_TABLE_BASE + self.instance_id self.log.info("Initializing VRF interface %s", self.vrf_if) self.flush_routes() self.log.debug("Creating VRF interface...") # Create VRF interface with self.ip.create(kind='vrf', ifname=self.vrf_if, vrf_table=self.rt_table, reuse=True) as i: i.up() self.vrf_if_idx = i.index # Create ip rule for VRF route table # TODO(tmorin): do in IPDB, when possible (check: what if # rule exist, but is not known by IPDB?) 
for family in socket.AF_INET, socket.AF_INET6: ipr.flush_rules(family=family, iifname=self.vrf_if) ipr.flush_rules(family=family, oifname=self.vrf_if) ipr.rule('add', family=family, priority=100, iifname=self.vrf_if, table=self.rt_table, action='FR_ACT_TO_TBL') ipr.rule('add', family=family, priority=100, oifname=self.vrf_if, table=self.rt_table, action='FR_ACT_TO_TBL') # if VRF traffic does not match any route, # lookups must *not* fallback to main/default # routing table ipr.rule('add', family=family, priority=101, iifname=self.vrf_if, action='FR_ACT_UNREACHABLE') ipr.rule('add', family=family, priority=101, oifname=self.vrf_if, action='FR_ACT_UNREACHABLE') def add_route(self, route): route.update({'proto': RT_PROT_BAGPIPE}) self.ip.routes.add(route).commit() def flush_routes(self): ipr.flush_routes(table=self.rt_table) @log_decorator.log_info def cleanup(self): # bring down and disconnect all interfaces from vrf interface with self.ip.interfaces[self.vrf_if_idx] as vrf: for interface in vrf.ports: with self.ip.interfaces[interface] as i: i.down() vrf.del_port(interface) vrf.remove() ipr.flush_rules(iifname=self.vrf_if) ipr.flush_rules(oifname=self.vrf_if) self.flush_routes() @log_decorator.log_info def vif_plugged(self, mac_address, ip_address, localport, label, direction): interface = localport['linuxif'] if interface not in self.ip.interfaces: self.log.warning("interface %s not in interfaces, ignoring plug", interface) return # ip link set dev localport master vrf_interface with self.ip.interfaces[self.vrf_if_idx] as vrf: vrf.add_port(interface) with self.ip.interfaces[interface] as i: i.up() # add static ARP entry toward interface (because we can) ipr.neigh('add', dst=ip_address, lladdr=mac_address, ifindex=self.ip.interfaces[interface].index, state=netlink.rtnl.ndmsg.states['permanent']) # Setup ARP proxying proxy_arp(interface, True) # Configure gateway address on this port # FIXME: that would need to be per vif port # Retrieve broadcast IP address 
broadcast_ip = str(netaddr.IPNetwork("{}/{}".format(self.gateway_ip, self.network_plen) ).broadcast) try: ipr.addr('add', index=self.ip.interfaces[interface].index, address=self.gateway_ip, mask=self.network_plen, broadcast=broadcast_ip) except netlink.exceptions.NetlinkError as x: if x.code == errno.EEXIST: # the route already exists, fine self.log.debug("route %s already exists on %s", self.gateway_ip, interface) else: raise # Configure mapping for incoming MPLS traffic # with this port label req = {'family': pr_common.AF_MPLS, 'oif': self.ip.interfaces[interface].index, 'dst': label, # FIXME how to check for BoS? 'via': {'family': socket.AF_INET, 'addr': ip_address} } try: self.add_route(req) except netlink.exceptions.NetlinkError as x: if x.code == errno.EEXIST: # the route already exists, fine self.log.warning("MPLS state for %d already exists", label) else: raise @log_decorator.log_info def vif_unplugged(self, mac_address, ip_address, localport, label, direction, last_endpoint=True): interface = localport['linuxif'] if interface not in self.ip.interfaces: self.log.warning("interface %s not in interfaces, ignoring plug", interface) return # bring the interface down, we don't want # traffic from this interface to leak out of the VRF with self.ip.interfaces[interface] as i: i.down() # Disable ARP proxying proxy_arp(interface, False) # ip link set dev localport master vrf_interface with self.ip.interfaces[self.vrf_if_idx] as i: i.del_port(interface) # Unconfigure gateway address on this port # FIXME: that would need to be per vif port # Retrieve broadcast IP address ip = netaddr.IPNetwork( "{}/{}".format(self.gateway_ip, self.network_plen)) broadcast_ip = str(ip.broadcast) ipr.addr('del', index=self.ip.interfaces[interface].index, address=self.gateway_ip, mask=self.network_plen, broadcast=broadcast_ip) with self.ip.routes.tables['mpls'][label] as r: r.remove() def _read_mpls_in(self, label): routes = [r for r in self.ip.routes.tables["mpls"] if r['dst'] == label] 
assert len(routes) == 1 res = (routes[0]['oif'], routes[0]['via']['addr']) self.log.debug("Found %s for %d with IPDB", res, label) return res def _nh(self, remote_pe, label, encaps): mpls = True if str(remote_pe) == self.driver.get_local_address(): # FIXME: does not work yet, from this table, # 'gateway' is considered unreachable # we could drop 'gateway' and just keep oif # but this would only work for connected routes # if remote_pe is ourselves # we lookup the route for the label and deliver directly # to this oif/gateway # (oif, gateway) = self._read_mpls_in(label) # mpls = False gateway = '127.0.0.1' oif = self.ip.interfaces['lo'].index else: gateway = remote_pe oif = self.driver.mpls_interface_index nh = {'oif': oif} if gateway: nh['gateway'] = gateway if mpls: nh['encap'] = {'type': 'mpls', 'labels': [{'bos': 1, 'label': label}]} self.log.debug("nh: %s", nh) return nh def _get_route(self, prefix): return self.ip.routes.tables[self.rt_table][prefix] @log_decorator.log_info def setup_dataplane_for_remote_endpoint(self, prefix, remote_pe, label, nlri, encaps, lb_consistent_hash_order=0): prefix = str(prefix) if prefix == "0.0.0.0/0": prefix = 'default' try: with self._get_route(prefix) as r: self.log.debug("a route to %s already exists, adding nexthop", prefix) r.add_nh(self._nh(remote_pe, label, encaps)) except KeyError: self.log.debug("no route to %s yet, creating", prefix) req = {'table': self.rt_table, 'dst': prefix, 'multipath': [self._nh(remote_pe, label, encaps)]} self.log.debug("adding route: %s", req) self.add_route(req) @log_decorator.log_info def remove_dataplane_for_remote_endpoint(self, prefix, remote_pe, label, nlri, encaps, lb_consistent_hash_order=0): prefix = str(prefix) if prefix == "0.0.0.0/0": prefix = 'default' try: with self._get_route(prefix) as r: # FIXME: encap info is missing here if r['multipath']: r.del_nh(self._nh(remote_pe, label, None)) else: # last route r.remove() except KeyError: self.log.warning("no route found on 
remove_dataplane_for" "_remote_endpoint for %s", prefix) # Looking Glass ## def get_lg_map(self): return {"routes": (lg.SUBTREE, self.get_lg_routes), "route_table": (lg.VALUE, self.rt_table), "vrf_if": (lg.VALUE, self.vrf_if), } @log_decorator.log_info def get_lg_routes(self, path_prefix): routes = self.ip.routes.tables[self.rt_table] return [{r['dst']: jsonutils.loads(jsonutils.dumps(r, default=json_set_default))} for r in routes] class MPLSLinuxDataplaneDriver(dp_drivers.DataplaneDriver): """Dataplane driver relying on the MPLS stack in the Linux kernel This dataplane driver relies on the MPLS stack in the Linux kernel, and on linux vrf interfaces. """ required_kernel = "4.4" dataplane_instance_class = MPLSLinuxVRFDataplane type = consts.IPVPN ecmp_support = True driver_opts = [ cfg.StrOpt("mpls_interface", help=("Interface used to send/receive MPLS traffic. " "Use '*gre*' to choose automatic creation of a tunnel" " interface for MPLS/GRE encap")), ] def __init__(self): super().__init__() privileged_utils.modprobe('mpls_router') privileged_utils.modprobe('mpls_gso') privileged_utils.modprobe('mpls_iptunnel') privileged_utils.modprobe('vrf') self.ip = pyroute2.IPDB() # pylint: disable=no-member @log_decorator.log_info def reset_state(self): # remove all VRF interfaces for itf in self.ip.interfaces.keys(): if isinstance(itf, str) and itf.startswith(VRF_INTERFACE_PREFIX): # bring the interface vrf slave interfaces down, # we don't want traffic from these interfaces # to leak out of the VRF after removal of VRF interface for index in self.ip.interfaces[itf].ports: with self.ip.interfaces[index] as port: port.down() ipr.link('del', ifname=itf) # Flush all routes setup by us in past runs ipr.flush_routes(proto=RT_PROT_BAGPIPE) # Flush all MPLS routes redirecting traffic to network namespaces # (just in case, should be covered by the above) ipr.flush_routes(family=pr_common.AF_MPLS) @log_decorator.log_info def initialize(self): sysctl('net.mpls.platform_labels', 2 ** 20 
- 1) if "*gre*" in self.config["mpls_interface"]: self.mpls_interface = "gre_wildcard" raise Exception("MPLS/GRE not supported yet") else: self.mpls_interface = self.config["mpls_interface"] sysctl('net.mpls.conf.%s.input' % self.mpls_interface, 1) self.mpls_interface_index = self.ip.interfaces[self.mpls_interface ].index # for traffic from ourselves: sysctl('net.mpls.conf.lo.input', 1) # enable forwarding sysctl('net.ipv4.ip_forward', 1) def supported_encaps(self): yield exa.Encapsulation(exa.Encapsulation.Type.MPLS) # we also accept route with no encap specified yield exa.Encapsulation(exa.Encapsulation.Type.DEFAULT) # Looking glass #### def get_lg_map(self): return {"mpls": (lg.SUBTREE, self.get_lg_mpls_routes), } def get_lg_mpls_routes(self, path_prefix): return [{r['dst']: jsonutils.loads(jsonutils.dumps(r, default=json_set_default))} for r in self.ip.routes.tables['mpls']] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/ipvpn/mpls_ovs_dataplane.py0000664000175000017500000014630200000000000032003 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import re import collections import netaddr from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from oslo_utils import versionutils from neutron.common import utils as n_utils from networking_bagpipe.bagpipe_bgp.common import config from networking_bagpipe.bagpipe_bgp.common import dataplane_utils from networking_bagpipe.bagpipe_bgp.common import exceptions as exc from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import net_utils from networking_bagpipe.bagpipe_bgp import constants as consts from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers from networking_bagpipe.bagpipe_bgp.vpn import identifier_allocators from networking_bagpipe.bagpipe_bgp.vpn import vpn_instance from neutron.agent.common import ovs_lib from neutron_lib.plugins.ml2 import ovs_constants as ovs_const # man ovs-ofctl /32768 DEFAULT_OVS_FLOW_PRIORITY = 0x8000 # we want to avoid having our flows having a lowest priority than # the default DEFAULT_RULE_PRIORITY = DEFAULT_OVS_FLOW_PRIORITY + 0x1000 # priorities for IP match flows # highest priority MAX_PREFIX_PRIORITY for MAX_PREFIX_LENGTH prefix # (MAX_PREFIX_PRIORITY - MAX_PREFIX_LENGTH) for a zero length prefix MAX_PREFIX_PRIORITY = DEFAULT_RULE_PRIORITY MAX_PREFIX_LENGTH = 0x80 # 128 # fallback flows get a priority even lower # (we round it for better readability of flow dumps) FALLBACK_PRIORITY = MAX_PREFIX_PRIORITY - MAX_PREFIX_LENGTH - 0x80 NO_MPLS_PHY_INTERFACE = -1 VXLAN_TUNNEL = "vxlan" OVS_DUMP_FLOW_FILTER = "| grep -v NXST_FLOW | perl -pe '" \ "s/ *cookie=[^,]+, duration=[^,]+, table=[^,]+, //;" \ "s/ *n_bytes=[^,]+, //; " \ "s/ *(hard|idle)_age=[^,]+, //g; " \ "s/n_packets=([0-9]),/packets=$1 /; " \ "s/n_packets=([0-9]{2}),/packets=$1 /; " \ "s/n_packets=([0-9]{3}),/packets=$1 /; " \ 
"s/n_packets=([0-9]+),/packets=$1 /; " \ "'" GATEWAY_MAC = "00:00:5e:00:43:64" ARP_RESPONDER_ACTIONS = ('move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' 'mod_dl_src:%(mac)s,' 'load:0x2->NXM_OF_ARP_OP[],' 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' 'push:NXM_OF_ARP_TPA[],' 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' 'load:%(mac)#x->NXM_NX_ARP_SHA[],' 'pop:NXM_OF_ARP_SPA[],' '%(vlan_action)soutput:%(in_port)d') VRF_REGISTER = 0 def _match_from_prefix(prefix): # A zero-length prefix is a default route, no nw_dst is needed/possible # in this case prefix_length = netaddr.IPNetwork(prefix).prefixlen return {'nw_dst': prefix} if prefix_length != 0 else {} def _priority_from_prefix(prefix): # FIXME: use a priority depending on the prefix len # to compensate the fact that "OpenFlow leaves behavior # undefined when two or more flows with the same priority # can match a single packet. Some users expect ``sensible'' # behavior, such as more specific flows taking precedence # over less specific flows, but OpenFlow does not specify # this and Open vSwitch does not implement it. Users should # therefore take care to use priorities to ensure the # behavior that they expect. 
prefix_length = netaddr.IPNetwork(prefix).prefixlen # to implement a longest-match lookup we give longest prefixes # the higher priority priority = MAX_PREFIX_PRIORITY - (MAX_PREFIX_LENGTH - prefix_length) return priority class MPLSOVSVRFDataplane(dp_drivers.VPNInstanceDataplane): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Initialize dict where we store info on OVS ports (port numbers and # bound IP address) self._ovs_port_info = dict() self.bridge = self.driver.bridge self.nh_group_mgr = NextHopGroupManagerProxy( self.driver.nh_group_mgr, self.instance_id, self.driver.vrf_table, self._vrf_match(), self._cookie ) self.fallback = None self.ovs_vlan = None @log_decorator.log_info def cleanup(self): if self._ovs_port_info: self.log.warning("OVS port numbers list for local ports plugged in" " VRF is not empty, clearing...") self._ovs_port_info.clear() # Remove all flows for this instance for table in self.driver.all_tables.values(): self.bridge.delete_flows(table=table, cookie=self._cookie(add=False)) # Remove all groups for this instance self.nh_group_mgr.clear_objects() @log_decorator.log def _extract_mac_address(self, output): """Extract MAC address from command output""" return re.search(r"([0-9A-F]{2}[:-]){5}([0-9A-F]{2})", output, re.IGNORECASE).group() def _find_remote_mac_address(self, remote_ip): """Find MAC address for a remote IP address""" # PING remote IP address (_, exit_code) = self._run_command("fping -r4 -t100 -q -I %s %s" % (self.bridge.br_name, remote_ip), raise_on_error=False, acceptable_return_codes=[-1]) if exit_code != 0: self.log.info("can't ping %s via %s, proceeding anyways", remote_ip, self.bridge.br_name) # we proceed even if the ping failed, since the ping was # just a way to trigger an ARP resolution which may have # succeeded even if the ping failed # Look in ARP cache to find remote MAC address (output, _) = self._run_command("ip neigh show to %s dev %s" % (remote_ip, self.bridge.br_name)) if (not output or 
"FAILED" in output[0] or "INCOMPLETE" in output[0]): raise exc.RemotePEMACAddressNotFound(remote_ip) try: return self._extract_mac_address(output[0]) except Exception: raise exc.RemotePEMACAddressNotFound(remote_ip) def _mtu_fixup(self, localport): # This is a hack, proper MTUs should actually be configured in the # hybrid vif driver # TODO(tmorin): obsolete mtu = self.config["ovsbr_interfaces_mtu"] if not mtu: self.log.debug("No ovsbr_interfaces_mtu specified in config file," " not trying to fixup MTU") return try: itf = localport['ovs']['port_name'] except KeyError: self.log.warning("No OVS port name provided, cannot fix MTU") return self.log.info("Will adjust %s if with MTU %s " "(ovsbr_interfaces_mtu specified in config)", itf, mtu) (_, exit_code) = self._run_command("ip link show %s" % itf, raise_on_error=False, acceptable_return_codes=[0, 1]) if exit_code != 0: self.log.warning("No %s if, not trying to fix MTU", itf) else: self._run_command("ip link set {} mtu {}".format(itf, mtu), run_as_root=True) def _get_ovs_port_specifics(self, localport): # Returns a tuple of: # - OVS port number for traffic to/from VMs # - OVS port name try: port_name = "" if ('ovs' in localport and localport['ovs']['plugged']): try: port = localport['ovs']['port_number'] except KeyError: self.log.info("No OVS port number provided, trying to use" " a port name") port = self.driver.find_ovs_port( localport['ovs']['port_name']) else: try: try: port_name = localport['ovs']['port_name'] except KeyError: port_name = localport['linuxif'] except Exception: raise Exception("Trying to find which port to plug, but no" " portname was provided") try: port = self.driver.find_ovs_port(port_name) except Exception: port = self.bridge.add_port(port_name) self.log.debug("Corresponding port number: %s", port) except KeyError as e: self.log.error("Incomplete port specification: %s", e) raise Exception("Incomplete port specification: %s" % e) return (port, port_name) def _vlan_match(self): return 
{'dl_vlan': self.ovs_vlan} if self.ovs_vlan else {} def get_push_vlan_action(self): return ("push_vlan:0x8100,mod_vlan_vid:%d" % self.ovs_vlan if self.ovs_vlan else "") def get_strip_vlan_action(self): return "strip_vlan" if self.ovs_vlan else "" @log_decorator.log_info def update_fallback(self, fallback=None): if fallback: self.fallback = fallback if not self.fallback: return for param in ('src_mac', 'dst_mac', 'ovs_port_number'): if not self.fallback.get(param): self.log.error("fallback specified without '%s'", param) return # use priority -1 so that the route only hits when the packet # does not matches any VRF route self.bridge.add_flow_extended( flow_matches=[dict(table=self.driver.vrf_table, cookie=self._cookie(add=True), priority=FALLBACK_PRIORITY), self._vrf_match()], actions=[self.get_push_vlan_action(), 'mod_dl_src:%s' % self.fallback.get('src_mac'), 'mod_dl_dst:%s' % self.fallback.get('dst_mac'), 'output:%d' % self.fallback.get('ovs_port_number')]) @log_decorator.log_info def setup_arp_responder(self, ovs_port): if not self.driver.config.arp_responder: return self.bridge.add_flow_extended( flow_matches=[dict(table=self.driver.vrf_table, cookie=self._cookie(add=True), priority=DEFAULT_RULE_PRIORITY, proto='arp', dl_dst='ff:ff:ff:ff:ff:ff', arp_op='0x1'), self._vrf_match(), # Respond to all IP addresses if proxy ARP is # enabled, otherwise only for gateway {'arp_tpa': self.gateway_ip} if not self.driver.config.proxy_arp else {}], actions=[ARP_RESPONDER_ACTIONS % { 'mac': netaddr.EUI(GATEWAY_MAC, dialect=netaddr.mac_unix), 'vlan_action': self.get_push_vlan_action(), 'in_port': ovs_port }]) @log_decorator.log_info def remove_arp_responder(self): self.bridge.delete_flows_extended( flow_matches=[dict(table=self.driver.vrf_table, cookie=self._cookie(add=False), proto='arp'), self._vrf_match()]) def _check_vlan_use(self, localport): # checks that if a vlan_action is used, it is the same # for all interfaces plugged into the VRF # on first plug we update try: 
ovs_vlan = int(localport['ovs']['vlan']) except KeyError: return if self.ovs_vlan is None: self.ovs_vlan = ovs_vlan else: # on a subsequent plug, we check if self.ovs_vlan != ovs_vlan: self.log.error("different VLAN for different interfaces: " "%s vs %s", self.ovs_vlan, ovs_vlan) raise Exception("can't specify a different VLAN for different" " interfaces") @log_decorator.log def vif_plugged(self, mac_address, ip_address, localport, label, direction): (ovs_port, ovs_port_name) = self._get_ovs_port_specifics(localport) self._check_vlan_use(localport) # need to update fallback, in case it was called before the # first vifPlugged call could define the push_vlan action self.update_fallback() # This is a hack used with previous versions of Openstack # proper MTUs should actually be configured in the hybrid vif driver # Please consider this obsolete until it gets clean'd up self._mtu_fixup(localport) if vpn_instance.forward_from_port(direction): # Map traffic from plugged port to VRF # Note that we first reset the in_port so that OVS will allow the # packet to eventually go back to this port after a VRF lookup # (the case where that happens is where in port is a patch-port on # which different VLANs are used to reach different networks): # patch port vlan X -- VRFX --- VRF Y -- patch-port vlan Y # # ( see http://docs.openvswitch.org/en/latest/faq/openflow/ # "Q: I added a flow to send packets out the ingress port..." 
) for proto in ('ip', 'arp'): self.bridge.add_flow_extended( flow_matches=[dict(table=self.driver.input_table, cookie=self._cookie(add=True), priority=DEFAULT_RULE_PRIORITY, proto=proto, in_port=ovs_port), self._vlan_match()], actions=[self.get_strip_vlan_action(), 'load:0->NXM_OF_IN_PORT[],', 'set_field:%d->reg%d,' % (self.instance_id, VRF_REGISTER), 'resubmit(,%d)' % self.driver.vrf_table]) # Map ARP responder if necessary if not self._ovs_port_info: self.setup_arp_responder(ovs_port) if vpn_instance.forward_to_port(direction): # Map incoming MPLS traffic going to the VM port incoming_actions = [self.get_push_vlan_action(), "mod_dl_src:{},mod_dl_dst:{}".format( GATEWAY_MAC, mac_address), "output:%s" % ovs_port] self.bridge.add_flow_extended( flow_matches=[dict(table=self.driver.encap_in_table, cookie=self._cookie(add=True), priority=DEFAULT_RULE_PRIORITY, proto="mpls", mpls_label=label, mpls_bos=1)], actions=["pop_mpls:0x0800"] + incoming_actions) # additional incoming traffic rule for VXLAN if self.driver.vxlan_encap: self.bridge.add_flow_extended( flow_matches=[ dict(table=self.driver.encap_in_table, cookie=self._cookie(add=True), priority=DEFAULT_RULE_PRIORITY, in_port=self.driver.vxlan_tunnel_port_number, tun_id=label)], actions=incoming_actions) # Add OVS port number in list for local port plugged in VRF # FIXME: check check check, is linuxif the right key?? 
self.log.debug("Adding OVS port %s with port %s for address " "%s, to the list of ports plugged in VRF", localport['linuxif'], ovs_port, ip_address) self._ovs_port_info[localport['linuxif']] = { "ovs_port": ovs_port, "ovs_port_name": ovs_port_name, } @log_decorator.log def vif_unplugged(self, mac_address, ip_address, localport, label, direction, last_endpoint=True): ovs_port = self._ovs_port_info[localport['linuxif']]['ovs_port'] ovs_port_name = self._ovs_port_info[ localport['linuxif']]['ovs_port_name'] if vpn_instance.forward_to_port(direction): # Unmap incoming MPLS traffic going to the VM port self.bridge.delete_flows(table=self.driver.encap_in_table, cookie=self._cookie(add=False), proto="mpls", mpls_label=label, mpls_bos=1) # Unmap incoming VXLAN traffic... if self.driver.vxlan_encap: self.bridge.delete_flows( table=self.driver.encap_in_table, cookie=self._cookie(add=False), in_port=self.driver.vxlan_tunnel_port_number, tun_id=label) if last_endpoint: if vpn_instance.forward_from_port(direction): # Unmap all traffic from plugged port self.bridge.delete_flows_extended( flow_matches=[dict(table=self.driver.input_table, cookie=self._cookie(add=False), in_port=ovs_port), self._vlan_match()]) # Unmap ARP responder self.remove_arp_responder() # Run port unplug action if necessary (OVS port delete) if ovs_port_name: self.bridge.delete_port(ovs_port_name) # Remove OVS port number from list for local port plugged in VRF del self._ovs_port_info[localport['linuxif']] def _get_label_action(self, label, encaps): if (self.driver.vxlan_encap and exa.Encapsulation(exa.Encapsulation.Type.VXLAN) in encaps): return "set_field:%d->tunnel_id" % label else: return "push_mpls:0x8847,set_mpls_label:%d" % label def _get_output_action(self, remote_pe, encaps): # Check if prefix is from a local VRF if self.driver.get_local_address() == remote_pe: self.log.debug("Local route, using a resubmit action") # For local traffic, we have to use a resubmit action if (self.driver.vxlan_encap and 
exa.Encapsulation(exa.Encapsulation.Type.VXLAN) in encaps): return ("resubmit(%d,%d)" % (self.driver.vxlan_tunnel_port_number, self.driver.encap_in_table)) else: return "resubmit(%d,%d)" % (self.driver.mpls_in_port(), self.driver.encap_in_table) else: if (self.driver.vxlan_encap and exa.Encapsulation(exa.Encapsulation.Type.VXLAN) in encaps): self.log.debug("Will use a VXLAN encap for this destination") return "set_field:{}->tun_dst,output:{}".format( remote_pe, self.driver.vxlan_tunnel_port_number) elif self.driver.use_gre: self.log.debug("Using MPLS/GRE encap") return "set_field:{}->tun_dst,output:{}".format( remote_pe, self.driver.gre_tunnel_port_number) else: self.log.debug("Using bare MPLS encap") # Find remote router MAC address try: remote_pe_mac_address = self._find_remote_mac_address( remote_pe) self.log.debug("MAC address found for remote router " "%(remote_pe)s: %(remote_pe_mac_address)s", locals()) except exc.RemotePEMACAddressNotFound as e: self.log.error("An error occured during setupDataplaneFor" "RemoteEndpoint: %s", e) raise # Map traffic to remote IP address as MPLS on ethX to remote # router MAC address return "mod_dl_src:{},mod_dl_dst:{},output:{}".format( self.driver.mpls_if_mac_address, remote_pe_mac_address, self.driver.ovs_mpls_if_port_number) def _cookie(self, add=False): mask = "" if not add: mask = "/-1" return "%d%s" % (self.instance_id, mask) def _vrf_match(self): return {'reg%d' % VRF_REGISTER: self.instance_id} @log_decorator.log_info def setup_dataplane_for_remote_endpoint(self, prefix, remote_pe, label, nlri, encaps, lb_consistent_hash_order=0): nexthop = NextHop(label, remote_pe, encaps, lb_consistent_hash_order) if self.nh_group_mgr.is_object_user(prefix, nexthop): self.log.debug("Dataplane already in place for %s, %s, skipping", prefix, nexthop) return dec_ttl = (netaddr.IPNetwork(prefix) not in netaddr.IPNetwork( "{}/{}".format(self.gateway_ip, self.network_plen))) label_action = self._get_label_action(nexthop.label, 
nexthop.encaps) output_action = self._get_output_action(nexthop.remote_pe, nexthop.encaps) self.nh_group_mgr.new_nexthop(prefix, nexthop, actions=["dec_ttl" if dec_ttl else "", label_action, output_action]) @log_decorator.log_info def remove_dataplane_for_remote_endpoint(self, prefix, remote_pe, label, nlri, encaps, lb_consistent_hash_order=0): nexthop = NextHop(label, remote_pe, encaps, lb_consistent_hash_order) if self.nh_group_mgr.is_object_user(prefix, nexthop): self.nh_group_mgr.del_nexthop(prefix, nexthop) else: self.log.debug("remove_dataplane_for_remote_endpoint called, " "for %s and %s, but we don't know about this " "prefix and next-hop", prefix, nexthop.__dict__) def _get_port_range_from_classifier(self, classifier_port): if classifier_port: if isinstance(classifier_port, tuple): port_min, port_max = classifier_port else: port_min = port_max = classifier_port return port_min, port_max def _create_port_range_flow_matches(self, classifier_match, classifier): flow_matches = [] src_port_match = f'{classifier.protocol:s}_src' if classifier.source_port: if isinstance(classifier.source_port, tuple): src_port_min, src_port_max = classifier.source_port else: src_port_min = src_port_max = classifier.source_port dst_port_match = f'{classifier.protocol:s}_dst' if classifier.destination_port: if isinstance(classifier.destination_port, tuple): dst_port_min, dst_port_max = classifier.destination_port else: dst_port_min = dst_port_max = classifier.destination_port dst_port_range = [] if dst_port_min and dst_port_max: dst_port_range = n_utils.port_rule_masking(dst_port_min, dst_port_max) src_port_range = [] if src_port_min and src_port_max: src_port_range = n_utils.port_rule_masking(src_port_min, src_port_max) for port in src_port_range: flow_match = classifier_match.copy() flow_match[src_port_match] = port if dst_port_range: for port in dst_port_range: dst_flow = flow_match.copy() dst_flow[dst_port_match] = port flow_matches.append(dst_flow) else: 
flow_matches.append(flow_match) else: for port in dst_port_range: flow_match = classifier_match.copy() flow_match[dst_port_match] = port flow_matches.append(flow_match) return flow_matches def _create_classifier_flow_matches(self, classifier): classifier_match = dict(proto=classifier.protocol) if classifier.source_pfx: classifier_match.update({'nw_src': classifier.source_pfx}) if classifier.destination_pfx: classifier_match.update({'nw_dst': classifier.destination_pfx}) return self._create_port_range_flow_matches(classifier_match, classifier) @log_decorator.log_info def add_dataplane_for_traffic_classifier(self, classifier, redirect_to_instance_id): classifier_matches = self._create_classifier_flow_matches(classifier) # Add traffic redirection to redirection VRF for classifier matches for classifier_match in classifier_matches: self.bridge.add_flow_extended( flow_matches=[dict(table=self.driver.vrf_table, cookie=self._cookie(add=True), priority=DEFAULT_RULE_PRIORITY), self._vrf_match(), classifier_match], actions=['set_field:%d->reg%d' % (redirect_to_instance_id, VRF_REGISTER), 'resubmit(,%d)' % self.driver.vrf_table]) @log_decorator.log_info def remove_dataplane_for_traffic_classifier(self, classifier): classifier_matches = self._create_classifier_flow_matches(classifier) # Remove traffic redirection to redirection VRF for classifier matches for classifier_match in classifier_matches: self.bridge.delete_flows_extended( flow_matches=[dict(table=self.driver.vrf_table, cookie=self._cookie(add=False)), self._vrf_match(), classifier_match]) def get_lg_map(self): return { "flows": (lg.SUBTREE, self.get_lg_ovs_flows) } def get_lg_ovs_flows(self, path_prefix): return self.driver.get_lg_ovs_flows( path_prefix, 'cookie=%s' % self._cookie(add=False)) class OVSGroupAllocator(identifier_allocators.IDAllocator): MAX = 2 ** 32 - 1 class OVSBucketAllocator(identifier_allocators.IDAllocator): # Values greater than 0xFFFFFF00 are reserved MAX = 2 ** 32 - 2 ** 8 - 1 class NextHop: 
def __init__(self, label, remote_pe, encaps, lb_consistent_hash_order): self.label = label self.remote_pe = str(remote_pe) self.encaps = frozenset(encaps) self.lb_consistent_hash_order = lb_consistent_hash_order def __eq__(self, other): return ((self.label, self.remote_pe, self.encaps) == (other.label, other.remote_pe, other.encaps)) def __hash__(self): return hash((self.label, self.remote_pe, self.encaps)) def __repr__(self): return "NextHop({},{},{},{})".format(self.label, self.remote_pe, self.encaps, self.lb_consistent_hash_order) class NextHopGroupManager(dataplane_utils.ObjectLifecycleManager): def __init__(self, bridge, hash_method, hash_method_param, hash_fields): super().__init__() self.bridge = bridge self.hash_method = hash_method self.hash_method_param = hash_method_param self.hash_fields = hash_fields self.group_allocator = OVSGroupAllocator() def get_selection_method(self): selection_method = self.hash_method if self.hash_fields and self.hash_method == 'hash': selection_method += ",fields(%s)" % ','.join(self.hash_fields) return selection_method @log_decorator.log_info def create_object(self, prefix, *args, **kwargs): buckets = ( {'buckets': kwargs['buckets']} if kwargs.get('buckets') else {} ) group_id = self.group_allocator.get_new_id("Group ID for prefix %s" % str(prefix)) self.bridge.add_group(group_id=group_id, type='select', selection_method=self.get_selection_method(), selection_method_param=self.hash_method_param, **buckets) return group_id @log_decorator.log_info def delete_object(self, group_id): self.bridge.delete_group(group_id=group_id) self.group_allocator.release(group_id) class NextHopGroupManagerProxy(dataplane_utils.ObjectLifecycleManagerProxy): def __init__(self, manager, parent_key, vrf_table, vrf_match, cookie_func): super().__init__(manager, parent_key) self.vrf_table = vrf_table self.vrf_match = vrf_match self.cookie_func = cookie_func self.bucket_allocators = ( collections.defaultdict(OVSBucketAllocator) ) 
self.prefix_nexthop_2_bucket = dict() def _update_group_buckets(self, group_id, prefix): buckets = [] for prefix_nh, bucket in self.prefix_nexthop_2_bucket.items(): if prefix == prefix_nh[0]: buckets.append('bucket=bucket_id=%d,%s' % ( bucket[0], dataplane_utils.join_s(*bucket[1]))) self.manager.bridge.mod_group( group_id=group_id, type='select', selection_method=self.manager.get_selection_method(), selection_method_param=self.manager.hash_method_param, buckets=','.join(buckets)) def new_nexthop(self, prefix, nexthop, actions=None): if actions is None: actions = [] bucket_allocator = self.bucket_allocators[prefix] bucket_id = bucket_allocator.get_new_id( "Bucket ID for prefix {} and nexthop {}".format( str(prefix), nexthop), hint_value=nexthop.lb_consistent_hash_order ) bucket = 'bucket=bucket_id=%d,%s' % (bucket_id, dataplane_utils.join_s(*actions)) self.prefix_nexthop_2_bucket[(prefix, nexthop)] = (bucket_id, actions) group_id, first = self.get_object(prefix, nexthop, buckets=bucket) if first: self.manager.bridge.add_flow_extended( flow_matches=[dict(table=self.vrf_table, cookie=self.cookie_func(add=True), priority=_priority_from_prefix(prefix), proto='ip'), self.vrf_match, _match_from_prefix(prefix)], actions=["group:%d" % group_id]) else: self._update_group_buckets(group_id, prefix) def del_nexthop(self, prefix, nexthop): group_id = self.find_object(prefix) bucket_id, _ = self.prefix_nexthop_2_bucket.pop((prefix, nexthop)) bucket_allocator = self.bucket_allocators[prefix] bucket_allocator.release(bucket_id) self._update_group_buckets(group_id, prefix) last = self.free_object(prefix, nexthop) if last: self.manager.bridge.delete_flows_extended( flow_matches=[dict(strict=True, table=self.vrf_table, cookie=self.cookie_func(add=False), priority=_priority_from_prefix(prefix), proto='ip'), self.vrf_match, _match_from_prefix(prefix)]) del self.bucket_allocators[prefix] class MPLSOVSDataplaneDriver(dp_drivers.DataplaneDriver): """Dataplane driver using OpenVSwitch Based 
on an OpenVSwitch 2.4 MPLS kernel dataplane implementation. This driver was successfully tested with the OVS 2.4 DKMS module. This driver uses MPLS-over-GRE by default. However, note well that current OVS implementation of MPLS-over-GRE is not yet conformant with RFC4023, because of an intermediate Eth header (MPLS-over-Eth-over-GRE). If MPLS-over-GRE is disabled (with mpls_over_gre=False), this driver currently requires that the OVS bridge be associated to the address used as the local_address in bgp.conf, to allow the linux IP stack to use the same physical interface as the one on which MPLS packets are forwarded. This requires to configure the OVS bridge so that it passes packets from the physical interface to the linux IP stack if they are not MPLS, and packets from the linux IP stack to the physical device. Howto allow the use of the OVS bridge interface also as an IP interface of the Linux kernel IP stack: ovs-ofctl del-flows br-int ovs-ofctl add-flow br-int in_port=LOCAL,action=output:1 ovs-ofctl add-flow br-int in_port=1,action=output:LOCAL (on a debian or ubuntu system, this can be done part of the ovs bridge definition in /etc/network/interfaces, as post-up commands) The 'vrf_table' (resp. 'input_table') config parameters can be used to specify which OVS table will host the rules for traffic from VRFs (resp. for incoming traffic). Beware, this dataplane driver will *not* take care of setting up rules so that MPLS traffic or the traffic from attached ports is matched against rules in these tables. """ dataplane_instance_class = MPLSOVSVRFDataplane type = consts.IPVPN ecmp_support = True required_ovs_version = "2.8.0" driver_opts = [ cfg.StrOpt("mpls_interface", help=("Interface used to send/receive MPLS traffic. 
" "Use '*gre*' to choose automatic creation of a tunnel" " port for MPLS/GRE encap")), cfg.StrOpt("mpls_over_gre", choices=['auto', 'True', 'False'], default="auto", advanced=True, help=("Force the use of MPLS/GRE even with " "mpls_interface specified")), cfg.BoolOpt("proxy_arp", default=False, advanced=True, help=("Activate ARP responder per VRF for any IP " "address")), cfg.BoolOpt("arp_responder", default=False, advanced=True, help=("ARP responder per VRF")), cfg.BoolOpt("vxlan_encap", default=False, advanced=True, help=("Be ready to receive VPN traffic as VXLAN, and to " "preferrably send traffic as VXLAN when advertised " "by the remote end")), cfg.StrOpt("ovs_bridge", default="br-mpls", advanced=True, help=("Name of the OVS bridge to use, this has to be the " "same as the tunneling bridge of the Neutron OVS " "agent.")), cfg.IntOpt("input_table", default=0, advanced=True, help=("Specifies which OVS table will host the rules " "for traffic from VRFs")), cfg.IntOpt("ovs_table_id_start", default=1, advanced=True, help=("The starting OVS table number to use for " "additional tables. The values for the encap-in " "and VRF tables are derived from this. " "NOTE: This must be different than input_table " "config option.")), cfg.StrOpt("gre_tunnel", default="mpls_gre", advanced=True, help="OVS interface name for MPLS/GRE encap"), cfg.ListOpt("gre_tunnel_options", default=[], item_type=types.String(), help=("Options, comma-separated, passed to OVS for GRE " "tunnel port creation (e.g. 'packet_type=legacy_l3" ", ...') that will be added as OVS tunnel " "interface options (e.g. 
'options:packet_type=" "legacy_l3 options:...')")), cfg.IntOpt("ovsbr_interfaces_mtu", advanced=True, help=("MTU used for OVS bridge interfaces.")), cfg.StrOpt("hash_method", choices=["hash", "dp_hash"], default="dp_hash", advanced=True, help=("Can be used to control the OVS group bucket " "selection method (mapped to ovs " "'selection_method')")), cfg.IntOpt("hash_method_param", default=0, min=0, max=2**64 - 1, advanced=True, help=("Can be used to control the OVS group bucket " "selection method (mapped to ovs " "'selection_method_param')")), cfg.ListOpt("hash_fields", default=[], advanced=True, help=("Can be used to control the fields used by the OVS " "group bucket selection method (mapped to ovs " "'fields')")) ] def __init__(self): super().__init__() config.set_default_root_helper() config.setup_privsep() try: (o, _) = self._run_command("ovs-ofctl -V | head -1 |" " awk '{print $4}'") self.ovs_release = o[0] self.log.info("OVS version: %s", self.ovs_release) except Exception: self.log.warning("Could not determine OVS release") self.ovs_release = None self.mpls_interface = self.config.mpls_interface if self.config.mpls_over_gre != "auto": self.use_gre = True else: self.use_gre = not (self.mpls_interface and self.mpls_interface != "*gre*") if not self.mpls_interface: if not self.use_gre: raise Exception("mpls_over_gre force-disabled, but no " "mpls_interface specified") else: self.use_gre = True self.log.info("Defaulting to use of MPLS-over-GRE (no " "mpls_interface specified)") elif self.mpls_interface == "*gre*": if not self.use_gre: raise Exception("mpls_over_gre force-disabled, but " "mpls_interface set to '*gre', cannot " "use bare MPLS") else: self.log.info("mpls_interface is '*gre*', will thus use " "MPLS-over-GRE") self.use_gre = True self.mpls_interface = None else: if self.use_gre: self.log.warning("mpls_over_gre set to True, " "ignoring mpls_interface parameter") self.mpls_interface = None else: self.log.info("Will use bare MPLS on interface %s", 
self.mpls_interface) self.input_table = self.config.input_table if self.config.ovs_table_id_start == self.input_table: raise Exception("invalid ovs_table_id_start (%d): can't use tables" " same as input table (%d)" % ( self.config.ovs_table_id_start, self.config.input_table)) self.encap_in_table = self.config.ovs_table_id_start self.vrf_table = self.config.ovs_table_id_start + 1 self.all_tables = {'incoming': self.input_table, 'vrf': self.vrf_table, 'encap_in': self.encap_in_table} # Used to control whether this VRF will support # receiving traffic as VXLAN self.vxlan_encap = self.config.vxlan_encap # check OVS version ovs_release_version = versionutils.convert_version_to_tuple( self.ovs_release) required_ovs_version_tuple = versionutils.convert_version_to_tuple( self.required_ovs_version) if (not self.vxlan_encap and ovs_release_version < required_ovs_version_tuple): self.log.warning("%s requires at least OVS %s" " (you are running %s)", self.__class__.__name__, self.required_ovs_version, self.ovs_release) # unless useGRE is enabled, check that fping is installed if not self.use_gre: self._run_command("fping -v", raise_on_error=True) self.bridge = dataplane_utils.OVSBridgeWithGroups( dataplane_utils.OVSExtendedBridge(self.config.ovs_bridge) ) # Check if OVS bridge exist if not self.bridge.bridge_exists(self.bridge.br_name): raise exc.OVSBridgeNotFound(self.bridge.br_name) self.bridge.use_at_least_protocol(ovs_const.OPENFLOW15) self.nh_group_mgr = NextHopGroupManager(self.bridge, self.config.hash_method, self.config.hash_method_param, self.config.hash_fields) if not self.use_gre: self.log.info("Will not force the use of GRE/MPLS, trying to bind " "physical interface %s", self.mpls_interface) # Check if MPLS interface is attached to OVS bridge if not self.bridge.port_exists(self.mpls_interface): raise Exception("Specified mpls_interface %s is not plugged to" " OVS bridge %s" % (self.mpls_interface, self.bridge.br_name)) else: self.ovs_mpls_if_port_number = 
self.bridge.get_port_ofport(
                self.mpls_interface)

    def supported_encaps(self):
        """Yield the BGP encapsulation types this driver can terminate.

        The set depends on configuration: GRE-only mode advertises GRE,
        native mode advertises MPLS, and VXLAN is added when vxlan_encap
        is enabled.
        """
        if self.use_gre:
            yield exa.Encapsulation(exa.Encapsulation.Type.GRE)
            yield exa.Encapsulation(exa.Encapsulation.Type.DEFAULT)
            # we will accept routes with no encap
            # specified and force the use of GRE
        else:
            yield exa.Encapsulation(exa.Encapsulation.Type.MPLS)
            # we also accept route with no encap specified
            yield exa.Encapsulation(exa.Encapsulation.Type.DEFAULT)

        if self.vxlan_encap:
            yield exa.Encapsulation(exa.Encapsulation.Type.VXLAN)

    def mpls_in_port(self):
        """Return the OVS ofport on which MPLS traffic arrives.

        Either the GRE tunnel port (GRE mode) or the physical MPLS
        interface port (native mode).
        """
        if self.use_gre:
            return self.gre_tunnel_port_number
        else:
            return self.ovs_mpls_if_port_number

    @log_decorator.log_info
    def reset_state(self):
        """Flush all flows and groups installed by this driver, if the bridge exists."""
        # Flush all MPLS and ARP flows, all groups, if bridge exists

        if self.bridge.bridge_exists(self.bridge.br_name):
            self.log.info("Cleaning up OVS rules")

            self.bridge.delete_flows(table=self.input_table,
                                     cookie=ovs_lib.COOKIE_ANY,
                                     proto='mpls')
            if self.vxlan_encap:
                try:
                    self.bridge.delete_flows(
                        table=self.input_table,
                        in_port=self.find_ovs_port(VXLAN_TUNNEL))
                except Exception:
                    # best-effort: the VXLAN tunnel port may not exist (yet)
                    self.log.info("no VXLAN tunnel port, nothing to clean up")
                # the above won't clean up flows if the vxlan_tunnel interface
                # has changed...
                # catch-all deletion by tunnel id bitmask (matches any tun_id
                # with the corresponding low bit set)
                self.bridge.delete_flows(table=self.input_table,
                                         cookie=ovs_lib.COOKIE_ANY,
                                         tun_id='2/1')
                self.bridge.delete_flows(table=self.input_table,
                                         cookie=ovs_lib.COOKIE_ANY,
                                         tun_id='1/1')

            # clean input_table rule for plugged ports
            # NOTE(tmorin): would be cleaner using a cookie
            self.bridge.delete_flows(table=self.input_table,
                                     cookie=ovs_lib.COOKIE_ANY,
                                     proto='ip')
            self.bridge.delete_flows(table=self.input_table,
                                     cookie=ovs_lib.COOKIE_ANY,
                                     proto='arp')

            self.bridge.delete_flows(table=self.encap_in_table,
                                     cookie=ovs_lib.COOKIE_ANY)
            self.bridge.delete_flows(table=self.vrf_table,
                                     cookie=ovs_lib.COOKIE_ANY)

            # clean all groups
            self.bridge.delete_group()

            if self.log.isEnabledFor(logging.DEBUG):
                self.log.debug("All our rules have been flushed:\n%s",
                               '\n'.join(self.bridge.dump_all_flows()))
                self.log.debug("All groups have been flushed:\n%s",
                               self.bridge.run_ofctl("dump-groups", []))
        else:
            self.log.info("No OVS bridge (%s), no need to cleanup OVS rules",
                          self.bridge.br_name)

    def initialize(self):
        """Set up tunnel ports and the base classification flows.

        In GRE mode, (re)creates the MPLS/GRE tunnel port; otherwise
        resolves the MAC address of the physical MPLS interface (falling
        back to the bridge MAC for patch ports, which have none).
        """
        if self.use_gre:
            self.log.info("Setting up tunnel for MPLS/GRE (%s)",
                          self.config.gre_tunnel)

            self.bridge.delete_port(self.config.gre_tunnel)

            # gre_tunnel_options entries are "key=value" strings merged into
            # the OVS tunnel interface options column
            gre_tunnel_options = dict(
                o.split('=') for o in self.config.gre_tunnel_options)
            gre_tunnel_attrs = [
                ('type', 'gre'),
                ('options', dict({'local_ip': self.get_local_address(),
                                  'remote_ip': 'flow'},
                                 **gre_tunnel_options))
            ]
            self.gre_tunnel_port_number = (
                self.bridge.add_port(self.config.gre_tunnel,
                                     *gre_tunnel_attrs)
            )
            self.mpls_if_mac_address = None
        else:
            # Find ethX MPLS interface MAC address
            try:
                self.mpls_if_mac_address = net_utils.get_device_mac(
                    self._run_command,
                    self.mpls_interface)
            except Exception:
                # Interface without MAC address (patch port case), use MPLS
                # bridge MAC address instead
                self.mpls_if_mac_address = net_utils.get_device_mac(
                    self._run_command,
                    self.bridge.br_name)

        # steer incoming MPLS traffic to the encap_in table
        self.bridge.add_flow(table=self.input_table,
                             priority=DEFAULT_RULE_PRIORITY,
                             in_port=self.mpls_in_port(),
                             proto='mpls',
                             actions="resubmit(,%d)" % self.encap_in_table)

        if
self.vxlan_encap: self.log.info("Enabling VXLAN encapsulation") self.bridge.delete_port(VXLAN_TUNNEL) vxlan_tunnel_attrs = [ ('type', 'vxlan'), ('options', {'local_ip': self.get_local_address(), 'remote_ip': 'flow', 'key': 'flow'}) ] self.vxlan_tunnel_port_number = ( self.bridge.add_port(VXLAN_TUNNEL, *vxlan_tunnel_attrs) ) self.bridge.add_flow(table=self.input_table, priority=DEFAULT_RULE_PRIORITY, in_port=self.vxlan_tunnel_port_number, actions="resubmit(,%d)" % self.encap_in_table) def validate_directions(self, direction): # this driver supports all combinations of directions pass def find_ovs_port(self, dev_name): """Find OVS port number from port name""" ofport = self.bridge.get_port_ofport(dev_name) if ofport is None: raise Exception("OVS port not found for device %s" % dev_name) return ofport # Looking glass code #### def get_lg_map(self): return { "flows": (lg.SUBTREE, self.get_lg_ovs_flows), "ports": (lg.SUBTREE, self.get_lg_ovs_ports) } def get_lg_local_info(self, path_prefix): d = { "ovs_bridge": self.bridge.br_name, "mpls_interface": self.mpls_interface, "gre": {'enabled': self.use_gre}, "vxlan": {'enabled': self.vxlan_encap}, "ovs_version": self.ovs_release, "tables": self.all_tables } if self.use_gre: d["gre"].update({'gre_tunnel_port': self.config.gre_tunnel}) if self.vxlan_encap: d["gre"].update({'vxlan_tunnel_port': VXLAN_TUNNEL}) return d def get_lg_ovs_flows(self, path_prefix, cookie_spec=None): output = {} for (table_name, table_id) in self.all_tables.items(): output.update({ "%s (%d)" % (table_name, table_id): self._run_command( "ovs-ofctl -O {} dump-flows --names {} '{}' {}".format( ovs_const.OPENFLOW15, self.bridge.br_name, dataplane_utils.join_s("table=%d" % table_id, cookie_spec), OVS_DUMP_FLOW_FILTER), run_as_root=True, shell=True )[0] }) return output def get_lg_ovs_ports(self, path_prefix): (output, _) = self._run_command( "ovs-ofctl -O {} show {} |grep addr".format(ovs_const.OPENFLOW15, self.bridge.br_name), run_as_root=True, 
acceptable_return_codes=[0, 1], shell=True) # FIXME: does it properly show the GRE tunnel interface ? return output ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/manager.py0000664000175000017500000005153200000000000026406 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading from oslo_log import log as logging from networking_bagpipe.bagpipe_bgp.common import exceptions as exc from networking_bagpipe.bagpipe_bgp.common import log_decorator from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg from networking_bagpipe.bagpipe_bgp.common import run_command from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import constants from networking_bagpipe.bagpipe_bgp.engine import bgp_manager from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers from networking_bagpipe.bagpipe_bgp.vpn import evpn from networking_bagpipe.bagpipe_bgp.vpn import identifier_allocators from networking_bagpipe.bagpipe_bgp.vpn import ipvpn from neutron_lib import exceptions LOG = logging.getLogger(__name__) INSTANCE_ID_MAX = 2 ** 32 - 1 def redirect_instance_extid(instance_type, rt): '''generate the ext intance id of a redirection VPN instance''' return "redirect-to-{}-{}".format(instance_type, rt.replace(":", "_")) class 
MaxInstanceIDReached(exceptions.NeutronException): _message = "Could not create VPN instance: max instance id was reached" class VPNManager(lg.LookingGlassMixin, utils.ClassReprMixin): """VPN Manager Creates, and keeps track of, VPN instances (VRFs and EVIs) and passes plug/unplug calls to the right VPN instance. """ _instance = None type2class = {constants.IPVPN: ipvpn.VRF, constants.EVPN: evpn.EVI } @log_decorator.log def __init__(self): LOG.debug("Instantiating VPN Manager...") self.bgp_manager = bgp_manager.Manager() self.dataplane_drivers = dp_drivers.instantiate_dataplane_drivers() # VPN instance dict self.vpn_instances = {} self.next_vpn_instance_id = 1 LOG.debug("Creating label allocator") self.label_allocator = identifier_allocators.LabelAllocator() LOG.debug("Creating route distinguisher allocator") self.rd_allocator = identifier_allocators.RDAllocator( self.bgp_manager.get_local_address()) # dict containing info how an ipvpn is plugged # from an evpn (keys: ipvpn instances) self._evpn_ipvpn_ifs = {} # keys: vni # value: VPNInstance self.vpn_instance_by_vni = {} self.lock = threading.RLock() def _run_command(self, *args, **kwargs): run_command.run_command(LOG, *args, run_as_root=True, **kwargs) @log_decorator.log_info def _attach_evpn_2_ipvpn(self, localport, ipvpn_instance): # Assuming localport indicates no real interface but only # an EVPN, this method will create a pair of twin interfaces, one # to plug in the EVPN, the other to plug in the IPVPN. # # The localport dict will be modified so that the 'linuxif' indicates # the name of the interface to plug in the IPVPN. # # The EVPN instance will be notified so that it forwards traffic # destinated to the gateway on the interface toward the IPVPN. 
assert 'evpn' in localport if 'id' not in localport['evpn']: raise Exception("Missing parameter 'id' :an external EVPN " "instance id must be specified for an EVPN " "attachment") try: evpn = self.vpn_instances[localport['evpn']['id']] except Exception: raise Exception("The specified evpn instance does not exist (%s)" % localport['evpn']) if evpn.type != constants.EVPN: raise Exception("The specified instance to plug is not an evpn" "instance (is %s instead)" % evpn.type) if ipvpn_instance in self._evpn_ipvpn_ifs: (evpn_if, ipvpn_if, evpn, managed) = \ self._evpn_ipvpn_ifs[ipvpn_instance] if localport['evpn']['id'] != evpn.external_instance_id: raise Exception('Trying to plug into an IPVPN a new E-VPN ' 'while one is already plugged in') else: # do nothing LOG.warning('Trying to plug an E-VPN into an IPVPN, but it was' ' already done') localport['linuxif'] = ipvpn_if return # detect if this evpn is already plugged into an IPVPN if evpn.has_gateway_port(): raise Exception("Trying to plug E-VPN into an IPVPN, but this EVPN" " is already plugged into an IPVPN") if 'linuxif' in localport and localport['linuxif']: raise Exception("Cannot specify an attachment with both a linuxif " "and an evpn") if 'ovs_port_name' in localport['evpn']: try: assert localport['ovs']['plugged'] assert (localport['ovs']['port_name'] or localport['ovs']['port_number']) except Exception: raise Exception("Using ovs_port_name in EVPN/IPVPN attachment" " requires specifying the corresponding OVS" " port, which must also be pre-plugged") evpn_if = localport['evpn']['ovs_port_name'] # we assume in this case that the E-VPN interface is already # plugged into the E-VPN bridge managed = False else: evpn_if = "evpn%d-ipvpn%d" % ( evpn.instance_id, ipvpn_instance.instance_id) ipvpn_if = "ipvpn%d-evpn%d" % ( ipvpn_instance.instance_id, evpn.instance_id) # FIXME: do it only if not existing already... 
LOG.info("Creating veth pair %s %s ", evpn_if, ipvpn_if) # delete the interfaces if they exist already self._run_command("ip link delete %s" % evpn_if, acceptable_return_codes=[0, 1]) self._run_command("ip link delete %s" % ipvpn_if, acceptable_return_codes=[0, 1]) self._run_command("ip link add %s type veth peer name %s" " mtu 65535" % (evpn_if, ipvpn_if)) self._run_command("ip link set %s up" % evpn_if) self._run_command("ip link set %s up" % ipvpn_if) managed = True localport['linuxif'] = ipvpn_if evpn.set_gateway_port(evpn_if, ipvpn_instance) self._evpn_ipvpn_ifs[ipvpn_instance] = ( evpn_if, ipvpn_if, evpn, managed) @log_decorator.log_info def _detach_evpn_2_ipvpn(self, ipvpn): # Symmetric to _attach_evpn_2_ipvpn (evpn_if, ipvpn_if, evpn_instance, managed) = self._evpn_ipvpn_ifs[ipvpn] if not ipvpn.has_enpoint(ipvpn_if): # TODO(tmorin): check that this evpn instance is still up and # running ? evpn_instance.gateway_port_down(evpn_if) # cleanup veth pair if managed: self._run_command("ip link delete %s" % evpn_if) del self._evpn_ipvpn_ifs[ipvpn] def _cleanup_evpn2ipvpn(self, ipvpn): (_, ipvpn_if, _, managed) = self._evpn_ipvpn_ifs[ipvpn] # cleanup veth pair if managed: self._run_command("ovs-vsctl del-port %s" % ipvpn_if) self._run_command("ip link delete %s" % ipvpn_if) @utils.synchronized @log_decorator.log_info def _get_vpn_instance(self, external_instance_id, instance_type, import_rts, export_rts, gateway_ip, mask, readvertise, attract_traffic, fallback=None, **kwargs): # Get an vpn_instance with this external_instance_id, # if one already exists, check matching instance_type # else create one with provided parameters and start it # (unless create_if_none is False --> raise exc.VPNNotFound) # returns True if an already started instance was found # False if a new instance was created without starting it LOG.info("Finding %s for external vpn_instance identifier %s", instance_type, external_instance_id) vpn_instance = 
self.vpn_instances.get(external_instance_id) if vpn_instance: if vpn_instance.type != instance_type: raise Exception("Found an existing vpn_instance with " "external id %s but a different type " "(asked %s vs. already having %s)" % (external_instance_id, instance_type, vpn_instance.type)) return vpn_instance, True if not kwargs.pop('create_if_none', True): raise exc.VPNNotFound(external_instance_id) # if a vni is specified, check that no VPN instance with same VNI # already exists... if 'vni' in kwargs and kwargs['vni'] in self.vpn_instance_by_vni: raise exc.APIAlreadyUsedVNI(kwargs['vni']) vpn_instance_class = VPNManager.type2class[instance_type] dataplane_driver = self.dataplane_drivers[instance_type] # unique internal vpn instance id instance_id = self.next_vpn_instance_id if instance_id > INSTANCE_ID_MAX: raise MaxInstanceIDReached() self.next_vpn_instance_id += 1 vpn_instance = vpn_instance_class(self, dataplane_driver, external_instance_id, instance_id, import_rts, export_rts, gateway_ip, mask, readvertise, attract_traffic, fallback, **kwargs) self.register_vpn_instance(vpn_instance) return vpn_instance, False @utils.synchronized @log_decorator.log_info def register_vpn_instance(self, vpn_instance): self.vpn_instances[vpn_instance.external_instance_id] = vpn_instance if vpn_instance.forced_vni: self.vpn_instance_by_vni[ vpn_instance.instance_label] = vpn_instance @utils.synchronized @log_decorator.log_info def unregister_vpn_instance(self, vpn_instance): del self.vpn_instances[vpn_instance.external_instance_id] if vpn_instance.forced_vni: del self.vpn_instance_by_vni[vpn_instance.instance_label] def _check_instance_type(self, params): if 'vpn_type' not in params: raise exc.APIException("missing instance_type") instance_type = params['vpn_type'] if instance_type not in self.type2class: raise exc.APIException("unknown vpn_type: %s" % instance_type) if instance_type not in self.dataplane_drivers: LOG.error("No dataplane driver for VPN type %s", instance_type) 
raise exc.APIException("No dataplane driver for VPN type %s" % instance_type) return instance_type @log_decorator.log_info def plug_vif_to_vpn(self, **params): instance_type = self._check_instance_type(params) vpn_instance_class = VPNManager.type2class[instance_type] vpn_instance_class.validate_convert_attach_params(params) external_instance_id = params.get('external_instance_id') import_rts = params.get('import_rts') export_rts = params.get('export_rts') mac_address = params.get('mac_address') gateway_ip = params.get('gateway_ip') localport = params.get('localport') linuxbr = params.get('linuxbr') advertise_subnet = params.get('advertise_subnet') readvertise = params.get('readvertise') attract_traffic = params.get('attract_traffic') lb_consistent_hash_order = params.get('lb_consistent_hash_order') local_pref = params.get('local_pref') fallback = params.get('fallback') vni = params.get('vni') ip_address_prefix = params.get('ip_address_prefix') ip_address_plen = params.get('ip_address_plen') # Convert route target string to RouteTarget dictionary import_rts = utils.convert_route_targets(import_rts) export_rts = utils.convert_route_targets(export_rts) if readvertise: try: readvertise = {k: utils.convert_route_targets(readvertise[k]) for k in ['from_rt', 'to_rt']} except KeyError as e: raise Exception("Wrong 'readvertise' parameters: %s" % e) if attract_traffic: try: attract_traffic['redirect_rts'] = ( utils.convert_route_targets( attract_traffic['redirect_rts']) ) except KeyError as e: raise Exception("Wrong 'attract_traffic' parameters: %s" % e) kwargs = {} if vni: kwargs['vni'] = vni if instance_type == constants.EVPN and linuxbr: kwargs['linuxbr'] = linuxbr vpn_instance, started = self._get_vpn_instance( external_instance_id, instance_type, import_rts, export_rts, gateway_ip, ip_address_plen, readvertise, attract_traffic, fallback, **kwargs) vpn_instance.description = params.get('instance_description') vpn_instance.update_route_targets(import_rts, export_rts) 
vpn_instance.update_fallback(fallback) if instance_type == constants.IPVPN and 'evpn' in localport: # special processing for the case where what we plug into # the ipvpn is not an existing interface but an interface # to create, connected to an existing evpn instance self._attach_evpn_2_ipvpn(localport, vpn_instance) plug_kwargs = {} plug_kwargs['description'] = params.get('description') plug_kwargs['direction'] = params.get('direction') # Plug VIF to VPN instance vpn_instance.vif_plugged(mac_address, ip_address_prefix, localport, advertise_subnet, lb_consistent_hash_order, local_pref, **plug_kwargs) # delaying the start after the first vif_plugged allows to handle # dataplane driver for which the first vif_plugged needs to happen # before route advertisements can be processed if not started: vpn_instance.start() @log_decorator.log_info def unplug_vif_from_vpn(self, **params): instance_type = self._check_instance_type(params) vpn_instance_class = VPNManager.type2class[instance_type] vpn_instance_class.validate_convert_detach_params(params) external_instance_id = params.get('external_instance_id') mac_address = params.get('mac_address') localport = params.get('localport') ip_address_prefix = params.get('ip_address_prefix') # Retrieve VPN instance or raise exception if does not exist try: vpn_instance = self.vpn_instances[external_instance_id] except KeyError: LOG.error("Try to unplug VIF from non existing VPN instance %s", external_instance_id) raise exc.VPNNotFound(external_instance_id) # Unplug VIF from VPN instance vpn_instance.vif_unplugged(mac_address, ip_address_prefix) if vpn_instance.type == constants.IPVPN and 'evpn' in localport: self._detach_evpn_2_ipvpn(vpn_instance) if vpn_instance.stop_if_empty(): self.unregister_vpn_instance(vpn_instance) def redirect_instance_for_rt(self, redirected_type, redirect_rt, stop=False): external_instance_id = redirect_instance_extid(redirected_type, redirect_rt) LOG.info("Need VPN instance %s for traffic redirection to RT 
%s", external_instance_id, redirect_rt) # Convert route target string to RouteTarget dictionary import_rts = utils.convert_route_targets([redirect_rt]) # Retrieve a redirect VPN instance or create a new one if none exists # yet try: i, s = self._get_vpn_instance(external_instance_id, redirected_type, import_rts, [], "127.0.0.1", "24", None, None, create_if_none=(not stop)) if not s: i.start() return i except exc.VPNNotFound: # (reached only in the 'stop' case) LOG.error("Try to stop traffic redirection for an RT for which" " no VPN instance exists (%s)", external_instance_id) raise @log_decorator.log_info def redirect_traffic_to_vpn(self, redirected_id, redirected_type, redirect_rt): redirect_instance = self.redirect_instance_for_rt(redirected_type, redirect_rt) redirect_instance.register_redirected_instance(redirected_id) return redirect_instance @log_decorator.log_info def stop_redirect_to_vpn(self, redirected_id, redirected_type, redirect_rt): redirect_instance = self.redirect_instance_for_rt(redirected_type, redirect_rt, stop=True) redirect_instance.unregister_redirected_instance(redirected_id) if redirect_instance.stop_if_no_redirected_instance(): self.unregister_vpn_instance(redirect_instance) @log_decorator.log_info def stop(self): self.bgp_manager.stop() for vpn_instance in self.vpn_instances.values(): vpn_instance.stop() # Cleanup veth pair if (vpn_instance.type == constants.IPVPN and self._evpn_ipvpn_ifs.get(vpn_instance)): self._cleanup_evpn2ipvpn(vpn_instance) for vpn_instance in self.vpn_instances.values(): vpn_instance.join() self.vpn_instances.clear() @classmethod @utils.oslo_synchronized('VPNManager') def _create_instance(cls): if not cls.has_instance(): cls._instance = cls() @classmethod def has_instance(cls): return cls._instance is not None @classmethod def clear_instance(cls): cls._instance = None @classmethod def get_instance(cls): # double checked locking if not cls.has_instance(): cls._create_instance() return cls._instance # Looking Glass 
hooks #### def get_lg_map(self): class DataplaneLGHook(lg.LookingGlassMixin): def __init__(self, vpn_manager): self.manager = vpn_manager def get_lg_map(self): return { "drivers": (lg.COLLECTION, ( self.manager.get_lg_dataplanes_list, self.manager.get_lg_dataplane_from_path_item)), "ids": (lg.DELEGATE, self.manager.label_allocator) } dataplane_hook = DataplaneLGHook(self) return { "instances": (lg.COLLECTION, (self.get_lg_vpn_list, self.get_lg_vpn_from_path_item)), "dataplane": (lg.DELEGATE, dataplane_hook), "instances_per_vni": (lg.SUBITEM, self.get_lg_instances_per_vni) } def get_lg_vpn_list(self): return [instance.get_lg_summary() for instance in list(self.vpn_instances.values())] def get_lg_vpn_from_path_item(self, path_item): return self.vpn_instances[path_item] def get_vpn_instances_count(self): return len(self.vpn_instances) def get_lg_instances_per_vni(self): return {vni: {'name': str(instance), 'external_instance_id': instance.external_instance_id } for vni, instance in self.vpn_instance_by_vni.items()} def get_lg_dataplanes_list(self): return [{"id": i} for i in self.dataplane_drivers.keys()] def get_lg_dataplane_from_path_item(self, path_item): return self.dataplane_drivers[path_item] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/bagpipe_bgp/vpn/vpn_instance.py0000664000175000017500000013337400000000000027470 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import collections
import copy
import itertools
import re
import socket
import threading

import netaddr

from oslo_log import log as logging

from networking_bagpipe.bagpipe_bgp.common import exceptions as exc
from networking_bagpipe.bagpipe_bgp.common import log_decorator
from networking_bagpipe.bagpipe_bgp.common import looking_glass as lg
from networking_bagpipe.bagpipe_bgp.common import utils
from networking_bagpipe.bagpipe_bgp import constants
from networking_bagpipe.bagpipe_bgp import engine
from networking_bagpipe.bagpipe_bgp.engine import exa
from networking_bagpipe.bagpipe_bgp.engine import flowspec
from networking_bagpipe.bagpipe_bgp.engine import tracker_worker

LOG = logging.getLogger(__name__)

DEFAULT_LOCAL_PREF = 100


def forward_to_port(direction):
    """True if traffic should be forwarded toward the port for 'direction'."""
    return direction in (None, constants.BOTH, constants.TO_PORT)


def forward_from_port(direction):
    """True if traffic should be accepted from the port for 'direction'."""
    return direction in (None, constants.BOTH, constants.FROM_PORT)


class TrafficClassifier:
    """A 5-tuple-like traffic classifier.

    Holds source/destination prefixes, source/destination ports (single
    port as int, or a (min, max) range as a tuple) and an IP protocol,
    and converts back and forth between this representation and BGP
    FlowSpec redirect rules.
    """

    def __init__(self, source_prefix=None, destination_prefix=None,
                 source_port=None, destination_port=None, protocol='tcp'):
        self.source_pfx = source_prefix
        self.destination_pfx = destination_prefix

        # "min:max" strings become (min, max) tuples, plain strings ints
        if source_port:
            if ":" in source_port:
                self.source_port = tuple(
                    [int(p) for p in source_port.split(":")]
                )
            else:
                self.source_port = int(source_port)
        else:
            self.source_port = source_port

        if destination_port:
            if ":" in destination_port:
                self.destination_port = tuple(
                    [int(p) for p in destination_port.split(":")]
                )
            else:
                self.destination_port = int(destination_port)
        else:
            self.destination_port = destination_port

        self.protocol = protocol

    def __repr__(self):
        return "traffic-classifier:%s" % str(self)

    def __str__(self):
        return "{}-{},{}-{},{}".format(self.source_pfx or "*",
                                       self.source_port or "*",
                                       self.destination_pfx or "*",
                                       self.destination_port or "*",
                                       self.protocol)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.__dict__ == other.__dict__)

    def __hash__(self):
        return hash((self.source_pfx, self.source_port,
                     self.destination_pfx, self.destination_port,
                     self.protocol))

    def _interpret_port_rule(self, rule):
        """Translate FlowSpec port operator rules into an int or a tuple.

        A single rule yields an int (EQ) or a (min, max) range tuple; a
        multi-operator rule yields a tuple built from its bounds.
        """
        if len(rule) == 1:
            op = rule[0].operations
            port = rule[0].value
            if op == '>=':
                port_range = (int(port), 65535)
            elif op == '<=':
                port_range = (0, int(port))
            elif op == '>':
                port_range = (int(port) + 1, 65535)
            elif op == '<':
                port_range = (0, int(port) - 1)
            else:
                port_range = int(port)
        else:
            port_range = ()
            for elem in rule:
                op = elem.operations
                port = elem.value
                # bug fix: concatenating a bare int to a tuple raises
                # TypeError; each bound must be wrapped in a 1-tuple
                if op == '>=' or op == '<=':
                    port_range += (int(port),)
                elif op == '>':
                    port_range += (int(port) + 1,)
                elif op == '<':
                    port_range += (int(port) - 1,)

        return port_range

    @log_decorator.log
    def _construct_port_rules(self, port_range, flow_object):
        """Build FlowSpec port rules for a port number or (min, max) range.

        :param port_range: an int (exact port) or a (min, max) tuple
        :param flow_object: FlowSpec component class (e.g. FlowSourcePort)
        """
        LOG.info("Construct port rules with %s (type %s) on %s object",
                 port_range, type(port_range), flow_object)
        port_rules = list()

        # Check if port range or port number
        if isinstance(port_range, tuple):
            port_min, port_max = port_range

            if int(port_min) == 0:
                # Operator <
                port_rules.append(flow_object(exa.flow.NumericOperator.LT,
                                              port_max))
            elif int(port_max) == 65535:
                # Operator >
                port_rules.append(flow_object(exa.flow.NumericOperator.GT,
                                              port_min))
            else:
                # Operator >=
                gt_eq_op = (exa.flow.NumericOperator.GT |
                            exa.flow.NumericOperator.EQ)
                # Operator &<=
                lt_eq_op = (exa.flow.NumericOperator.AND |
                            exa.flow.NumericOperator.LT |
                            exa.flow.NumericOperator.EQ)

                port_rules.append(flow_object(gt_eq_op, port_min))
                port_rules.append(flow_object(lt_eq_op, port_max))
        else:
            port_rules.append(flow_object(exa.flow.NumericOperator.EQ,
                                          port_range))

        return port_rules

    def _get_source_prefix(self, rule):
        self.source_pfx = rule[0].cidr.prefix()

    def _get_destination_prefix(self, rule):
        self.destination_pfx = rule[0].cidr.prefix()

    def _parse_any_port(self, rule):
        self.source_port = self._interpret_port_rule(rule)
        # NOTE(review): if the rule is a single exact port this is an int
        # and list() would raise — presumably only range/multi-op rules
        # reach here; TODO confirm against FlowSpec route producers
        self.destination_port = list(self.source_port)

    def _parse_source_port(self, rule):
        self.source_port = self._interpret_port_rule(rule)

    def _parse_destination_port(self, rule):
        self.destination_port = self._interpret_port_rule(rule)

    def _get_protocol(self, rule):
        self.protocol = exa.Protocol.names[rule[0].value]

    def map_traffic_classifier_2_redirect_rules(self):
        """Convert this classifier into a list of FlowSpec components."""
        rules = list()
        if self.source_pfx:
            ip, mask = self.source_pfx.split('/')
            if netaddr.IPNetwork(self.source_pfx).version == 4:
                rules.append(
                    exa.flow.Flow4Source(socket.inet_pton(socket.AF_INET, ip),
                                         int(mask)))
            elif netaddr.IPNetwork(self.source_pfx).version == 6:
                # TODO(xxx): IPv6 offset ??
                rules.append(
                    exa.flow.Flow6Source(socket.inet_pton(socket.AF_INET6,
                                                          ip),
                                         int(mask), 0))

        if self.destination_pfx:
            ip, mask = self.destination_pfx.split('/')
            if netaddr.IPNetwork(self.destination_pfx).version == 4:
                rules.append(
                    exa.flow.Flow4Destination(socket.inet_pton(socket.AF_INET,
                                                               ip),
                                              int(mask))
                )
            elif netaddr.IPNetwork(self.destination_pfx).version == 6:
                # TODO(xxx): IPv6 offset ??
                rules.append(
                    exa.flow.Flow6Destination(socket.inet_pton(socket.AF_INET6,
                                                               ip),
                                              int(mask), 0)
                )

        if self.source_port:
            rules += self._construct_port_rules(self.source_port,
                                                exa.flow.FlowSourcePort)

        if self.destination_port:
            rules += self._construct_port_rules(self.destination_port,
                                                exa.flow.FlowDestinationPort)

        if self.protocol:
            rules.append(exa.flow.FlowIPProtocol(exa.flow.NumericOperator.EQ,
                                                 exa.Protocol.named(
                                                     self.protocol)
                                                 ))

        return rules

    def map_redirect_rules_2_traffic_classifier(self, rules):
        """Populate this classifier from FlowSpec components, keyed by ID."""
        components = {1: self._get_destination_prefix,
                      # FlowDestinationPrefix
                      2: self._get_source_prefix,  # FlowSourcePrefix
                      3: self._get_protocol,  # FlowIPProtocol
                      4: self._parse_any_port,  # FlowAnyPort
                      5: self._parse_destination_port,  # FLowDestinationPort
                      6: self._parse_source_port}  # FlowSourcePort

        for ID, rule in rules.items():
            components[ID](rule)


class VPNInstance(tracker_worker.TrackerWorker,
                  threading.Thread,
                  lg.LookingGlassLocalLogger,
                  metaclass=abc.ABCMeta):

    type = None  # set by subclasses: 'ipvpn', 'evpn', etc.
afi = None safi = None @log_decorator.log def __init__(self, vpn_manager, dataplane_driver, external_instance_id, instance_id, import_rts, export_rts, gateway_ip, ip_address_plen, readvertise, attract_traffic, fallback=None, **kwargs): self.manager = vpn_manager self.instance_type = self.__class__.__name__ self.instance_id = instance_id self.description = None threading.Thread.__init__(self) self.setDaemon(True) if dataplane_driver.ecmp_support: compare_routes = tracker_worker.compare_ecmp else: compare_routes = tracker_worker.compare_no_ecmp tracker_worker.TrackerWorker.__init__( self, self.manager.bgp_manager, repr(self), compare_routes) lg.LookingGlassLocalLogger.__init__(self, repr(self)) self.lock = threading.RLock() self.import_rts = import_rts self.export_rts = export_rts self.external_instance_id = external_instance_id self.gateway_ip = gateway_ip self.network_plen = ip_address_plen self.fallback = None self.afi = self.__class__.afi self.safi = self.__class__.safi self.dp_driver = dataplane_driver if 'vni' in kwargs: self.instance_label = kwargs.pop('vni') self.forced_vni = True else: self.instance_label = self.manager.label_allocator.get_new_label( "Incoming traffic for %s" % self) self.forced_vni = False self.instance_rd = self.manager.rd_allocator.get_new_rd( "Default route distinguisher for %s" % self) self.localport_data = dict() # One local port -> set of endpoints (MAC and IP addresses tuple) self.localport_2_endpoints = collections.defaultdict(set) # One endpoint (MAC and IP addresses tuple) -> One route distinguisher self.endpoint_2_rd = dict() # endpoint (MAC and IP addresses tuple) -> route entry self.endpoint_2_route = dict() # One MAC address -> One local port self.mac_2_localport_data = dict() # One IP address prefix -> Multiple MAC address self.ip_address_2_mac = collections.defaultdict(set) # One endpoint (MAC and IP addresses tuple) -> BGP local_pref self.endpoint_2_lp = dict() # endpoint (MAC and IP addresses tuple) -> description 
self.endpoint_2_desc = dict() # endpoint (MAC and IP addresses tuple) -> direction self.endpoint_2_direction = dict() # MAC -> set of IP addresses self.mac_2_ips = collections.defaultdict(set) # Redirected instances list from which traffic is attracted (based on # FlowSpec 5-tuple classification) self.redirected_instances = set() self.dataplane = self.dp_driver.initialize_dataplane_instance( self.instance_id, self.external_instance_id, self.gateway_ip, self.network_plen, self.instance_label, **kwargs) for rt in self.import_rts: self._subscribe(self.afi, self.safi, rt) # Subscribe to FlowSpec routes # FIXME(tmorin): this maybe isn't applicable yet to E-VPN yet self._subscribe(self.afi, exa.SAFI.flow_vpn, rt) if readvertise: self.readvertise = True try: self.readvertise_to_rts = readvertise['to_rt'] except KeyError: raise exc.APIException("'readvertise' specified with no " "'to_rt'") self.readvertise_from_rts = readvertise.get('from_rt', []) self.log.debug("readvertise enabled, from RT:%s, to %s", self.readvertise_from_rts, self.readvertise_to_rts) for rt in self.readvertise_from_rts: self._subscribe(self.afi, self.safi, rt) else: self.log.debug("readvertise not enabled") self.readvertise = False self.attract_static_dest_prefixes = None if (attract_traffic and (self.readvertise or attract_traffic.get('to_rt'))): # Convert route target string to RouteTarget dictionary attract_to_rts = attract_traffic.get('to_rt') if attract_to_rts: converted_to_rts = ( utils.convert_route_targets(attract_to_rts) ) if self.readvertise: if converted_to_rts != self.readvertise_to_rts: raise exc.APIException("if both are set, then " "'attract_traffic/to_rt' and " "'readvertise/to_rt' have to " "be equal") else: self.readvertise_to_rts = converted_to_rts try: self.attract_static_dest_prefixes = ( attract_traffic['static_destination_prefixes'] ) except KeyError: raise exc.APIException("'attract_traffic/to_rt' specified " "without " "'static_destination_prefixes'") if 
len(self.readvertise_to_rts) != 1: raise exc.APIException("attract_traffic requires exactly one " "RT to be provided in " "'readvertise/to_rt' and " "'attract_traffic/to_rt'") self.attract_traffic = True self.attract_rts = attract_traffic['redirect_rts'] try: self.attract_classifier = attract_traffic['classifier'] except KeyError: raise exc.APIException("'attract_traffic' specified with no " "'classifier'") self.log.debug("Attract traffic enabled with RT: %s and " "classifier: %s", self.attract_rts, self.attract_classifier) else: self.log.debug("attract traffic not enabled") self.attract_traffic = False self.dataplane.update_fallback(fallback) def needs_cleanup_assist(self, afi, safi): return self.dataplane.needs_cleanup_assist() def __repr__(self): return "{}-{}".format(self.instance_type, self.instance_id) @utils.synchronized @log_decorator.log def stop(self): if self._was_stopped: LOG.debug("was already stopped, nothing to do to stop") return self.stop_event_loop() for afi in (self.afi,): for safi in (self.safi, exa.SAFI.flow_vpn): if self.needs_cleanup_assist(afi, safi): self.log.debug("Dataplane driver needs cleanup assistance " "for AFI(%s)/SAFI(%s)", afi, safi) self.synthesize_withdraw_all(afi, safi) self.dataplane.cleanup() if not self.forced_vni: self.manager.label_allocator.release(self.instance_label) # this makes sure that the thread will be stopped, and any remaining # routes/subscriptions are released: tracker_worker.TrackerWorker.stop(self) @utils.synchronized @log_decorator.log def stop_if_empty(self): self.log.debug("localport_2_endpoints: %s", self.localport_2_endpoints) if self.is_empty(): self.stop() return True return False def is_empty(self): return not self.localport_2_endpoints def has_enpoint(self, linuxif): return self.localport_2_endpoints.get(linuxif) is not None def has_only_one_endpoint(self): return (len(self.localport_2_endpoints) == 1 and len(next(iter(self.localport_2_endpoints.values()))) == 1) def all_endpoints(self): return 
itertools.chain(*self.localport_2_endpoints.values()) @log_decorator.log def update_route_targets(self, new_import_rts, new_export_rts): added_import_rt = set(new_import_rts) - set(self.import_rts) removed_import_rt = set(self.import_rts) - set(new_import_rts) self.log.debug("Added Import RTs: %s", added_import_rt) self.log.debug("Removed Import RTs: %s", removed_import_rt) # Unregister from BGP with these route targets for rt in removed_import_rt: self._unsubscribe(self.afi, self.safi, rt) self._unsubscribe(self.afi, exa.SAFI.flow_vpn, rt) # Update import and export route targets # (needs to be done before subscribe or we get a race where # VRF._imported rejects route that it's supposed to use) self.import_rts = new_import_rts # Register to BGP with these route targets for rt in added_import_rt: self._subscribe(self.afi, self.safi, rt) self._subscribe(self.afi, exa.SAFI.flow_vpn, rt) # Re-advertise all routes with new export RTs self.log.debug("Exports RTs: %s -> %s", self.export_rts, new_export_rts) if frozenset(new_export_rts) != frozenset(self.export_rts): self.log.debug("Will re-export routes with new RTs") self.export_rts = new_export_rts for route_entry in self.endpoint_2_route.values(): if new_export_rts: self.log.info("Re-advertising route %s with updated RTs " "(%s)", route_entry.nlri, new_export_rts) updated_route_entry = engine.RouteEntry( route_entry.nlri, None, copy.copy(route_entry.attributes)) # reset the route_targets, overwriting RTs originally # present in route_entry.attributes updated_route_entry.set_route_targets(self.export_rts) self.log.debug(" updated route: %s", updated_route_entry) self._advertise_route(updated_route_entry) else: self._withdraw_route(route_entry) def update_fallback(self, fallback): if self.fallback != fallback and fallback is not None: self.log.info("update fallback: %s", fallback) self.fallback = fallback self.dataplane.update_fallback(fallback) def _parse_ipaddress_prefix(self, ip_address_prefix): if ip_address_prefix 
is None: return (None, 0) net = netaddr.IPNetwork(ip_address_prefix) return (str(net.ip), net.prefixlen) def _gen_encap_extended_communities(self): ecommunities = exa.extcoms.ExtendedCommunities() for encap in self.dp_driver.supported_encaps(): if not isinstance(encap, exa.Encapsulation): raise Exception("dp_driver.supported_encaps() should " "return a list of Encapsulation objects (%s)", type(encap)) if encap != exa.Encapsulation( exa.Encapsulation.Type.DEFAULT): ecommunities.communities.append(encap) # FIXME: si DEFAULT + xxx => adv MPLS return ecommunities @abc.abstractmethod def generate_vif_bgp_route(self, mac_address, ip_prefix, plen, label, rd): '''returns a RouteEntry''' pass def synthesize_vif_bgp_route(self, mac_address, ip_prefix, plen, label, lb_consistent_hash_order, route_distinguisher=None, local_pref=None): rd = route_distinguisher if route_distinguisher else self.instance_rd route_entry = self.generate_vif_bgp_route(mac_address, ip_prefix, plen, label, rd) assert isinstance(route_entry, engine.RouteEntry) route_entry.attributes.add(self._gen_encap_extended_communities()) route_entry.set_route_targets(self.export_rts) ecommunities = exa.ExtendedCommunities() ecommunities.communities.append( exa.ConsistentHashSortOrder(lb_consistent_hash_order)) route_entry.attributes.add(ecommunities) route_entry.attributes.add(exa.LocalPreference(local_pref or DEFAULT_LOCAL_PREF)) self.log.debug("Synthesized Vif route entry: %s", route_entry) return route_entry def synthesize_redirect_bgp_route(self, rules): self.log.info("synthesize_redirect_bgp_route called for rules %s", rules) nlri = flowspec.FlowRouteFactory(self.afi, self.instance_rd) for rule in rules: nlri.add(rule) route_entry = engine.RouteEntry(nlri) assert isinstance(route_entry, engine.RouteEntry) ecommunities = exa.ExtendedCommunities() # checked at __init__: assert len(self.readvertise_to_rts) == 1 rt = self.readvertise_to_rts[0] ecommunities.communities.append( 
exa.TrafficRedirect(exa.ASN(int(rt.asn)), int(rt.number)) ) route_entry.attributes.add(ecommunities) route_entry.set_route_targets(self.attract_rts) self.log.debug("Synthesized redirect route entry: %s", route_entry) return route_entry @classmethod def validate_convert_params(cls, params, also_mandatory=()): for param in ('vpn_instance_id', 'mac_address', 'local_port') + also_mandatory: if param not in params: raise exc.APIMissingParameterException(param) # if local_port is not a dict, then assume it designates a linux # interface if isinstance(params['local_port'], str): params['local_port'] = {'linuxif': params['local_port']} for param in ('import_rt', 'export_rt'): if param not in params: continue # if import_rt or export_rt are strings, convert them into lists if isinstance(params[param], str): try: params[param] = re.split(',+ *', params[param]) except Exception: raise exc.APIException("Unable to parse RT string into " " a list: '%s'" % params[param]) # remove duplicates params[param] = list(set(params[param])) if not ('linuxif' in params['local_port'] or 'evpn' in params['local_port']): raise exc.APIException("Mandatory key is missing in local_port " "parameter (linuxif, or evpn)") # validate mac_address try: netaddr.EUI(params['mac_address']) except netaddr.core.AddrFormatError: raise exc.MalformedMACAddress(params['mac_address']) # validate ip_address if present if 'ip_address' in params: try: net = netaddr.IPNetwork(params['ip_address']) params['ip_address_prefix'] = params['ip_address'] params['ip_address_plen'] = net.prefixlen except netaddr.core.AddrFormatError: # Try again assuming ip_address is an IP without a prefix # (implicitly using /32) try: netaddr.IPAddress(params['ip_address']) params['ip_address_prefix'] = params['ip_address'] + "/32" params['ip_address_plen'] = 32 except netaddr.core.AddrFormatError: raise exc.MalformedIPAddress(params['ip_address']) else: params['ip_address_prefix'] = None params['ip_address_plen'] = None if not 
isinstance(params.get('advertise_subnet', False), bool): raise exc.APIException("'advertise_subnet' must be a boolean") @classmethod def translate_api_internal(cls, params): # some API parameters have different internal names _TRANSLATE_API_TO_INTERNAL = { 'vpn_instance_id': 'external_instance_id', 'local_port': 'localport', 'import_rt': 'import_rts', # API name is singular, hence wrong... 'export_rt': 'export_rts', # API name is singular, hence wrong... } for api_param_name in list(params): internal_name = _TRANSLATE_API_TO_INTERNAL.get(api_param_name) if internal_name: params[internal_name] = params.pop(api_param_name) @classmethod def validate_convert_attach_params(cls, params): cls.validate_convert_params( params, also_mandatory=('import_rt', 'export_rt') ) params['advertise_subnet'] = params.get('advertise_subnet', False) params['lb_consistent_hash_order'] = params.get( 'lb_consistent_hash_order', 0) params['vni'] = params.get('vni', 0) params['direction'] = params.get('direction') or constants.BOTH if params['direction'] not in constants.ALL_DIRECTIONS: raise exc.APIException("direction should be one of: %s" % ', '.join(constants.ALL_DIRECTIONS)) cls.translate_api_internal(params) @classmethod def validate_convert_detach_params(cls, params): cls.validate_convert_params(params) cls.translate_api_internal(params) def _rd_for_endpoint(self, endpoint, message): # endpoint is a (mac, ip_address_prefix) tuple rd = self.endpoint_2_rd.get(endpoint) if not rd: rd = self.manager.rd_allocator.get_new_rd(message) self.endpoint_2_rd[endpoint] = rd return rd def _raise_if_mac2ip_inconsistency(self, mac_address, ip_address_prefix): if not ip_address_prefix: return if mac_address not in self.ip_address_2_mac[ip_address_prefix]: raise exc.APIException( "Inconsistent endpoint info: IP %s already bound to a MAC " "address different from %s (%s)" % (ip_address_prefix, mac_address, self.ip_address_2_mac[ip_address_prefix])) def _check_ip_mac(self, mac_address, ip_address_prefix, 
advertise_subnet): (ip_prefix, plen) = self._parse_ipaddress_prefix(ip_address_prefix) # Special case where no IP was provided if ip_prefix is None: refresh_only = mac_address in self.mac_2_localport_data return (None, None, refresh_only) if not advertise_subnet and plen != 32: self.log.debug("Using /32 instead of /%s", plen) plen = 32 # - Verify (MAC address, IP address) tuple consistency refresh_only = False if ip_address_prefix in self.ip_address_2_mac and plen == 32: self._raise_if_mac2ip_inconsistency(mac_address, ip_address_prefix) LOG.debug("IP/MAC already plugged, only updating route") refresh_only = True return ip_prefix, plen, refresh_only @utils.synchronized @log_decorator.log_info def vif_plugged(self, mac_address, ip_address_prefix, localport, advertise_subnet=False, lb_consistent_hash_order=0, local_pref=None, **kwargs): linuxif = localport['linuxif'] endpoint = (mac_address, ip_address_prefix) # Check if this port has already been plugged # - Verify port informations consistency if mac_address in self.mac_2_localport_data: self.log.debug("MAC address already plugged, checking port " "consistency") pdata = self.mac_2_localport_data[mac_address] if pdata.get("port_info") != localport: raise exc.APIException("Port information is not consistent. " "MAC address cannot be bound to two " "different ports. 
Previous plug for " "port %s (%s != %s)" % (linuxif, pdata.get("port_info"), localport)) try: self.endpoint_2_desc[endpoint] = kwargs.get('description') (ip_prefix, plen, refresh_only) = self._check_ip_mac( mac_address, ip_address_prefix, advertise_subnet) self.log.debug("Plugging port (%s)", ip_prefix) pdata = self.mac_2_localport_data.get(mac_address, dict()) if not pdata: pdata['label'] = self.manager.label_allocator.get_new_label( "Incoming traffic for %s, interface %s, endpoint %s" % (self, linuxif, endpoint) ) pdata["port_info"] = localport pdata["lb_consistent_hash_order"] = lb_consistent_hash_order direction = kwargs.get('direction') if not refresh_only: self.dp_driver.validate_directions(direction) # Call driver to setup the dataplane for incoming traffic self.dataplane.vif_plugged(mac_address, ip_prefix, localport, pdata['label'], direction) if forward_to_port(direction): endpoint_rd = self._rd_for_endpoint( endpoint, "Route distinguisher for %s, interface %s, endpoint %s" % (self, linuxif, endpoint) ) rd = self.instance_rd if plen == 32 else endpoint_rd self.log.info("Synthesizing and advertising BGP route for VIF " "%s endpoint %s", linuxif, endpoint) route_entry = self.synthesize_vif_bgp_route( mac_address, ip_prefix, plen, pdata['label'], lb_consistent_hash_order, rd, local_pref) self._advertise_route(route_entry) self.endpoint_2_route[endpoint] = route_entry self.endpoint_2_rd[endpoint] = endpoint_rd self.localport_2_endpoints[linuxif].add(endpoint) self.endpoint_2_lp[endpoint] = local_pref self.endpoint_2_direction[endpoint] = direction self.mac_2_localport_data[mac_address] = pdata if ip_address_prefix: self.ip_address_2_mac[ip_address_prefix].add(mac_address) self.mac_2_ips[mac_address].add(ip_address_prefix) # we've a non-wildcard plug, so if a wildcard IP was plugged # for this MAC, consider it replaced: # - withdraw the corresponding route # - cleanup the wildcard endpoint records wildcard_endpoint = (mac_address, None) if wildcard_endpoint in 
self.localport_2_endpoints[linuxif]: self._withdraw_route(self.endpoint_2_route.pop( wildcard_endpoint)) self.localport_2_endpoints[linuxif].remove( wildcard_endpoint) del self.endpoint_2_desc[wildcard_endpoint] except Exception as e: self.log.error("Error in vif_plugged: %s", e) if linuxif in self.localport_2_endpoints: self.localport_2_endpoints[linuxif].discard(endpoint) if not self.localport_2_endpoints[linuxif]: del self.localport_2_endpoints[linuxif] if mac_address in self.mac_2_localport_data: del self.mac_2_localport_data[mac_address] if ip_address_prefix in self.ip_address_2_mac: self.ip_address_2_mac[ip_address_prefix].discard(mac_address) raise self.log.debug("localport_2_endpoints: %s", self.localport_2_endpoints) self.log.debug("endpoint_2_rd: %s", self.endpoint_2_rd) self.log.debug("mac_2_localport_data: %s", self.mac_2_localport_data) self.log.debug("ip_address_2_mac: %s", self.ip_address_2_mac) @utils.synchronized @log_decorator.log_info def vif_unplugged(self, mac_address, ip_address_prefix): if ip_address_prefix is None: # wildcard IP address case... 
errors = False for ip_addr_pfx in list(self.mac_2_ips[mac_address]): try: self.vif_unplugged_real(mac_address, ip_addr_pfx) except Exception: LOG.exception("Exception while unplugging (%s, %s)", mac_address, ip_addr_pfx) errors = True if errors: raise Exception("There were errors on at least one of the " "unplug resulting from the wildcard unplug") # remove the wildcard endpoint itself, if any: if (mac_address, None) in self.endpoint_2_rd: try: self.vif_unplugged_real(mac_address, None) except exc.APINotPluggedYet: LOG.debug("Wildcard unplug failed because not-plugged-yet," " ignoring.") else: self.vif_unplugged_real(mac_address, ip_address_prefix) @log_decorator.log_info def vif_unplugged_real(self, mac_address, ip_address_prefix): # NOTE(tmorin): move this as a vif_unplugged_precheck, so that # in ipvpn.VRF this is done before readvertised route withdrawal endpoint = (mac_address, ip_address_prefix) # Verify port and endpoint (MAC address, IP address) tuple consistency pdata = self.mac_2_localport_data.get(mac_address) if not pdata: raise exc.APINotPluggedYet(endpoint) self._raise_if_mac2ip_inconsistency(mac_address, ip_address_prefix) if endpoint not in self.endpoint_2_rd: raise Exception("Endpoint record missing: {}".format(endpoint)) # Finding label and local port informations label = pdata.get('label') localport = pdata.get('port_info') linuxif = localport['linuxif'] direction = self.endpoint_2_direction[endpoint] if not label or not localport: self.log.error("vif_unplugged called for endpoint (%s, %s), but " "port data (%s, %s) is incomplete", mac_address, ip_address_prefix, label, localport) raise Exception("Inconsistent informations for port, bug ?") if linuxif in self.localport_2_endpoints: # Parse address/mask (ip_prefix, _unused) = self._parse_ipaddress_prefix( ip_address_prefix) self.log.info("Withdrawing BGP route for VIF %s endpoint %s", linuxif, endpoint) self._withdraw_route(self.endpoint_2_route.pop(endpoint)) last_endpoint = 
len(self.localport_2_endpoints[linuxif]) <= 1 if forward_to_port(direction): # Unplug endpoint from data plane self.dataplane.vif_unplugged( mac_address, ip_prefix, localport, label, direction, last_endpoint) # Forget data for this port if last endpoint if last_endpoint: self.log.info("Last endpoint, freeing label %s", label) # Free label to the allocator self.manager.label_allocator.release(label) del self.localport_2_endpoints[linuxif] else: self.localport_2_endpoints[linuxif].remove(endpoint) # Free route distinguisher to the allocator if forward_to_port(direction): self.manager.rd_allocator.release( self.endpoint_2_rd.pop(endpoint)) if not last_endpoint: if not any([ep[0] == mac_address for ep in self.localport_2_endpoints[linuxif]] ): del self.mac_2_localport_data[mac_address] else: del self.mac_2_localport_data[mac_address] if ip_address_prefix: self.ip_address_2_mac[ip_address_prefix].discard(mac_address) if not self.ip_address_2_mac[ip_address_prefix]: del self.ip_address_2_mac[ip_address_prefix] self.mac_2_ips[mac_address].discard(ip_address_prefix) if not self.mac_2_ips[mac_address]: del self.mac_2_ips[mac_address] del self.endpoint_2_desc[endpoint] else: self.log.error("vif_unplugged called for endpoint %s, but" " port data is incomplete", endpoint) raise Exception("bagpipe-bgp bug, check its logs") self.log.debug("localport_2_endpoints: %s", self.localport_2_endpoints) self.log.debug("endpoint_2_rd: %s", self.endpoint_2_rd) self.log.debug("mac_2_localport_data: %s", self.mac_2_localport_data) self.log.debug("ip_address_2_mac: %s", self.ip_address_2_mac) @utils.synchronized def register_redirected_instance(self, instance_id): self.redirected_instances.add(instance_id) @utils.synchronized def unregister_redirected_instance(self, instance_id): self.redirected_instances.remove(instance_id) @utils.synchronized @log_decorator.log def stop_if_no_redirected_instance(self): self.log.debug("redirected_instances: %s", self.redirected_instances) if not 
self.redirected_instances: self.stop() return True return False @log_decorator.log def start_redirect_traffic(self, redirect_rt, rules): self.log.debug("Start redirecting traffic to VPN instance importing " "route target %s based on rules %s", redirect_rt, rules) # Create redirection instance if first FlowSpec route for this # redirect route target redirect_instance = self.manager.redirect_traffic_to_vpn( self.external_instance_id, self.type, redirect_rt ) # Create traffic classifier from FlowSpec rules classifier = TrafficClassifier() classifier.map_redirect_rules_2_traffic_classifier(rules) self.dataplane.add_dataplane_for_traffic_classifier( classifier, redirect_instance.instance_id) if not hasattr(self, 'redirect_rt_2_classifiers'): # One redirect route target -> Multiple traffic classifiers (One # per prefix) # Can be inverted to one traffic classifier -> Multiple redirect # route targets self.redirect_rt_2_classifiers = collections.defaultdict(set) self.redirect_rt_2_classifiers[redirect_rt].add(classifier) @log_decorator.log def stop_redirect_traffic(self, redirect_rt, rules): self.log.debug("Stop redirecting traffic to VPN instance importing " "route target %s based on rules %s", redirect_rt, rules) # Create traffic classifier from FlowSpec rule classifier = TrafficClassifier() classifier.map_redirect_rules_2_traffic_classifier(rules) if (redirect_rt in self.redirect_rt_2_classifiers and classifier in self.redirect_rt_2_classifiers[redirect_rt]): self.redirect_rt_2_classifiers[redirect_rt].remove(classifier) if not utils.invert_dict_of_sets( self.redirect_rt_2_classifiers)[classifier]: self.dataplane.remove_dataplane_for_traffic_classifier( classifier) if not self.redirect_rt_2_classifiers[redirect_rt]: self.manager.stop_redirect_to_vpn( self.external_instance_id, self.type, redirect_rt ) del self.redirect_rt_2_classifiers[redirect_rt] else: self.log.error("stop_redirect_traffic called for redirect route " "target %s and classifier %s, but doesn't exist", 
redirect_rt, classifier) raise Exception("bagpipe-bgp bug, check its logs") def _check_encaps(self, route): '''check that encaps of a route returns a list of encaps supported by both the dataplane driver and the advertized route (based on BGP Encapsulation community) logs a warning if there is no common encap ''' adv_encaps = None try: adv_encaps = route.ecoms(exa.Encapsulation) self.log.debug("Advertized Encaps: %s", adv_encaps) except KeyError: self.log.debug("no encap advertized, let's use default") if not adv_encaps: adv_encaps = [exa.Encapsulation( exa.Encapsulation.Type.DEFAULT)] good_encaps = set(adv_encaps) & set( self.dp_driver.supported_encaps()) if not good_encaps: self.log.warning("No encap supported by dataplane driver for route" " %s, advertized: %s, dataplane supports: {%s}", route, adv_encaps, ", ".join([repr(encap) for encap in self.dp_driver.supported_encaps()] )) return good_encaps def _skip_route_removal(self, last): '''check if a route withdraw should be skipped returns true if the removal of the route should be skipped, based whether or not the route removed is the last one and depending on the desired behavior for the dataplane driver ''' if last: # never skip the last route return False # if the driver supports ECMP, then it needs to see all the route # withdraws return not self.dp_driver.ecmp_support # Callbacks for BGP route updates (TrackerWorker) ######################## @utils.synchronized @log_decorator.log def new_best_route(self, entry, new_route): pass @utils.synchronized @log_decorator.log def best_route_removed(self, entry, old_route, last): pass # Looking Glass #### def get_lg_map(self): return { "instance_type": (lg.VALUE, self.instance_type), "external_instance_id": (lg.VALUE, self.external_instance_id), "description": (lg.VALUE, self.description), "dataplane": (lg.DELEGATE, self.dataplane), "route_targets": (lg.SUBITEM, self.get_rts), "gateway_ip": (lg.VALUE, self.gateway_ip), "subnet_prefix_length": (lg.VALUE, 
self.network_plen), "instance_dataplane_id": (lg.VALUE, self.instance_label), "ports": (lg.SUBTREE, self.get_lg_local_port_data), "readvertise": (lg.SUBITEM, self.get_lg_readvertise), "attract_traffic": (lg.SUBITEM, self.get_lg_attract_traffic), "fallback": (lg.VALUE, self.fallback) } def get_lg_local_port_data(self, path_prefix): r = {} for (port, endpoints) in self.localport_2_endpoints.items(): eps = [] for endpoint in endpoints: mac, ip = endpoint eps.append({ 'label': self.mac_2_localport_data[mac]['label'], 'mac_address': mac, 'ip_address': ip, 'local_pref': self.endpoint_2_lp.get(endpoint), 'rd': repr(self.endpoint_2_rd[endpoint]), 'description': self.endpoint_2_desc.get(endpoint), 'direction': self.endpoint_2_direction.get(endpoint) }) r[port] = { 'endpoints': eps } return r def get_rts(self): return { "import": [repr(rt) for rt in self.import_rts], "export": [repr(rt) for rt in self.export_rts] } def get_lg_readvertise(self): r = {} if self.readvertise: r = { 'from': [repr(rt) for rt in self.readvertise_from_rts], 'to': [repr(rt) for rt in self.readvertise_to_rts] } return r def get_lg_attract_traffic(self): r = {} if self.attract_traffic: r = { 'redirect_rts': [repr(rt) for rt in self.attract_rts], 'classifier': self.attract_classifier, } if not hasattr(self, 'attract_static_dest_prefixes'): return r r.update({ 'to': [repr(rt) for rt in self.readvertise_to_rts], 'static_destination_prefixes': self.attract_static_dest_prefixes }) return r def get_lg_summary(self): # used from VPNManager looking glass entry = {"id": self.external_instance_id, "name": self.name} if self.description: entry['description'] = self.description return entry ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9263058 networking_bagpipe-22.0.0/networking_bagpipe/db/0000775000175000017500000000000000000000000021737 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/__init__.py0000664000175000017500000000000000000000000024036 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/0000775000175000017500000000000000000000000023730 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/__init__.py0000664000175000017500000000000000000000000026027 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/0000775000175000017500000000000000000000000027560 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/README0000664000175000017500000000004600000000000030440 0ustar00zuulzuul00000000000000Generic single-database configuration.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/env.py0000664000175000017500000000470100000000000030724 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as logging_config from alembic import context from neutron_lib.db import model_base from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event MYSQL_ENGINE = None BAGPIPEL2_VERSION_TABLE = 'alembic_version_bagpipel2' config = context.config neutron_config = config.neutron_config logging_config.fileConfig(config.config_file_name) target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def run_migrations_offline(): set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['version_table'] = BAGPIPEL2_VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, version_table=BAGPIPEL2_VERSION_TABLE ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if 
context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/script.py.mako0000664000175000017500000000203700000000000032366 0ustar00zuulzuul00000000000000# Copyright ${create_date.year} # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. 
revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} % endif from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/0000775000175000017500000000000000000000000031430 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.8823068 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/2025.1/0000775000175000017500000000000000000000000032157 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000112 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/2025.1/contract/ 28 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/2025.1/contrac0000775000175000017500000000000000000000000033531 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000025100000000000011453 xustar0000000000000000147 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/2025.1/contract/796580a58032_remove_linux_bridge.py 22 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/2025.1/contrac0000664000175000017500000000205600000000000033536 0ustar00zuulzuul00000000000000# Copyright 2025 NTT DATA Group # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Remove linux bridge Revision ID: 796580a58032 Revises: 0a2ee5cbb1a5 Create Date: 2025-03-02 13:56:36.549392 """ from alembic import op # revision identifiers, used by Alembic. revision = '796580a58032' down_revision = '0a2ee5cbb1a5' depends_on = ('d2c2dcb6c2d4',) def upgrade(): table_names = [ 'sfc_bagpipe_ppg_rtnn_associations', 'sfc_bagpipe_chain_hops' ] for table_name in table_names: op.drop_table(table_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000664000175000017500000000001500000000000033345 0ustar00zuulzuul00000000000000796580a58032 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/EXPAND_HEAD0000664000175000017500000000001500000000000033107 0ustar00zuulzuul00000000000000d2c2dcb6c2d4 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/0000775000175000017500000000000000000000000033102 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/__init__.py 22 mtime=1743591409.0 
networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/__init0000664000175000017500000000000000000000000034254 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000113 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contract/ 28 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contra0000775000175000017500000000000000000000000034311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contract/0a2ee5cbb1a5_initial.py 22 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contra0000664000175000017500000000165300000000000034320 0ustar00zuulzuul00000000000000# Copyright 2015 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """contract initial Revision ID: 0a2ee5cbb1a5 Revises: start_networking_bagpipe Create Date: 2015-10-28 18:35:11.000000 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '0a2ee5cbb1a5' down_revision = 'start_networking_bagpipe' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contract/__init__.py 22 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contra0000664000175000017500000000000000000000000034301 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000111 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand/ 28 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand0000775000175000017500000000000000000000000034302 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand/__init__.py 22 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand0000664000175000017500000000000000000000000034272 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand/d4d4d7f03b21_initial.py 22 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand0000664000175000017500000000246100000000000034307 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """contract initial Revision ID: d4d4d7f03b21 Revises: start_networking_bagpipe Create Date: 2015-10-28 17:35:11.000000 """ from alembic import op import sqlalchemy as sa from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = 'd4d4d7f03b21' down_revision = 'start_networking_bagpipe' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): op.create_table( 'ml2_route_target_allocations', sa.Column('rt_nn', sa.Integer, nullable=False, autoincrement=False), sa.Column('allocated', sa.Boolean, nullable=False), sa.PrimaryKeyConstraint('rt_nn')) def downgrade(): op.drop_table('ml2_route_target_allocations') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.8823068 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/pike/0000775000175000017500000000000000000000000032360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/pike/expand/0000775000175000017500000000000000000000000033637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000025100000000000011453 xustar0000000000000000147 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/pike/expand/d2c2dcb6c2d4_defining_sfc_data_model.py 22 mtime=1743591409.0 
networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/pike/expand/d20000664000175000017500000000542500000000000034075 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Defining SFC data-model Revision ID: d2c2dcb6c2d4 Revises: 6185f1633a3d Create Date: 2017-03-02 15:59:58.430218 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd2c2dcb6c2d4' down_revision = 'd4d4d7f03b21' def upgrade(): op.create_table( 'sfc_bagpipe_ppg_rtnn_associations', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('ppg_id', sa.String(length=36), nullable=False), sa.Column('rtnn', sa.Integer(), nullable=False), sa.Column('is_redirect', sa.Boolean(), nullable=False), sa.Column('reverse', sa.Boolean(), nullable=False), sa.PrimaryKeyConstraint('id', 'ppg_id'), sa.UniqueConstraint('rtnn') ) op.create_table( 'sfc_bagpipe_chain_hops', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('project_id', sa.String(length=255), nullable=True), sa.Column('portchain_id', sa.String(length=36), nullable=False), sa.Column('rts', sa.String(length=255), nullable=True), sa.Column('ingress_gw', sa.String(length=64), nullable=False), sa.Column('egress_gw', sa.String(length=64), nullable=False), sa.Column('ingress_ppg', sa.String(length=36), nullable=True), sa.Column('egress_ppg', sa.String(length=36), nullable=True), 
sa.Column('ingress_network', sa.String(length=36), nullable=True), sa.Column('egress_network', sa.String(length=36), nullable=True), sa.Column('readv_from_rts', sa.String(length=255), nullable=True), sa.Column('readv_to_rt', sa.String(length=255), nullable=True), sa.Column('attract_to_rt', sa.String(length=255), nullable=True), sa.Column('redirect_rts', sa.String(length=255), nullable=True), sa.Column('classifiers', sa.String(length=255), nullable=True), sa.Column('reverse_hop', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['ingress_network'], ['networks.id']), sa.ForeignKeyConstraint(['egress_network'], ['networks.id']), sa.PrimaryKeyConstraint('id', 'portchain_id') ) ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/start_networking_bagpipe.py 22 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/migration/alembic_migrations/versions/start_networki0000664000175000017500000000155000000000000034433 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """start networking_bagpipe chain Revision ID: start_networking_bagpipe Revises: None Create Date: 2015-10-28 18:04:17.265514 """ # revision identifiers, used by Alembic. 
revision = 'start_networking_bagpipe' down_revision = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/db/models/0000775000175000017500000000000000000000000023222 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/models/__init__.py0000664000175000017500000000000000000000000025321 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/db/models/head.py0000664000175000017500000000134100000000000024474 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.db.migration.models import head def get_metadata(): return head.model_base.BASEV2.metadata ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/driver/0000775000175000017500000000000000000000000022645 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/driver/__init__.py0000664000175000017500000000000000000000000024744 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/driver/constants.py0000664000175000017500000000155000000000000025234 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. INGRESS = 'ingress' EGRESS = 'egress' REVERSE_PORT_SIDE = {INGRESS: EGRESS, EGRESS: INGRESS} SOURCE = 'source' DESTINATION = 'destination' REVERSE_FLOW_SIDE = {SOURCE: DESTINATION, DESTINATION: SOURCE} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/driver/mech_bagpipe.py0000664000175000017500000000435200000000000025626 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_constants from oslo_config import cfg from oslo_log import log from neutron.agent import securitygroups_rpc from neutron.plugins.ml2.drivers import mech_agent from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_const LOG = log.getLogger(__name__) ml2_bagpipe_opts = [ cfg.IntOpt('as_number', default=-1, help=("not used: bagpipe AS configuration for generation of " "EVPN RTs must be done on neutron l2 agents")) ] cfg.CONF.register_opts(ml2_bagpipe_opts, "ml2_bagpipe") class BaGPipeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """ML2 Mechanism driver for bagpipe-bgp This mechanism driver uses RPCs toward compute node agents to trigger the attachment of VM ports in E-VPN VPN instances. 
""" def __init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() super().__init__( n_const.AGENT_TYPE_OVS, portbindings.VIF_TYPE_BRIDGE, {portbindings.CAP_PORT_FILTER: sg_enabled}) if cfg.CONF.ml2_bagpipe.as_number != -1: raise Exception( "bagpipe AS configuration must be done on neutron l2 agents, " "in [ml2_bagpipe_extension]") def get_allowed_network_types(self, agent): return (agent['configurations'].get('tunnel_types', []) + [n_constants.TYPE_LOCAL, n_constants.TYPE_FLAT, n_constants.TYPE_VLAN, n_constants.TYPE_VXLAN]) def get_mappings(self, agent): return agent['configurations'].get('interface_mappings', {}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/objects/0000775000175000017500000000000000000000000023003 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/objects/__init__.py0000664000175000017500000000000000000000000025102 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/objects/bgpvpn.py0000664000175000017500000003525000000000000024656 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_versionedobjects import fields as obj_fields from networking_bgpvpn.neutron.db import bgpvpn_db from neutron.api.rpc.callbacks import resources from neutron.objects import base from neutron.objects.ports import IPAllocation from neutron.objects.ports import Port from neutron.objects.router import RouterPort from neutron.objects.subnet import Subnet from neutron_lib.api.definitions import bgpvpn as bgpvpn_api from neutron_lib.api.definitions import bgpvpn_routes_control as bgpvpn_rc_api from neutron_lib import constants from neutron_lib.objects import common_types from neutron_lib.utils import net as net_utils LOG = logging.getLogger(__name__) def _get_gateway_mac_by_subnet(obj_context, subnet): if not subnet.gateway_ip: LOG.error("no gateway IP defined for subnet %s", subnet) return None ip_allocation = IPAllocation.get_object(obj_context, network_id=subnet.network_id, subnet_id=subnet.id, ip_address=subnet.gateway_ip) # pylint: disable=no-member if ip_allocation: port = Port.get_object(obj_context, id=ip_allocation.port_id) return str(port.mac_address) else: LOG.debug("no port allocated to gateway IP for subnet %s", subnet.id) return None def _get_subnets_info(obj_context, net_id): subnets = Subnet.get_objects(obj_context, network_id=net_id) return [ {'ip_version': subnet.ip_version, 'id': subnet.id, 'cidr': subnet.cidr, 'gateway_ip': subnet.gateway_ip, 'gateway_mac': _get_gateway_mac_by_subnet(obj_context, subnet) } for subnet in subnets ] class BGPVPNTypeField(obj_fields.AutoTypedField): AUTO_TYPE = obj_fields.Enum(valid_values=bgpvpn_api.BGPVPN_TYPES) @base.NeutronObjectRegistry.register class BGPVPN(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = bgpvpn_db.BGPVPN fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), 'type': BGPVPNTypeField(), 'name': obj_fields.StringField(nullable=True, default=None), 'route_targets': 
obj_fields.ListOfStringsField(nullable=True, default=[]), 'import_targets': obj_fields.ListOfStringsField(nullable=True, default=[]), 'export_targets': obj_fields.ListOfStringsField(nullable=True, default=[]), 'route_distinguishers': obj_fields.ListOfStringsField(nullable=True, default=[]), 'local_pref': obj_fields.IntegerField(nullable=True), 'vni': obj_fields.IntegerField(nullable=True), } fields_no_update = ['id', 'project_id', 'type', 'port_id'] foreign_keys = {'BGPVPNNetAssociation': {'id': 'bgpvpn_id'}, 'BGPVPNRouterAssociation': {'id': 'bgpvpn_id'}, 'BGPVPNPortAssociation': {'id': 'bgpvpn_id'}, 'BGPVPNPortAssociationRoute': {'id': 'bgpvpn_id'}, } @classmethod def modify_fields_from_db(cls, db_obj): result = super().modify_fields_from_db(db_obj) for field in ['route_targets', 'import_targets', 'export_targets', 'route_distinguishers']: if field in result: result[field] = (result[field].split(',') if result[field] else []) return result @classmethod def modify_fields_to_db(cls, fields): result = super().modify_fields_to_db(fields) for field in ['route_targets', 'import_targets', 'export_targets', 'route_distinguishers']: if field in result: result[field] = ','.join(result.get(field, [])) return result @base.NeutronObjectRegistry.register class BGPVPNNetAssociation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = bgpvpn_db.BGPVPNNetAssociation fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), 'bgpvpn_id': obj_fields.StringField(), 'bgpvpn': obj_fields.ObjectField('BGPVPN'), 'network_id': obj_fields.StringField(), 'subnets': common_types.ListOfDictOfMiscValuesField(nullable=True) } fields_no_update = ['id', 'project_id', 'bgpvpn_id', 'network_id'] synthetic_fields = ['bgpvpn', 'subnets'] def __init__(self, context=None, **kwargs): super().__init__(context, **kwargs) def create(self): with self.db_context_writer(self.obj_context): super().create() self.obj_load_attr('subnets') def 
obj_load_attr(self, attrname): if attrname == 'subnets': self._load_subnets() else: super().obj_load_attr(attrname) def _load_subnets(self, db_obj=None): # pylint: disable=no-member subnets_info = _get_subnets_info(self.obj_context, self.network_id) setattr(self, 'subnets', subnets_info) self.obj_reset_changes(['subnets']) def from_db_object(self, obj): super().from_db_object(obj) self._load_subnets(obj) def all_subnets(self, network_id): # pylint: disable=no-member return self.subnets @base.NeutronObjectRegistry.register class BGPVPNRouterAssociation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = bgpvpn_db.BGPVPNRouterAssociation fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), 'bgpvpn_id': obj_fields.StringField(), 'bgpvpn': obj_fields.ObjectField('BGPVPN'), 'router_id': obj_fields.StringField(), 'connected_networks': common_types.ListOfDictOfMiscValuesField(nullable=True) } fields_no_update = ['id', 'project_id', 'bgpvpn_id', 'router_id'] synthetic_fields = ['bgpvpn', 'connected_networks'] def __init__(self, context=None, **kwargs): super().__init__(context, **kwargs) def create(self): with self.db_context_writer(self.obj_context): super().create() self.obj_load_attr('connected_networks') def update(self): with self.db_context_writer(self.obj_context): if 'connected_networks' in self.obj_what_changed(): self.obj_load_attr('connected_networks') super().update() def obj_load_attr(self, attrname): if attrname == 'connected_networks': return self._load_connected_networks() super().obj_load_attr(attrname) @classmethod def get_objects(cls, context, _pager=None, validate_filters=True, **kwargs): if 'network_id' in kwargs and 'router_id' not in kwargs: ports = Port.get_objects( context, network_id=kwargs.pop('network_id'), device_owner=constants.DEVICE_OWNER_ROUTER_INTF) router_assocs = [] for port in ports: # pylint: disable=no-member router_assocs.extend( super().get_objects( context, _pager=_pager, 
validate_filters=validate_filters, router_id=RouterPort.get_object( context, port_id=port.id).router_id, **kwargs) ) return router_assocs return super().get_objects( context, _pager=_pager, validate_filters=validate_filters, **kwargs) # pylint: disable=no-member def _load_connected_networks(self, db_obj=None): # NOTE(tmorin): can be improved by directly looking up # Ports with device_id=self.router_id router_ports = RouterPort.get_objects( self.obj_context, router_id=self.router_id) connected_networks = [] for router_port in router_ports: port = Port.get_object(self.obj_context, id=router_port.port_id) if port: # router gateway networks are not considered as requiring # to be bound to BGPVPNs if port.device_owner == constants.DEVICE_OWNER_ROUTER_GW: LOG.debug("skipping port %s, because router gateway", port.id) continue connected_networks.append({ 'network_id': port.network_id, 'subnets': _get_subnets_info(self.obj_context, port.network_id) }) else: LOG.warning("Couldn't find Port for RouterPort (router:%s," "port:%s)", router_port.router_id, router_port.port_id) setattr(self, 'connected_networks', connected_networks) self.obj_reset_changes(['connected_networks']) def from_db_object(self, obj): super().from_db_object(obj) self._load_connected_networks(obj) def all_subnets(self, network_id): # pylint: disable=no-member for connected_net in self.connected_networks: if connected_net['network_id'] == network_id: return connected_net['subnets'] return [] @base.NeutronObjectRegistry.register class BGPVPNPortAssociation(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = bgpvpn_db.BGPVPNPortAssociation fields = { 'id': common_types.UUIDField(), 'project_id': obj_fields.StringField(), 'bgpvpn_id': obj_fields.StringField(), 'bgpvpn': obj_fields.ObjectField('BGPVPN'), 'port_id': obj_fields.StringField(), 'subnets': common_types.ListOfDictOfMiscValuesField(nullable=True), 'routes': obj_fields.ListOfObjectsField('BGPVPNPortAssociationRoute'), 
'advertise_fixed_ips': obj_fields.BooleanField(default=True) } fields_no_update = ['id', 'project_id', 'bgpvpn_id', 'port_id'] synthetic_fields = ['bgpvpn', 'subnets', 'routes'] def __init__(self, context=None, **kwargs): super().__init__(context, **kwargs) def create(self): with self.db_context_writer(self.obj_context): super().create() self.obj_load_attr('subnets') def obj_load_attr(self, attrname): if attrname == 'subnets': self._load_subnets() else: super().obj_load_attr(attrname) def _load_subnets(self, db_obj=None): # pylint: disable=no-member port = Port.get_object(self.obj_context, id=self.port_id) subnets_info = _get_subnets_info(self.obj_context, port.network_id) setattr(self, 'subnets', subnets_info) self.obj_reset_changes(['subnets']) def from_db_object(self, obj): super().from_db_object(obj) self._load_subnets(obj) def all_subnets(self, network_id): # pylint: disable=no-member return self.subnets class BGPVPNPortAssociationRouteTypeField(obj_fields.AutoTypedField): AUTO_TYPE = obj_fields.Enum(valid_values=bgpvpn_rc_api.ROUTE_TYPES) @base.NeutronObjectRegistry.register class BGPVPNPortAssociationRoute(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' db_model = bgpvpn_db.BGPVPNPortAssociationRoute fields = { 'id': common_types.UUIDField(), 'port_association_id': common_types.UUIDField(), 'type': BGPVPNPortAssociationRouteTypeField(), 'prefix': common_types.IPNetworkField(nullable=True, default=None), 'local_pref': obj_fields.IntegerField(nullable=True), 'bgpvpn_id': obj_fields.StringField(nullable=True, default=None), 'bgpvpn': obj_fields.ObjectField('BGPVPN', nullable=True, default=None), } fields_no_update = fields.keys() foreign_keys = {'BGPVPNPortAssociation': {'port_association_id': 'id'}, 'BGPVPN': {'bgpvpn_id': 'id'}, } synthetic_fields = ['bgpvpn'] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @classmethod def modify_fields_from_db(cls, db_obj): fields = super().modify_fields_from_db(db_obj) if 
'prefix' in fields and fields['prefix'] is not None: fields['prefix'] = net_utils.AuthenticIPNetwork(fields['prefix']) return fields @classmethod def modify_fields_to_db(cls, fields): result = super().modify_fields_to_db(fields) if 'prefix' in result and result['prefix'] is not None: result['prefix'] = cls.filter_to_str(result['prefix']) return result # we use these objects in set() in bgpvpn agent extension def __eq__(self, other): # pylint: disable=no-member return ((self.type, self.prefix, self.bgpvpn_id) == (other.type, other.prefix, other.bgpvpn_id)) def __hash__(self): # pylint: disable=no-member return hash((self.type, self.prefix, self.bgpvpn_id)) resources.register_resource_class(BGPVPN) resources.register_resource_class(BGPVPNNetAssociation) resources.register_resource_class(BGPVPNRouterAssociation) resources.register_resource_class(BGPVPNPortAssociation) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/opts.py0000664000175000017500000000464200000000000022717 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from networking_bagpipe.agent.bgpvpn import agent_extension as bgpvpn_agt_ext from networking_bagpipe.bagpipe_bgp.api import config as api_config from networking_bagpipe.bagpipe_bgp.common import config from networking_bagpipe.bagpipe_bgp.common import run_command from networking_bagpipe.bagpipe_bgp import constants from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers from networking_bagpipe.bagpipe_bgp.vpn.ipvpn import mpls_linux_dataplane from networking_bagpipe.bagpipe_bgp.vpn.ipvpn import mpls_ovs_dataplane # NOTE(amotoki): oslo.config suggests to use lower case as group name. # If a group name is registered with upper case names, # oslo.config looks up both upper case and lower case versions of names # in configuration files, so using lower case in sample files is safe enough. def list_bagpipe_bgp_agent_opts(): return [ ('bagpipe', bgpvpn_agt_ext.bagpipe_bgpvpn_opts), ] def list_api_opts(): return [ ('api', api_config.common_opts), ] def list_bgp_common_opts(): return [ ('bgp', config.bgp_opts), ] def list_run_command_opts(): return [ ('common', run_command.common_opts), ] def list_dataplane_driver_ipvpn_opts(): return [ (constants.config_group(constants.IPVPN).lower(), dataplane_drivers.dataplane_common_opts), ] def list_dataplane_driver_evpn_opts(): return [ (constants.config_group(constants.EVPN).lower(), dataplane_drivers.dataplane_common_opts), ] def list_dataplane_driver_ipvpn_mpls_linux_opts(): return [ (constants.config_group(constants.IPVPN).lower(), mpls_linux_dataplane.MPLSLinuxDataplaneDriver.driver_opts), ] def list_dataplane_driver_ipvpn_mpls_ovs_opts(): return [ (constants.config_group(constants.IPVPN).lower(), mpls_ovs_dataplane.MPLSOVSDataplaneDriver.driver_opts), ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/privileged/0000775000175000017500000000000000000000000023504 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/privileged/__init__.py0000664000175000017500000000163100000000000025616 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_privsep import capabilities as caps from oslo_privsep import priv_context default_cmd = priv_context.PrivContext( prefix=__name__, cfg_section='privsep', pypath=__name__ + '.default_cmd', capabilities=[caps.CAP_SYS_ADMIN, # pylint: disable=no-member caps.CAP_NET_ADMIN] # pylint: disable=no-member ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/privileged/privileged_utils.py0000664000175000017500000000434500000000000027436 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_concurrency import processutils from networking_bagpipe import privileged @privileged.default_cmd.entrypoint def sysctl(knob, value): """Run sysctl command :param knob: (string) sysctl knob name, a path under /proc/sys, see: https://review.opendev.org/665155 :param value: (int) value to be set in the knob :return: 0 if the command succeeded, 1 otherwise """ cmd = ['sysctl'] cmd += ['-w', '{}={}'.format(knob, value)] result = processutils.execute(*cmd, check_exit_code=True) return 1 if result[1] else 0 @privileged.default_cmd.entrypoint def modprobe(module_name): """run modprobe command :param module_name: the name of the module to check with modprobe """ cmd = ['modprobe', module_name] processutils.execute(*cmd, check_exit_code=True) # TODO(lajoskatona): use pyroute2.IPDB() @privileged.default_cmd.entrypoint def brctl(params, check_exit=True): """run brctl command :param params: parameters for brctl :param check_exit: boolean or list of allowed exit codes, see https://opendev.org/openstack/oslo.concurrency/src/ branch/master/oslo_concurrency/processutils.py#L207 :return: tupple of stdout, stderr """ cmd = ['brctl'] + params.split() return processutils.execute(*cmd, check_exit_code=check_exit, run_as_root=True) # TODO(lajoskatona): use pyroute2.IPRoute() fdb @privileged.default_cmd.entrypoint def bridge(params): """Run bridge command :param params: parameters for bridge """ cmd = ['bridge'] + params.split() return processutils.execute(*cmd, run_as_root=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/tests/0000775000175000017500000000000000000000000022514 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/__init__.py0000664000175000017500000000000000000000000024613 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/tests/common/0000775000175000017500000000000000000000000024004 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/common/__init__.py0000664000175000017500000000000000000000000026103 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/common/json_fixtures.py0000664000175000017500000000266500000000000027271 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import fixtures from oslo_serialization import jsonutils class JsonFileFixture(fixtures.Fixture): """A fixture that knows how to translate configuration to JSON file. :param base_filename: the filename to use on disk. :param config: a dictionary. :param temp_dir: an existing temporary directory to use for storage. 
""" def __init__(self, base_filename, config, temp_dir): super().__init__() self.base_filename = base_filename self.config = config self.temp_dir = temp_dir def _setUp(self): # Need to randomly generate a unique folder to put the file in self.filename = os.path.join(self.temp_dir, self.base_filename) with open(self.filename, 'w') as f: jsonutils.dump(self.config, f, indent=4) f.flush() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9343054 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/0000775000175000017500000000000000000000000024504 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/__init__.py0000664000175000017500000000000000000000000026603 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/base.py0000664000175000017500000000577400000000000026005 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment as neutron_env from networking_bagpipe.tests.fullstack.resources.bgpvpn \ import client as bgpvpn_client from networking_bagpipe.tests.fullstack.resources.common \ import environment as common_env SUBNET_CIDR1 = '10.0.0.0/24' SUBNET_CIDR2 = '20.0.0.0/24' SUBNET_CIDR3 = '30.0.0.0/24' class BaGPipeBaseFullStackTestCase(base.BaseFullStackTestCase): evpn_driver = 'dummy' ipvpn_driver = 'dummy' compute_node_count = 3 port_per_compute_per_net = 2 def setUp(self): host_descriptions = [ neutron_env.HostDescription(l2_agent_type=self.l2_agent_type) for _ in range(self.compute_node_count) ] env = common_env.BaGPipeEnvironment( common_env.BaGPipeEnvironmentDescription( bagpipe_ml2=self.bagpipe_ml2, evpn_driver=self.evpn_driver, bgpvpn=self.bgpvpn, ipvpn_driver=self.ipvpn_driver, ipvpn_encap=self.ipvpn_encap, mech_drivers=self.mech_drivers, service_plugins=self.service_plugins ), host_descriptions) super().setUp(env) if self.bgpvpn: self.safe_client = self.useFixture( bgpvpn_client.BGPVPNClientFixture(self.client)) def _create_net_subnet_bgpvpn_assoc(self, tenant_uuid, subnet_cidr, bgpvpn_id=None): network = self.safe_client.create_network(tenant_uuid) subnet = self.safe_client.create_subnet( tenant_uuid, network['id'], subnet_cidr) if bgpvpn_id: self.safe_client.create_network_association(tenant_uuid, bgpvpn_id, network['id']) return (network['id'], subnet['id']) def _create_router_bgpvpn_assoc(self, tenant_uuid, subnet_ids, bgpvpn_id=None): router = self.safe_client.create_router() for subnet_id in subnet_ids: self.safe_client.add_router_interface(router['id'], subnet_id) self.safe_client.create_router_association(tenant_uuid, bgpvpn_id, router['id']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 
networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/0000775000175000017500000000000000000000000026516 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/__init__.py0000664000175000017500000000000000000000000030615 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/bagpipe_ml2/0000775000175000017500000000000000000000000030677 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/bagpipe_ml2/__init__.py0000664000175000017500000000000000000000000032776 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/bagpipe_ml2/config.py0000664000175000017500000000176300000000000032525 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.tests.fullstack.resources import config as neutron_cfg class ML2ConfigFixture(neutron_cfg.ML2ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, tenant_network_types): super().__init__( env_desc, host_desc, temp_dir, tenant_network_types) if env_desc.bagpipe_ml2: self.config['ml2']['type_drivers'] = 'vxlan' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/bgpvpn/0000775000175000017500000000000000000000000030012 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/bgpvpn/__init__.py0000664000175000017500000000000000000000000032111 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/bgpvpn/client.py0000664000175000017500000000431400000000000031644 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.common import utils from neutron.tests.fullstack.resources import client as neutron_client class BGPVPNClientFixture(neutron_client.ClientFixture): """Manage and cleanup BGPVPN resources.""" def create_bgpvpn(self, tenant_id, name=None, **kwargs): resource_type = 'bgpvpn' name = name or utils.get_rand_name(prefix='bgpvpn') spec = { 'tenant_id': tenant_id, 'name': name } spec.update(kwargs) return self._create_resource(resource_type, spec) def create_network_association(self, tenant_id, bgpvpn_id, network_id): network_association = { 'network_association': { 'tenant_id': tenant_id, 'network_id': network_id } } assoc = self.client.create_bgpvpn_network_assoc( bgpvpn_id, network_association) self.addCleanup( neutron_client._safe_method( self.client.delete_bgpvpn_network_assoc), bgpvpn_id, assoc['network_association']['id']) def create_router_association(self, tenant_id, bgpvpn_id, router_id): router_association = { 'router_association': { 'tenant_id': tenant_id, 'router_id': router_id } } assoc = self.client.create_bgpvpn_router_assoc( bgpvpn_id, router_association) self.addCleanup( neutron_client._safe_method( self.client.delete_bgpvpn_router_assoc), bgpvpn_id, assoc['router_association']['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/bgpvpn/config.py0000664000175000017500000000376000000000000031637 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import networking_bgpvpn from neutron.tests.fullstack.resources import config as neutron_cfg BGPVPN_SERVICE = 'bgpvpn' BGPVPN_PROVIDER = ('BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.' 'service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:' 'default') class NeutronConfigFixture(neutron_cfg.NeutronConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, connection, rabbitmq_environment): super().__init__( env_desc, host_desc, temp_dir, connection, rabbitmq_environment) if env_desc.bgpvpn: self.config['oslo_policy']['policy_dirs'] = ( os.path.join(networking_bgpvpn.__path__[0], '..', 'etc', 'neutron', 'policy.d') ) # for L2 BGPVPN tests, we need multiple subnet resources using # a common IP subnet self.config['DEFAULT'].update({ 'allow_overlapping_ips': True }) class BGPVPNProviderConfigFixture(neutron_cfg.ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir): super().__init__( env_desc, host_desc, temp_dir, base_filename='networking_bgpvpn.conf') self.config.update({ 'service_providers': { 'service_provider': BGPVPN_PROVIDER } }) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/common/0000775000175000017500000000000000000000000030006 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/common/__init__.py0000664000175000017500000000000000000000000032105 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/common/config.py0000664000175000017500000001465700000000000031642 
0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from networking_bagpipe.tests.common import json_fixtures from neutron.common import utils from neutron.tests.fullstack.resources import config as neutron_cfg ROOTWRAP_DAEMON_CMD_DFLT = ("sudo /usr/local/bin/oslo-rootwrap-daemon " "/etc/bagpipe-bgp/rootwrap.conf") def bagpipe_agent_config_fixture_init_common(self, env_desc, host_desc, local_ip): agent_config = self.config.get('agent') if agent_config is None: self.config.update({'agent': {}}) agent_config = self.config.get('agent') agent_exts = agent_config.get('extensions', '').split(',') if env_desc.bagpipe_ml2: agent_exts.append('bagpipe') if env_desc.bgpvpn: agent_exts.append('bagpipe_bgpvpn') agent_config.update({ 'extensions': ','.join(filter(None, agent_exts)) }) self.config.update({ 'bagpipe': { 'bagpipe_bgp_ip': local_ip } }) class OVSConfigFixture(neutron_cfg.OVSConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, local_ip, mpls_bridge): super().__init__(env_desc, host_desc, temp_dir, local_ip) bagpipe_agent_config_fixture_init_common( self, env_desc, host_desc, local_ip) self.config.bagpipe.update({ 'mpls_bridge': mpls_bridge, 'tun_to_mpls_peer_patch_port': utils.get_rand_device_name(prefix='to-mpls'), 'mpls_to_tun_peer_patch_port': utils.get_rand_device_name(prefix='to-tun'), 'mpls_to_int_peer_patch_port': utils.get_rand_device_name(prefix='mpls-to-int'), 
'int_to_mpls_peer_patch_port': utils.get_rand_device_name(prefix='int-to-mpls'), }) class JsonFixture(neutron_cfg.ConfigFixture): """A fixture that holds a JSON configuration.""" def _setUp(self): cfg_fixture = json_fixtures.JsonFileFixture( self.base_filename, self.config, self.temp_dir) self.useFixture(cfg_fixture) self.filename = cfg_fixture.filename class BagpipeBGPConfigFixture(neutron_cfg.ConfigFixture): def __init__(self, env_desc, host_desc, temp_dir, local_ip, bgp_peer, bgp_port, mpls_bridge, mpls_interface): super().__init__( env_desc, host_desc, temp_dir, base_filename='bgp.conf') self.config.update({ 'DEFAULT': { 'debug': True, }, 'COMMON': { 'root_helper_daemon': os.environ.get('OS_ROOTWRAP_DAEMON_CMD', ROOTWRAP_DAEMON_CMD_DFLT) }, 'BGP': { 'local_address': local_ip, 'peers': bgp_peer, 'my_as': '64512', 'bgp_port': bgp_port, }, 'API': { 'host': local_ip, 'port': '8082' }, 'DATAPLANE_DRIVER_IPVPN': { 'dataplane_driver': self.env_desc.ipvpn_driver, 'ovs_bridge': mpls_bridge, 'proxy_arp': 'False' }, 'DATAPLANE_DRIVER_EVPN': { 'dataplane_driver': self.env_desc.evpn_driver } }) if self.env_desc.ipvpn_driver != 'dummy': if self.env_desc.ipvpn_encap == 'vxlan': self.config['DATAPLANE_DRIVER_IPVPN'].update({ 'vxlan_encap': 'True', 'mpls_interface': '' }) if 'mpls-gre' in self.env_desc.ipvpn_encap: self.config['DATAPLANE_DRIVER_IPVPN'].update({ 'mpls_interface': '*gre*', 'gre_tunnel': self._generate_gre_tunnel() }) if self.env_desc.ipvpn_encap == 'mpls-gre-l3': self.config['DATAPLANE_DRIVER_IPVPN'].update({ 'gre_tunnel_options': "packet_type=legacy_l3" }) if self.env_desc.ipvpn_encap == 'bare-mpls': self.config['DATAPLANE_DRIVER_IPVPN'].update({ 'mpls_interface': mpls_interface }) def _generate_gre_tunnel(self): return utils.get_rand_device_name(prefix='mpls-gre') class GoBGPConfigFixture(JsonFixture): def __init__(self, env_desc, host_desc, temp_dir, bgp_peer, bgp_port, host_ips): super().__init__( env_desc, host_desc, temp_dir, base_filename='gobgp.conf') 
self.config.update({ 'global': { 'config': { 'as': '64512', 'router-id': bgp_peer, 'port': bgp_port } } }) neighbors = list() for host_ip in host_ips: neighbor = { 'config': { 'neighbor-address': host_ip, 'peer-as': '64512' }, 'transport': { 'config': { 'passive-mode': 'true' } }, 'route-reflector': { 'config': { 'route-reflector-client': 'true', 'route-reflector-cluster-id': '1.2.3.4' } } } afi_safis = list() for afi_safi in ('rtc', 'l2vpn-evpn', 'l3vpn-ipv4-unicast', 'l3vpn-ipv4-flowspec'): afi_safis.append({ 'config': { 'afi-safi-name': afi_safi } }) neighbor.update({'afi-safis': afi_safis}) neighbors.append(neighbor) self.config.update({'neighbors': neighbors}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/common/environment.py0000664000175000017500000002501000000000000032722 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import fixtures from oslo_config import cfg import random from networking_bagpipe.tests.fullstack.resources.bagpipe_ml2 \ import config as bagpipe_ml2_cfg from networking_bagpipe.tests.fullstack.resources.bgpvpn \ import config as bgpvpn_cfg from networking_bagpipe.tests.fullstack.resources.common \ import config from networking_bagpipe.tests.fullstack.resources.common \ import config as common_cfg from networking_bagpipe.tests.fullstack.resources.common \ import process as common_proc from neutron_lib import constants from neutron.agent.linux import ip_lib from neutron.agent.linux import utils as a_utils from neutron.common import utils from neutron.tests.common.exclusive_resources import ip_network from neutron.tests.common import net_helpers from neutron.tests.fullstack.resources import environment as neutron_env from neutron.tests.fullstack.resources import process as neutron_proc class BaGPipeEnvironmentDescription(neutron_env.EnvironmentDescription): def __init__(self, bagpipe_ml2=False, evpn_driver='linux', bgpvpn=False, ipvpn_driver='ovs', ipvpn_encap='gre', network_type='vxlan', mech_drivers='openvswitch', service_plugins=None): super().__init__( network_type=network_type, l2_pop=not bagpipe_ml2 and bgpvpn, mech_drivers=mech_drivers, service_plugins=service_plugins, arp_responder=not bagpipe_ml2 and bgpvpn ) self.bagpipe_ml2 = bagpipe_ml2 self.bgpvpn = bgpvpn self.evpn_driver = evpn_driver self.ipvpn_driver = ipvpn_driver self.ipvpn_encap = ipvpn_encap class BaGPipeHost(neutron_env.Host): def __init__(self, env_desc, host_desc, test_name, neutron_config, central_data_bridge, central_external_bridge, bgp_peer, bgp_port): super().__init__(env_desc, host_desc, test_name, neutron_config, central_data_bridge, central_external_bridge) self.bgp_peer = bgp_peer self.bgp_port = bgp_port def _setUp(self): if (self.env_desc.bgpvpn and self.host_desc.l2_agent_type == constants.AGENT_TYPE_OVS): self.mpls_bridge = self.useFixture( 
net_helpers.OVSBridgeFixture(self.generate_mpls_bridge()) ).bridge self.mpls_bridge.set_secure_mode() super()._setUp() self.setup_host_with_bagpipe_bgp() def generate_mpls_bridge(self): return utils.get_rand_device_name(prefix='br-mpls') def setup_host_with_ovs_agent(self): agent_cfg_fixture = config.OVSConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.local_ip, self.mpls_bridge.br_name) self.useFixture(agent_cfg_fixture) self.useFixture( net_helpers.OVSBridgeFixture( agent_cfg_fixture.get_br_tun_name())).bridge self.ovs_agent = self.useFixture( neutron_proc.OVSAgentFixture( self.env_desc, self.host_desc, self.test_name, self.neutron_config, agent_cfg_fixture)) def setup_host_with_bagpipe_bgp(self): if self.host_desc.l2_agent_type == constants.AGENT_TYPE_OVS: mpls_bridge = (self.mpls_bridge.br_name if self.env_desc.bgpvpn else '') mpls_interface = '' if self.env_desc.bgpvpn: self.connect_to_internal_network_via_tunneling() if self.env_desc.ipvpn_encap == 'bare-mpls': self.connect_to_internal_network_via_mpls_bridge() mpls_interface = filter( lambda port: net_helpers.VETH0_PREFIX in port, self.mpls_bridge.get_port_name_list())[0] bgp_cfg_fixture = common_cfg.BagpipeBGPConfigFixture( self.env_desc, self.host_desc, self.neutron_config.temp_dir, self.local_ip, self.bgp_peer, self.bgp_port, mpls_bridge, mpls_interface) self.useFixture(bgp_cfg_fixture) self.bagpipe_bgp = self.useFixture( common_proc.BagpipeBGPFixture( self.env_desc, self.host_desc, self.test_name, bgp_cfg_fixture, namespace=self.host_namespace)) def connect_to_internal_network_via_mpls_bridge(self): veth_1, veth_2 = self.useFixture( net_helpers.VethFixture()).ports veth_1.link.set_up() veth_2.link.set_up() self.mpls_bridge.add_port(veth_1.name) self.central_data_bridge.add_port(veth_2.name) mpls_device = ip_lib.IPDevice(self.mpls_bridge.br_name) mpls_device.addr.add(utils.ip_to_cidr(self.local_ip, 24)) mpls_device.link.set_up() self.mpls_bridge.remove_all_flows() 
self.mpls_bridge.add_flow(in_port='LOCAL', actions='output:1') self.mpls_bridge.add_flow(in_port='1', actions='output:LOCAL') class GoBGPHost(neutron_env.Host): def __init__(self, bgp_address, *args, **kwargs): super().__init__(*args, **kwargs) self.bgp_address = bgp_address # the right thing would be to use a fixture, but we can't in # __init__, so let's gamble :) self.bgp_port = random.randint(10000, 60000) def _setUp(self): gobgp_cfg_fixture = self.useFixture( common_cfg.GoBGPConfigFixture( self.env_desc, "gobgp", self.useFixture(fixtures.TempDir()).path, self.bgp_address, self.bgp_port, [host.local_ip for host in self.hosts])) self.useFixture( common_proc.GoBGPFixture(self.env_desc, None, self.test_name, gobgp_cfg_fixture)) class BaGPipeEnvironment(neutron_env.Environment): def _bagpipe_host_fixture(self, host_desc, bgp_peer, bgp_port): temp_dir = self.useFixture(fixtures.TempDir()).path neutron_config = bgpvpn_cfg.NeutronConfigFixture( self.env_desc, host_desc, temp_dir, cfg.CONF.database.connection, self.rabbitmq_environment) self.useFixture(neutron_config) return self.useFixture( BaGPipeHost(self.env_desc, host_desc, self.test_name, neutron_config, self.central_data_bridge, self.central_external_bridge, bgp_peer, bgp_port)) def _create_gobgp_host(self, bgp_address): return GoBGPHost(bgp_address, self.env_desc, "gobgp", self.test_name, None, self.central_data_bridge, self.central_external_bridge) def _dont_be_paranoid(self): # we will have many br-mplsXXXXX or host-xxx interfaces on the # same subnet (the one for self.env_desc.network_range) # and we don't want the IP stack to drop the packets received # on these because they are from "us" but coming from "the outside" a_utils.execute(['sudo', 'sysctl', '-w', 'net.ipv4.conf.default.accept_local=1']) a_utils.execute(['sudo', 'sysctl', '-w', 'net.ipv4.conf.all.rp_filter=0']) a_utils.execute(['sudo', 'sysctl', '-w', 'net.ipv4.conf.default.rp_filter=0']) def _get_network_range(self): # for bare MPLS all compute 
nodes must be in the same subnet if self.env_desc.ipvpn_encap == 'bare-mpls': self._dont_be_paranoid() return self.useFixture( ip_network.ExclusiveIPNetwork( "240.0.0.0", "240.255.255.255", "24")).network r = super()._get_network_range() if r: self._dont_be_paranoid() return r def _setUp(self): self.temp_dir = self.useFixture(fixtures.TempDir()).path # We need this bridge before rabbit and neutron service will start self.central_data_bridge = self.useFixture( net_helpers.OVSBridgeFixture('cnt-data')).bridge self.central_external_bridge = self.useFixture( net_helpers.OVSBridgeFixture('cnt-ex')).bridge # Get rabbitmq address (and cnt-data network) rabbitmq_ip_address = self._configure_port_for_rabbitmq() self.rabbitmq_environment = self.useFixture( neutron_proc.RabbitmqEnvironmentFixture(host=rabbitmq_ip_address) ) plugin_cfg_fixture = self.useFixture( bagpipe_ml2_cfg.ML2ConfigFixture( self.env_desc, self.hosts_desc, self.temp_dir, self.env_desc.network_type)) neutron_cfg_fixture = self.useFixture( bgpvpn_cfg.NeutronConfigFixture( self.env_desc, None, self.temp_dir, cfg.CONF.database.connection, self.rabbitmq_environment)) service_cfg_fixtures = list() if self.env_desc.bgpvpn: service_cfg_fixtures.append(self.useFixture( bgpvpn_cfg.BGPVPNProviderConfigFixture( self.env_desc, self.hosts_desc, self.temp_dir))) self.neutron_server = self.useFixture( neutron_proc.NeutronServerFixture( self.env_desc, None, self.test_name, neutron_cfg_fixture, plugin_cfg_fixture, service_cfg_fixtures)) gobgp_host = self._create_gobgp_host(rabbitmq_ip_address) self.hosts = [self._bagpipe_host_fixture(desc, rabbitmq_ip_address, gobgp_host.bgp_port) for desc in self.hosts_desc] gobgp_host.hosts = self.hosts self.useFixture(gobgp_host) self.wait_until_env_is_up() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 
networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/resources/common/process.py0000664000175000017500000001137400000000000032044 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import os import random import shutil import signal import fixtures from oslo_utils import fileutils from neutron.agent.common import async_process from neutron.tests import base from neutron.tests.fullstack import base as neutron_base from neutron.tests.fullstack.resources import process as neutron_proc class BagpipeBGPFixture(fixtures.Fixture): BAGPIPE_BGP = "bagpipe-bgp" def __init__(self, env_desc, host_desc, test_name, bgp_cfg_fixture, namespace=None): super().__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.bgp_cfg_fixture = bgp_cfg_fixture self.namespace = namespace def _setUp(self): self.process_fixture = self.useFixture(neutron_proc.ProcessFixture( test_name=self.test_name, process_name=self.BAGPIPE_BGP, exec_name=self.BAGPIPE_BGP, config_filenames=[self.bgp_cfg_fixture.filename], namespace=self.namespace, kill_signal=signal.SIGTERM)) class BagpipeFakeRRProcessFixture(neutron_proc.ProcessFixture): def start(self): cmd = [shutil.which(self.exec_name)] self.process = async_process.AsyncProcess( cmd, run_as_root=True, namespace=self.namespace ) self.process.start() class BagpipeFakeRRFixture(fixtures.Fixture): BAGPIPE_FAKERR = "bagpipe-fakerr" def 
__init__(self, env_desc, host_desc, test_name): super().__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name def _setUp(self): self.process_fixture = self.useFixture(BagpipeFakeRRProcessFixture( test_name=self.test_name, process_name=self.BAGPIPE_FAKERR, exec_name=self.BAGPIPE_FAKERR, config_filenames=None, kill_signal=signal.SIGTERM)) class GoBGPProcessFixture(neutron_proc.ProcessFixture): # NOTE(tmorin): stopping the process does not work yet for # GOBGPD_LOG = True, because get_root_helper_child_pid is not designed to # find the right child pid when things when an intermediate shell # is used # (using 'sh -c "exec gobgpd ..."' does not work either, gobgpd silently # stops right after startup for a reason I did not identify) GOBGPD_LOG = False def start(self): test_name = base.sanitize_log_path(self.test_name) log_dir = os.path.join(neutron_base.DEFAULT_LOG_DIR, test_name) fileutils.ensure_tree(log_dir, mode=0o755) timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f") log_file = "{}/{}--{}.log".format(log_dir, self.process_name, timestamp) gobgpd_exec = shutil.which(self.exec_name) if not gobgpd_exec: raise Exception("can't find gobgpd executable in PATH (%s, %s)" % (self.exec_name, os.environ['PATH'])) cmd = [ gobgpd_exec, '-t', 'json', '-f', self.config_filenames[0], '--log-level=debug', # we don't need this management API: '--api-hosts=0.0.0.0:%s' % random.randint(20000, 30000) ] if self.GOBGPD_LOG: cmd = ['sh', '-c', ('%s > %s 2>&1') % (' '.join(cmd), log_file)] self.process = async_process.AsyncProcess( cmd, namespace=self.namespace ) self.process.start() class GoBGPFixture(fixtures.Fixture): GOBGPD = "gobgpd" def __init__(self, env_desc, host_desc, test_name, gobgp_cfg_fixture): super().__init__() self.env_desc = env_desc self.host_desc = host_desc self.test_name = test_name self.gobgp_cfg_fixture = gobgp_cfg_fixture def _setUp(self): config_filenames = [self.gobgp_cfg_fixture.filename] 
self.process_fixture = self.useFixture(GoBGPProcessFixture( test_name=self.test_name, process_name=self.GOBGPD, exec_name=self.GOBGPD, config_filenames=config_filenames, kill_signal=signal.SIGTERM)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/test_bagpipe_ml2_connectivity.py0000664000175000017500000000370200000000000033076 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import constants from oslo_utils import uuidutils import testscenarios from neutron.tests.fullstack.resources import machine from networking_bagpipe.tests.fullstack import base load_tests = testscenarios.load_tests_apply_scenarios class TestBaGPipeML2ConnectivitySameNetwork(base.BaGPipeBaseFullStackTestCase): bgpvpn = False ipvpn_encap = None mech_drivers = 'bagpipe' service_plugins = 'router' l2_agent_type = constants.AGENT_TYPE_OVS compute_node_count = 5 port_per_compute_per_net = 2 scenarios = [ ('BaGPipe native VXLAN', {'bagpipe_ml2': True, 'evpn_driver': 'linux'})] def test_connectivity(self): tenant_uuid = uuidutils.generate_uuid() network = self.safe_client.create_network(tenant_uuid) self.safe_client.create_subnet( tenant_uuid, network['id'], base.SUBNET_CIDR1) vms = machine.FakeFullstackMachinesList([ self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[i], network['id'], tenant_uuid, self.safe_client)) for i in range(self.compute_node_count) * self.port_per_compute_per_net]) vms.block_until_all_boot() vms.ping_all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/fullstack/test_bgpvpn_connectivity.py0000664000175000017500000001710500000000000032213 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from neutron_lib import constants from oslo_utils import uuidutils import testscenarios import unittest from neutron.tests.fullstack.resources import machine from networking_bagpipe.tests.fullstack import base from networking_bagpipe.tests.fullstack.resources.bgpvpn \ import config as bgpvpn_cfg load_tests = testscenarios.load_tests_apply_scenarios class TestConnectivitySameBGPVPN(base.BaGPipeBaseFullStackTestCase): bagpipe_ml2 = False service_plugins = 'router,%s' % bgpvpn_cfg.BGPVPN_SERVICE bgpvpn = True port_per_compute_per_net = 2 compute_node_count = 2 scenarios = [ ('OpenVSwitch MPLS-over-TEB-over-GRE', { 'mech_drivers': 'openvswitch', 'l2_agent_type': constants.AGENT_TYPE_OVS, 'ipvpn_driver': 'ovs', 'ipvpn_encap': 'mpls-gre' }), ('OpenVSwitch MPLS-over-GRE', { 'mech_drivers': 'openvswitch', 'l2_agent_type': constants.AGENT_TYPE_OVS, 'ipvpn_driver': 'ovs', 'ipvpn_encap': 'mpls-gre-l3' }), ('OpenVSwitch bare MPLS', { 'mech_drivers': 'openvswitch', 'l2_agent_type': constants.AGENT_TYPE_OVS, 'ipvpn_driver': 'ovs', 'ipvpn_encap': 'bare-mpls' }) ] def test_l3_network_connectivity(self): tenant_uuid = uuidutils.generate_uuid() bgpvpn = self.safe_client.create_bgpvpn(tenant_uuid, route_targets=['64512:1']) network_ids = list() for subnet_cidr in (base.SUBNET_CIDR1, base.SUBNET_CIDR2): network_ids.append( self._create_net_subnet_bgpvpn_assoc(tenant_uuid, subnet_cidr, bgpvpn['id'])[0] ) fake_machines = list() for network_id in network_ids: fake_machines.extend([ self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[i], network_id, tenant_uuid, self.safe_client)) for i in range( self.compute_node_count) * self.port_per_compute_per_net]) vms = machine.FakeFullstackMachinesList(fake_machines) vms.block_until_all_boot() vms.ping_all() def test_l3_router_connectivity(self): tenant_uuid = uuidutils.generate_uuid() bgpvpn = self.safe_client.create_bgpvpn(tenant_uuid, route_targets=['64512:1']) network1 = 
self.safe_client.create_network(tenant_uuid) subnet1 = self.safe_client.create_subnet( tenant_uuid, network1['id'], '10.0.0.0/24') network2 = self.safe_client.create_network(tenant_uuid) subnet2 = self.safe_client.create_subnet( tenant_uuid, network2['id'], '20.0.0.0/24') router = self.safe_client.create_router(tenant_uuid) self.safe_client.add_router_interface(router['id'], subnet1['id']) self.safe_client.add_router_interface(router['id'], subnet2['id']) self.safe_client.create_router_association(tenant_uuid, bgpvpn['id'], router['id']) network3 = self.safe_client.create_network(tenant_uuid) self.safe_client.create_subnet( tenant_uuid, network3['id'], '30.0.0.0/24') self.safe_client.create_network_association(tenant_uuid, bgpvpn['id'], network3['id']) fake_machines = list() for network in (network1, network2, network3): fake_machines.extend([ self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[i], network['id'], tenant_uuid, self.safe_client)) for i in range( self.compute_node_count) * self.port_per_compute_per_net]) vms = machine.FakeFullstackMachinesList(fake_machines) vms.block_until_all_boot() vms.ping_all() @unittest.skip("Disabled because of bug 1715660 ( https://" "bugs.launchpad.net/networking-bagpipe/+bug/1715660 )") def test_l2_network_connectivity(self): # create fake machines in 2 different networks, all using # the same IP subnet, and check that each machine can reach all the # others. 
We create machines so that we confirm that connectivity # still works *inside* a given network, both locally on a compute # node, and across different compute nodes if self.evpn_driver == 'dummy': self.skipTest("L2VPN unsupported for this scenario") tenant_uuid = uuidutils.generate_uuid() bgpvpn = self.safe_client.create_bgpvpn(tenant_uuid, type="l2", route_targets=['64512:10']) fake_machines = list() for network in range(2): # we'll use the same subnet range for all networks, but # choose in this range distinct IP addresses for each fake machine network_id, subnet_id = self._create_net_subnet_bgpvpn_assoc( tenant_uuid, base.SUBNET_CIDR1, bgpvpn['id'] ) for compute, port_i in itertools.product( range(self.compute_node_count), range(self.port_per_compute_per_net)): # NOTE(tmorin): choice of fixed IP done this way for sake # of simplicity, of course, this breaks e.g. for # compute_node_count > 10 fixed_ip = ( base.SUBNET_CIDR1[:base.SUBNET_CIDR1.find('0/24')] + str(100 * network + 10 * (compute + 1) + port_i)) neutron_port = self.safe_client.create_port( network_id=network_id, tenant_id=tenant_uuid, hostname=self.environment.hosts[compute].hostname, fixed_ips=[{"subnet_id": subnet_id, "ip_address": fixed_ip}] ) fake_machines.append( self.useFixture( machine.FakeFullstackMachine( self.environment.hosts[compute], network_id, tenant_uuid, self.safe_client, neutron_port=neutron_port ) ) ) vms = machine.FakeFullstackMachinesList(fake_machines) vms.block_until_all_boot() vms.ping_all() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 networking_bagpipe-22.0.0/networking_bagpipe/tests/functional/0000775000175000017500000000000000000000000024656 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/functional/__init__.py0000664000175000017500000000000000000000000026755 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 networking_bagpipe-22.0.0/networking_bagpipe/tests/functional/db/0000775000175000017500000000000000000000000025243 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/functional/db/__init__.py0000664000175000017500000000000000000000000027342 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/functional/db/test_migrations.py0000664000175000017500000000447100000000000031036 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.functional.db import test_migrations from neutron.tests.unit import testlib_api from networking_bagpipe.db.models import head EXTERNAL_TABLES = set(external.TABLES) VERSION_TABLE = 'alembic_version_bagpipel2' class _TestModelsMigrationsBaGPipe(test_migrations._TestModelsMigrations): def db_sync(self, engine): cfg.CONF.set_override( 'connection', engine.url.render_as_string(hide_password=False), group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name.startswith('alembic') or name == VERSION_TABLE or name in EXTERNAL_TABLES): return False if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): return False return True class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestModelsMigrationsBaGPipe, testlib_api.SqlTestCaseLight): pass class TestModelsMigrationsPostgresql(testlib_api.PostgreSQLTestCaseMixin, _TestModelsMigrationsBaGPipe, testlib_api.SqlTestCaseLight): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/functional/requirements.txt0000664000175000017500000000047000000000000030143 0ustar00zuulzuul00000000000000# Additional requirements for functional tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
psutil>=1.1.1 psycopg2 PyMySQL>=0.6.2 # MIT License ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/0000775000175000017500000000000000000000000023473 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/__init__.py0000664000175000017500000000000000000000000025572 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9383054 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/0000775000175000017500000000000000000000000024571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/__init__.py0000664000175000017500000000000000000000000026670 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/base.py0000664000175000017500000002505600000000000026065 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from oslo_utils import uuidutils from networking_bagpipe.agent import bagpipe_bgp_agent from networking_bagpipe.agent.bgpvpn import constants as bgpvpn_const from networking_bagpipe.bagpipe_bgp import constants as bbgp_const from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_agent_extension_api as ovs_ext_agt from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base from neutron_lib.plugins.ml2 import ovs_constants as ovs_agt_constants PORT10_ID = uuidutils.generate_uuid() PORT10 = {'id': PORT10_ID, 'mac_address': '00:00:de:ad:be:ef', 'ip_address': '10.0.0.2'} PORT11 = {'id': uuidutils.generate_uuid(), 'mac_address': '00:00:de:ad:f0:0d', 'ip_address': '10.0.0.3'} NETWORK1 = {'id': uuidutils.generate_uuid(), 'gateway_ip': '10.0.0.1', 'segmentation_id': '101'} PORT20 = {'id': uuidutils.generate_uuid(), 'mac_address': '00:00:de:ad:be:ef', 'ip_address': '20.0.0.2'} PORT21 = {'id': uuidutils.generate_uuid(), 'mac_address': '00:00:de:ad:f0:0d', 'ip_address': '20.0.0.3'} NETWORK2 = {'id': uuidutils.generate_uuid(), 'gateway_ip': '20.0.0.1', 'segmentation_id': '202'} ROUTER1 = {'id': uuidutils.generate_uuid()} port_2_net = { PORT10['id']: NETWORK1, PORT11['id']: NETWORK1, PORT20['id']: NETWORK2, PORT21['id']: NETWORK2, } LOCAL_VLAN_MAP = { NETWORK1['id']: [31, NETWORK1['segmentation_id']], NETWORK2['id']: [52, NETWORK2['segmentation_id']] } BGPVPN_L2_RT10 = {'route_targets': ['BGPVPN_L2:10'], 'import_targets': [], 'export_targets': [] } BGPVPN_L2_RT20 = {'route_targets': ['BGPVPN_L2:20'], 'import_targets': [], 'export_targets': [] } BGPVPN_L3_RT100 = {'route_targets': ['BGPVPN_L3:100'], 'import_targets': [], 'export_targets': [] } BGPVPN_L3_RT200 = {'route_targets': ['BGPVPN_L3:200'], 'import_targets': [], 'export_targets': [] } class DummyPort: def __init__(self, network, port, bgpvpn_port=False, evpn=None, ipvpn=None): 
self.id = port['id'] self.network_id = network['id'] self.mac_address = port['mac_address'] self.ip_address = port['ip_address'] self.gateway_ip = network['gateway_ip'] if bgpvpn_port: if evpn: self.l2vpn = copy.deepcopy(evpn) if ipvpn: self.l3vpn = copy.deepcopy(ipvpn) else: if evpn: self.evpn = copy.deepcopy(evpn) if ipvpn: self.ipvpn = copy.deepcopy(ipvpn) class DummyVif: def __init__(self, ofport, port_name): self.ofport = ofport self.port_name = port_name class DummyBGPVPN: def __init__(self, network, l2vpn=None, l3vpn=None, gateway_mac=None): self.id = uuidutils.generate_uuid() self.network_id = network['id'] if l2vpn: self.l2vpn = copy.deepcopy(l2vpn) if l3vpn: self.l3vpn = copy.deepcopy(l3vpn) if gateway_mac: self.gateway_mac = gateway_mac class RTList(list): def __eq__(self, other): return set(self) == set(other) class BaseTestAgentExtension: agent_extension_class = None DUMMY_VIF10 = None DUMMY_VIF11 = None DUMMY_VIF20 = None DUMMY_VIF21 = None def setUp(self): self.mocked_bagpipe_agent = mock.Mock( spec=bagpipe_bgp_agent.BaGPipeBGPAgent ) self.mocked_bagpipe_agent.do_port_plug = mock.Mock() self.mocked_bagpipe_agent.do_port_plug_refresh = mock.Mock() patcher = mock.patch('networking_bagpipe.agent.bagpipe_bgp_agent.' 
'BaGPipeBGPAgent.get_instance', return_value=self.mocked_bagpipe_agent) patcher.start() self.addCleanup(patcher.stop) self.agent_ext = self.agent_extension_class() self.connection = mock.Mock() def _port_data(self, port, delete=False, admin_state_up=True): data = { 'port_id': port['id'] } if not delete: data.update({ 'port_id': port['id'], 'admin_state_up': admin_state_up, 'network_id': port_2_net[port['id']]['id'], 'segmentation_id': port_2_net[port['id']]['segmentation_id'], 'network_type': 'vxlan', 'device_owner': 'compute:None', 'mac_address': port['mac_address'], 'fixed_ips': [ { 'ip_address': port['ip_address'], } ] }) return data def _get_expected_local_port(self, bbgp_vpn_type, network_id, segmentation_id, port_id, detach=False): raise NotImplementedError def _check_network_info(self, network_id, expected_size, vpn_type=None, vpn_rts=None): if expected_size == 0: self.assertNotIn(network_id, self.agent_ext.networks_info, "Network %s expected to have no ports left" % network_id) else: self.assertIn(network_id, self.agent_ext.networks_info) network_info = self.agent_ext.networks_info[network_id] self.assertEqual(len(network_info.ports), expected_size, "Network ports size not as expected") PATCH_INT_TO_MPLS = 5 PATCH_INT_TO_TUN = 7 PATCH_TUN_TO_MPLS = 1 PATCH_TUN_TO_INT = 4 PATCH_MPLS_TO_TUN = 2 PATCH_MPLS_TO_INT = 6 BR_INT_PATCHES = { 'patch-tun': PATCH_INT_TO_TUN, 'patch-int-from-mpls': PATCH_INT_TO_MPLS } BR_TUN_PATCHES = { 'patch-int': PATCH_TUN_TO_INT, 'patch-to-mpls': PATCH_TUN_TO_MPLS } BR_MPLS_PATCHES = { 'patch-from-tun': PATCH_MPLS_TO_TUN, 'patch-mpls-to-int': PATCH_MPLS_TO_INT } def get_port_ofport_tun_br(port_name): return BR_TUN_PATCHES[port_name] def get_port_ofport_int_br(port_name): return BR_INT_PATCHES[port_name] def add_patch_port_mpls(patch, peer): return BR_MPLS_PATCHES[patch] def add_patch_port_tun(patch, peer): return BR_TUN_PATCHES[patch] def add_patch_port_int(patch, peer): return BR_INT_PATCHES[patch] class 
BaseTestOVSAgentExtension(ovs_test_base.OVSOSKenTestBase, BaseTestAgentExtension): driver_type = ovs_agt_constants.EXTENSION_DRIVER_TYPE DUMMY_VIF10 = DummyVif(10, 'VIF10') DUMMY_VIF11 = DummyVif(11, 'VIF11') DUMMY_VIF20 = DummyVif(20, 'VIF20') DUMMY_VIF21 = DummyVif(21, 'VIF21') def setUp(self): ovs_test_base.OVSOSKenTestBase.setUp(self) BaseTestAgentExtension.setUp(self) self.int_br = self.br_int_cls("br-int") self.int_br.add_patch_port = mock.Mock( side_effect=add_patch_port_int) self.int_br.get_port_ofport = mock.Mock( side_effect=get_port_ofport_int_br) self.int_br.add_flow = mock.Mock() self.int_br.delete_flows = mock.Mock() self.int_br.use_at_least_protocol = mock.Mock() self.tun_br = self.br_tun_cls("br-tun") self.tun_br.add_patch_port = mock.Mock( side_effect=add_patch_port_tun) self.tun_br.get_port_ofport = mock.Mock( side_effect=get_port_ofport_tun_br) self.tun_br.add_flow = mock.Mock() self.tun_br.delete_flows = mock.Mock() agent_extension_api = ovs_ext_agt.OVSAgentExtensionAPI(self.int_br, self.tun_br) self.agent_ext.consume_api(agent_extension_api) br_exists_patcher = mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.bridge_exists', return_value=True) br_exists_patcher.start() self.addCleanup(br_exists_patcher.stop) add_patch_patcher = mock.patch('neutron.agent.common.ovs_lib.OVSBridge' '.add_patch_port', side_effect=add_patch_port_mpls) add_patch_patcher.start() self.addCleanup(add_patch_patcher.stop) secure_mode_patcher = mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode') secure_mode_patcher.start() self.addCleanup(secure_mode_patcher.stop) self.agent_ext.initialize(self.connection, ovs_agt_constants.EXTENSION_DRIVER_TYPE) self.vlan_manager = vlanmanager.LocalVlanManager() for net_id, vlan in LOCAL_VLAN_MAP.items(): try: self.vlan_manager.add(net_id, vlan[0], None, None, segmentation_id=vlan[1]) except vlanmanager.MappingAlreadyExists: pass def _get_expected_local_port(self, bbgp_vpn_type, network_id, segmentation_id, port_id, 
detach=False): vlan = self.vlan_manager.get(network_id, segmentation_id).vlan if bbgp_vpn_type == bbgp_const.IPVPN: r = dict( local_port=dict( linuxif='{}:{}'.format(bgpvpn_const.LINUXIF_PREFIX, vlan), ovs=dict(plugged=True, port_number=PATCH_MPLS_TO_TUN, vlan=vlan) ) ) if detach: del r['local_port']['ovs'] return r else: r = dict( local_port=dict( linuxif='{}:{}'.format(bgpvpn_const.LINUXIF_PREFIX, vlan), vlan=vlan ) ) if detach: del r['local_port']['vlan'] return r ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9423053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/bgpvpn/0000775000175000017500000000000000000000000026065 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/bgpvpn/__init__.py0000664000175000017500000000000000000000000030164 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/bgpvpn/test_agent_extension.py0000664000175000017500000025634300000000000032705 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock import netaddr from oslo_utils import uuidutils from networking_bagpipe.agent.bgpvpn import agent_extension as bagpipe_agt_ext from networking_bagpipe.agent.bgpvpn import constants as bgpvpn_const from networking_bagpipe.bagpipe_bgp import constants as bbgp_const from networking_bagpipe.objects import bgpvpn as objects from networking_bagpipe.tests.unit.agent import base from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events as rpc_events from neutron.api.rpc.handlers import resources_rpc from neutron_lib.api.definitions import bgpvpn from neutron_lib import context from neutron_lib.plugins.ml2 import ovs_constants as ovs_agt_constants class HashableDict(dict): def __init__(self, dictionary): _dict = copy.deepcopy(dictionary) for k, v in list(_dict.items()): if (isinstance(v, dict) and not isinstance(v, HashableDict)): _dict[k] = HashableDict(v) super().__init__(_dict) def __hash__(self): return hash(tuple(sorted(self.items()))) def make_list_hashable(list_): if isinstance(list_[0], dict) and len(list_): return [HashableDict(d) for d in list_] class UnorderedList(list): def __eq__(self, other): return set(make_list_hashable(self)) == set(make_list_hashable(other)) class StringContains: def __init__(self, *items): self.items = items def __eq__(self, other): return all([(item in other) for item in self.items]) def __repr__(self): return 'StringContains(%s)' % ','.join(self.items) class TestBgpvpnAgentExtensionMixin: def setUp(self): bulk_pull_patcher = mock.patch.object( self.agent_ext.rpc_pull_api, 'bulk_pull') self.mocked_rpc_pull = bulk_pull_patcher.start() self.addCleanup(bulk_pull_patcher.stop) self.context = context.get_admin_context() @mock.patch.object(registry, 'register') @mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback') def test_initialize_rpcs(self, rpc_mock, subscribe_mock): self.agent_ext._setup_mpls_br = mock.Mock() # already called in setUp 
self.agent_ext.initialize(self.connection, self.driver_type) self.connection.create_consumer.assert_has_calls( [mock.call( resources_rpc.resource_type_versioned_topic(resource_type), [rpc_mock()], fanout=True) for resource_type in ( objects.BGPVPNNetAssociation.obj_name(), objects.BGPVPNRouterAssociation.obj_name())], any_order=True ) subscribe_mock.assert_has_calls( [ mock.call(mock.ANY, objects.BGPVPNNetAssociation.obj_name()), mock.call(mock.ANY, objects.BGPVPNRouterAssociation.obj_name()) ], any_order=True ) def _expand_rts(self, rts): vpn_info = {} # rts are BGPVPN API resources rts vpn_info['import_rt'] = set(rts.get('route_targets') + rts.get('import_targets')) vpn_info['export_rt'] = set(rts.get('route_targets') + rts.get('export_targets')) return vpn_info def _fake_bgpvpn(self, bgpvpn_type, id=None, **bgpvpn_params): return objects.BGPVPN(self.context, id=id or uuidutils.generate_uuid(), type=bgpvpn_type, **bgpvpn_params) def _fake_net_assoc(self, network, bgpvpn_type, gateway_mac=None, id=None, **bgpvpn_params): bgpvpn = self._fake_bgpvpn(bgpvpn_type, **bgpvpn_params) net_assoc = objects.BGPVPNNetAssociation( self.context, id=id or uuidutils.generate_uuid(), network_id=network['id'], bgpvpn_id=bgpvpn.id, bgpvpn=bgpvpn ) net_assoc.subnets = [{ 'ip_version': 4, 'cidr': "NOT_USED_TODAY", 'gateway_ip': network['gateway_ip'], 'gateway_mac': gateway_mac, }] return net_assoc def _fake_router_assoc(self, router, bgpvpn_type, networks, **bgpvpn_params): bgpvpn = self._fake_bgpvpn(bgpvpn_type, **bgpvpn_params) router_assoc = objects.BGPVPNRouterAssociation( self.context, id=uuidutils.generate_uuid(), router_id=router['id'], bgpvpn_id=bgpvpn.id, bgpvpn=bgpvpn ) router_assoc.connected_networks = [ {'network_id': net['id'], 'subnets': [{'ip_version': 4, 'cidr': "NOT_USED_TODAY", 'gateway_ip': net['gateway_ip'], 'gateway_mac': net.get('gateway_mac', None)}]} for net in networks] return router_assoc def _fake_port_assoc(self, port, bgpvpn_type, network, 
gateway_mac=None, route_prefixes=None, id=None, bgpvpn_routes=None, advertise_fixed_ips=True, **bgpvpn_params): bgpvpn = self._fake_bgpvpn(bgpvpn_type, **bgpvpn_params) port_assoc = objects.BGPVPNPortAssociation( self.context, id=id or uuidutils.generate_uuid(), port_id=port['id'], bgpvpn_id=bgpvpn.id, bgpvpn=bgpvpn, advertise_fixed_ips=advertise_fixed_ips ) port_assoc.subnets = [{ 'ip_version': 4, 'cidr': "NOT_USED_TODAY", 'gateway_ip': network['gateway_ip'], 'gateway_mac': gateway_mac, }] route_prefixes = route_prefixes or [] bgpvpn_routes = bgpvpn_routes or [] prefix_routes = [ objects.BGPVPNPortAssociationRoute( self.context, type='prefix', prefix=netaddr.IPNetwork(prefix), local_pref=local_pref) for prefix, local_pref in route_prefixes] bgpvpn_routes_objs = [ objects.BGPVPNPortAssociationRoute( self.context, type='bgpvpn', bgpvpn=bgpvpn_, bgpvpn_id=bgpvpn_.id, local_pref=local_pref) for bgpvpn_, local_pref in bgpvpn_routes] port_assoc.routes = prefix_routes + bgpvpn_routes_objs return port_assoc def _fake_associations(self, net_assocs=None, router_assocs=None): assocs = mock.Mock() assocs.network_associations = net_assocs or [] assocs.router_associations = router_assocs or [] return assocs def _net_assoc_notif(self, net_assoc, event_type): self.agent_ext.handle_notification_net_assocs( None, objects.BGPVPNNetAssociation.obj_name(), [net_assoc], event_type) def _router_assoc_notif(self, router_assoc, event_type): self.agent_ext.handle_notification_router_assocs( None, objects.BGPVPNNetAssociation.obj_name(), [router_assoc], event_type) def _port_assoc_notif(self, port_assoc, event_type): self.agent_ext.handle_notification_port_assocs( None, objects.BGPVPNPortAssociation.obj_name(), [port_assoc], event_type) def test_net_assoc_no_plugged_ports(self): net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L2, **base.BGPVPN_L2_RT10) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.assertEqual(0, self.mocked_bagpipe_agent.do_port_plug.call_count, 
"Do port plug mustn't be called") def test_router_assoc_no_plugged_ports(self): router_assoc = self._fake_router_assoc(base.ROUTER1, bgpvpn.BGPVPN_L3, [base.NETWORK1], **base.BGPVPN_L3_RT100) self._router_assoc_notif(router_assoc, rpc_events.UPDATED) self.assertEqual(0, self.mocked_bagpipe_agent.do_port_plug.call_count, "Do port plug mustn't be called") def test_net_assoc_already_plugged_ports(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 0) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) # Verify build callback attachments def check_build_cb(*args): for port in [base.PORT10, base.PORT11]: local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], port['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=port['ip_address'], mac_address=port['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(port['id']) ) # we need to check what build_bgpvpn_attach_info returns, at the # precise time when do_port_plug is called self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc, rpc_events.UPDATED) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) return net_assoc def test_router_assoc_already_plugged_ports(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) 
self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 0) router_assoc = self._fake_router_assoc(base.ROUTER1, bgpvpn.BGPVPN_L3, [base.NETWORK1], **base.BGPVPN_L3_RT100) # Verify build callback attachments def check_build_cb(*args): for port in [base.PORT10, base.PORT11]: local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], port['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=port['ip_address'], mac_address=port['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(port['id']) ) # we need to check what build_bgpvpn_attach_info returns, at the # precise time when do_port_plug is called self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._router_assoc_notif(router_assoc, rpc_events.UPDATED) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) def test_net_assoc_update_then_remove(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 0) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 2) self.mocked_bagpipe_agent.do_port_plug.reset_mock() self._net_assoc_notif(net_assoc, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() self.assertEqual( self.mocked_bagpipe_agent.do_port_plug_refresh_many.call_count, 2) def test_net_assoc_update_rts_to_empty(self): 
self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 0) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.CREATED) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 1) self.mocked_bagpipe_agent.do_port_plug.reset_mock() net_assoc.bgpvpn.route_targets = [] self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_called() self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_not_called() def test_router_assoc_update_then_remove(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 0) router_assoc = self._fake_router_assoc(base.ROUTER1, bgpvpn.BGPVPN_L3, [base.NETWORK1], **base.BGPVPN_L3_RT100) self._router_assoc_notif(router_assoc, rpc_events.UPDATED) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 2) self.mocked_bagpipe_agent.do_port_plug.reset_mock() self._router_assoc_notif(router_assoc, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() self.assertEqual( self.mocked_bagpipe_agent.do_port_plug_refresh_many.call_count, 2) def test_net_assoc_before_delete_port(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.agent_ext.delete_port(None, self._port_data(base.PORT10, delete=True)) local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 
'local_port': local_port['local_port'] } } self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info])] ) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 0) self.assertEqual(0, len(self.agent_ext.networks_info), "Registered attachments list must be empty: %s" % self.agent_ext.networks_info) def test_two_net_assocs_same_bgpvpn_type(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) net_assoc_2 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT200) def check_build_cb(*args): rts_1_2 = {k: rts + base.BGPVPN_L3_RT200[k] for k, rts in base.BGPVPN_L3_RT100.items()} # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(rts_1_2) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc_2, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) def test_delete_net_assoc_remaining_plugged_ports(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, 
**base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) self.mocked_bagpipe_agent.reset_mock() # prepare expected information for DELETE local_port10 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info10 = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: dict( ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], local_port=local_port10['local_port'] ) } local_port11 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT11['id'], detach=True) detach_info11 = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: dict( ip_address=base.PORT11['ip_address'], mac_address=base.PORT11['mac_address'], local_port=local_port11['local_port'] ) } def check_b_cb(*args): self.assertDictEqual( {}, self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) # we need to check that build_bgpvpn_attach_info contains the expected # content precisely at the time when do_port_plug_refresh is called self.mocked_bagpipe_agent.do_port_plug_refresh_many.side_effect = ( check_b_cb) # Delete the network associations self._net_assoc_notif(net_assoc, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info10]), mock.call(base.PORT11['id'], [detach_info11])], any_order=True ) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) def test_two_assocs_one_deleted_then_the_second_same_type(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) net_assoc = 
self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) net_assoc_2 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT200) self._net_assoc_notif(net_assoc_2, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) # delete first network association self.mocked_bagpipe_agent.reset_mock() def check_build_cb(*args): # Verify build callback attachments for port in [base.PORT10, base.PORT10]: local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], port['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=port['ip_address'], mac_address=port['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT200) )] ), self.agent_ext.build_bgpvpn_attach_info(port['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_not_called() # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) # delete the second network association self.mocked_bagpipe_agent.reset_mock() def check_build_cb_empty(*args): # Verify build callback attachments for port in [base.PORT10, base.PORT10]: self.assertEqual( {}, self.agent_ext.build_bgpvpn_attach_info(port['id']) ) self.mocked_bagpipe_agent.do_port_plug_refresh_many.side_effect = ( check_build_cb_empty ) 
self._net_assoc_notif(net_assoc_2, rpc_events.DELETED) local_port_1 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info_1 = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_1['local_port'] } } local_port_2 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT11['id'], detach=True) detach_info_2 = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': base.PORT11['ip_address'], 'mac_address': base.PORT11['mac_address'], 'local_port': local_port_2['local_port'] } } self.mocked_bagpipe_agent.do_port_plug.assert_not_called() self.assertEqual( 2, self.mocked_bagpipe_agent.do_port_plug_refresh_many.call_count) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info_1]), mock.call(base.PORT11['id'], [detach_info_2])], any_order=True, ) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) def test_two_assocs_one_deleted_then_the_second_different_types(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L2, **base.BGPVPN_L2_RT10) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) net_assoc_2 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT200) self._net_assoc_notif(net_assoc_2, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT11['id'])], any_order=True ) # delete first network association 
self.mocked_bagpipe_agent.reset_mock() def check_b_cb(*args): # Verify build callback attachments for port in [base.PORT10, base.PORT10]: local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], port['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=port['ip_address'], mac_address=port['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT200) )] ), self.agent_ext.build_bgpvpn_attach_info(port['id']) ) self.mocked_bagpipe_agent.do_port_plug_refresh_many.side_effect = ( check_b_cb) self._net_assoc_notif(net_assoc, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) # check that the bgpvpn type of first assoc was removed local_port_1 = self._get_expected_local_port( bbgp_const.EVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info_1 = { 'network_id': base.NETWORK1['id'], bbgp_const.EVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_1['local_port'] } } local_port_2 = self._get_expected_local_port( bbgp_const.EVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT11['id'], detach=True) detach_info_2 = { 'network_id': base.NETWORK1['id'], bbgp_const.EVPN: { 'ip_address': base.PORT11['ip_address'], 'mac_address': base.PORT11['mac_address'], 'local_port': local_port_2['local_port'] } } self.mocked_bagpipe_agent.do_port_plug.assert_not_called() self.assertEqual( 2, self.mocked_bagpipe_agent.do_port_plug_refresh_many.call_count) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info_1]), mock.call(base.PORT11['id'], [detach_info_2])], any_order=True ) 
# delete the second network association self.mocked_bagpipe_agent.reset_mock() def check_build_cb_empty(*args): # Verify build callback attachments for port in [base.PORT10, base.PORT10]: self.assertEqual( {}, self.agent_ext.build_bgpvpn_attach_info(port['id']) ) self.mocked_bagpipe_agent.do_port_plug_refresh_many.side_effect = ( check_build_cb_empty ) self._net_assoc_notif(net_assoc_2, rpc_events.DELETED) local_port_2 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT11['id'], detach=True) detach_info_2 = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': base.PORT11['ip_address'], 'mac_address': base.PORT11['mac_address'], 'local_port': local_port_2['local_port'] } } self.mocked_bagpipe_agent.do_port_plug.assert_not_called() self.assertEqual( 2, self.mocked_bagpipe_agent.do_port_plug_refresh_many.call_count) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT11['id'], [detach_info_2])] ) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 2) def test_delete_net_assoc_no_plugged_ports(self): net_assoc_2 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT200) self._net_assoc_notif(net_assoc_2, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_not_called() def test_net_assoc_with_plugged_ports(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT11)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.reset_mock() self.agent_ext.delete_port(None, self._port_data(base.PORT10, delete=True)) self.agent_ext.delete_port(None, self._port_data(base.PORT11, delete=True)) self.assertEqual( 2, 
self.mocked_bagpipe_agent.do_port_plug_refresh_many.call_count) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() self.assertEqual(0, len(self.agent_ext.networks_info)) self.mocked_bagpipe_agent.reset_mock() self._net_assoc_notif(net_assoc, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_not_called() self.mocked_bagpipe_agent.do_port_plug.assert_not_called() def test_net_assoc_single_port_l3_bgpvpn(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) def test_net_assoc_single_port_l2_bgpvpn(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L2, **base.BGPVPN_L2_RT10) def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.EVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], evpn=[dict( description=mock.ANY, instance_description=mock.ANY, 
ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], vni=base.NETWORK1["segmentation_id"], **dict(list(local_port.items()) + list(self._expand_rts( base.BGPVPN_L2_RT10).items())) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) def test_net_assoc_single_port_multiple_bgpvpns(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc_1 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) net_assoc_2 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT200) self._net_assoc_notif(net_assoc_1, rpc_events.UPDATED) def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) rts_1_2 = {k: rts + base.BGPVPN_L3_RT200[k] for k, rts in base.BGPVPN_L3_RT100.items()} self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(rts_1_2) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc_2, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) def test_net_assoc_multiple_ports_different_bgpvpns(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.agent_ext.handle_port(None, self._port_data(base.PORT20)) 
net_assoc_1 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) net_assoc_2 = self._fake_net_assoc(base.NETWORK2, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT200) self._net_assoc_notif(net_assoc_1, rpc_events.UPDATED) def check_build_cb(*args): # Verify build callback attachments for port, network, rts in [(base.PORT10, base.NETWORK1, base.BGPVPN_L3_RT100), (base.PORT20, base.NETWORK2, base.BGPVPN_L3_RT200)]: local_port = self._get_expected_local_port( bbgp_const.IPVPN, network['id'], network['segmentation_id'], port['id']) self.assertDictEqual( dict( network_id=network['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=port['ip_address'], mac_address=port['mac_address'], gateway_ip=network['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(rts) )] ), self.agent_ext.build_bgpvpn_attach_info(port['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc_2, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id']), mock.call(base.PORT20['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) self._check_network_info(base.NETWORK2['id'], 1) def test_delete_net_assoc_multiple_bgpvpns_different_type(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc_1 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) net_assoc_2 = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L2, **base.BGPVPN_L2_RT10) self._net_assoc_notif(net_assoc_1, rpc_events.UPDATED) self._net_assoc_notif(net_assoc_2, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self.mocked_bagpipe_agent.reset_mock() self.agent_ext.delete_port(None, self._port_data(base.PORT10, delete=True)) local_port_l3 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], 
detach=True) local_port_l2 = self._get_expected_local_port( bbgp_const.EVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info = { 'network_id': base.NETWORK1['id'], bbgp_const.EVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l2['local_port'] }, bbgp_const.IPVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port'] } } self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info])] ) # Verify attachments list consistency self._check_network_info(base.NETWORK1['id'], 0) def test_net_assoc_before_port_up(self): net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) self.mocked_rpc_pull.side_effect = [[net_assoc], [], []] def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) def test_router_assoc_before_port_up(self): router_assoc = self._fake_router_assoc(base.ROUTER1, bgpvpn.BGPVPN_L3, [base.NETWORK1], **base.BGPVPN_L3_RT100) self.mocked_rpc_pull.side_effect = [[], [router_assoc], []] def 
check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) def test_format_bgpvpn_network_route_targets(self): n = base.NETWORK1 assocs = [ self._fake_net_assoc(n, bgpvpn.BGPVPN_L3, route_targets=['12345:1', '12345:2', '12345:3'], import_targets=['12345:2', '12345:5'], export_targets=['12345:3', '12345:8'] ), self._fake_net_assoc(n, bgpvpn.BGPVPN_L3, route_targets=['12345:6', '12345:1'], import_targets=['12345:2'], export_targets=[] ) ] result = bagpipe_agt_ext.format_associations_route_targets(assocs) expected = { 'import_rt': ['12345:1', '12345:2', '12345:3', '12345:5', '12345:6'], 'export_rt': ['12345:1', '12345:2', '12345:3', '12345:8', '12345:6'] } self.assertCountEqual(result['import_rt'], expected['import_rt']) self.assertCountEqual(result['export_rt'], expected['export_rt']) def test_port_association_before_port_up(self): port_assoc = self._fake_port_assoc( base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, route_prefixes=[("40.0.0.0/24", None), ("60.0.0.0/24", 66)], route_targets=base.BGPVPN_L3_RT100['route_targets'], local_pref=44 ) self.mocked_rpc_pull.side_effect = [[], [], [port_assoc]] instance_id_base = 'ipvpn_portassoc_%s_prefix_' % port_assoc.id def check_build_cb(*args): # 
Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[ dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_pref=44, local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) ), dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=(instance_id_base + '40_0_0_0_24'), direction='to-port', ip_address='40.0.0.0/24', advertise_subnet=True, mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_pref=44, local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) ), dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=(instance_id_base + '60_0_0_0_24'), direction='to-port', ip_address='60.0.0.0/24', advertise_subnet=True, mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_pref=66, local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) ) ] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) def test_port_assoc_after_port_up(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) port_assoc = self._fake_port_assoc(base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, route_prefixes=[("40.0.0.0/24", 77)], **base.BGPVPN_L3_RT100) instance_id_base = 'ipvpn_portassoc_%s_prefix_' % port_assoc.id def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, 
base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[ dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) ), dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=(instance_id_base + '40_0_0_0_24'), direction='to-port', ip_address='40.0.0.0/24', advertise_subnet=True, local_pref=77, mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) ) ] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._port_assoc_notif(port_assoc, rpc_events.CREATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) # delete port association def check_build_cb2(*args): self.assertDictEqual( {}, self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.reset_mock() self.mocked_bagpipe_agent.do_port_plug_refresh_many.side_effect = ( check_build_cb2) self._port_assoc_notif(port_assoc, rpc_events.DELETED) # check that a detach is produced for the removed prefix route local_port_l3 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], UnorderedList([ {'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port']} }, {'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': "40.0.0.0/24", 'vpn_instance_id': 
(instance_id_base + '40_0_0_0_24'), 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port']} } ])) ] ) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() def test_port_assoc_update_removes_a_prefix_route(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) port_assoc = self._fake_port_assoc(base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, route_prefixes=[("40.0.0.0/24", None)], **base.BGPVPN_L3_RT100) self._port_assoc_notif(port_assoc, rpc_events.CREATED) # now remove the prefix route new_port_assoc = self._fake_port_assoc(base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, id=port_assoc.id, **base.BGPVPN_L3_RT100) instance_id_base = 'ipvpn_portassoc_%s_prefix_' % port_assoc.id def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=(instance_id_base + '40_0_0_0_24'), direction='to-port', ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.reset_mock() self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self.mocked_bagpipe_agent.do_port_plug_refresh_many.reset_mock() self._port_assoc_notif(new_port_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() # check that a detach is produced for the removed prefix route local_port_l3 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 
'vpn_instance_id': (instance_id_base + '40_0_0_0_24'), 'ip_address': "40.0.0.0/24", 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port'] } } self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info])] ) def test_port_with_prefix_route_then_delete_port(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) port_assoc = self._fake_port_assoc( base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, route_prefixes=[("40.0.0.0/24", None), ("60.0.0.0/16", None)], **base.BGPVPN_L3_RT100) instance_id_base = 'ipvpn_portassoc_%s_prefix_' % port_assoc.id self._port_assoc_notif(port_assoc, rpc_events.CREATED) # check that detach are produced for the deleted port local_port_l3 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) calls = [ mock.call(base.PORT10['id'], UnorderedList([ {'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port']} }, {'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': "40.0.0.0/24", 'vpn_instance_id': (instance_id_base + '40_0_0_0_24'), 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port']} }, {'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': "60.0.0.0/16", 'vpn_instance_id': (instance_id_base + '60_0_0_0_16'), 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port']} } ])) ] self.agent_ext.delete_port(None, self._port_data(base.PORT10)) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( calls, any_order=True, ) def test_port_assoc_adv_fixed_ips_false(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) port_assoc = self._fake_port_assoc(base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, route_prefixes=[("40.0.0.0/24", None)], 
advertise_fixed_ips=False, **base.BGPVPN_L3_RT100) instance_id_base = 'ipvpn_portassoc_%s_prefix_' % port_assoc.id def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=(instance_id_base + '40_0_0_0_24'), direction='to-port', ip_address="40.0.0.0/24", advertise_subnet=True, local_pref=None, mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb # trigger port assoc, check fixed IP not advertised (check_build_db) self._port_assoc_notif(port_assoc, rpc_events.CREATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) # tear down the port self.agent_ext.delete_port(None, self._port_data(base.PORT10, delete=True)) # check that a detach is produced for the prefix route only local_port_l3 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'vpn_instance_id': (instance_id_base + '40_0_0_0_24'), 'ip_address': "40.0.0.0/24", 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port'] } } self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info])] ) def test_port_assoc_bgpvpn_routes(self): from_bgpvpns = [ (self._fake_bgpvpn(bgpvpn.BGPVPN_L3, import_targets=['64512:96']), 44), (self._fake_bgpvpn(bgpvpn.BGPVPN_L3, import_targets=['64512:97']), 55) ] port_assoc = self._fake_port_assoc( 
base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, bgpvpn_routes=from_bgpvpns, export_targets=['64512:98'], import_targets=['64512:99'] ) self.mocked_rpc_pull.side_effect = [[], [], [port_assoc]] def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[ dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], export_rt={'64512:98'}, import_rt={'64512:99'}, ), dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=StringContains( "ipvpn_portassoc", port_assoc.id, from_bgpvpns[0][0].id), direction='to-port', ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], readvertise={ 'from_rt': {'64512:96'}, 'to_rt': {'64512:98'} }, import_rt={'64512:99'}, export_rt={'64512:98'}, local_pref=44, ), dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=StringContains( "ipvpn_portassoc", port_assoc.id, from_bgpvpns[1][0].id), direction='to-port', ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], readvertise={ 'from_rt': {'64512:97'}, 'to_rt': {'64512:98'} }, import_rt={'64512:99'}, export_rt={'64512:98'}, local_pref=55, ), ] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self.agent_ext.handle_port(None, self._port_data(base.PORT10)) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) # remove one of the BGPVPN 
leaking routes # (keep only the second one) from_bgpvpns_bis = [ (self._fake_bgpvpn(bgpvpn.BGPVPN_L3, id=from_bgpvpns[1][0].id, import_targets=['64512:97']), 55) ] new_port_assoc = self._fake_port_assoc(base.PORT10, bgpvpn.BGPVPN_L3, base.NETWORK1, id=port_assoc.id, bgpvpn_routes=from_bgpvpns_bis, export_targets=['64512:98'], import_targets=['64512:99'] ) def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[ dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], export_rt={'64512:98'}, import_rt={'64512:99'}, ), dict( description=mock.ANY, instance_description=mock.ANY, vpn_instance_id=StringContains( "ipvpn_portassoc", port_assoc.id, from_bgpvpns[1][0].id), direction='to-port', ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], local_port=local_port['local_port'], readvertise={ 'from_rt': {'64512:97'}, 'to_rt': {'64512:98'} }, import_rt={'64512:99'}, export_rt={'64512:98'}, local_pref=55, ), ] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.reset_mock() self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self.mocked_bagpipe_agent.do_port_plug_refresh_many.reset_mock() self._port_assoc_notif(new_port_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() # check that a detach is produced for the removed prefix route local_port_l3 = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id'], detach=True) detach_info = { 'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 
'vpn_instance_id': StringContains("ipvpn_portassoc", port_assoc.id, from_bgpvpns[0][0].id), 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port'] } } self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call(base.PORT10['id'], [detach_info])] ) # now remove the port association def check_build_cb(*args): # Verify build callback attachments self.assertDictEqual( {}, self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.reset_mock() self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self.mocked_bagpipe_agent.do_port_plug_refresh_many.reset_mock() self._port_assoc_notif(new_port_assoc, rpc_events.DELETED) self.mocked_bagpipe_agent.do_port_plug.assert_not_called() expected_vpn_instance_id = ( 'ipvpn_portassoc_{}_bgpvpn_{}'.format(port_assoc.id, from_bgpvpns[1][0].id) ) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_has_calls( [mock.call( base.PORT10['id'], UnorderedList([ {'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port'] }}, {'network_id': base.NETWORK1['id'], bbgp_const.IPVPN: { 'vpn_instance_id': expected_vpn_instance_id, 'ip_address': base.PORT10['ip_address'], 'mac_address': base.PORT10['mac_address'], 'local_port': local_port_l3['local_port'] }}, ])) ] ) def test_net_assoc_l2_bgpvpn_vni(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L2, **base.BGPVPN_L2_RT10) net_assoc.bgpvpn.vni = 4242 def check_build_cb(*args): # Verify build callback attachments local_port = self._get_expected_local_port( bbgp_const.EVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) self.assertDictEqual( dict( network_id=base.NETWORK1['id'], evpn=[dict( description=mock.ANY, 
instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], vni=4242, **dict(list(local_port.items()) + list(self._expand_rts( base.BGPVPN_L2_RT10).items())) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc, rpc_events.UPDATED) self.mocked_bagpipe_agent.do_port_plug.assert_has_calls( [mock.call(base.PORT10['id'])] ) self._check_network_info(base.NETWORK1['id'], 1) def test_net_assoc_port_admin_state_down_up_down(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10, admin_state_up=False)) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 0) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) # Verify build callback attachments def check_build_cb(*args): self.assertDictEqual( {}, self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) # we need to check what build_bgpvpn_attach_info returns, at the # precise time when do_port_plug is called self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc, rpc_events.CREATED) # test transition to admin_state_up = True self.mocked_bagpipe_agent.do_port_plug.reset_mock() # Verify build callback attachments def check_build_cb_2(*args): self.assertNotEqual( 0, len(self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id'])) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb_2 self.agent_ext.handle_port(None, self._port_data(base.PORT10, admin_state_up=True)) # test transition to admin_state_up = False self.mocked_bagpipe_agent.do_port_plug.reset_mock() def check_build_cb_3(*args): self.assertDictEqual( {}, self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb_3 self.agent_ext.handle_port(None, self._port_data( 
base.PORT10, admin_state_up=False)) self.mocked_bagpipe_agent.do_port_plug_refresh_many.\ assert_called_once_with(base.PORT10['id'], mock.ANY) def test_net_assoc_port_admin_state_down_delete(self): self.agent_ext.handle_port(None, self._port_data(base.PORT10, admin_state_up=False)) self.assertEqual(self.mocked_bagpipe_agent.do_port_plug.call_count, 0) self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, **base.BGPVPN_L3_RT100) # test delete_port when port in admin_state_up = False self.agent_ext.delete_port(None, self._port_data(base.PORT10, delete=True)) self.mocked_bagpipe_agent.do_port_plug_refresh_many.assert_not_called() class TestOVSAgentExtension(base.BaseTestOVSAgentExtension, TestBgpvpnAgentExtensionMixin): agent_extension_class = bagpipe_agt_ext.BagpipeBgpvpnAgentExtension def setUp(self): base.BaseTestOVSAgentExtension.setUp(self) TestBgpvpnAgentExtensionMixin.setUp(self) # test what happened during initialize() self.tun_br.add_patch_port.assert_called_once() self.int_br.add_patch_port.assert_called_once() self.agent_ext.mpls_br.set_secure_mode.assert_called_once() self.assertEqual(self.agent_ext.mpls_br.add_patch_port.call_count, 2) self.tun_br.add_flow.assert_has_calls( [ mock.call(table=ovs_agt_constants.PATCH_LV_TO_TUN, priority=2, dl_src=bgpvpn_const.FALLBACK_SRC_MAC, dl_dst=mock.ANY, actions=mock.ANY), mock.call(table=ovs_agt_constants.PATCH_LV_TO_TUN, priority=2, dl_src=bgpvpn_const.FALLBACK_SRC_MAC, dl_dst=mock.ANY, actions=mock.ANY), mock.call(in_port=base.PATCH_TUN_TO_MPLS, actions="output:%d" % base.PATCH_TUN_TO_INT) ], any_order=True, ) self.int_br.add_flow.assert_called_once_with( table=ovs_agt_constants.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, priority=3, dl_src="00:00:5e:2a:10:00", actions="NORMAL", ) self.int_br.add_flow.reset_mock() self.tun_br.add_flow.reset_mock() # Test fallback and ARP gateway voodoo def test_fallback(self): GW_MAC = 'aa:bb:cc:dd:ee:ff' with mock.patch.object(self.agent_ext.int_br, 'get_vif_port_by_id', 
side_effect=[self.DUMMY_VIF10, self.DUMMY_VIF11]): super().test_net_assoc_already_plugged_ports() net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, gateway_mac=GW_MAC, **base.BGPVPN_L3_RT100) local_port = self._get_expected_local_port( bbgp_const.IPVPN, base.NETWORK1['id'], base.NETWORK1['segmentation_id'], base.PORT10['id']) def check_build_cb(*args): self.assertDictEqual( dict( network_id=base.NETWORK1['id'], ipvpn=[dict( description=mock.ANY, instance_description=mock.ANY, ip_address=base.PORT10['ip_address'], mac_address=base.PORT10['mac_address'], gateway_ip=base.NETWORK1['gateway_ip'], fallback={'dst_mac': GW_MAC, 'ovs_port_number': base.PATCH_MPLS_TO_INT, 'src_mac': '00:00:5e:2a:10:00'}, local_port=local_port['local_port'], **self._expand_rts(base.BGPVPN_L3_RT100) )] ), self.agent_ext.build_bgpvpn_attach_info(base.PORT10['id']) ) self.mocked_bagpipe_agent.do_port_plug.side_effect = check_build_cb self._net_assoc_notif(net_assoc, rpc_events.UPDATED) def test_gateway_redirection(self): GW_MAC = 'aa:bb:cc:dd:ee:ff' vlan = base.LOCAL_VLAN_MAP[base.NETWORK1['id']][0] with mock.patch.object(self.agent_ext.int_br, 'get_vif_port_by_id', side_effect=[self.DUMMY_VIF10, self.DUMMY_VIF11]), \ mock.patch.object(self.agent_ext.int_br, 'add_flow') as int_add_flow, \ mock.patch.object(self.agent_ext.tun_br, 'add_flow') as tun_add_flow, \ mock.patch.object(self.agent_ext.tun_br, 'delete_flows') as tun_delete_flows, \ mock.patch.object(self.agent_ext.int_br, 'delete_flows') as int_delete_flows: net_assoc_0 = super().test_net_assoc_already_plugged_ports() int_add_flow.assert_not_called() tun_add_flow.assert_has_calls([ mock.call( table=ovs_agt_constants.ARP_RESPONDER, priority=2, dl_vlan=vlan, proto='arp', arp_op=0x01, arp_tpa=base.NETWORK1['gateway_ip'], actions=StringContains("5e004364"), ), mock.call( in_port=base.PATCH_TUN_TO_INT, dl_dst="00:00:5e:00:43:64", actions="output:%s" % base.PATCH_TUN_TO_MPLS, dl_vlan=vlan, priority=mock.ANY, table=mock.ANY )], 
any_order=True, ) int_add_flow.reset_mock() tun_add_flow.reset_mock() tun_delete_flows.reset_mock() int_delete_flows.reset_mock() net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, id=net_assoc_0.id, gateway_mac=GW_MAC, **base.BGPVPN_L3_RT100) vlan = base.LOCAL_VLAN_MAP[base.NETWORK1['id']][0] self.mocked_bagpipe_agent.do_port_plug.side_effect = None self._net_assoc_notif(net_assoc, rpc_events.UPDATED) # we now have a router will a real GW MAC tun_delete_flows.assert_called_with( strict=True, table=ovs_agt_constants.ARP_RESPONDER, priority=2, dl_vlan=vlan, proto='arp', arp_op=0x01, arp_tpa=base.NETWORK1['gateway_ip']) # check that traffic to gw is sent to br-mpls tun_add_flow.assert_has_calls( [ mock.call(in_port=base.PATCH_TUN_TO_INT, dl_dst=GW_MAC, actions="output:%s" % base.PATCH_TUN_TO_MPLS, dl_vlan=vlan, priority=mock.ANY, table=mock.ANY), mock.call(in_port=base.PATCH_TUN_TO_INT, dl_dst="00:00:5e:00:43:64", actions="output:%s" % base.PATCH_TUN_TO_MPLS, dl_vlan=vlan, priority=mock.ANY, table=mock.ANY) ], any_order=True ) int_add_flow.assert_called_once_with( table=ovs_agt_constants.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, priority=2, reg6=vlan, dl_dst=GW_MAC, actions="push_vlan:0x8100,mod_vlan_vid:%d,output:%s" % ( vlan, base.PATCH_INT_TO_TUN) ) int_add_flow.reset_mock() tun_add_flow.reset_mock() tun_delete_flows.reset_mock() int_delete_flows.reset_mock() # stop the redirection when association is cleared self._net_assoc_notif(net_assoc, rpc_events.DELETED) # ARP responder deletion tun_delete_flows.assert_has_calls( [ mock.call( strict=True, table=ovs_agt_constants.PATCH_LV_TO_TUN, priority=1, in_port=base.PATCH_TUN_TO_INT, dl_vlan=vlan ), mock.call( strict=True, table=ovs_agt_constants.ARP_RESPONDER, priority=2, dl_vlan=vlan, proto='arp', arp_op=0x01, arp_tpa=base.NETWORK1['gateway_ip'],) ], any_order=True ) int_delete_flows.assert_called_once_with( table=ovs_agt_constants.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE, reg6=vlan) def 
test_gateway_redirection_ovs_restart(self): GW_MAC = 'aa:bb:cc:dd:ee:ff' with mock.patch.object(self.agent_ext.int_br, 'get_vif_port_by_id', side_effect=[self.DUMMY_VIF10, self.DUMMY_VIF11]), \ mock.patch.object(self.agent_ext.int_br, 'add_flow') as add_flow: self.agent_ext.handle_port(None, self._port_data(base.PORT10)) net_assoc = self._fake_net_assoc(base.NETWORK1, bgpvpn.BGPVPN_L3, gateway_mac=GW_MAC, **base.BGPVPN_L3_RT100) self._net_assoc_notif(net_assoc, rpc_events.UPDATED) add_flow.assert_called() add_flow.reset_mock() with mock.patch.object(self.agent_ext, '_setup_ovs_bridge') as \ mock_setup_mpls_br: self.agent_ext.ovs_restarted(None, None, None) mock_setup_mpls_br.assert_called() add_flow.assert_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9423053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/common/0000775000175000017500000000000000000000000026061 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/common/__init__.py0000664000175000017500000000000000000000000030160 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/common/constants.py0000664000175000017500000000254300000000000030453 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils EVPN_RT1 = {'import_rt': ['EVPN:1'], 'export_rt': ['EVPN:1']} EVPN_RT2 = {'import_rt': ['EVPN:2'], 'export_rt': ['EVPN:2']} IPVPN_RT100 = {'import_rt': ['IPVPN:100'], 'export_rt': ['IPVPN:100']} IPVPN_RT200 = {'import_rt': ['IPVPN:200'], 'export_rt': ['IPVPN:200']} NETWORK_INFO1 = {'network_id': uuidutils.generate_uuid(), 'gateway_ip': '10.0.0.1' } PORT_INFO1 = {'mac_address': '00:00:de:ad:be:ef', 'ip_address': '10.0.0.2', 'local_port': {'linuxif': 'port1'}, } UPDATED_LOCAL_PORT1 = {'linuxif': 'updated_port1'} GW_MAC_PORT1 = '00:00:ca:fe:ba:be' STATIC_ROUTE1 = '1.1.1.1/24' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/agent/test_bagpipe_bgp_agent.py0000664000175000017500000004472700000000000031635 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testtools from unittest import mock from networking_bagpipe.agent import bagpipe_bgp_agent as agent from networking_bagpipe.bagpipe_bgp import constants as bbgp_const from networking_bagpipe.tests.unit.agent.common \ import constants as const from neutron.tests import base def rts_as_set(rts): return {k: set(rt_list) for k, rt_list in rts.items()} def _attachments_gen(vpn_type, network, port, rts): attachment = { 'network_id': network['network_id'], vpn_type: [dict( gateway_ip=network['gateway_ip'], **port )] } attachment[vpn_type][0].update(**rts_as_set(rts)) return attachment class TestBaGPipeBGPAgentSingleService(base.BaseTestCase): def setUp(self): super().setUp() self.agent = agent.BaGPipeBGPAgent('ovs agent') self.agent._send_attach_local_port = mock.Mock() self.agent._send_detach_local_port = mock.Mock() self.service1 = mock.Mock(name='SERVICE1') self.agent.register_build_callback(self.service1.name, self.service1._build_port_info) def _test_port_plug(self, vpn_type, rts): attachments = _attachments_gen(vpn_type, const.NETWORK_INFO1, const.PORT_INFO1, rts) self.service1._build_port_info.return_value = attachments self.agent.do_port_plug(None) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( vpn_type, const.NETWORK_INFO1['network_id']), vpn_type=vpn_type, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(rts) )) ]) def test_evpn_port_plug(self): self._test_port_plug(bbgp_const.EVPN, const.EVPN_RT1) def test_ipvpn_port_plug(self): self._test_port_plug(bbgp_const.IPVPN, const.IPVPN_RT100) def test_evpn2ipvpn_port_plug(self): attachments = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, const.EVPN_RT1) attachments.update(_attachments_gen('ipvpn', const.NETWORK_INFO1, const.PORT_INFO1, const.IPVPN_RT100)) self.service1._build_port_info.return_value = 
attachments self.agent.do_port_plug(None) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.EVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.EVPN_RT1) )), mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.IPVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.IPVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.IPVPN_RT100) )) ]) def test_epvpn_port_plug_updated_local_port(self): attachments = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, const.EVPN_RT1) attachments['evpn'][0]['local_port'] = const.UPDATED_LOCAL_PORT1 self.service1._build_port_info.return_value = attachments self.agent.do_port_plug(None) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.EVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.UPDATED_LOCAL_PORT1, **rts_as_set(const.EVPN_RT1) )) ]) def _test_port_plug_refresh_without_detach(self, vpn_type, rts): attachments = _attachments_gen(vpn_type, const.NETWORK_INFO1, const.PORT_INFO1, rts) detach_info1 = { 'network_id': const.NETWORK_INFO1['network_id'], vpn_type: dict(**const.PORT_INFO1) } self.service1._build_port_info.return_value = attachments self.agent.do_port_plug_refresh(None, detach_info1) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( vpn_type, const.NETWORK_INFO1['network_id']), vpn_type=vpn_type, 
ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(rts) )) ]) self.agent._send_detach_local_port.assert_not_called() @testtools.skip("skip until bug 1744344 is resolved") def test_evpn_port_plug_refresh_without_detach(self): self._test_port_plug_refresh_without_detach(bbgp_const.EVPN, const.EVPN_RT1) @testtools.skip("skip until bug 1744344 is resolved") def test_ipvpn_port_plug_refresh_without_detach(self): self._test_port_plug_refresh_without_detach(bbgp_const.IPVPN, const.IPVPN_RT100) def _test_port_plug_refresh_with_detach(self, vpn_type, rts): attachments = _attachments_gen(vpn_type, const.NETWORK_INFO1, const.PORT_INFO1, rts) detach_info1 = { 'network_id': const.NETWORK_INFO1['network_id'], vpn_type: dict(**const.PORT_INFO1) } self.service1._build_port_info.return_value = attachments self.agent.do_port_plug_refresh(None, detach_info1) self.agent._send_attach_local_port.assert_not_called() self.agent._send_detach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( vpn_type, const.NETWORK_INFO1['network_id']), vpn_type=vpn_type, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], local_port=const.PORT_INFO1['local_port'] )) ]) @testtools.skip("skip until bug 1744344 is resolved") def test_evpn_port_plug_refresh_with_detach(self): self._test_port_plug_refresh_with_detach(bbgp_const.EVPN, const.EVPN_RT1) @testtools.skip("skip until bug 1744344 is resolved") def test_ipvpn_port_plug_refresh_with_detach(self): self._test_port_plug_refresh_with_detach(bbgp_const.IPVPN, const.IPVPN_RT100) def test_evpn2ipvpn_port_plug_refresh_with_detach(self): attachments = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, const.EVPN_RT1) detach_info1 = { 'network_id': const.NETWORK_INFO1['network_id'], 'ipvpn': dict(**const.PORT_INFO1) } 
self.service1._build_port_info.return_value = attachments self.agent.do_port_plug_refresh(None, detach_info1) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.EVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.EVPN_RT1) )) ]) self.agent._send_detach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.IPVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.IPVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], local_port={ bbgp_const.EVPN: { 'id': '{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']) } } )) ]) class TestBaGPipeBGPAgentMultipleServices(base.BaseTestCase): def setUp(self): super().setUp() self.agent = agent.BaGPipeBGPAgent('ovs agent') self.agent._send_attach_local_port = mock.Mock() self.agent._send_detach_local_port = mock.Mock() self.service1 = mock.Mock(name='SERVICE1') self.agent.register_build_callback(self.service1.name, self.service1._build_port_info) self.service2 = mock.Mock(name='SERVICE2') self.agent.register_build_callback(self.service2.name, self.service2._build_port_info) def _merge_rts(self, rt1, rt2): return {k: set(rt1[k] + rt2[k]) for k in rt1} def test_evpns_port_plug(self): attachments_1 = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, const.EVPN_RT1) self.service1._build_port_info.return_value = attachments_1 attachments_2 = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, const.EVPN_RT2) self.service2._build_port_info.return_value = attachments_2 self.agent.do_port_plug(None) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']), 
vpn_type=bbgp_const.EVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **self._merge_rts(const.EVPN_RT1, const.EVPN_RT2) )) ]) def test_ipvpns_port_plug(self): attachments_1 = _attachments_gen('ipvpn', const.NETWORK_INFO1, const.PORT_INFO1, const.IPVPN_RT100) self.service1._build_port_info.return_value = attachments_1 attachments_2 = _attachments_gen('ipvpn', const.NETWORK_INFO1, const.PORT_INFO1, const.IPVPN_RT200) self.service2._build_port_info.return_value = attachments_2 self.agent.do_port_plug(None) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.IPVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.IPVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **self._merge_rts(const.IPVPN_RT100, const.IPVPN_RT200) )) ]) def test_evpn2ipvpn_port_plug(self): attachments_1 = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, const.EVPN_RT1) self.service1._build_port_info.return_value = attachments_1 attachments_2 = _attachments_gen('ipvpn', const.NETWORK_INFO1, const.PORT_INFO1, const.IPVPN_RT100) self.service2._build_port_info.return_value = attachments_2 self.agent.do_port_plug(None) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.EVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.EVPN_RT1) )), mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.IPVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.IPVPN, 
ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.IPVPN_RT100) )) ]) def test_evpns_port_plug_refresh_without_detach(self): attachments_1 = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, const.EVPN_RT1) self.service1._build_port_info.return_value = attachments_1 self.service2._build_port_info.return_value = {} detach_info1 = { 'network_id': const.NETWORK_INFO1['network_id'], bbgp_const.EVPN: dict(**const.PORT_INFO1) } self.agent.do_port_plug_refresh(None, detach_info1) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.EVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.EVPN_RT1) )) ]) def test_ipvpns_port_plug_refresh_without_detach(self): attachments_1 = _attachments_gen('ipvpn', const.NETWORK_INFO1, const.PORT_INFO1, const.IPVPN_RT100) self.service1._build_port_info.return_value = attachments_1 self.service2._build_port_info.return_value = {} detach_info1 = { 'network_id': const.NETWORK_INFO1['network_id'], bbgp_const.IPVPN: dict(**const.PORT_INFO1) } self.agent.do_port_plug_refresh(None, detach_info1) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.IPVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.IPVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.IPVPN_RT100) )) ]) def test_evpn2ipvpn_port_plug_refresh_with_detach(self): attachments_1 = _attachments_gen('evpn', const.NETWORK_INFO1, const.PORT_INFO1, 
const.EVPN_RT1) self.service1._build_port_info.return_value = attachments_1 self.service2._build_port_info.return_value = {} detach_info1 = { 'network_id': const.NETWORK_INFO1['network_id'], bbgp_const.IPVPN: dict(**const.PORT_INFO1) } self.agent.do_port_plug_refresh(None, detach_info1) self.agent._send_attach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.EVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.EVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], gateway_ip=const.NETWORK_INFO1['gateway_ip'], local_port=const.PORT_INFO1['local_port'], **rts_as_set(const.EVPN_RT1) )) ]) self.agent._send_detach_local_port.assert_has_calls([ mock.call(dict( vpn_instance_id='{}_{}'.format( bbgp_const.IPVPN, const.NETWORK_INFO1['network_id']), vpn_type=bbgp_const.IPVPN, ip_address=const.PORT_INFO1['ip_address'], mac_address=const.PORT_INFO1['mac_address'], local_port={ bbgp_const.EVPN: { 'id': '{}_{}'.format(bbgp_const.EVPN, const.NETWORK_INFO1['network_id']) } } )) ]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9463053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/0000775000175000017500000000000000000000000025732 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/__init__.py0000664000175000017500000000000000000000000030031 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/base.py0000664000175000017500000001234000000000000027216 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance 
with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging as python_logging import time from oslo_config import fixture as config_fixture from oslo_log import log as logging import testtools from networking_bagpipe.bagpipe_bgp.common import config from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import exabgp_peer_worker exabgp_peer_worker.setup_exabgp_env() WAIT_TIME = 0.05 RT1 = exa.RouteTarget(64512, 10) RT2 = exa.RouteTarget(64512, 20) RT3 = exa.RouteTarget(64512, 30) RT4 = exa.RouteTarget(64512, 40) RT5 = exa.RouteTarget(64512, 50) def _rt_to_string(rt): assert isinstance(rt, exa.RouteTarget) return "{}:{}".format(rt.asn, rt.number) class TestNLRI: def __init__(self, desc): self.desc = desc self.action = None self.afi = exa.AFI(exa.AFI.ipv4) self.safi = exa.SAFI(exa.SAFI.mpls_vpn) def __repr__(self): return self.desc def __eq__(self, other): return self.desc == other.desc def __hash__(self): return hash(self.desc) NLRI1 = TestNLRI("NLRI1") NLRI2 = TestNLRI("NLRI2") NH1 = "1.1.1.1" NH2 = "2.2.2.2" NH3 = "3.3.3.3" NBR = "NBR" BRR = "BRR" python_logging.basicConfig(level=logging.DEBUG, filename="bagpipe-bgp-testsuite.log", format="%(asctime)s %(threadName)-30s %(name)-30s " "%(levelname)-8s %(message)s") LOG = logging.getLogger() class TestCase(testtools.TestCase): def setUp(self): super().setUp() cfg_fixture = self.useFixture(config_fixture.Config()) cfg_fixture.register_opts(config.bgp_opts, "BGP") cfg_fixture.config(group='BGP', local_address='11.22.33.44', my_as=64513 ) class FakeNLRI: 
def __init__(self, nlri_desc, afi=exa.AFI.ipv4, safi=exa.SAFI.mpls_vpn): self.nlri = nlri_desc self.afi = afi self.safi = safi def __repr__(self): return "FakeNLRI %s (%d:%d)" % (self.nlri, self.afi, self.safi) class BaseTestBagPipeBGP: def set_event_target_worker(self, worker): self.event_target_worker = worker def _fake_nlri(self, fake_nlri_desc, **kwargs): return FakeNLRI(fake_nlri_desc, **kwargs) def _new_route_event(self, event_type, nlri, rts, source, nh, lp=0, replaced_route_entry=None, afi=exa.AFI(exa.AFI.ipv4), safi=exa.SAFI(exa.SAFI.mpls_vpn), **kwargs): attributes = exa.Attributes() attributes.add(exa.NextHop(nh)) attributes.add(exa.LocalPreference(lp)) if 'rtrecords' in kwargs: ecoms = exa.ExtendedCommunities() ecoms.communities += kwargs['rtrecords'] attributes.add(ecoms) route_event = engine.RouteEvent(event_type, engine.RouteEntry(nlri, rts, attributes, source), source) route_event.set_replaced_route(replaced_route_entry) LOG.info("*** Emitting event to %s: %s", self.event_target_worker, route_event) self.event_target_worker._on_event(route_event) return route_event def _new_flow_event(self, event_type, nlri, to_rts, attract_rts, source, afi=exa.AFI(exa.AFI.ipv4), safi=exa.SAFI(exa.SAFI.flow_vpn), **kwargs): attributes = exa.Attributes() ecommunities = exa.ExtendedCommunities() ecommunities.communities.append( exa.TrafficRedirect(exa.ASN(int(to_rts[0].asn)), int(to_rts[0].number)) ) attributes.add(ecommunities) flow_event = engine.RouteEvent(event_type, engine.RouteEntry(nlri, attract_rts, attributes, source), source) self.event_target_worker._on_event(flow_event) return flow_event def _revert_event(self, event): if event.type == engine.RouteEvent.ADVERTISE: type = engine.RouteEvent.WITHDRAW else: # WITHDRAW type = engine.RouteEvent.ADVERTISE route_event = engine.RouteEvent(type, event.route_entry, event.source) self.event_target_worker._on_event(route_event) def _wait(self): time.sleep(WAIT_TIME) def _append_call(self, obj): LOG.info("****** %s 
******", obj) self._calls.append(obj) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9463053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/evpn/0000775000175000017500000000000000000000000026702 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/evpn/__init__.py0000664000175000017500000000000000000000000031001 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/evpn/test_ovs.py0000664000175000017500000001662600000000000031135 0ustar00zuulzuul00000000000000# Copyright 2018 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import mock from networking_bagpipe.bagpipe_bgp.common import dataplane_utils from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as dp_drivers from networking_bagpipe.bagpipe_bgp.vpn.evpn import ovs from networking_bagpipe.tests.unit.bagpipe_bgp import base as t from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native import \ br_tun from neutron_lib.plugins.ml2 import ovs_constants as ovs_const LOCAL_IP = "1.2.3.4" MAC1 = "01:00:de:ad:be:ef" MAC2 = "01:00:fe:ed:f0:0d" class TestTunnelManager(t.TestCase): def setUp(self): super().setUp() self.bridge = mock.Mock(spec=br_tun.OVSTunnelBridge) self.manager = ovs.TunnelManager(self.bridge, LOCAL_IP) def test_get_tunnel(self): t1, _ = self.manager.get_object("2.2.2.2", "A") self.bridge.add_tunnel_port.assert_called_once_with(mock.ANY, "2.2.2.2", LOCAL_IP, mock.ANY) self.bridge.setup_tunnel_port.assert_called_once_with(mock.ANY, t1) self.bridge.add_tunnel_port.reset_mock() self.bridge.setup_tunnel_port.reset_mock() t2, _ = self.manager.get_object("2.2.2.2", "B") self.bridge.add_tunnel_port.assert_not_called() self.bridge.setup_tunnel_port.assert_not_called() self.assertEqual(t1, t2) self.bridge.add_tunnel_port.reset_mock() self.bridge.setup_tunnel_port.reset_mock() t3, _ = self.manager.get_object("3.3.3.3", "A") self.bridge.add_tunnel_port.assert_called_once() self.bridge.setup_tunnel_port.assert_called_once() self.assertEqual(t3, t1) self.assertTrue(len(self.manager.infos())) def test_free_object(self): t1, _ = self.manager.get_object("2.2.2.2", "A") self.manager.get_object("2.2.2.2", "B") t2, _ = self.manager.get_object("3.3.3.3", "A") self.bridge.add_tunnel_port.reset_mock() self.bridge.delete_port.reset_mock() self.bridge.setup_tunnel_port.reset_mock() self.manager.free_object("2.2.2.2", "A") self.bridge.delete_port.assert_not_called() t1bis = self.manager.find_object("2.2.2.2") self.assertEqual(t1bis, t1) 
self.bridge.add_tunnel_port.reset_mock() self.bridge.delete_port.reset_mock() self.bridge.setup_tunnel_port.reset_mock() self.manager.free_object("2.2.2.2", "B") self.bridge.delete_port.assert_called_once_with(t1) self.bridge.add_tunnel_port.reset_mock() self.bridge.delete_port.reset_mock() self.bridge.setup_tunnel_port.reset_mock() self.manager.free_object("3.3.3.3", "A") self.bridge.delete_port.assert_called_once_with(t2) class FakeBridgeMockSpec(dataplane_utils.OVSBridgeWithGroups, br_tun.OVSTunnelBridge, ovs_lib.OVSBridge): pass class FakeNLRI: def __init__(self, ip): self.ip = ip class TestOVSEVIDataplane(t.TestCase): def setUp(self): super().setUp() self.bridge = mock.Mock(spec=FakeBridgeMockSpec) self.tunnel_mgr = mock.Mock(spec=ovs.TunnelManager) self.tunnel_mgr.get_object.return_value = ("TUNNEL1", None) self.tunnel_mgr.find_object.return_value = "TUNNEL1" self.dp_driver = mock.Mock(spec=dp_drivers.DataplaneDriver) self.dp_driver.bridge = self.bridge self.dp_driver.tunnel_mgr = self.tunnel_mgr self.dp_driver.config = {} self.dp_driver.get_local_address.return_value = LOCAL_IP self.dataplane = ovs.OVSEVIDataplane( self.dp_driver, 77, "foo_external_instance_id", None, None, instance_label=99) self.vlan = 99 self.dataplane.vif_plugged("MAC1", "10.0.0.1", {'vlan': self.vlan}, None, None) self.bridge.add_flow.assert_called_once_with( table=ovs_const.VXLAN_TUN_TO_LV, priority=mock.ANY, tun_id=99, actions=mock.ANY) def test_setup_dataplane_for_remote_endpoint__local(self): self.dataplane.setup_dataplane_for_remote_endpoint( MAC1, LOCAL_IP, 42, FakeNLRI("11.0.0.1"), None) self.tunnel_mgr.get_object.assert_not_called() def test_setup_dataplane_for_remote_endpoint(self): self.dataplane.setup_dataplane_for_remote_endpoint( MAC1, "2.2.2.2", 42, FakeNLRI("11.0.0.1"), None) self.tunnel_mgr.get_object.assert_called_once_with( "2.2.2.2", (77, (42, MAC1))) self.bridge.add_flow.assert_called_with( table=ovs_const.UCAST_TO_TUN, priority=mock.ANY, dl_vlan=self.vlan, 
dl_dst=MAC1, actions=mock.ANY) def test_remove_dataplane_for_remote_endpoint__local(self): self.dataplane.remove_dataplane_for_remote_endpoint( MAC1, LOCAL_IP, 42, FakeNLRI("11.0.0.1")) self.bridge.delete_unicast_to_tun.assert_called_with(self.vlan, MAC1) self.tunnel_mgr.free_object.assert_not_called() def test_remove_dataplane_for_remote_endpoint(self): self.dataplane.remove_dataplane_for_remote_endpoint( MAC1, "2.2.2.2", 42, FakeNLRI("11.0.0.1")) self.bridge.delete_unicast_to_tun.assert_called_with(self.vlan, MAC1) self.tunnel_mgr.free_object.assert_called_with( "2.2.2.2", (77, (42, MAC1))) def test_add_dataplane_for_bum_endpoint__local(self): self.dataplane.add_dataplane_for_bum_endpoint(LOCAL_IP, 45, None, None) self.tunnel_mgr.get_object.assert_not_called() def test_add_dataplane_for_bum_endpoint(self): self.dataplane.add_dataplane_for_bum_endpoint("2.2.2.2", 45, None, None) self.tunnel_mgr.get_object.assert_called_with( "2.2.2.2", (77, (45, "flood"))) def test_remove_dataplane_for_bum_endpoint__local(self): self.dataplane.add_dataplane_for_bum_endpoint(LOCAL_IP, 45, None, None) self.tunnel_mgr.free_object.reset_mock() self.dataplane.remove_dataplane_for_bum_endpoint(LOCAL_IP, 45, None) self.tunnel_mgr.free_object.assert_not_called() def test_remove_dataplane_for_bum_endpoint(self): self.dataplane.add_dataplane_for_bum_endpoint("2.2.2.2", 45, None, None) self.tunnel_mgr.free_object.reset_mock() self.dataplane.remove_dataplane_for_bum_endpoint("2.2.2.2", 45, None) self.tunnel_mgr.free_object.assert_called_with( "2.2.2.2", (77, (45, "flood"))) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9463053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/ipvpn/0000775000175000017500000000000000000000000027066 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 
networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/ipvpn/__init__.py0000664000175000017500000000000000000000000031165 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/ipvpn/mpls_ovs_dataplane.py0000664000175000017500000001567500000000000033331 0ustar00zuulzuul00000000000000# Copyright 2017 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE(tmorin): unit tests for system interactions aren't that useful, which # is why you will find very few things here. 
The code is is expected to be # covered by tempest and fullstack test jobstores from unittest import mock from networking_bagpipe.bagpipe_bgp.common import dataplane_utils from networking_bagpipe.bagpipe_bgp.vpn.ipvpn import mpls_ovs_dataplane from networking_bagpipe.tests.unit.bagpipe_bgp import base as t INSTANCE_ID = 77 LOCAL_IP = "1.1.1.1" REMOTE_PE1 = "2.2.2.2" REMOTE_PE2 = "3.3.3.3" NH1 = mpls_ovs_dataplane.NextHop(1, LOCAL_IP, None, 0) NH2 = mpls_ovs_dataplane.NextHop(2, LOCAL_IP, None, 1) REMOTE_PREFIX1 = "11.0.0.2" class TestMPLSOVSDataplaneDriver(t.TestCase): def _test_priority_for_prefix(self, prefix_list): # assuming that prefix_list is a list of prefixes of increasing length # check that each prefixes has a priority higher than the previous one previous_prio = 0 previous_prefix = "" for prefix in prefix_list: prio = mpls_ovs_dataplane._priority_from_prefix(prefix) self.assertTrue(prio > previous_prio, ("%s should have a priority higher than %s, " "but 0x%x !> 0x%x") % (prefix, previous_prefix, prio, previous_prio)) previous_prio = prio previous_prefix = prefix def test_priority_for_prefix_v4(self): self._test_priority_for_prefix( ["0.0.0.0/0", "1.1.0.0/16", "2.2.2.0/24", "3.3.3.3/32"]) def test_priority_for_prefix_v6(self): self._test_priority_for_prefix( ["::0/0", "2001:db8:8:4::2/64", "::1/128"]) def test_fallback_priority(self): self.assertLess(mpls_ovs_dataplane.FALLBACK_PRIORITY, mpls_ovs_dataplane._priority_from_prefix("0.0.0.0/0")) class TestNextHopGroupManager(t.TestCase): def setUp(self): super().setUp() self.bridge = mock.Mock(spec=dataplane_utils.OVSBridgeWithGroups) self.manager = mpls_ovs_dataplane.NextHopGroupManager(self.bridge, None, None, None) def test_get_nexthop_group(self): nh_g1 = self.manager.get_object("2.2.2.2", NH1) self.bridge.add_group.assert_called_once_with( group_id=nh_g1, type='select', selection_method=None, selection_method_param=None, fields=None) self.bridge.add_group.reset_mock() nh_g2 = 
self.manager.get_object("2.2.2.2", NH2) self.bridge.add_group.assert_not_called() self.assertEqual(nh_g1, nh_g2) self.bridge.add_group.reset_mock() nh_g3 = self.manager.get_object("3.3.3.3", NH1) self.bridge.add_group.assert_called_once() self.assertNotEqual(nh_g3, nh_g1) self.assertTrue(len(self.manager.infos())) def test_free_object(self): nh_g1 = self.manager.get_object("2.2.2.2", NH1) self.manager.get_object("2.2.2.2", NH2) nh_g2 = self.manager.get_object("3.3.3.3", NH1) self.bridge.add_group.reset_mock() self.manager.free_object("2.2.2.2", NH1) self.bridge.delete_group.assert_not_called() nh_g1bis = self.manager.get_object("2.2.2.2") self.assertEqual(nh_g1bis, nh_g1) self.manager.free_object("2.2.2.2", NH2) self.bridge.delete_group.assert_called_once_with(nh_g1) self.bridge.delete_group.reset_mock() self.manager.free_object("3.3.3.3", NH1) self.bridge.delete_group.assert_called_once_with(nh_g2) class FakeBridgeMockSpec(dataplane_utils.OVSBridgeWithGroups, dataplane_utils.OVSExtendedBridge): pass class FakeNLRI: def __init__(self, ip): self.ip = ip class TestMPLSOVSVRFSDataplane(t.TestCase): def setUp(self): super().setUp() self.bridge = mock.Mock(spec=FakeBridgeMockSpec) self.nh_group_mgr = mock.Mock( spec=mpls_ovs_dataplane.NextHopGroupManager ) self.nh_group_mgr.bridge = self.bridge self.nh_group_mgr.get_object.side_effect = [None, 0, 0] self.dp_driver = mock.Mock( spec=mpls_ovs_dataplane.MPLSOVSDataplaneDriver ) self.dp_driver.bridge = self.bridge self.dp_driver.nh_group_mgr = self.nh_group_mgr self.dp_driver.vxlan_encap = False self.dp_driver.vrf_table = 3 self.dp_driver.config = mock.Mock() self.dp_driver.config.arp_responder = False self.dp_driver.get_local_address.return_value = LOCAL_IP self.dp_driver.mpls_in_port.return_value = 1 self.label = 99 self.dataplane = mpls_ovs_dataplane.MPLSOVSVRFDataplane( self.dp_driver, INSTANCE_ID, "foo_external_instance_id", "10.0.0.1", "24", instance_label=self.label) self.dataplane._mtu_fixup = mock.Mock() 
self.dataplane._match_label_action = mock.Mock(return_value="") self.dataplane._match_output_action = mock.Mock(return_value="") def test_setup_dataplane_for_remote_endpoint(self): self.dataplane.setup_dataplane_for_remote_endpoint( REMOTE_PREFIX1, REMOTE_PE1, 42, FakeNLRI(REMOTE_PREFIX1), None) expected_nh = mpls_ovs_dataplane.NextHop(42, REMOTE_PE1, None, 0) self.nh_group_mgr.get_object.assert_called_with( REMOTE_PREFIX1, (INSTANCE_ID, mock.ANY), buckets=mock.ANY) nh1 = self.nh_group_mgr.get_object.call_args_list[1][0][1][1] self.assertEqual(expected_nh, nh1) self.bridge.add_flow.assert_called_once_with( table=3, cookie=mock.ANY, priority=mock.ANY, nw_dst=REMOTE_PREFIX1, actions="group:0") self.nh_group_mgr.get_object.reset_mock() self.bridge.add_flow.reset_mock() self.dataplane.setup_dataplane_for_remote_endpoint( REMOTE_PREFIX1, REMOTE_PE2, 43, FakeNLRI(REMOTE_PREFIX1), None, 1) self.nh_group_mgr.get_object.assert_called_with( REMOTE_PREFIX1) self.bridge.add_flow.assert_not_called() self.bridge.insert_bucket.assert_called_once_with( group_id=0, bucket_id=1, command_bucket_id="last", actions=mock.ANY) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_api.py0000664000175000017500000000214500000000000030116 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import fixture as config_fixture from networking_bagpipe.bagpipe_bgp.api import api from networking_bagpipe.bagpipe_bgp.api import config as api_config from networking_bagpipe.tests.unit.bagpipe_bgp import base class TestAPI(base.TestCase): def setUp(self): super().setUp() cfg_fixture = self.useFixture(config_fixture.Config()) cfg_fixture.register_opts(api_config.common_opts, "API") def test_api_init(self): # instantiate the API, will fail if an exception is raised api.PecanAPI() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_bgp_manager.py0000664000175000017500000000250700000000000031611 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import bgp_manager from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.tests.unit.bagpipe_bgp import base class TestRouteTableManager(base.TestCase): def setUp(self): super().setUp() self.bgp_manager = bgp_manager.Manager() def test1(self): subscription = engine.Subscription(engine.Subscription.ANY_AFI, engine.Subscription.ANY_SAFI, engine.Subscription.ANY_RT) route_entry = self.bgp_manager._subscription_2_rtc_route_entry( subscription) self.assertEqual(route_entry.safi, exa.SAFI.rtc, "wrong RTC route") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_config.py0000664000175000017500000000212000000000000030603 0ustar00zuulzuul00000000000000# Copyright 2017 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest from networking_bagpipe.bagpipe_bgp.common import config from oslo_config.tests import test_types class TestConfigInterfaceAddress(test_types.TypeTestHelper, unittest.TestCase): type = config.InterfaceAddress() def test_interface_address_ip(self): self.assertConvertedValue("127.0.0.1", "127.0.0.1") def test_interface_address(self): self.assertConvertedValue("lo", "127.0.0.1") def test_interface_address_non_existing(self): self.assertInvalid("non_existing_interface") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_dataplane_utils.py0000664000175000017500000000622000000000000032514 0ustar00zuulzuul00000000000000# Copyright 2018 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import testtools from unittest import mock from networking_bagpipe.bagpipe_bgp.common import dataplane_utils class TestObjectLifecycleManager(testtools.TestCase): def setUp(self): super().setUp() self.test_object_mgr = dataplane_utils.ObjectLifecycleManager() self.test_object_mgr.create_object = mock.Mock(return_value=1) self.test_object_mgr.delete_object = mock.Mock() def test_get_object_first_user(self): _, first1 = self.test_object_mgr.get_object("OBJ1", "USER_A") self.assertTrue(first1) self.test_object_mgr.create_object.assert_called_once() self.assertEqual(len(self.test_object_mgr.objects), 1) self.assertEqual(len(self.test_object_mgr.object_used_for["OBJ1"]), 1) def test_get_object_multiple_users(self): self.test_object_mgr.get_object("OBJ1", "USER_A") _, first2 = self.test_object_mgr.get_object("OBJ1", "USER_B") self.assertFalse(first2) self.test_object_mgr.create_object.assert_called_once() self.assertEqual(len(self.test_object_mgr.objects), 1) self.assertEqual(len(self.test_object_mgr.object_used_for["OBJ1"]), 2) def test_find_object_already_exist(self): self.test_object_mgr.get_object("OBJ1", "USER_A") obj1 = self.test_object_mgr.find_object("OBJ1") self.assertIsNotNone(obj1) def test_find_object_empty(self): obj1 = self.test_object_mgr.find_object("OBJ1") self.assertIsNone(obj1) def test_free_object_last_user(self): self.test_object_mgr.get_object("OBJ1", "USER_A") last1 = self.test_object_mgr.free_object("OBJ1", "USER_A") self.assertTrue(last1) self.test_object_mgr.delete_object.assert_called_once() self.assertTrue(not self.test_object_mgr.objects) self.assertTrue(not self.test_object_mgr.object_used_for) def test_free_object_multiple_users(self): self.test_object_mgr.get_object("OBJ1", "USER_A") self.test_object_mgr.get_object("OBJ1", "USER_B") last1 = self.test_object_mgr.free_object("OBJ1", "USER_A") self.assertFalse(last1) self.test_object_mgr.delete_object.assert_not_called() self.assertEqual(len(self.test_object_mgr.objects), 1) 
self.assertEqual(len(self.test_object_mgr.object_used_for["OBJ1"]), 1) last2 = self.test_object_mgr.free_object("OBJ1", "USER_B") self.assertTrue(last2) self.test_object_mgr.delete_object.assert_called_once() self.assertFalse(self.test_object_mgr.objects) self.assertFalse(self.test_object_mgr.object_used_for) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_engine_objects.py0000664000175000017500000002541200000000000032325 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module:: test_vpn_instance :synopsis: module with unit tests for bagpipe.bgp.engine.__init__ Validates the behavior of base objects of the engine. Also validates that ExaBGP classes behave as expected by the code in bagpipe.bgp.engine.__init__ . 
""" from testtools import TestCase from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.tests.unit.bagpipe_bgp import base TEST_RD = exa.RouteDistinguisher.fromElements("42.42.42.42", 5) class TestEngineObjects(TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() # Tests on EVPN NLRIs def test_100_evpn_mac_hash_equal(self): # Two indistinct EVPN NLRI should # hash to the same value, and be equal nlri1 = exa.EVPNMAC(TEST_RD, exa.ESI(), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([42], True), exa.IP.create("1.1.1.1")) nlri2 = exa.EVPNMAC(TEST_RD, exa.ESI(), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([42], True), exa.IP.create("1.1.1.1")) self.assertEqual(hash(nlri1), hash(nlri2)) self.assertEqual(nlri1, nlri2) def test_101_evpn_hash_equal_somefieldsvary(self): # Two EVPN MAC NLRIs differing by their ESI or label or RD, # or nexthop, but otherwise identical should hash to the same value, # and be equal nlri0 = exa.EVPNMAC(TEST_RD, exa.ESI(), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([42], True), exa.IP.create("1.1.1.1")) # Esi nlri1 = exa.EVPNMAC(TEST_RD, exa.ESI(b''.join(bytes((1,)) for _ in range(0, 10))), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([42], True), exa.IP.create("1.1.1.1")) # label nlri2 = exa.EVPNMAC(TEST_RD, exa.ESI(), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([4444], True), exa.IP.create("1.1.1.1")) # IP: different IPs, but same MACs: different route nlri3 = exa.EVPNMAC(TEST_RD, exa.ESI(), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([42], True), exa.IP.create("2.2.2.2")) # with a next hop... 
nlri4 = exa.EVPNMAC(TEST_RD, exa.ESI(), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([42], True), exa.IP.create("1.1.1.1"), exa.IP.pton("10.10.10.10")) nlri5 = exa.EVPNMAC(TEST_RD, exa.ESI(), exa.EthernetTag(111), exa.MAC("01:02:03:04:05:06"), 6 * 8, exa.Labels([42], True), exa.IP.create("1.1.1.1"), exa.IP.pton("11.11.11.11")) self.assertEqual(hash(nlri0), hash(nlri1)) self.assertEqual(hash(nlri0), hash(nlri2)) self.assertEqual(hash(nlri0), hash(nlri4)) self.assertEqual(nlri0, nlri1) self.assertEqual(nlri0, nlri2) self.assertEqual(nlri0, nlri4) self.assertEqual(nlri1, nlri2) self.assertEqual(nlri1, nlri4) self.assertEqual(nlri2, nlri4) self.assertEqual(nlri4, nlri5) self.assertNotEqual(hash(nlri0), hash(nlri3)) self.assertNotEqual(nlri0, nlri3) self.assertNotEqual(nlri1, nlri3) self.assertNotEqual(nlri2, nlri3) self.assertNotEqual(nlri3, nlri4) # tests on attributes def test_4_same_nlri_distinct_attributes(self): # Two routes with same NLRI but distinct attributes should # not be equal atts1 = exa.Attributes() atts1.add(exa.LocalPreference(10)) atts2 = exa.Attributes() atts2.add(exa.LocalPreference(20)) entry1 = engine.RouteEntry(base.NLRI1, None, atts1) entry2 = engine.RouteEntry(base.NLRI1, None, atts2) self.assertNotEqual(entry1, entry2) def test_5_same_nlri_same_attributes(self): # Two routes with same NLRI but and same attributes should # hash to the same values and be equal. 
atts1 = exa.Attributes() atts1.add(exa.LocalPreference(10)) atts2 = exa.Attributes() atts2.add(exa.LocalPreference(10)) entry1 = engine.RouteEntry(base.NLRI1, None, atts1) entry2 = engine.RouteEntry(base.NLRI1, None, atts2) self.assertEqual(hash(entry1), hash(entry2)) self.assertEqual(entry1, entry2) def test_6_same_nlri_same_attributes_order_multivalued(self): # Two routes with same NLRI but and same attributes should # hash to the same values and be equal, *even if* for a said # multivalued attributes, like extended community, the values # appear in a distinct order atts1 = exa.Attributes() ecoms1 = exa.ExtendedCommunities() ecoms1.communities.append(exa.RouteTarget(64512, 1)) ecoms1.communities.append(exa.Encapsulation( exa.Encapsulation.Type.VXLAN)) ecoms1.communities.append(exa.RouteTarget(64512, 2)) atts1.add(ecoms1) atts2 = exa.Attributes() ecoms2 = exa.ExtendedCommunities() ecoms2.communities.append(exa.RouteTarget(64512, 2)) ecoms2.communities.append(exa.RouteTarget(64512, 1)) ecoms2.communities.append(exa.Encapsulation( exa.Encapsulation.Type.VXLAN)) atts2.add(ecoms2) entry1 = engine.RouteEntry(base.NLRI1, None, atts1) entry2 = engine.RouteEntry(base.NLRI1, None, atts2) self.assertEqual(hash(entry1), hash(entry2)) self.assertEqual(entry1, entry2) def test_8_route_entry_set_rts(self): atts = exa.Attributes() ecoms = exa.ExtendedCommunities() ecoms.communities.append(exa.RouteTarget(64512, 1)) ecoms.communities.append(exa.RouteTarget(64512, 2)) ecoms.communities.append(exa.Encapsulation( exa.Encapsulation.Type.VXLAN)) atts.add(exa.LocalPreference(20)) atts.add(ecoms) entry = engine.RouteEntry(base.NLRI1, None, atts) # check that the route_entry object has the RTs we wanted self.assertIn(exa.RouteTarget(64512, 1), entry.route_targets) self.assertIn(exa.RouteTarget(64512, 2), entry.route_targets) # modify the route targets entry.set_route_targets([exa.RouteTarget(64512, 3), exa.RouteTarget(64512, 1)]) # check that the new RTs have replaced the old ones 
self.assertIn(exa.RouteTarget(64512, 1), entry.route_targets) self.assertIn(exa.RouteTarget(64512, 3), entry.route_targets) self.assertNotIn(exa.RouteTarget(64512, 2), entry.route_targets) # also need to check the RTs in the attributes ecoms = entry.attributes[ exa.Attribute.CODE.EXTENDED_COMMUNITY].communities self.assertIn(exa.RouteTarget(64512, 1), ecoms) self.assertIn(exa.RouteTarget(64512, 3), ecoms) self.assertNotIn(exa.RouteTarget(64512, 2), ecoms) # check that other communities were preserved self.assertIn(exa.Encapsulation(exa.Encapsulation.Type.VXLAN), ecoms) def test_9_route_entry_rts_as_init_param(self): atts = exa.Attributes() ecoms = exa.ExtendedCommunities() ecoms.communities.append(exa.Encapsulation( exa.Encapsulation.Type.VXLAN)) atts.add(exa.LocalPreference(20)) atts.add(ecoms) rts = [exa.RouteTarget(64512, 1), exa.RouteTarget(64512, 2)] entry = engine.RouteEntry(base.NLRI1, rts, atts) self.assertIn(exa.RouteTarget(64512, 1), entry.route_targets) self.assertIn(exa.RouteTarget(64512, 2), entry.route_targets) ecoms = entry.attributes[ exa.Attribute.CODE.EXTENDED_COMMUNITY].communities self.assertIn(exa.RouteTarget(64512, 1), ecoms) self.assertIn(exa.RouteTarget(64512, 2), ecoms) self.assertIn(exa.Encapsulation(exa.Encapsulation.Type.VXLAN), ecoms) def test_10_ecoms(self): ecoms1 = exa.ExtendedCommunities() ecoms1.communities.append(exa.Encapsulation( exa.Encapsulation.Type.VXLAN)) atts1 = exa.Attributes() atts1.add(ecoms1) ecoms2 = exa.ExtendedCommunities() ecoms2.communities.append(exa.Encapsulation( exa.Encapsulation.Type.VXLAN)) ecoms2.communities.append(exa.RouteTarget(64512, 1)) atts2 = exa.Attributes() atts2.add(ecoms2) self.assertFalse(atts1.sameValuesAs(atts2)) self.assertFalse(atts2.sameValuesAs(atts1)) def test_11_rts(self): rt1a = exa.RouteTarget(64512, 1) rt1b = exa.RouteTarget(64512, 1) rt3 = exa.RouteTarget(64512, 2) rt4 = exa.RouteTarget(64513, 1) self.assertEqual(hash(rt1a), hash(rt1b)) # self.assertEqual(hash(rt1a), hash(rt2)) 
self.assertNotEqual(hash(rt1a), hash(rt3)) self.assertNotEqual(hash(rt1a), hash(rt4)) self.assertEqual(rt1a, rt1b) # self.assertEqual(rt1a, rt2) self.assertNotEqual(rt1a, rt3) self.assertNotEqual(rt1a, rt4) self.assertEqual({rt1a}, {rt1b}) # self.assertEqual(set([rt1a]), set([rt2])) self.assertEqual(1, len({rt1a}.intersection({rt1b}))) # self.assertEqual(1, len(set([rt2]).intersection(set([rt1b])))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_identifier_allocators.py0000664000175000017500000001133400000000000033712 0ustar00zuulzuul00000000000000# Copyright 2018 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import testtools from networking_bagpipe.bagpipe_bgp.vpn import identifier_allocators class TestIDAllocator(testtools.TestCase): min_id = 100 alloc_id_count = 5 remove_index = 2 def setUp(self): super().setUp() self.test_allocator = identifier_allocators.IDAllocator() self.test_allocator.current_id = self.min_id self.test_allocator.MAX = (self.min_id + self.alloc_id_count) - 1 self.allocated_ids = set() for i in range(self.alloc_id_count): self.allocated_ids.add( self.test_allocator.get_new_id("Test identifier %d" % i) ) def test_allocated_id_uniqueness(self): self.assertEqual(self.allocated_ids, set(self.test_allocator.allocated_ids.keys())) self.assertEqual(self.test_allocator.MAX + 1, self.test_allocator.current_id) def test_reuse_released_id(self): # Check identifier reused after having been released remove_id = self.min_id + self.remove_index self.test_allocator.release(remove_id) reused_id = self.test_allocator.get_new_id("Test reused identifier") self.assertEqual(remove_id, reused_id) self.assertEqual(self.test_allocator.MAX + 1, self.test_allocator.current_id) def test_allocated_id_max(self): # Check no identifiers left exception self.assertRaises(identifier_allocators.MaxIDReached, self.test_allocator.get_new_id, "Test max reached identifier") class TestIDAllocatorReUse(testtools.TestCase): def setUp(self): testtools.TestCase.setUp(self) def test_do_not_reuse_at_once(self): test_allocator = identifier_allocators.IDAllocator() x = test_allocator.get_new_id() test_allocator.release(x) y = test_allocator.get_new_id() self.assertNotEqual(x, y) def test_reuse_as_late_as_possible(self): # create an allocator for 4 values test_allocator = identifier_allocators.IDAllocator() test_allocator.MAX = 3 # allocate one value, and release it at once x = test_allocator.get_new_id() test_allocator.release(x) # allocate 3 values, check that none is x intermediate_ids = [test_allocator.get_new_id(desc) for desc in ('one', 'two', 'three')] for y in intermediate_ids: 
self.assertNotEqual(x, y) # allocate one more, this can't be anything else than x z1 = test_allocator.get_new_id() self.assertEqual(x, z1) # we reached MAX, we are now allocating from released_ids() # free x test_allocator.release(x) # free the intermediate ids for y in intermediate_ids: test_allocator.release(y) # check that the next id given to us is x z2 = test_allocator.get_new_id() self.assertEqual(x, z2) class TestIDAllocatorRequestValue(testtools.TestCase): def setUp(self): testtools.TestCase.setUp(self) def test_request_not_allocated(self): test_allocator = identifier_allocators.IDAllocator() x = test_allocator.get_new_id() # Request to allocate id greater than current_id value y = test_allocator.get_new_id(hint_value=x + 3) self.assertEqual(y, x + 3) self.assertEqual(test_allocator.current_id, x + 1) def test_request_allocated(self): test_allocator = identifier_allocators.IDAllocator() x = test_allocator.get_new_id() y = test_allocator.get_new_id(hint_value=x) self.assertNotEqual(y, x) self.assertEqual(y, x + 1) self.assertEqual(test_allocator.current_id, x + 2) def test_request_next_already_allocated(self): test_allocator = identifier_allocators.IDAllocator() x = test_allocator.get_new_id() # Request to allocate id equal to current_id value y = test_allocator.get_new_id(hint_value=x + 1) z = test_allocator.get_new_id() self.assertNotEqual(z, y) self.assertEqual(z, x + 2) self.assertEqual(test_allocator.current_id, x + 3) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_ipvpn_objects.py0000664000175000017500000000507000000000000032212 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import ipvpn TEST_RD = exa.RouteDistinguisher.fromElements("42.42.42.42", 5) def _create_test_ipvpn_nlri(label, nexthop): return ipvpn.IPVPNRouteFactory(exa.AFI(exa.AFI.ipv4), "1.1.1.1/32", label, TEST_RD, nexthop) class TestNLRIs(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() # tests on MPLS VPN NLRIs def test_0_mpls_vpn_hash_equal(self): # Two indistinct VPN NLRI should # hash to the same value, and be equal nlri1 = _create_test_ipvpn_nlri(42, "45.45.45.45") nlri2 = _create_test_ipvpn_nlri(42, "45.45.45.45") self.assertEqual(hash(nlri1), hash(nlri2)) self.assertEqual(nlri1, nlri2) def test_1_mpls_vpn_hash_equal(self): # Two VPN NLRI distinct only by their *label* should # hash to the same value, and be equal nlri1 = _create_test_ipvpn_nlri(42, "45.45.45.45") nlri2 = _create_test_ipvpn_nlri(0, "45.45.45.45") self.assertEqual(hash(nlri1), hash(nlri2)) self.assertEqual(nlri1, nlri2) def test_2_mpls_vpn_hash_equal(self): # Two VPN NLRI distinct only by their *nexthop* should # hash to the same value, and be equal nlri1 = _create_test_ipvpn_nlri(42, "45.45.45.45") nlri2 = _create_test_ipvpn_nlri(42, "77.77.77.77") self.assertEqual(hash(nlri1), hash(nlri2)) self.assertEqual(nlri1, nlri2) def test_3_mpls_vpn_hash_equal(self): # Two VPN NLRI distinct only by their *action* should # hash to the same value, and be equal nlri1 = _create_test_ipvpn_nlri(42, "45.45.45.45") nlri1.action = exa.OUT.ANNOUNCE nlri2 = 
_create_test_ipvpn_nlri(42, "45.45.45.45") nlri2.action = exa.OUT.WITHDRAW self.assertEqual(hash(nlri1), hash(nlri2)) self.assertEqual(nlri1, nlri2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_route_table_manager.py0000664000175000017500000010111100000000000033335 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module:: test_route_table_manager :synopsis: module that defines several test cases for the route_table_manager module. In particular, unit tests for RouteTableManager class. Setup : Start RouteTableManager thread instance. TearDown : Stop RouteTableManager thread instance. RouteTableManager is in charge to maintain the list of Workers (TrackerWorker, ExaBGPPeerWorker, BGPPeerWorker) subscriptions, to process BGB route events and to dispatch BGP routes to the Workers according to their subscriptions. Mock class is used to stub Workers. 
Tests are organized as follow : - testAx use cases to test worker subscriptions to route target (or match) 1- with no route to synthesize 2- with routes to synthesize <=> advertise existing routes to the new worker subscription according following rules : - route should not be synthesized to its source, - route should not be synthesized between BGPPeerWorkers - route should not be synthesized if already send to worker for another RT For both use cases check that (worker, match) is correctly recorded by RouteTableManager Other test cases : re-subscription to check routes are not synthesized - testBx use cases to test worker unsubscriptions to match (without and with routes to synthesize) : same rules should be applied to generate withdraw events. Check (worker, match) has been deleted Other test cases : unsubscription to match or by worker not registered - testCx to test the processing of route event generated by a worker. 2 types of route event : advertise (new or update) or withdraw 1- advertise or withdraw a route entry without event propagation to workers: check that entry is recorded/deleted 2- advertise (new or update) or withdraw with event propagation: to test dispatching of route events to the workers according to their subscriptions. 
Other test cases : withdraw of a not registered route, advertise of the same route (same attr and RTs) - testDx : to test worker cleanup - testEx : to test dumpState """ import testtools from unittest import mock from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import bgp_peer_worker as bpw from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import route_table_manager as rtm from networking_bagpipe.bagpipe_bgp.engine import worker from networking_bagpipe.tests.unit.bagpipe_bgp import base as t MATCH1 = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), t.RT1) MATCH2 = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), t.RT2) MATCH3 = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), t.RT3) class TestRouteTableManager(testtools.TestCase, t.BaseTestBagPipeBGP): def setUp(self): super().setUp() self.rtm = rtm.RouteTableManager(mock.Mock(), mock.Mock()) self.rtm.start() self.set_event_target_worker(self.rtm) def tearDown(self): super().tearDown() self.rtm.stop() self.rtm.join() def _new_worker(self, worker_name, worker_type): worker = mock.Mock(spec=worker_type, name=worker_name) worker.name = worker_name worker.enqueue = mock.Mock() worker._rtm_matches = set() worker._rtm_route_entries = set() return worker def _worker_subscriptions(self, worker, rts, afi=exa.AFI(exa.AFI.ipv4), safi=exa.SAFI(exa.SAFI.mpls_vpn)): for rt in rts: subscribe = engine.Subscription(afi, safi, rt, worker) self.rtm._on_event(subscribe) def _worker_unsubscriptions(self, worker, rts, afi=exa.AFI(exa.AFI.ipv4), safi=exa.SAFI(exa.SAFI.mpls_vpn)): for rt in rts: unsubscribe = engine.Unsubscription(afi, safi, rt, worker) self.rtm._on_event(unsubscribe) def _check_subscriptions(self, worker, matches): for match in matches: self.assertIn(match, worker._rtm_matches, "Subscription not found") def _check_unsubscriptions(self, worker, matches): if '_rtm_matches' not in 
worker.__dict__: return for match in matches: self.assertNotIn(match, worker._rtm_matches, "Subscription found while it should not: %s" % worker._rtm_matches) def _check_events_calls(self, events, advertised_routes, withdrawn_nlris): # checks that each advertise event in 'events' is in advertised_routes, # that each withdraw event in 'events' is in withdrawn_nlris # and that all events in withdrawn_nlris and advertised_routes are in # 'events' for (call_args, _) in events: if (call_args[0].type == engine.RouteEvent.ADVERTISE): self.assertIn(call_args[0].route_entry, advertised_routes, "Bad advertised route") advertised_routes.remove(call_args[0].route_entry) else: # WITHDRAW self.assertIn(call_args[0].route_entry.nlri, withdrawn_nlris, "Bad withdrawn route") withdrawn_nlris.remove(call_args[0].route_entry.nlri) self.assertEqual(0, len(advertised_routes), "some routes not advert'd") self.assertEqual(0, len(withdrawn_nlris), "some routes not withdrawn") def test_a1_subscriptions_with_no_route_to_synthesize(self): # Worker1 subscribes to RT1 and RT2 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2]) # check subscriptions self._check_subscriptions(worker1, [MATCH1, MATCH2]) def test_a1_check_first_last_local_worker_callback(self): bgp_worker1 = self._new_worker("worker.Worker-1", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_worker1, [t.RT1]) self._wait() self.assertEqual( 0, self.rtm.first_local_subscriber_callback.call_count, "first_local_subscriber_callback should not have been called " " (non local worker)") worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1]) self.assertEqual( 1, self.rtm.first_local_subscriber_callback.call_count, "first_local_subscriber_callback should have been called") worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT1]) self.assertEqual( 1, 
self.rtm.first_local_subscriber_callback.call_count, "first_local_subscriber_callback should not have been called a " "second time") self._worker_unsubscriptions(worker2, [t.RT1]) self.assertEqual( 0, self.rtm.last_local_subscriber_callback.call_count, "last_local_subscriber_callback should not have been called") self._worker_unsubscriptions(worker1, [t.RT1]) self.assertEqual( 1, self.rtm.last_local_subscriber_callback.call_count, "last_local_subscriber_callback should have been called") self._worker_unsubscriptions(bgp_worker1, [t.RT1]) self.assertEqual( 1, self.rtm.last_local_subscriber_callback.call_count, "last_local_subscriber_callback should not have been called " " (non local worker)") def test_a2_subscriptions_with_route_to_synthesize(self): # BGPPeerWorker1 advertises a route for RT1 and RT2 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) evt1 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 advertises an other route for RT2 evt2 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI2, [t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 subscribes to RT1 self._worker_subscriptions(bgp_peer_worker1, [t.RT1]) # Worker1 subscribes to RT1 and RT2 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2]) # Worker2 subscribes to RT1 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT1]) # Worker3 subscribes to RT3 worker3 = self._new_worker("worker.Worker-3", worker.Worker) self._worker_subscriptions(worker3, [t.RT3]) # BGPPeerWorker2 subscribes to RT1 bgp_peer_worker2 = self._new_worker("BGPWorker2", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker2, [t.RT1]) # Waiting for RouteTableManager thread finishes to process the # subscription self._wait() # check route entry synthesized self.assertEqual(0, bgp_peer_worker1.enqueue.call_count, "Route should 
not be synthesized to its source") self.assertEqual(0, worker3.enqueue.call_count, "no route should be synthesized to Worker3") self.assertEqual(0, bgp_peer_worker2.enqueue.call_count, "Route should not be synthesized between BGP workers") self.assertEqual(2, worker1.enqueue.call_count, "2 advertise events should be synthesized to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [evt1.route_entry, evt2.route_entry], []) self.assertEqual(1, worker2.enqueue.call_count, "1 advertise event should be synthesized to Worker2") self._check_events_calls( worker2.enqueue.call_args_list, [evt1.route_entry], []) def test_a3_resubscription(self): # BGPPeerWorker1 advertises a route for RT1 and RT2 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) route_event = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # Worker1 subscribes to RT1 and RT2 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2]) # Worker1 subscribes again to RT1 self._worker_subscriptions(worker1, [t.RT1]) # Worker2 subscribes to RT1 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT1]) # Worker1 subscribes again to RT2 self._worker_subscriptions(worker2, [t.RT2]) # check route entry synthesized self.assertEqual(1, worker1.enqueue.call_count, "1 route advertised should be synthesized to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [route_event.route_entry], []) self.assertEqual(1, worker2.enqueue.call_count, "1 route advertised should be synthesized to Worker2") self._check_events_calls(worker2.enqueue.call_args_list, [route_event.route_entry], []) def test_a4_two_subscriptions(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1]) # Worker2 subscribes to RT1 worker2 = self._new_worker("worker.Worker-2", 
worker.Worker) self._worker_subscriptions(worker2, [t.RT1]) # Worker2 advertises a route to RT1 self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1], worker2, t.NH1) self.assertEqual(1, worker1.enqueue.call_count, "1 route advertised should be synthesized to Worker1") def test_b1_unsubscription_with_no_route_to_synthesize(self): # Worker1 subscribes to RT1 and RT2 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2]) # BGPPeerWorker1 subscribes to RT1 and RT2 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker1, [t.RT1, t.RT2]) # Worker1 unsubscribes to RT1 self._worker_unsubscriptions(worker1, [t.RT1]) # BGPPeerWorker1 unsubscribes to RT1 and RT2 self._worker_unsubscriptions(bgp_peer_worker1, [t.RT1, t.RT2]) # check subscription/unsubscriptions self._check_unsubscriptions(worker1, [MATCH1]) self._check_subscriptions(worker1, [MATCH2]) self._check_unsubscriptions(bgp_peer_worker1, [MATCH1, MATCH2]) def test_b2_unsubscription_with_route_to_synthesize(self): # BGPPeerWorker1 advertises a route for RT1 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) evt1 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 advertises an other route for RT2 evt2 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI2, [t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 subscribes to RT1 self._worker_subscriptions(bgp_peer_worker1, [t.RT1]) # Worker1 subscribes to RT1 and RT2 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2]) # Worker2 subscribes to RT2 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT2]) # Worker3 subscribes to RT3 worker3 = self._new_worker("worker.Worker-3", worker.Worker) self._worker_subscriptions(worker3, [t.RT3]) # 
BGPPeerWorker2 subscribes to RT1 bgp_peer_worker2 = self._new_worker("BGPWorker2", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker2, [t.RT1]) # Workers and BGPPeerWorker unsubscriptions self._worker_unsubscriptions(bgp_peer_worker1, [t.RT1]) self._worker_unsubscriptions(worker1, [t.RT1]) self._worker_unsubscriptions(worker2, [t.RT2]) self._worker_unsubscriptions(worker3, [t.RT3]) self._worker_unsubscriptions(bgp_peer_worker2, [t.RT1]) # Waiting for RouteTableManager thread finishes to process the # subscription self._wait() # check route entry synthesized self.assertEqual(0, bgp_peer_worker1.enqueue.call_count, "Route should not be synthesized to its source") self.assertEqual(0, worker3.enqueue.call_count, "no route should be synthesized to Worker3") self.assertEqual(0, bgp_peer_worker2.enqueue.call_count, "Route should not be synthesized between " "BGPPeerWorkers") self.assertEqual(2, worker1.enqueue.call_count, "2 advertise event should be synthesized to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [evt1.route_entry, evt2.route_entry], []) self.assertEqual(4, worker2.enqueue.call_count, "4 events should be synthesized to Worker2: " "2 advertise and 2 withdraw") self._check_events_calls(worker2.enqueue.call_args_list, [evt1.route_entry, evt2.route_entry], [evt1.route_entry.nlri, evt2.route_entry.nlri]) def test_b3_unsubscription_not_registered(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1]) # Worker1 unsubscribes to RT2 self._worker_unsubscriptions(worker1, [t.RT2]) # BGPPeerWorker1 unsubscribes to RT1 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) self._worker_unsubscriptions(bgp_peer_worker1, [t.RT1, t.RT2]) # check subscription/unsubscriptions self._check_subscriptions(worker1, [MATCH1]) self._check_unsubscriptions(bgp_peer_worker1, [MATCH1, MATCH2]) def 
test_c1_route_advertise_by_worker_without_propagation(self): # Worker1 advertises a route for RT1 and RT2 worker1 = self._new_worker("worker.Worker-1", worker.Worker) route_event = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker1, t.NH1) # check route entry has been inserted self.assertIn(route_event.route_entry, worker1._rtm_route_entries, "Route entry not found") def test_c2_route_withdraw_by_worker_without_propagation(self): # Worker1 advertises then withdraws a route worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker1, t.NH1) route_event = self._new_route_event(engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1], worker1, t.NH1) # check route entry has been removed self.assertNotIn(route_event.route_entry, worker1._rtm_route_entries, "Route entry found") def test_c3_route_advertise_by_bgp_peer_with_propagation(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1]) # Worker2 subscribes to RT2 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT2]) # BGPPeerWorker1 subscribes to RT1 and RT2 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker1, [t.RT1, t.RT2]) # BGPPeerWorker2 subscribes to RT1 and RT2 bgp_peer_worker2 = self._new_worker("BGPWorker2", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker2, [t.RT1, t.RT2]) # BGPPeerWorker1 advertises a route for RT1 route_event = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1], bgp_peer_worker1, t.NH1) # check route_event propagation self.assertEqual(1, worker1.enqueue.call_count, "1 route should be propagated to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [route_event.route_entry], []) self.assertEqual(0, worker2.enqueue.call_count, "no route should be 
propagated to Worker2") self.assertEqual(0, bgp_peer_worker1.enqueue.call_count, "Route should not be propagated to its source") self.assertEqual(0, bgp_peer_worker2.enqueue.call_count, "Route should not be propagated between BGP workers") def test_c4_route_withdraw_by_peer_worker_with_propagation(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1]) # Worker2 subscribes to RT2 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT2]) # Worker3 subscribes to RT3 worker3 = self._new_worker("worker.Worker-3", worker.Worker) self._worker_subscriptions(worker3, [t.RT3]) # BGPPeerWorker1 subscribes to RT1 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker1, [t.RT1]) # BGPPeerWorker2 subscribes to RT2 bgp_peer_worker2 = self._new_worker("BGPWorker2", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker2, [t.RT2]) # BGPPeerWorker1 advertises a route for RT1 and RT2 route_eventA = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 withdraw previous route (without RT route_eventW = self._new_route_event(engine.RouteEvent.WITHDRAW, t.NLRI1, [], bgp_peer_worker1, t.NH1) # check route_event propagation self.assertEqual(2, worker1.enqueue.call_count, "2 routes should be propagated to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [route_eventA.route_entry], [route_eventW.route_entry.nlri]) self.assertEqual(2, worker2.enqueue.call_count, "2 routes should be propagated to Worker2") self._check_events_calls(worker2.enqueue.call_args_list, [route_eventA.route_entry], [route_eventW.route_entry.nlri]) self.assertEqual(0, worker3.enqueue.call_count, "No route should be propagated to Worker3") self.assertEqual(0, bgp_peer_worker1.enqueue.call_count, "Route should not be propagated to its source") 
self.assertEqual(0, bgp_peer_worker2.enqueue.call_count, "Route should not be propagated between BGP workers") def test_c5_route_update_by_bgp_peer_with_withdraw_propagation(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1]) # Worker2 subscribes to RT2 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT2]) # Worker3 subscribes to RT3 worker3 = self._new_worker("worker.Worker-3", worker.Worker) self._worker_subscriptions(worker3, [t.RT3]) # BGPPeerWorker1 advertises a route for RT1, RT2 and RT3 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) evt1 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2, t.RT3], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 advertises the same nlri with attributes NH and RTs # modification evt2 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH2) # check route event propagation # TO DO : check route_event.replaced_route self.assertEqual(0, bgp_peer_worker1.enqueue.call_count, "Route should not be propagated to its source") self.assertEqual(2, worker1.enqueue.call_count, "2 routes should be advertised to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [evt1.route_entry, evt2.route_entry], []) self.assertEqual(2, worker2.enqueue.call_count, "2 routes should be advertised to Worker2") self._check_events_calls(worker2.enqueue.call_args_list, [evt1.route_entry, evt2.route_entry], []) self.assertEqual(2, worker3.enqueue.call_count, "2 routes should be advertised/withdrawn to Worker3") self._check_events_calls(worker3.enqueue.call_args_list, [evt1.route_entry], [evt1.route_entry.nlri]) def test_c6_route_readvertised(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2, t.RT3]) # BGPPeerWorker1 advertises a 
route for RT1, RT2 and RT3 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) evt1 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 advertises the same nlri with same attributes and RTs self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # check route event propagation self.assertEqual(0, bgp_peer_worker1.enqueue.call_count, "Route should not be propagated to its source") self.assertEqual(1, worker1.enqueue.call_count, "only 1 route should be advertised to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [evt1.route_entry], []) def test_c7_route_withdraw_not_registered(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2]) # BGPPeerWorker1 advertises a route for RT1 and RT2 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) evt1 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 withdraw a not registered route (without RT self._new_route_event(engine.RouteEvent.WITHDRAW, t.NLRI2, [], bgp_peer_worker1, t.NH1) # Waiting for RouteTableManager thread finishes to process route_event self._wait() # check route_event propagation self.assertEqual(1, worker1.enqueue.call_count, "1 route1 should be propagated to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [evt1.route_entry], []) self.assertEqual(0, bgp_peer_worker1.enqueue.call_count, "Route should not be propagated back to its source") def test_d1_worker_cleanup(self): # Worker1 subscribes to RT1 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1]) # Worker2 subscribes to RT2 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT2]) # BGPPeerWorker1 subscribes to RT1 and 
RT2 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) self._worker_subscriptions(bgp_peer_worker1, [t.RT1, t.RT2]) # BGPPeerWorker1 advertises a route for RT1 and RT2 evt1 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 advertises an other route for RT2 evt2 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI2, [t.RT2], bgp_peer_worker1, t.NH1) # Cleanup Worker1 self.rtm.enqueue(engine.WorkerCleanupEvent(bgp_peer_worker1)) # Waiting for RouteTableManager thread finishes to process the # subscriptions self._wait() self.assertEqual( 0, self.rtm.last_local_subscriber_callback.call_count, "last_local_subscriber_callback should not have been called " " (non local worker)") # check unsubscriptions self._check_unsubscriptions(bgp_peer_worker1, [MATCH1, MATCH2]) # Check route synthesize to Worker1 and Worker2 self.assertEqual(2, worker1.enqueue.call_count, "2 routes should be advert/withdraw to Worker1") self._check_events_calls(worker1.enqueue.call_args_list, [evt1.route_entry], [evt1.route_entry.nlri]) self.assertEqual(4, worker2.enqueue.call_count, "4 routes should be advert/withdraw to Worker2") self._check_events_calls(worker2.enqueue.call_args_list, [evt1.route_entry, evt2.route_entry], [evt1.route_entry.nlri, evt2.route_entry.nlri]) # Check route entries have been removed for BGPPeerWorker1 self.assertNotIn(evt1.route_entry, bgp_peer_worker1._rtm_route_entries, "Route entry found") self.assertNotIn(evt2.route_entry, bgp_peer_worker1._rtm_route_entries, "Route entry found") # Cleanup Worker1 self.rtm.enqueue(engine.WorkerCleanupEvent(worker1)) self._wait() # check that last local subscriber callback for RT1 is called self.assertEqual(1, self.rtm.last_local_subscriber_callback.call_count) def test_e1_dump_state(self): # BGPPeerWorker1 advertises a route for RT1 and RT2 bgp_peer_worker1 = self._new_worker("BGPWorker1", bpw.BGPPeerWorker) 
self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 advertises an other route for RT2 self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI2, [t.RT2], bgp_peer_worker1, t.NH1) # BGPPeerWorker1 subscribes to RT1 self._worker_subscriptions(bgp_peer_worker1, [t.RT1]) # Worker1 subscribes to RT1 and RT2 worker1 = self._new_worker("worker.Worker-1", worker.Worker) self._worker_subscriptions(worker1, [t.RT1, t.RT2]) # Worker2 subscribes to RT1 worker2 = self._new_worker("worker.Worker-2", worker.Worker) self._worker_subscriptions(worker2, [t.RT1]) # Worker3 subscribes to RT3 worker3 = self._new_worker("worker.Worker-3", worker.Worker) self._worker_subscriptions(worker3, [t.RT3]) self.rtm._dump_state() def test_7_matches(self): m1a = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), exa.RouteTarget(64512, 1)) m1b = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), exa.RouteTarget(64512, 1)) m1c = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), exa.RouteTarget(64512, 1, False)) m2 = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), exa.RouteTarget(64512, 2)) m3 = rtm.Match(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), exa.RouteTarget(64513, 1)) self.assertEqual(hash(m1a), hash(m1b)) self.assertEqual(hash(m1a), hash(m1c)) self.assertNotEqual(hash(m1a), hash(m2)) self.assertNotEqual(hash(m1a), hash(m3)) self.assertEqual(m1a, m1b) self.assertEqual(m1a, m1c) self.assertNotEqual(m1a, m2) self.assertNotEqual(m1a, m3) def test_f1_test_empty_rt(self): # worker advertises a route with no RT w1 = self._new_worker("Worker1", worker.Worker) subscribe = engine.Subscription(exa.AFI(exa.AFI.ipv4), exa.SAFI(exa.SAFI.mpls_vpn), None, w1) self.rtm.enqueue(subscribe) w2 = self._new_worker("Worker2", worker.Worker) route_event = engine.RouteEvent( engine.RouteEvent.ADVERTISE, engine.RouteEntry(t.NLRI1, None, exa.Attributes()), w2) self.rtm.enqueue(route_event) 
self._wait() self.assertEqual(1, w1.enqueue.call_count, "1 route advertised should be synthesized to Worker1") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_tracker_worker.py0000664000175000017500000011456000000000000032376 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module:: test_tracker_worker :synopsis: a module that defines several test cases for the tracker_worker module. In particular, unit tests for TrackerWorker class. Setup: Run TrackerWorker instance. TearDown: Stop TrackerWorker instance. TrackerWorker is in charge to receive RouteEvent from RouteTableManager. A RouteEvent contains an event type ADVERTIZE or WITHDRAW, and a RouteEntry. TrackerWorker should call new_best_route and/or best_route_removed if the new RouteEntry changes the current list of the known best routes. The current list of the known best routes, which can be modified by the new RouteEntry, is selected thanks to the tracked_entry associated to the new RouteEntry. The tracked_entry is obtained thanks to _route2TrackedEntry. _compare_routes is used to compare 2 RouteEntry. 
Unit tests are organized as follow: TestA: basic tests, advertise several routes with different NLRI and same or different sources TestB: same routes (with _compare_routes) announced by different sources TestC: different routes (with _compare_routes) announced by different sources, TrackerWorker selects the best route. TestD: ECMP routes or same routes (with _compare_routes), same source, same attributes except NextHop TestE: different routes (with compare_routes announced by the same source with replaced_route not none """ import copy import threading from unittest import mock import testtools from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import tracker_worker from networking_bagpipe.bagpipe_bgp.engine import worker from networking_bagpipe.tests.unit.bagpipe_bgp import base as t def _test_compare_routes(self, route_a, route_b): if (route_a.nlri != route_b.nlri or route_a.afi != route_b.afi or route_a.safi != route_b.safi): raise Exception('Bug: compare_routes called with routes having ' 'different nlri/afi/safi') else: if (route_a.attributes.sameValuesAs(route_b.attributes)): return 0 else: lp_a = route_a.attributes[exa.Attribute.CODE.LOCAL_PREF].localpref nh_a = route_a.attributes[exa.Attribute.CODE.NEXT_HOP].top() lp_b = route_b.attributes[exa.Attribute.CODE.LOCAL_PREF].localpref nh_b = route_b.attributes[exa.Attribute.CODE.NEXT_HOP].top() if nh_a != nh_b and lp_a == lp_b: # ECMP routes return 0 else: return (lp_a > lp_b) - (lp_b > lp_a) class TrackerWorkerThread(tracker_worker.TrackerWorker, threading.Thread): def __init__(self): threading.Thread.__init__(self, name='TrackerWorkerThread') self.setDaemon(True) tracker_worker.TrackerWorker.__init__( self, mock.Mock(), 'TrackerWorker', _test_compare_routes) def stop(self): self._please_stop.set() self._queue.put(worker.STOP_EVENT) self._stopped() def route_to_tracked_entry(self, route): return route.nlri # the 
definitions below are needed because TrackerWorker is an abstract # class def new_best_route(self, entry, route): pass def best_route_removed(self, entry, route, last): pass class TestTrackerWorker(testtools.TestCase, t.BaseTestBagPipeBGP): def setUp(self): super().setUp() self.tracker_worker = TrackerWorkerThread() self.tracker_worker.start() self.set_event_target_worker(self.tracker_worker) self._calls = [] def tearDown(self): super().tearDown() self.tracker_worker.stop() self.tracker_worker.join() def _check_calls(self, call_args_list, expected_list, ordered=True): # use to check the calls to new_best_route and best_route_removed # against a list of expected calls expected_list_copy = [] # clear source field in the routes in expected calls # because the new_best_route and best_route_removed do not receive # routes with this field set for expected in expected_list: route = copy.copy(expected[1]) route.source = None self.assertIn(len(expected), (2, 3)) if len(expected) == 2: expected_list_copy.append((expected[0], route)) elif len(expected) == 3: expected_list_copy.append((expected[0], route, expected[2])) if not ordered: expected_list_copy = sorted(expected_list_copy, key=repr) call_args_list = sorted(call_args_list, key=lambda x: repr(x[0])) for ((call_args, _), expected) in zip(call_args_list, expected_list_copy): self.assertEqual(expected[0], call_args[0], 'Bad prefix') observed_route_entry = call_args[1] expected_route_entry = expected[1] self.assertEqual(expected_route_entry, observed_route_entry) if len(expected) >= 3: self.assertEqual(expected[2], call_args[2], "wrong 'last' flag") def _call_list(self, method): def side_effect(*args, **kwargs): self._append_call(method) return side_effect def test_a1_different_nlri_same_source(self): # A source A advertises and withdraws routes for different NLRI. 
# Mock objects self.tracker_worker.new_best_route = mock.Mock() self.tracker_worker.best_route_removed = mock.Mock() # Only 1 source A worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # Source A advertises a route for NLRI1 route_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source A advertises a route for NLRI2 route_nlri2a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI2, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source A withdraws the route for NLRI1 self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source A withdraws the route for NLRI2 self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI2, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Check calls and arguments list to new_best_route and # best_route_removed self.assertEqual(2, self.tracker_worker.new_best_route.call_count, '2 new best routes: 1 for NLRI1 and 1 for NLRI2') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route_nlri1a.route_entry), (t.NLRI2, route_nlri2a.route_entry)]) self.assertEqual(2, self.tracker_worker.best_route_removed.call_count, '2 old routes removed: 1 for NLRI1 and 1 for NLRI2') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route_nlri1a.route_entry, True), (t.NLRI2, route_nlri2a.route_entry, True)]) def test_a2_different_nlri_different_source(self): # 2 sources A and B advertise and withdraw routes for different NLRI. 
# Mock objects self.tracker_worker.new_best_route = mock.Mock() self.tracker_worker.best_route_removed = mock.Mock() # 2 sources: A and B worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') # Source A advertises a route for NLRI1 route_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source B advertises a route for NLRI2 route_nlri2B = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI2, [t.RT1, t.RT2], worker_b, t.NH1, 100) # Source A withdraws the route for NLRI1 self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source B withdraws the route for NLRI2 self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI2, [t.RT1, t.RT2], worker_b, t.NH1, 100) # Check calls and arguments list to new_best_route and # best_route_removed self.assertEqual(2, self.tracker_worker.new_best_route.call_count, '2 new_best_route calls: 1 for NLRI1 and 1 for NLRI2') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route_nlri1a.route_entry), (t.NLRI2, route_nlri2B.route_entry)]) self.assertEqual(2, self.tracker_worker.best_route_removed.call_count, '2 best_route_removed calls: 1 for NLRI1 and 1 for ' 'NLRI2') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route_nlri1a.route_entry, True), (t.NLRI2, route_nlri2B.route_entry, True)]) def test_a3_same_nlri_same_source(self): # A source A advertises the same route for the same NLRI # Mock objects self.tracker_worker.new_best_route = mock.Mock() self.tracker_worker.best_route_removed = mock.Mock() # 1 source: A worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # Source A advertises a route for NLRI1 route_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source A advertises the same route for NLRI1 self._new_route_event( 
engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Check calls and arguments list to new_best_route and # best_route_removed self.assertEqual(1, self.tracker_worker.new_best_route.call_count, 'expected 1 new_best_route call for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route_nlri1a.route_entry), (t.NLRI1, route_nlri1a.route_entry)]) def test_a4_withdraw_nlri_not_known(self): # A source A withdraws a route that does not exist. self.tracker_worker.new_best_route = mock.Mock() self.tracker_worker.best_route_removed = mock.Mock() # 1 source: A worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # Source A withdraws a route for NLRI1 which is not known by # tracker_worker self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Check calls to new_best_route and best_route_removed self.assertEqual(0, self.tracker_worker.new_best_route.call_count, 'new_best_route should not have been called') self.assertEqual(0, self.tracker_worker.best_route_removed.call_count, 'best_route_removed should not have been called') def test_b1_is_the_current_best_route(self): # The route which is advertised by another source is the current best # route self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 2 sources: A and B worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') # Source A advertises a route for NLRI1 self._append_call("RE1") route_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source B advertises the same route for NLRI1 self._append_call("RE2") route_nlri1B = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 100) # Source A withdraws the route for NLRI1 
self._append_call("RE3") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source B withdraws the route for NLRI1 self._append_call("RE4") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 100) # Check calls and arguments list to new_best_route and # best_route_removed self.assertEqual( 1, self.tracker_worker.new_best_route.call_count, '1 new best route call for NLRI1') self._check_calls( self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route_nlri1a.route_entry)]) self.assertEqual( 1, self.tracker_worker.best_route_removed.call_count, '1 best_route_removed call for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route_nlri1B.route_entry, True)]) expected_calls = ["RE1", t.NBR, "RE2", "RE3", "RE4", t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') def test_b2_is_not_the_current_best_route(self): # The route which is advertised by an other source is not the current # best route but will become the best route self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 3 sources: A, B and C worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') worker_c = worker.Worker(mock.Mock(), 'worker.Worker-C') # Source A advertises route1 for NLRI1 self._append_call("RE1") route1Nlri1 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B advertises route2 for NLRI1 : route1 is better than route2 self._append_call("RE2") route2Nlri1 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source C advertises also route2 self._append_call("RE3") self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_c, 
t.NH1, 200) # Source A withdraws route1 self._append_call("RE4") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", "RE3", "RE4", t.NBR, t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 2, self.tracker_worker.new_best_route.call_count, '2 new best route call for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1Nlri1.route_entry), (t.NLRI1, route2Nlri1.route_entry)]) self.assertEqual( 1, self.tracker_worker.best_route_removed.call_count, '1 best_route_removed call for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1Nlri1.route_entry, False)]) def test_c1_route1_best_route(self): # Route1 is the best route # Mock objects self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 2 sources : A and B worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') # Source A advertises a route1 for NLRI1 self._append_call("RE1") route1_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B advertises a route2 for NLRI1 with different attributes. 
# Route1 is better than Route2 self._append_call("RE2") route2_nlri1b = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source A withdraws route1 for NLRI1 self._append_call("RE3") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B withdraws route2 for NLRI1 self._append_call("RE4") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", "RE3", t.NBR, t.BRR, "RE4", t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 2, self.tracker_worker.new_best_route.call_count, '2 new new_best_route calls for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry), (t.NLRI1, route2_nlri1b.route_entry)]) self.assertEqual( 2, self.tracker_worker.best_route_removed.call_count, '2 best_route_removed calls for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry, False), (t.NLRI1, route2_nlri1b.route_entry, True)]) def test_c2_route2_best_route(self): # Route2 is the best route # Mock objects self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 2 sources: A and B worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') # Source A advertises a route1 for NLRI1 self._append_call("RE1") route1_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source B advertises a route2 for NLRI1. 
Route2 is better than Route1 self._append_call("RE2") route2_nlri1b = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source A withdraws route1 for NLRI1 self._append_call("RE3") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", t.NBR, t.BRR, "RE3"] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 2, self.tracker_worker.new_best_route.call_count, '2 new new_best_route calls for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry), (t.NLRI1, route2_nlri1b.route_entry)]) self.assertEqual( 1, self.tracker_worker.best_route_removed.call_count, '1 best_route_removed call for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry, False)]) def test_c3_select_new_best_route_among_several(self): # When current best route is withdrawn, the new best route should be # selected among several routes self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 3 sources: A, B and C worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') worker_c = worker.Worker(mock.Mock(), 'worker.Worker-C') # Source A advertises a route1 for NLRI1 self._append_call("RE1") route1_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B advertises a route2 for NLRI1. Route1 is better than Route2 self._append_call("RE2") route2_nlri1b = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source C advertises a route3 for NLRI1. 
Route2 is better than Route3 self._append_call("RE3") route3_nlri1c = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_c, t.NH1, 100) # Source A withdraws route1 for NLRI1 self._append_call("RE4") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B withdraws route2 for NLRI1 self._append_call("RE5") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source C withdraws route3 for NLRI1 self._append_call("RE6") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_c, t.NH1, 100) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", "RE3", "RE4", t.NBR, t.BRR, "RE5", t.NBR, t.BRR, "RE6", t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 3, self.tracker_worker.new_best_route.call_count, '3 new new_best_route calls for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry), (t.NLRI1, route2_nlri1b.route_entry), (t.NLRI1, route3_nlri1c.route_entry)]) self.assertEqual( 3, self.tracker_worker.best_route_removed.call_count, '3 best_route_removed calls for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry, False), (t.NLRI1, route2_nlri1b.route_entry, False), (t.NLRI1, route3_nlri1c.route_entry, True)]) def test_d1_ecmp_routes(self): # ECMP routes are routes advertised by the same worker with the same # LP and different NH self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 1 source: A worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # Source A advertises a route1 for NLRI1 self._append_call("RE1") route1_nlri1a = self._new_route_event( 
engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source A advertises a route2 for NLRI1. route2 is equal to route1 # with compare_routes, but the next_hop are different self._append_call("RE2") route2_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH2, 100) # Source A withdraws route1 for NLRI1 self._append_call("RE3") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100) # Source A withdraws route2 for NLRI1 self._append_call("RE4") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH2, 100) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", t.NBR, "RE3", t.BRR, "RE4", t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 2, self.tracker_worker.new_best_route.call_count, '2 new new_best_route calls for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry), (t.NLRI1, route2_nlri1a.route_entry)]) self.assertEqual( 2, self.tracker_worker.best_route_removed.call_count, '2 best_route_removed calls for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry, False), (t.NLRI1, route2_nlri1a.route_entry, True)]) def test_e1_replace_br_is_nbr(self): # Advertise a route that replaces the best route and becomes the new # best route self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 1 source: A worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # Source A advertises a route1 for NLRI1 self._append_call("RE1") route1_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 200) # Source A advertises a route2 for NLRI1. 
Route1 is better than Route2 # BUT Route2 replaces Route1 self._append_call("RE2") route2_nrli1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100, route1_nlri1a.route_entry) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", t.NBR, t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 2, self.tracker_worker.new_best_route.call_count, '2 new new_best_route calls for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry), (t.NLRI1, route2_nrli1a.route_entry)]) self.assertEqual( 1, self.tracker_worker.best_route_removed.call_count, '1 best_route_removed call for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry, False)]) def test_e2_replace_br_is_not_nbr(self): # Advertise a route that replaces the best route but does not become # the new best route self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 2 sources : A and B worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') # Source A advertises a route1 for NLRI1 self._append_call("RE1") route1_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B advertises a route2. Route1 is better than Route2 self._append_call("RE2") route2_nrli1b = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source A advertises a route3 for NLRI1. Route3 replaces Route1. # Route2 is better than route3. 
self._append_call("RE3") route3_nrli1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100, route1_nlri1a.route_entry) # Source B withdraws route2 for NLRI1 self._append_call("RE4") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", "RE3", t.NBR, t.BRR, "RE4", t.NBR, t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 3, self.tracker_worker.new_best_route.call_count, '3 new new_best_route calls for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry), (t.NLRI1, route2_nrli1b.route_entry), (t.NLRI1, route3_nrli1a.route_entry)]) self.assertEqual( 2, self.tracker_worker.best_route_removed.call_count, '2 best_route_removed calls for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry, False), (t.NLRI1, route2_nrli1b.route_entry, False)]) def test_e3_replace_br_is_not_nbr(self): # Advertise a route that replaces the best route but does not become # the new best route self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 3 sources: A, B and C worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') worker_c = worker.Worker(mock.Mock(), 'worker.Worker-C') # Source A advertises route1 for NLRI1 self._append_call("RE1") route1_nlri1 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B advertises route2 for NLRI1 : route1 is better than route2 self._append_call("RE2") route2_nlri1 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, 
t.NH1, 200) # Source C advertises also route2 self._append_call("RE3") self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_c, t.NH1, 200) # Source A advertises route3 which replaces route1 self._append_call("RE4") self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 100, route1_nlri1.route_entry) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", "RE3", "RE4", t.NBR, t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 2, self.tracker_worker.new_best_route.call_count, '2 new best route call for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1.route_entry), (t.NLRI1, route2_nlri1.route_entry)]) self.assertEqual( 1, self.tracker_worker.best_route_removed.call_count, '1 best_route_removed call for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1.route_entry)]) def test_e4_not_replace_br(self): # Advertise a route that does not replaces the best route and becomes # the new best route when the best route is withdrawn self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 2 sources : A and B worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') # Source A advertises a route1 for NLRI1 self._append_call("RE1") route1_nlri1a = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Source B advertises a route2. Route1 is better than Route2 self._append_call("RE2") route2_nlri1b = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source B advertises a route3 for NLRI1. Route3 replaces Route2. 
# Route1 is better than Route3 self._append_call("RE3") route3_nlri1b = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 100, route2_nlri1b.route_entry) # Source A withdraws route1 for NLRI1 self._append_call("RE4") self._new_route_event( engine.RouteEvent.WITHDRAW, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = ["RE1", t.NBR, "RE2", "RE3", "RE4", t.NBR, t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self.assertEqual( 2, self.tracker_worker.new_best_route.call_count, '2 new new_best_route calls for NLRI1') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry), (t.NLRI1, route3_nlri1b.route_entry)]) self.assertEqual( 1, self.tracker_worker.best_route_removed.call_count, '1 best_route_removed call for NLRI1') self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1_nlri1a.route_entry, False)]) def test_e5_replace_br_is_nbr_equal(self): # Same as E3, but the route that replaces our current best compares # equally to the two initially less preferred routes, and becomes best # route with them self.tracker_worker.new_best_route = mock.Mock( side_effect=self._call_list(t.NBR)) self.tracker_worker.best_route_removed = mock.Mock( side_effect=self._call_list(t.BRR)) # 3 sources: A, B and C worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') worker_b = worker.Worker(mock.Mock(), 'worker.Worker-B') worker_c = worker.Worker(mock.Mock(), 'worker.Worker-C') # Source A advertises route1 for NLRI1 route1 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 300) # We will only check events after this first one # to allow for a order-independent test after RE4 del self.tracker_worker.new_best_route.call_args_list[:] # Source B advertises route2 for NLRI1 : route1 is better than route2 
self._append_call("RE2") route2 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_b, t.NH1, 200) # Source C advertises also route2 self._append_call("RE3") route3 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_c, t.NH2, 200) # Source A advertises route3 which replaces route1 self._append_call("RE4") route4 = self._new_route_event(engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH3, 200, route1.route_entry) # Check calls and arguments list to new_best_route and # best_route_removed expected_calls = [t.NBR, "RE2", "RE3", "RE4", t.NBR, t.NBR, t.NBR, t.BRR] self.assertEqual(expected_calls, self._calls, 'Wrong call sequence') self._check_calls(self.tracker_worker.new_best_route.call_args_list, [(t.NLRI1, route2.route_entry), (t.NLRI1, route3.route_entry), (t.NLRI1, route4.route_entry)], False) self._check_calls( self.tracker_worker.best_route_removed.call_args_list, [(t.NLRI1, route1.route_entry, False)]) def test_max_lp_is_best(self): worker_a = worker.Worker(mock.Mock(), 'worker') route_lp55 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 55).route_entry route_lp45 = self._new_route_event( engine.RouteEvent.ADVERTISE, t.NLRI1, [t.RT1, t.RT2], worker_a, t.NH1, 45).route_entry self.assertGreater( tracker_worker.compare_ecmp(mock.Mock(), route_lp55, route_lp45), 0) self.assertGreater( tracker_worker.compare_no_ecmp(mock.Mock(), route_lp55, route_lp45), 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_vpn_instance.py0000664000175000017500000022076200000000000032043 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. module:: test_vpn_instance :synopsis: module that defines several test cases for the vpn_instance module. TestInitVPNInstance defines a class dedicated to test init a VPNInstance with optional vni. TestVPNInstance class is dedicated to unit tests for VPNInstance class. Setup : Start VPNInstance thread instance. TearDown : Stop VPNInstance thread instance. VPNInstance is a base class for objects used to manage an E-VPN instance (EVI) or IP-VPN instance (VRF) Tests are organized as follow : - testAx use cases to test endpoints plug with different combinations of MAC and IP addresses on a port - testBx use cases to test endpoints plug with different combinations of MAC and IP addresses on different ports - testCx use cases to test endpoints unplug with different combinations of MAC and IP addresses as the one plugged on a port - testDx use cases to test endpoints unplug with different combinations of MAC and IP addresses as the ones plugged on different ports """ import testtools from unittest import mock from networking_bagpipe.bagpipe_bgp.common import exceptions as exc from networking_bagpipe.bagpipe_bgp import engine from networking_bagpipe.bagpipe_bgp.engine import exa from networking_bagpipe.bagpipe_bgp.engine import flowspec from networking_bagpipe.bagpipe_bgp.engine import ipvpn as ipvpn_routes from networking_bagpipe.bagpipe_bgp.engine import worker from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers from networking_bagpipe.bagpipe_bgp.vpn import identifier_allocators from networking_bagpipe.bagpipe_bgp.vpn import ipvpn from 
networking_bagpipe.bagpipe_bgp.vpn import vpn_instance from networking_bagpipe.tests.unit.bagpipe_bgp import base as t MAC1 = "00:00:de:ad:be:ef" IP1 = "10.0.0.1/32" LOCAL_PORT1 = {'linuxif': 'tap1'} MAC2 = "00:00:fe:ed:fa:ce" IP2 = "10.0.0.2/32" LOCAL_PORT2 = {'linuxif': 'tap2'} MAC3 = "00:00:de:ad:c0:de" IP3 = "10.0.0.3/32" LOCAL_PORT3 = {'linuxif': 'tap3'} MAC4 = "00:00:fe:ed:f0:0d" IP4 = "10.0.0.4/32" LOCAL_PORT4 = {'linuxif': 'tap4'} RTRecord1 = exa.RTRecord.from_rt(t.RT1) RTRecord2 = exa.RTRecord.from_rt(t.RT2) RTRecord3 = exa.RTRecord.from_rt(t.RT3) RTRecord4 = exa.RTRecord.from_rt(t.RT4) VPN_ID = 1 VPN_EXT_ID = 1 GW_IP = "10.0.0.1" GW_MASK = 24 VNID = 255 def _extract_nlri_from_call(vpn_inst, method, call_index=0): calls = getattr(vpn_inst, method).call_args_list return calls[call_index][0][0].nlri def _extract_rt_from_call(vpn_inst, method, call_index=0): calls = getattr(vpn_inst, method).call_args_list return calls[call_index][0][0].route_targets def _extract_rtrec_from_call(vpn_inst, method, call_index=0): calls = getattr(vpn_inst, method).call_args_list route = calls[call_index][0][0] return route.ecoms(exa.RTRecord) def _extract_traffic_redirect_from_call(vpn_inst, method, call_index=0): calls = getattr(vpn_inst, method).call_args_list route = calls[call_index][0][0] for ecom in route.ecoms(exa.TrafficRedirect): return exa.RouteTarget(int(ecom.asn), int(ecom.target)) return None def _extract_traffic_classifier_from_call(vpn_inst, method, call_index=0): calls = getattr(vpn_inst, method).call_args_list traffic_classifier = vpn_instance.TrafficClassifier() traffic_classifier.map_redirect_rules_2_traffic_classifier( calls[call_index][0][0].nlri.rules) return traffic_classifier class TestableVPNInstance(vpn_instance.VPNInstance): afi = exa.AFI(exa.AFI.ipv4) safi = exa.SAFI(exa.SAFI.mpls_vpn) def best_route_removed(self, entry, route): pass def new_best_route(self, entry, route, last): pass def route_to_tracked_entry(self, route): return route def 
generate_vif_bgp_route(self): pass API_PARAMS = { 'vpn_type': 'EVPN', 'vpn_instance_id': 'testinstance', 'mac_address': 'de:ad:00:00:be:ef', 'ip_address': '192.168.0.1/24', 'import_rt': ['64512:47'], 'export_rt': ['64512:47'], 'local_port': 'tap42' } def api_params(): # return a new dict each time # to avoid concurrency issues return dict(API_PARAMS) class TestVPNInstanceAPIChecks(testtools.TestCase): def _test_validate_convert_missing(self, method, missing_param, params=None): if params is None: params = api_params() params.pop(missing_param) self.assertRaises(exc.APIMissingParameterException, method, params) def test_validate_convert_attach(self): method = vpn_instance.VPNInstance.validate_convert_attach_params self._test_validate_convert_missing(method, 'vpn_instance_id') self._test_validate_convert_missing(method, 'mac_address') self._test_validate_convert_missing(method, 'local_port') self._test_validate_convert_missing(method, 'import_rt') self._test_validate_convert_missing(method, 'export_rt') def test_validate_convert_detach(self): method = vpn_instance.VPNInstance.validate_convert_detach_params self._test_validate_convert_missing(method, 'vpn_instance_id') self._test_validate_convert_missing(method, 'mac_address') self._test_validate_convert_missing(method, 'local_port') def test_api_internal_translation(self): params = api_params() vpn_instance.VPNInstance.validate_convert_attach_params(params) self.assertIn('external_instance_id', params) self.assertIn('import_rts', params) self.assertIn('export_rts', params) self.assertIn('localport', params) def test_check_vrf_gateway_ip(self): params = api_params() params['vpn_type'] = 'IPVPN' params['gateway_ip'] = '1.1.1.1' ipvpn.VRF.validate_convert_attach_params(params) self._test_validate_convert_missing( ipvpn.VRF.validate_convert_attach_params, 'gateway_ip', params) def test_direction(self): params = api_params() vpn_instance.VPNInstance.validate_convert_attach_params(params) def test_direction_none(self): 
params = api_params() params['direction'] = None vpn_instance.VPNInstance.validate_convert_attach_params(params) def test_direction_ok(self): params = api_params() params['direction'] = 'to-port' vpn_instance.VPNInstance.validate_convert_attach_params(params) params = api_params() params['direction'] = 'from-port' vpn_instance.VPNInstance.validate_convert_attach_params(params) def test_direction_bogus(self): params = api_params() params['direction'] = 'floop' self.assertRaises( exc.APIException, vpn_instance.VPNInstance.validate_convert_attach_params, params) def test_mac_address_bogus(self): params = api_params() params['mac_address'] = 'gg:gg:gg:gg:gg:gg' self.assertRaises( exc.MalformedMACAddress, vpn_instance.VPNInstance.validate_convert_attach_params, params) def test_ip_address_bogus(self): params = api_params() params['ip_address'] = '257.303.1.' self.assertRaises( exc.MalformedIPAddress, vpn_instance.VPNInstance.validate_convert_attach_params, params) class TestInitVPNInstance(testtools.TestCase): def setUp(self): super().setUp() self.mock_manager = mock.Mock() self.mock_manager.label_allocator.release = mock.Mock() self.mock_dp_driver = mock.Mock() self.mock_dp_driver.initialize_dataplane_instance = mock.Mock() def test_init_stop_VPNInstance_with_forced_vni(self): # Initialize a VPNInstance with a forced VNID > 0 vpn = TestableVPNInstance(self.mock_manager, self.mock_dp_driver, VPN_EXT_ID, VPN_ID, [t.RT1], [t.RT1], GW_IP, GW_MASK, None, None, vni=VNID) # Check that forced VNID is used as instance_label self.assertTrue(vpn.forced_vni) self.assertEqual(VNID, vpn.instance_label, "VPN instance label should be forced to VNID") vpn.dp_driver.initialize_dataplane_instance.assert_called_once_with( VPN_ID, VPN_EXT_ID, GW_IP, GW_MASK, VNID) # Stop the VPNInstance to check that label release is not called vpn.stop() vpn.manager.label_allocator.release.assert_not_called() def test_init_stop_VPNInstance_without_forced_vni(self): # Initialize a VPNInstance with no vni 
vpn = TestableVPNInstance(self.mock_manager, self.mock_dp_driver, VPN_EXT_ID, VPN_ID, [t.RT1], [t.RT1], GW_IP, GW_MASK, None, None) # Check that VPN instance_label is locally-assigned self.assertFalse(vpn.forced_vni) vpn.dp_driver.initialize_dataplane_instance.assert_called_once_with( VPN_ID, VPN_EXT_ID, GW_IP, GW_MASK, vpn.instance_label) # Stop the VPNInstance to check that label release is called # with locally assigned instance label vpn.stop() vpn.manager.label_allocator.release.assert_called_once_with( vpn.instance_label) class TestVPNInstance(t.BaseTestBagPipeBGP, testtools.TestCase): def setUp(self): super().setUp() self.mock_dataplane = mock.Mock( spec=dataplane_drivers.VPNInstanceDataplane) mock_dp_driver = mock.Mock( spec=dataplane_drivers.DataplaneDriver) mock_dp_driver.initialize_dataplane_instance.return_value = ( self.mock_dataplane ) self.vpn = TestableVPNInstance(mock.Mock(name='VPNManager'), mock_dp_driver, 1, 1, [t.RT1], [t.RT1], '10.0.0.1', 24, None, None) self.vpn.synthesize_vif_bgp_route = mock.Mock( return_value=engine.RouteEntry(t.NLRI1, [t.RT1])) self.vpn._advertise_route = mock.Mock() self.vpn._withdraw_route = mock.Mock() self.vpn.start() self.set_event_target_worker(self.vpn) def tearDown(self): super().tearDown() self.vpn.stop() self.vpn.join() def _get_ip_address(self, ip_address_prefix): return ip_address_prefix[0:ip_address_prefix.find('/')] def _validate_ip_address_2_mac_address_consistency(self, mac_address, ip_address1, ip_address2=None): # Validate IP address -> MAC address consistency self.assertIn(ip_address1, self.vpn.ip_address_2_mac) if ip_address2: self.assertIn(ip_address1, self.vpn.ip_address_2_mac) self.assertEqual( self.vpn.ip_address_2_mac[ip_address1], self.vpn.ip_address_2_mac[ip_address2]) else: self.assertIn( mac_address, self.vpn.ip_address_2_mac[ip_address1]) def _chk_mac_2_localport_data_consistency(self, mac_address, localport): # Validate MAC address -> Port informations consistency self.assertIn(mac_address, 
self.vpn.mac_2_localport_data) port_info = self.vpn.mac_2_localport_data[ mac_address]['port_info'] self.assertEqual(localport['linuxif'], port_info['linuxif']) def _validate_localport_2_endpoints_consistency(self, length, localport, endpoints): # Validate Port -> Endpoint (MAC, IP) tuple consistency self.assertEqual( length, len(self.vpn.localport_2_endpoints[localport['linuxif']])) for endpoint in endpoints: self.assertIn( endpoint, self.vpn.localport_2_endpoints[localport['linuxif']]) def test_validate_convert_params_duplicate_rts(self): test_params = {'vpn_instance_id': 'foo', 'mac_address': 'aa:bb:cc:dd:ee:ff', 'ip_address': '1.2.3.4', 'local_port': 'foo', 'import_rt': ['64512:1', '64512:1'], 'export_rt': '64512:4, 64512:4'} vpn_instance.VPNInstance.validate_convert_params(test_params) self.assertEqual(['64512:1'], test_params['import_rt']) self.assertEqual(['64512:4'], test_params['export_rt']) def test_a1_plug_endpoint_twice_same_port(self): # Plug one endpoint with same MAC and IP addresses twice on a port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.dataplane.vif_plugged.assert_called_once() self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC1, IP1)]) def test_a2_plug_multiple_endpoints_with_same_ip_same_port(self): # Plug multiple endpoints with different MAC addresses and same IP # address on a port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # An IP address correspond to only one MAC address, exception must be # raised self.assertRaises(exc.APIException, self.vpn.vif_plugged, MAC2, IP1, LOCAL_PORT1) self.vpn.dataplane.vif_plugged.assert_called_once() self.vpn._advertise_route.assert_called_once() self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) 
self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC1, IP1)]) self.assertNotIn(MAC2, self.vpn.mac_2_localport_data) def test_a3_plug_multiple_endpoints_with_same_mac_same_port(self): # Plug multiple endpoints with same MAC address and different IP # addresses on a port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC1, IP2, LOCAL_PORT1) self.assertEqual(2, self.vpn.dataplane.vif_plugged.call_count, "Port different IP addresses must be plugged on " "dataplane") self.assertEqual(2, self.vpn._advertise_route.call_count, "Route for port different IP addresses must be " "advertised") self._validate_ip_address_2_mac_address_consistency(MAC1, IP1, IP2) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 2, LOCAL_PORT1, [(MAC1, IP1), (MAC1, IP2)]) def test_a4_plug_multiple_endpoints_same_port(self): # Plug multiple endpoints with different MAC and IP addresses on a port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) self.assertEqual(2, self.vpn.dataplane.vif_plugged.call_count, "Port different endpoints must be plugged on " "dataplane") self.assertEqual(2, self.vpn._advertise_route.call_count, "Route for port different endpoints must be " "advertised") self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._validate_ip_address_2_mac_address_consistency(MAC2, IP2) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._chk_mac_2_localport_data_consistency(MAC2, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 2, LOCAL_PORT1, [(MAC1, IP1), (MAC2, IP2)]) def test_b1_plug_endpoint_twice_different_port(self): # Plug one endpoint with same MAC and IP addresses twice on different # ports self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # A port correspond to only one MAC address, exception must be raised self.assertRaises(exc.APIException, self.vpn.vif_plugged, MAC1, IP1, LOCAL_PORT2) 
self.vpn.dataplane.vif_plugged.assert_called_once() self.vpn._advertise_route.assert_called_once() self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC1, IP1)]) self.assertNotIn( LOCAL_PORT2['linuxif'], self.vpn.localport_2_endpoints) def test_b2_plug_multiple_endpoints_with_same_ip_different_port(self): # Plug multiple endpoints with different MAC addresses and same IP # address on different port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # An IP address correspond to only one MAC address, exception must be # raised self.assertRaises(exc.APIException, self.vpn.vif_plugged, MAC2, IP1, LOCAL_PORT2) self.vpn.dataplane.vif_plugged.assert_called_once() self.vpn._advertise_route.assert_called_once() self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC1, IP1)]) self.assertNotIn( LOCAL_PORT2['linuxif'], self.vpn.localport_2_endpoints) def test_b4_plug_multiple_endpoints_with_same_mac_different_port(self): # Plug multiple endpoints with same MAC address and different IP # addresses on different ports self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # A port correspond to only one MAC address, exception must be raised self.assertRaises(exc.APIException, self.vpn.vif_plugged, MAC1, IP2, LOCAL_PORT2) self.vpn.dataplane.vif_plugged.assert_called_once() self.vpn._advertise_route.assert_called_once() self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC1, IP1)]) self.assertNotIn( LOCAL_PORT2['linuxif'], self.vpn.localport_2_endpoints) def test_b5_plug_multiple_endpoints_different_port(self): # Plug multiple endpoints with different MAC and IP addresses on # 
different ports self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT2) self.assertEqual(2, self.vpn.dataplane.vif_plugged.call_count, "All ports must be plugged on dataplane") self.assertEqual(2, self.vpn._advertise_route.call_count, "Routes for all ports must be advertised") self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC1, IP1)]) self._validate_ip_address_2_mac_address_consistency(MAC2, IP2) self._chk_mac_2_localport_data_consistency(MAC2, LOCAL_PORT2) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT2, [(MAC2, IP2)]) def test_c1_unplug_unique_endpoint_same_port(self): # Unplug one endpoint with same MAC and IP addresses as the one plugged # on port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) label1 = self.vpn.mac_2_localport_data[MAC1]['label'] self.vpn.vif_unplugged(MAC1, IP1) self.vpn.dataplane.vif_unplugged.assert_called_once() self.vpn.dataplane.vif_unplugged.assert_called_with( MAC1, self._get_ip_address(IP1), LOCAL_PORT1, label1, None, True) self.vpn._advertise_route.assert_called_once() self.vpn._withdraw_route.assert_called_once() self.assertEqual({}, self.vpn.mac_2_localport_data) self.assertEqual({}, self.vpn.ip_address_2_mac) self.assertEqual({}, self.vpn.localport_2_endpoints) def test_c2_unplug_unique_endpoint_with_same_ip_same_port(self): # Unplug one endpoint with different MAC addresses and same IP address # as the one plugged on port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.assertRaises(exc.APIException, self.vpn.vif_unplugged, MAC2, IP1) self.vpn.dataplane.vif_unplugged.assert_not_called() self.vpn._advertise_route.assert_called_once() self.assertIn(MAC1, self.vpn.mac_2_localport_data) self.assertIn(IP1, self.vpn.ip_address_2_mac) self.assertIn(LOCAL_PORT1['linuxif'], self.vpn.localport_2_endpoints) def 
test_c3_unplug_unique_endpoint_with_same_mac_same_port(self): # Unplug one endpoint with same MAC address and different IP addresses # as the one plugged on port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.assertRaises(exc.APIException, self.vpn.vif_unplugged, MAC1, IP2) self.vpn.dataplane.vif_unplugged.assert_not_called() self.vpn._advertise_route.assert_called_once() self.vpn._withdraw_route.assert_not_called() self.assertIn(MAC1, self.vpn.mac_2_localport_data) self.assertIn(IP1, self.vpn.ip_address_2_mac) self.assertIn(LOCAL_PORT1['linuxif'], self.vpn.localport_2_endpoints) def test_c4_unplug_one_endpoint_same_port(self): # Unplug only one endpoint with same MAC and IP addresses # corresponding to one plugged on port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) label1 = self.vpn.mac_2_localport_data[MAC1]['label'] self.vpn.vif_unplugged(MAC1, IP1) self.vpn.dataplane.vif_unplugged.assert_called_once() self.vpn.dataplane.vif_unplugged.assert_called_with( MAC1, self._get_ip_address(IP1), LOCAL_PORT1, label1, None, False) self.assertEqual(2, self.vpn._advertise_route.call_count, "Routes for all port endpoints must be first " "advertised and only one withdrawn") self.vpn._withdraw_route.assert_called_once() self._validate_ip_address_2_mac_address_consistency(MAC2, IP2) self._chk_mac_2_localport_data_consistency(MAC2, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC2, IP2)]) def test_c5_unplug_all_endpoints_same_port(self): # Unplug all endpoints with same MAC and IP addresses # corresponding to those plugged on port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) label1 = self.vpn.mac_2_localport_data[MAC1]['label'] label2 = self.vpn.mac_2_localport_data[MAC2]['label'] self.vpn.vif_unplugged(MAC1, IP1) self.vpn.vif_unplugged(MAC2, IP2) self.assertEqual(2, self.vpn.dataplane.vif_unplugged.call_count, "All port endpoints must be unplugged 
from dataplane") self.vpn.dataplane.vif_unplugged.assert_has_calls([ mock.call(MAC1, self._get_ip_address(IP1), LOCAL_PORT1, label1, None, False), mock.call(MAC2, self._get_ip_address(IP2), LOCAL_PORT1, label2, None, True) ]) self.assertEqual(2, self.vpn._advertise_route.call_count, "Routes for all port endpoints must be first " "advertised and after withdrawn") self.assertEqual(2, self.vpn._withdraw_route.call_count, "Routes for all port endpoints must be first " "advertised and after withdrawn") self.assertEqual({}, self.vpn.mac_2_localport_data) self.assertEqual({}, self.vpn.ip_address_2_mac) self.assertEqual({}, self.vpn.localport_2_endpoints) def test_d1_unplug_unique_endpoints_different_port(self): # Unplug the endpoints with different MAC and IP addresses # corresponding to those plugged on different ports self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT2) label1 = self.vpn.mac_2_localport_data[MAC1]['label'] label2 = self.vpn.mac_2_localport_data[MAC2]['label'] self.vpn.vif_unplugged(MAC1, IP1) self.vpn.vif_unplugged(MAC2, IP2) self.assertEqual(2, self.vpn.dataplane.vif_unplugged.call_count, "All different ports endpoints must be unplugged " "from dataplane") self.vpn.dataplane.vif_unplugged.assert_has_calls([ mock.call(MAC1, self._get_ip_address(IP1), LOCAL_PORT1, label1, None, True), mock.call(MAC2, self._get_ip_address(IP2), LOCAL_PORT2, label2, None, True) ]) self.assertEqual(2, self.vpn._advertise_route.call_count, "Routes for all different ports endpoints must be " "first advertised and after withdrawn") self.assertEqual(2, self.vpn._withdraw_route.call_count, "Routes for all different ports endpoints must be " "first advertised and after withdrawn") self.assertEqual({}, self.vpn.mac_2_localport_data) self.assertEqual({}, self.vpn.ip_address_2_mac) self.assertEqual({}, self.vpn.localport_2_endpoints) def test_d2_unplug_one_endpoint_same_ip_different_port(self): # Unplug one endpoint with different MAC or IP 
address corresponding to # one plugged on another port self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT2) self.assertRaises(exc.APIException, self.vpn.vif_unplugged, MAC1, IP2) self.vpn.dataplane.vif_unplugged.assert_not_called() self.assertEqual(2, self.vpn._advertise_route.call_count, "Routes for all different ports endpoints must only " "be advertised") self._validate_ip_address_2_mac_address_consistency(MAC1, IP1) self._chk_mac_2_localport_data_consistency(MAC1, LOCAL_PORT1) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT1, [(MAC1, IP1)]) self._validate_ip_address_2_mac_address_consistency(MAC2, IP2) self._chk_mac_2_localport_data_consistency(MAC2, LOCAL_PORT2) self._validate_localport_2_endpoints_consistency( 1, LOCAL_PORT2, [(MAC2, IP2)]) def test_d3_unplug_multiple_endpoints_different_port(self): # Unplug multiple endpoints with same MAC and IP addresses # corresponding to those plugged on different ports self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) self.vpn.vif_plugged(MAC3, IP3, LOCAL_PORT2) self.vpn.vif_plugged(MAC4, IP4, LOCAL_PORT2) label1 = self.vpn.mac_2_localport_data[MAC1]['label'] label2 = self.vpn.mac_2_localport_data[MAC2]['label'] label3 = self.vpn.mac_2_localport_data[MAC3]['label'] label4 = self.vpn.mac_2_localport_data[MAC4]['label'] self.vpn.vif_unplugged(MAC1, IP1) self.vpn.vif_unplugged(MAC2, IP2) self.vpn.vif_unplugged(MAC3, IP3) self.vpn.vif_unplugged(MAC4, IP4) self.assertEqual(4, self.vpn.dataplane.vif_unplugged.call_count, "All different ports endpoints must be unplugged " "from dataplane") self.vpn.dataplane.vif_unplugged.assert_has_calls([ mock.call(MAC1, self._get_ip_address(IP1), LOCAL_PORT1, label1, None, False), mock.call(MAC2, self._get_ip_address(IP2), LOCAL_PORT1, label2, None, True), mock.call(MAC3, self._get_ip_address(IP3), LOCAL_PORT2, label3, None, False), mock.call(MAC4, self._get_ip_address(IP4), LOCAL_PORT2, label4, 
None, True) ]) self.assertEqual(4, self.vpn._withdraw_route.call_count, "Routes for all different ports endpoints must be " "first advertised and after withdrawn") self.assertEqual(4, self.vpn._advertise_route.call_count, "Routes for all different ports endpoints must be " "first advertised and after withdrawn") self.assertEqual({}, self.vpn.mac_2_localport_data) self.assertEqual({}, self.vpn.ip_address_2_mac) self.assertEqual({}, self.vpn.localport_2_endpoints) def test_plug_unplug_wildcard_ip(self): self.vpn.vif_plugged(MAC1, None, LOCAL_PORT1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC1, IP2, LOCAL_PORT1) # 3 advertisements should be seen: one without IP, then one for each IP self.assertEqual(3, self.vpn._advertise_route.call_count) # the wildcard should be removed self.assertEqual(1, self.vpn._withdraw_route.call_count) self.vpn.vif_unplugged(MAC1, None) # 3 withdraw should be seen, one for each IP, one without IP self.assertEqual(3, self.vpn._withdraw_route.call_count) def test_plug_unplug_wildcard_ip_no_ip(self): self.vpn.vif_plugged(MAC1, None, LOCAL_PORT1) self.assertEqual(1, self.vpn._advertise_route.call_count) self.vpn.vif_unplugged(MAC1, None) self.assertEqual(1, self.vpn._withdraw_route.call_count) def test_get_lg_localport_data(self): self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) self.vpn.vif_plugged(MAC3, IP3, LOCAL_PORT2) self.vpn.vif_plugged(MAC4, IP4, LOCAL_PORT2) self.vpn.get_lg_local_port_data("") # tests of update_route_targets def _test_update_rts_init(self): self.vpn._advertise_route.reset_mock() route = engine.RouteEntry(t.NLRI1, [t.RT1]) self.vpn.endpoint_2_route = {None: route} def test_update_rts_1(self): self._test_update_rts_init() # no change -> no route update self.vpn.update_route_targets([t.RT1], [t.RT1]) self.vpn._advertise_route.assert_not_called() def test_update_rts_2(self): self._test_update_rts_init() # change imports -> no route update 
self.vpn.update_route_targets([t.RT2], [t.RT1]) self.vpn._advertise_route.assert_not_called() def test_update_rts_3(self): self._test_update_rts_init() # change exports # check that previously advertised routes are readvertised self.vpn.update_route_targets([t.RT1], [t.RT2]) self.vpn._advertise_route.assert_called_once() self.assertIn(t.RT2, _extract_rt_from_call(self.vpn, '_advertise_route')) self.assertNotIn(t.RT1, _extract_rt_from_call(self.vpn, '_advertise_route')) def test_update_rts_3bis(self): self._test_update_rts_init() # change exports # check that previously advertised routes are readvertised self.vpn.update_route_targets([t.RT1], [t.RT1, t.RT2]) self.vpn._advertise_route.assert_called_once() self.assertIn(t.RT2, _extract_rt_from_call(self.vpn, '_advertise_route')) self.assertIn(t.RT1, _extract_rt_from_call(self.vpn, '_advertise_route')) def test_cleanup_assist(self): # simulate a route injected in our VPNInstance worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') self._new_route_event(engine.RouteEvent.ADVERTISE, self._fake_nlri("fake NLRI"), [t.RT1, t.RT2], worker_a, t.NH1, 200) self.mock_dataplane.needs_cleanup_assist.return_value = True with mock.patch.object(self.vpn, 'best_route_removed') as mock_brr: self.vpn.stop() mock_brr.assert_called_once() def test_plug_endpoint_direction_to_port(self): self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1, direction='to-port') self.vpn.dataplane.vif_plugged.assert_called_once() self.vpn._advertise_route.assert_called_once() def test_plug_endpoint_direction_from_port(self): self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1, direction='from-port') self.vpn.dataplane.vif_plugged.assert_called_once() self.vpn._advertise_route.assert_not_called() LOCAL_ADDRESS = '4.5.6.7' NEXT_HOP = '45.45.45.45' IP_ADDR_PREFIX1 = '1.1.1.1/32' IP_ADDR_PREFIX2 = '2.2.2.2/32' IP_ADDR_PREFIX3 = '3.3.3.3/32' STATIC_ADDR_PREFIX1 = '10.10.10.10/32' STATIC_ADDR_PREFIX2 = '20.20.20.20/32' ATTRACT_TRAFFIC_1 = {'redirect_rts': [t.RT5], 
'classifier': {'destinationPort': '80', 'protocol': 'tcp' } } ATTRACT_STATIC_1 = {'to_rt': [t.RT4], 'static_destination_prefixes': [STATIC_ADDR_PREFIX1], 'redirect_rts': [t.RT5], 'classifier': {'destinationPort': '80', 'protocol': 'tcp' } } ATTRACT_STATIC_2 = {'to_rt': [t.RT4], 'static_destination_prefixes': [STATIC_ADDR_PREFIX1, STATIC_ADDR_PREFIX2], 'redirect_rts': [t.RT5], 'classifier': {'destinationPort': '80', 'protocol': 'tcp' } } TC1 = vpn_instance.TrafficClassifier(destination_prefix="1.1.1.1/32", destination_port="80", protocol="tcp") TC2 = vpn_instance.TrafficClassifier(destination_prefix="2.2.2.2/32", destination_port="80", protocol="tcp") TC_STATIC1 = vpn_instance.TrafficClassifier( destination_prefix=STATIC_ADDR_PREFIX1, destination_port="80", protocol="tcp" ) TC_STATIC2 = vpn_instance.TrafficClassifier( destination_prefix=STATIC_ADDR_PREFIX2, destination_port="80", protocol="tcp" ) class TestVRF(t.BaseTestBagPipeBGP, testtools.TestCase): def setUp(self): super().setUp() self.mock_dp = mock.Mock( spec=ipvpn.DummyVPNInstanceDataplane) mock_dp_driver = mock.Mock( spec=ipvpn.DummyDataplaneDriver) mock_dp_driver.initialize_dataplane_instance.return_value = \ self.mock_dp mock_dp_driver.get_local_address.return_value = LOCAL_ADDRESS mock_dp_driver.supported_encaps.return_value = \ [exa.Encapsulation(exa.Encapsulation.Type.DEFAULT)] label_alloc = identifier_allocators.LabelAllocator() bgp_manager = mock.Mock() bgp_manager.get_local_address.return_value = LOCAL_ADDRESS rd_alloc = ( identifier_allocators.RDAllocator(bgp_manager.get_local_address()) ) self.manager = mock.Mock(bgp_manager=bgp_manager, label_allocator=label_alloc, rd_allocator=rd_alloc) self.vpn = ipvpn.VRF(self.manager, mock_dp_driver, 1, 1, [t.RT1], [t.RT1], '10.0.0.1', 24, {'from_rt': [t.RT3], 'to_rt': [t.RT4]}, None) self.vpn._advertise_route = mock.Mock() self.vpn._withdraw_route = mock.Mock() self.vpn.start() self.set_event_target_worker(self.vpn) def _reset_mocks(self): 
self.vpn._advertise_route.reset_mock() self.vpn._withdraw_route.reset_mock() self.mock_dp.setup_dataplane_for_remote_endpoint.reset_mock() self.mock_dp.vif_plugged.reset_mock() self.mock_dp.vif_unplugged.reset_mock() def tearDown(self): super().tearDown() self.vpn.stop() self.vpn.join() def _config_vrf_with_attract_traffic(self, attract_traffic, no_readvertise=False): self.vpn.attract_traffic = True self.vpn.attract_rts = attract_traffic['redirect_rts'] self.vpn.attract_classifier = attract_traffic['classifier'] if no_readvertise: self.vpn.readvertise = False self.vpn.readvertise_from_rts = [] self.vpn.readvertise_to_rts = attract_traffic['to_rt'] else: if attract_traffic.get('to_rt'): self.assertEqual(self.vpn.readvertise_to_rts, attract_traffic['to_rt']) if (attract_traffic.get('to_rt') and attract_traffic.get('static_destination_prefixes')): self.vpn.attract_static_dest_prefixes = ( attract_traffic['static_destination_prefixes'] ) def _mock_vpnmanager_for_attract_traffic(self): self.manager.redirect_traffic_to_vpn = mock.Mock( spec=ipvpn.VPNInstanceDataplane) self.manager.stop_redirect_to_vpn = mock.Mock() def _reset_mocks_vpnmanager(self): self.manager.redirect_traffic_to_vpn.reset_mock() self.manager.stop_redirect_to_vpn.reset_mock() def _generate_route_nlri(self, ip_address_prefix, nexthop=NEXT_HOP): # Parse address/mask (_, prefix_len) = self.vpn._parse_ipaddress_prefix(ip_address_prefix) prefix_rd = self.manager.rd_allocator.get_new_rd( "Route distinguisher for prefix %s" % ip_address_prefix ) rd = prefix_rd if prefix_len == 32 else self.vpn.instance_rd label = self.manager.label_allocator.get_new_label( "Label for prefix %s" % ip_address_prefix ) return ipvpn_routes.IPVPNRouteFactory(exa.AFI(exa.AFI.ipv4), ip_address_prefix, label, rd, nexthop) def _generate_flow_spec_nlri(self, classifier): rd = self.manager.rd_allocator.get_new_rd( "Route distinguisher for FlowSpec NLRI" ) flow_nlri = flowspec.FlowRouteFactory(exa.AFI(exa.AFI.ipv4), rd) for rule in 
classifier.map_traffic_classifier_2_redirect_rules(): flow_nlri.add(rule) return flow_nlri def test_validate_convert_attach(self): params = api_params() params.pop('ip_address') self.assertRaises(exc.APIMissingParameterException, ipvpn.VRF.validate_convert_attach_params, params) def test_validate_convert_detach(self): params = api_params() params.pop('ip_address') self.assertRaises(exc.APIMissingParameterException, ipvpn.VRF.validate_convert_detach_params, params) # unit test for IPVPN re-advertisement def test_re_advertisement_1(self): self._reset_mocks() self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri_1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri_1, [t.RT1, t.RT2], worker_a, t.NH1, 200) # no re-advertisement supposed to happen self.vpn._advertise_route.assert_called_once() # dataplane supposed to be updated for this route self.mock_dp.setup_dataplane_for_remote_endpoint.assert_called_once() self._reset_mocks() vpn_nlri_2 = self._generate_route_nlri(IP_ADDR_PREFIX2) event2 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri_2, [t.RT3], worker_a, t.NH1, 200, rtrecords=[RTRecord1]) # re-advertisement of VPN NLRI2 supposed to happen, to RT4 self.vpn._advertise_route.assert_called_once() self.assertIn(t.RT4, _extract_rt_from_call(self.vpn, '_advertise_route')) self.assertNotIn(t.RT2, _extract_rt_from_call(self.vpn, '_advertise_route')) self.assertNotIn(t.RT3, _extract_rt_from_call(self.vpn, '_advertise_route')) self.assertIn(RTRecord3, _extract_rtrec_from_call(self.vpn, '_advertise_route')) self.assertIn(RTRecord1, _extract_rtrec_from_call(self.vpn, '_advertise_route')) # check that event is for re-advertised route vpn_nlri_2 and #  contains what we expect route_entry = self.vpn._advertise_route.call_args_list[0][0][0] self.assertNotEqual(vpn_nlri_2.rd, route_entry.nlri.rd) # dataplane *not* supposed to be updated for this route 
self.mock_dp.setup_dataplane_for_remote_endpoint.assert_not_called() self._reset_mocks() # new interface plugged in # route vpn_nlri_2 should be re-advertized with this new next hop as #  next-hop self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT2) # advertised route count should increment by 2: # - vif route itself # - re-adv of NLRI1 with this new port as next-hop self.assertEqual(2, self.vpn._advertise_route.call_count) self.vpn._withdraw_route.assert_not_called() self.assertIn(t.RT1, _extract_rt_from_call(self.vpn, '_advertise_route', 0)) self.assertNotIn(t.RT4, _extract_rt_from_call(self.vpn, '_advertise_route', 0)) self.assertIn(t.RT4, _extract_rt_from_call(self.vpn, '_advertise_route', 1)) self.assertNotIn(t.RT1, _extract_rt_from_call(self.vpn, '_advertise_route', 1)) # check that second event is for re-advertised route vpn_nlri_2 and #  contains what we expect route_entry = self.vpn._advertise_route.call_args_list[1][0][0] vpn_nlri_2_readv_rd = route_entry.nlri.rd self.assertEqual(vpn_nlri_2.cidr.prefix(), route_entry.nlri.cidr.prefix()) self.assertNotEqual(vpn_nlri_2.labels, route_entry.nlri.labels) self.assertNotEqual(vpn_nlri_2.nexthop, route_entry.nlri.nexthop) self.assertNotEqual(vpn_nlri_2.rd, route_entry.nlri.rd) self.assertEqual(vpn_nlri_2_readv_rd, route_entry.nlri.rd) self._reset_mocks() # new route, that, because it contains the redirectRT in RTRecord # will not be re-advertized vpn_nlri3 = self._generate_route_nlri(IP_ADDR_PREFIX3) event3 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri3, [t.RT3], worker_a, t.NH1, 200, rtrecords=[RTRecord4]) self.vpn._advertise_route.assert_not_called() self.vpn._withdraw_route.assert_not_called() self._revert_event(event3) self._reset_mocks() # vif unplugged, routes VPN NLRI2 with next-hop # corresponding to this ports should now be withdrawn self.vpn.vif_unplugged(MAC2, IP2) self.assertEqual(2, self.vpn._withdraw_route.call_count) route_entry = self.vpn._withdraw_route.call_args_list[0][0][0] 
self.assertEqual(vpn_nlri_2.cidr.prefix(), route_entry.nlri.cidr.prefix()) self.assertNotEqual(vpn_nlri_2.labels, route_entry.nlri.labels) self.assertNotEqual(vpn_nlri_2.nexthop, route_entry.nlri.nexthop) self.assertNotEqual(vpn_nlri_2.rd, route_entry.nlri.rd) self.assertNotEqual(vpn_nlri_2.rd, route_entry.nlri.rd) self.assertEqual(vpn_nlri_2_readv_rd, route_entry.nlri.rd) self._reset_mocks() # RTs of route NLRI1 now include a re-advertiseed RT self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri_1, [t.RT1, t.RT2, t.RT3], worker_a, t.NH1, 200) self.vpn._advertise_route.assert_called_once() self.assertIn(t.RT4, _extract_rt_from_call(self.vpn, '_advertise_route')) # dataplane supposed to be updated for this route self.mock_dp.setup_dataplane_for_remote_endpoint.assert_called_once() self._reset_mocks() self._revert_event(event2) # withdraw of re-adv route supposed to happen self.vpn._withdraw_route.assert_called_once() self.vpn._advertise_route.assert_not_called() # dataplane *not* supposed to be updated for this route self.mock_dp.setup_dataplane_for_remote_endpoint.assert_not_called() def test_re_advertisement_last(self): self._reset_mocks() self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri_1 = self._generate_route_nlri(IP_ADDR_PREFIX1, t.NH1) event1 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri_1, [t.RT3], worker_a, t.NH1, 200, rtrecords=[RTRecord1]) # advertised route count increment by 2: # - vif route for port # - re-advertisement of VPN NLRI1 with port as next-hop self.assertEqual(2, self.vpn._advertise_route.call_count) self.vpn._withdraw_route.assert_not_called() self._reset_mocks() vpn_nlri_1bis = self._generate_route_nlri(IP_ADDR_PREFIX1, t.NH1) event1bis = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri_1bis, [t.RT3], worker_a, t.NH1, 200, rtrecords=[RTRecord1]) # second re-advertisement of VPN NLRI1 supposed to happen # (must be futher fixed to only append 
once) self.vpn._advertise_route.assert_called_once() self.vpn._withdraw_route.assert_not_called() self._reset_mocks() self._revert_event(event1bis) self.vpn._advertise_route.assert_not_called() self.vpn._withdraw_route.assert_not_called() self._reset_mocks() self._revert_event(event1) # withdraw of re-adv route supposed to happen self.vpn._withdraw_route.assert_called_once() self.vpn._advertise_route.assert_not_called() # unit test for FlowSpec re-advertisement def test_flowspec_re_advertisement_1(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # FlowSpec route flow_nlri1 = self._generate_flow_spec_nlri(TC1) self._new_flow_event(engine.RouteEvent.ADVERTISE, flow_nlri1, [t.RT5], [t.RT3], worker_a) # re-advertisement of Flow NLRI1 supposed to happen, to RT4 self.assertEqual(2, self.vpn._advertise_route.call_count) self.manager.redirect_traffic_to_vpn.assert_not_called() # 1 - re-advertisement of a default route supposed to happen # to RT4 self.assertIn(t.RT4, _extract_rt_from_call(self.vpn, '_advertise_route', 0)) ipvpn_nlri = _extract_nlri_from_call(self.vpn, '_advertise_route', 0) self.assertEqual(ipvpn.DEFAULT_ADDR_PREFIX, ipvpn_nlri.cidr.prefix()) # 2 - advertisement of FlowSpec NLRI supposed to happen to RT4 # for traffic redirection to RT5 on TCP destination port 80 self.assertIn(t.RT4, _extract_rt_from_call(self.vpn, '_advertise_route', 1)) self.assertNotIn(t.RT3, _extract_rt_from_call(self.vpn, '_advertise_route', 1)) self.assertIn(RTRecord3, _extract_rtrec_from_call(self.vpn, '_advertise_route', 1)) self.assertEqual( t.RT5, _extract_traffic_redirect_from_call(self.vpn, '_advertise_route', 1)) # check that second event is for re-advertised route flow_nlri1 and #  contains what we expect route_entry = 
self.vpn._advertise_route.call_args_list[1][0][0] self.assertNotEqual(flow_nlri1.rd, route_entry.nlri.rd) self.assertEqual(self.vpn.instance_rd, route_entry.nlri.rd) # dataplane *not* supposed to be updated for this route self.mock_dp.setup_dataplane_for_remote_endpoint.assert_not_called() def _check_attract_traffic(self, method, redirect_rts, expected_classifiers): self.assertEqual(len(expected_classifiers), getattr(self.vpn, method).call_count) for index, classifier in enumerate(expected_classifiers): if not classifier: # Skip advertisement to exported route targets if (self.vpn.export_rts == _extract_rt_from_call( self.vpn, method, index)): continue # 1 - re-advertisement of a default route supposed to happen # to RT4 self.assertIn(self.vpn.readvertise_to_rts[0], _extract_rt_from_call(self.vpn, method, index)) ipvpn_nlri = _extract_nlri_from_call(self.vpn, method, index) self.assertEqual(ipvpn.DEFAULT_ADDR_PREFIX, ipvpn_nlri.cidr.prefix()) if self.vpn.readvertise: self.assertNotIn(self.vpn.readvertise_from_rts[0], _extract_rt_from_call(self.vpn, method, index)) else: # 2 - advertisement of FlowSpec NLRI supposed to happen to RT5 # for traffic redirection to RT4 on TCP destination port 80 flow_nlri = _extract_nlri_from_call(self.vpn, method, index) self.assertIsInstance(flow_nlri, exa.Flow) self.assertEqual(flow_nlri.rd, self.vpn.instance_rd) self.assertIn(redirect_rts[0], _extract_rt_from_call(self.vpn, method, index)) self.assertEqual( self.vpn.readvertise_to_rts[0], _extract_traffic_redirect_from_call(self.vpn, method, index) ) self.assertEqual( classifier, _extract_traffic_classifier_from_call(self.vpn, method, index) ) # unit test for IPVPN traffic redirection def test_attract_traffic_re_advertisement(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be 
advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) event1 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) # Default and FlowSpec routes are supposed to be advertised self.assertEqual(2, self.vpn._advertise_route.call_count) self.vpn._withdraw_route.assert_not_called() ipvpn_nlri = _extract_nlri_from_call(self.vpn, '_advertise_route', 0) self.assertIsInstance(ipvpn_nlri, exa.IPVPN) self.assertEqual(ipvpn.DEFAULT_ADDR_PREFIX, ipvpn_nlri.cidr.prefix()) flow_nlri = _extract_nlri_from_call(self.vpn, '_advertise_route', 1) self.assertIsInstance(flow_nlri, exa.Flow) self._reset_mocks() vpn_nlri2 = self._generate_route_nlri(IP_ADDR_PREFIX2) event2 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri2, [t.RT3], worker_a, t.NH1, 200) # Only FlowSpec route is supposed to be advertised self.vpn._advertise_route.assert_called_once() self.vpn._withdraw_route.assert_not_called() flow_nlri = _extract_nlri_from_call(self.vpn, '_advertise_route', 0) self.assertIsInstance(flow_nlri, exa.Flow) self._reset_mocks() self._revert_event(event2) # Only FlowSpec route is supposed to be withdrawn self.vpn._withdraw_route.assert_called_once() self.vpn._advertise_route.assert_not_called() flow_nlri = _extract_nlri_from_call(self.vpn, '_withdraw_route', 0) self.assertIsInstance(flow_nlri, exa.Flow) self._reset_mocks() self._revert_event(event1) # Default and FlowSpec routes are supposed to be withdrawn self.assertEqual(2, self.vpn._withdraw_route.call_count) self.vpn._advertise_route.assert_not_called() ipvpn_nlri = _extract_nlri_from_call(self.vpn, '_withdraw_route', 0) self.assertIsInstance(ipvpn_nlri, exa.IPVPN) self.assertEqual(ipvpn.DEFAULT_ADDR_PREFIX, ipvpn_nlri.cidr.prefix()) flow_nlri = _extract_nlri_from_call(self.vpn, '_withdraw_route', 1) self.assertIsInstance(flow_nlri, exa.Flow) def 
test_attract_traffic_single_prefix_advertise(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self._check_attract_traffic('_advertise_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, TC1]) def test_attract_traffic_single_prefix_withdraw(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) event1 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self.assertEqual(2, self.vpn._advertise_route.call_count) self._reset_mocks() self._revert_event(event1) self._check_attract_traffic('_withdraw_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, TC1]) def test_attract_traffic_multiple_prefix_advertise(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) 
self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) vpn_nlri2 = self._generate_route_nlri(IP_ADDR_PREFIX2) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri2, [t.RT3], worker_a, t.NH1, 200) self._check_attract_traffic( '_advertise_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, TC1, TC2]) def test_attract_traffic_multiple_prefix_withdraw(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) event1 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) vpn_nlri2 = self._generate_route_nlri(IP_ADDR_PREFIX2) event2 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri2, [t.RT3], worker_a, t.NH1, 200) self.assertEqual(3, self.vpn._advertise_route.call_count) self._reset_mocks() self._revert_event(event2) self._revert_event(event1) self._check_attract_traffic( '_withdraw_route', ATTRACT_TRAFFIC_1['redirect_rts'], [TC2, None, TC1]) def test_attract_traffic_static_dest_prefix_no_readvertise_advertise(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier and a static destination prefix, to a specific route # target self._config_vrf_with_attract_traffic(ATTRACT_STATIC_1, no_readvertise=True) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self._check_attract_traffic('_advertise_route', ATTRACT_STATIC_1['redirect_rts'], [None, None, TC_STATIC1]) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, 
t.NH1, 200) self.vpn._advertise_route.assert_not_called() def test_attract_traffic_static_dest_prefix_advertise(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier and a static destination prefix, to a specific route # target self._config_vrf_with_attract_traffic(ATTRACT_STATIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self._check_attract_traffic('_advertise_route', ATTRACT_STATIC_1['redirect_rts'], [None, None, TC_STATIC1]) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self._check_attract_traffic('_advertise_route', ATTRACT_STATIC_1['redirect_rts'], [None, TC1]) def test_attract_traffic_static_dest_prefix_advertise_multiple(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier and multiple static destination prefixes, to a specific # route target self._config_vrf_with_attract_traffic(ATTRACT_STATIC_2) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self._check_attract_traffic('_advertise_route', ATTRACT_STATIC_1['redirect_rts'], [None, None, TC_STATIC1, None, TC_STATIC2]) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self._check_attract_traffic('_advertise_route', ATTRACT_STATIC_1['redirect_rts'], [None, TC1]) def test_redirected_vrf_single_flow_advertised(self): self._mock_vpnmanager_for_attract_traffic() self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # FlowSpec route flow_nlri1 = self._generate_flow_spec_nlri(TC1) self._new_flow_event(engine.RouteEvent.ADVERTISE, 
flow_nlri1, [t.RT5], [t.RT1], worker_a) redirect_rt5 = t._rt_to_string(t.RT5) self.manager.redirect_traffic_to_vpn.assert_called_once() self.assertIn(TC1, self.vpn.redirect_rt_2_classifiers[redirect_rt5]) def test_redirected_vrf_multiple_flow_advertised(self): self._mock_vpnmanager_for_attract_traffic() self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # FlowSpec route flow_nlri1 = self._generate_flow_spec_nlri(TC1) self._new_flow_event(engine.RouteEvent.ADVERTISE, flow_nlri1, [t.RT5], [t.RT1], worker_a) flow_nlri2 = self._generate_flow_spec_nlri(TC2) self._new_flow_event(engine.RouteEvent.ADVERTISE, flow_nlri2, [t.RT5], [t.RT1], worker_a) redirect_rt5 = t._rt_to_string(t.RT5) self.assertEqual(2, self.manager.redirect_traffic_to_vpn.call_count) self.assertIn(TC1, self.vpn.redirect_rt_2_classifiers[redirect_rt5]) self.assertIn(TC2, self.vpn.redirect_rt_2_classifiers[redirect_rt5]) def test_redirected_vrf_multiple_flow_withdrawn(self): self._mock_vpnmanager_for_attract_traffic() self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.vpn._advertise_route.assert_called_once() self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') # FlowSpec route flow_nlri1 = self._generate_flow_spec_nlri(TC1) event1 = self._new_flow_event(engine.RouteEvent.ADVERTISE, flow_nlri1, [t.RT5], [t.RT1], worker_a) flow_nlri2 = self._generate_flow_spec_nlri(TC2) event2 = self._new_flow_event(engine.RouteEvent.ADVERTISE, flow_nlri2, [t.RT5], [t.RT1], worker_a) self.assertEqual(2, self.manager.redirect_traffic_to_vpn.call_count) self._reset_mocks_vpnmanager() self._revert_event(event2) redirect_rt5 = t._rt_to_string(t.RT5) self.assertNotIn(TC2, self.vpn.redirect_rt_2_classifiers[redirect_rt5]) self._revert_event(event1) self.assertTrue(not 
self.vpn.redirect_rt_2_classifiers) self.manager.stop_redirect_to_vpn.assert_called_once() def test_load_balancing_single_prefix_advertise(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.assertEqual(2, self.vpn._advertise_route.call_count) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self._check_attract_traffic('_advertise_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, None, TC1]) def test_load_balancing_single_prefix_withdraw(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.assertEqual(2, self.vpn._advertise_route.call_count) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) event1 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self.assertEqual(3, self.vpn._advertise_route.call_count) self._reset_mocks() self._revert_event(event1) self._check_attract_traffic('_withdraw_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, None, TC1]) def test_load_balancing_multiple_prefix_advertise(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, 
IP2, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.assertEqual(2, self.vpn._advertise_route.call_count) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) vpn_nlri2 = self._generate_route_nlri(IP_ADDR_PREFIX2) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri2, [t.RT3], worker_a, t.NH1, 200) self._check_attract_traffic( '_advertise_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, None, TC1, TC2]) def test_load_balancing_multiple_prefix_withdraw(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.assertEqual(2, self.vpn._advertise_route.call_count) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) event1 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) vpn_nlri2 = self._generate_route_nlri(IP_ADDR_PREFIX2) event2 = self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri2, [t.RT3], worker_a, t.NH1, 200) self.assertEqual(4, self.vpn._advertise_route.call_count) self._reset_mocks() self._revert_event(event2) self._revert_event(event1) self._check_attract_traffic( '_withdraw_route', ATTRACT_TRAFFIC_1['redirect_rts'], [TC2, None, None, TC1]) def test_load_balancing_new_plug(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) # new Route for plugged if supposed to 
be advertised self.assertEqual(2, self.vpn._advertise_route.call_count) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self.assertEqual(3, self.vpn._advertise_route.call_count) self._reset_mocks() self.vpn.vif_plugged(MAC3, IP3, LOCAL_PORT1) self._check_attract_traffic( '_advertise_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, None]) def test_load_balancing_unplug_all(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) self.vpn.vif_plugged(MAC1, IP1, LOCAL_PORT1) self.vpn.vif_plugged(MAC2, IP2, LOCAL_PORT1) # new Route for plugged if supposed to be advertised self.assertEqual(2, self.vpn._advertise_route.call_count) self._reset_mocks() worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT3], worker_a, t.NH1, 200) self.assertEqual(3, self.vpn._advertise_route.call_count) self._reset_mocks() self.vpn.vif_unplugged(MAC1, IP1) self._check_attract_traffic( '_withdraw_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, None]) self._reset_mocks() self.vpn.vif_unplugged(MAC2, IP2) self._check_attract_traffic( '_withdraw_route', ATTRACT_TRAFFIC_1['redirect_rts'], [None, TC1, None]) def test_cleanup_assist(self): # Configure VRF to generate traffic redirection, based on a 5-tuple # classifier, to a specific route target self._config_vrf_with_attract_traffic(ATTRACT_TRAFFIC_1) worker_a = worker.Worker(mock.Mock(), 'worker.Worker-A') vpn_nlri1 = self._generate_route_nlri(IP_ADDR_PREFIX1) self._new_route_event(engine.RouteEvent.ADVERTISE, vpn_nlri1, [t.RT1], worker_a, t.NH1, 200) # FlowSpec route flow_nlri1 = self._generate_flow_spec_nlri(TC1) 
self._new_flow_event(engine.RouteEvent.ADVERTISE, flow_nlri1, [t.RT5], [t.RT1], worker_a) self.mock_dp.needs_cleanup_assist.return_value = False with mock.patch.object(self.vpn, 'best_route_removed') as mock_brr: self.vpn.stop() mock_brr.assert_called_once() self.assertIsInstance(mock_brr.call_args[0][1].nlri, flowspec.Flow) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/bagpipe_bgp/test_vpn_manager.py0000664000175000017500000003173400000000000031650 0ustar00zuulzuul00000000000000# Copyright 2014 Orange # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import mock from networking_bagpipe.bagpipe_bgp.common import exceptions from networking_bagpipe.bagpipe_bgp.common import utils from networking_bagpipe.bagpipe_bgp import constants as consts from networking_bagpipe.bagpipe_bgp.vpn import manager from networking_bagpipe.tests.unit.bagpipe_bgp import base as t REDIRECTED_INSTANCE_ID1 = 'redirected-id1' REDIRECTED_INSTANCE_ID2 = 'redirected-id2' MAC = "00:00:de:ad:be:ef" IP = "10.0.0.1/32" BRIDGE_NAME = "br-test" LOCAL_PORT = {'linuxif': 'tap1'} VPN_EXT_ID = "ext_id_1" VPN_EXT_ID_bis = "ext_id_2" GW_IP = "10.0.0.1" GW_MASK = 24 VNID = 255 class MockVPNInstance: type = consts.EVPN def __init__(self, vpn_manager, dataplane_driver, external_instance_id, instance_id, import_rts, export_rts, gateway_ip, mask, readvertise, attract_traffic, fallback=None, **kwargs): self.manager = vpn_manager self.external_instance_id = external_instance_id self.instance_type = self.__class__.__name__ self.instance_id = instance_id self.import_rts = import_rts self.export_rts = export_rts self.forced_vni = False def __repr__(self): return "{}:{}:{}".format(self.instance_type, self.instance_id, self.external_instance_id) @classmethod def validate_convert_attach_params(*args): pass @classmethod def validate_convert_detach_params(*args): pass def update_fallback(self, *args): pass def update_route_targets(self, *args): pass def vif_plugged(self, *args, **kwargs): pass def vif_unplugged(self, *args): pass def start(self): pass def stop_if_empty(self): pass def stop(self): pass def join(self): pass class TestVPNManager(t.TestCase): def setUp(self): super().setUp() mock.patch("networking_bagpipe.bagpipe_bgp.vpn.dataplane_drivers." 
"instantiate_dataplane_drivers", return_value={ 'evpn': mock.Mock(), 'ipvpn': mock.Mock() }).start() self.manager = manager.VPNManager() def tearDown(self): super().tearDown() self.manager.stop() def test_redirect_traffic_single_instance(self): redirect_instance = self.manager.redirect_traffic_to_vpn( REDIRECTED_INSTANCE_ID1, consts.IPVPN, t._rt_to_string(t.RT5) ) # Check some VPN manager and redirect instance lists consistency self.assertIn( manager.redirect_instance_extid(consts.IPVPN, t._rt_to_string(t.RT5)), self.manager.vpn_instances) self.assertIn(REDIRECTED_INSTANCE_ID1, redirect_instance.redirected_instances) def test_redirect_traffic_multiple_instance(self): redirect_instance_1 = self.manager.redirect_traffic_to_vpn( REDIRECTED_INSTANCE_ID1, consts.IPVPN, t._rt_to_string(t.RT5) ) redirect_instance_2 = self.manager.redirect_traffic_to_vpn( REDIRECTED_INSTANCE_ID2, consts.IPVPN, t._rt_to_string(t.RT5) ) # Check that same redirect instance is returned self.assertEqual(redirect_instance_2, redirect_instance_1) # Check some VPN manager and redirect instance lists consistency self.assertIn( manager.redirect_instance_extid(consts.IPVPN, t._rt_to_string(t.RT5)), self.manager.vpn_instances) self.assertIn(REDIRECTED_INSTANCE_ID1, redirect_instance_1.redirected_instances) self.assertIn(REDIRECTED_INSTANCE_ID2, redirect_instance_1.redirected_instances) def test_stop_redirect_traffic_multiple_instance(self): redirect_instance = self.manager.redirect_traffic_to_vpn( REDIRECTED_INSTANCE_ID1, consts.IPVPN, t._rt_to_string(t.RT5) ) self.manager.redirect_traffic_to_vpn( REDIRECTED_INSTANCE_ID2, consts.IPVPN, t._rt_to_string(t.RT5) ) # Check some VPN manager and redirect instance lists consistency self.manager.stop_redirect_to_vpn(REDIRECTED_INSTANCE_ID2, consts.IPVPN, t._rt_to_string(t.RT5)) self.assertNotIn(REDIRECTED_INSTANCE_ID2, redirect_instance.redirected_instances) self.manager.stop_redirect_to_vpn(REDIRECTED_INSTANCE_ID1, consts.IPVPN, t._rt_to_string(t.RT5)) 
self.assertTrue(not self.manager.vpn_instances) def test_plug_vif_to_vpn_with_forced_vni(self): with mock.patch.object(self.manager, "_get_vpn_instance", return_value=(mock.Mock(), False) ) as mock_get_vpn_instance, \ mock.patch.object(utils, "convert_route_targets"): self.manager.plug_vif_to_vpn(vpn_instance_id=VPN_EXT_ID, vpn_type=consts.EVPN, import_rt=[t.RT1], export_rt=[t.RT1], mac_address=MAC, ip_address=IP, gateway_ip=GW_IP, local_port=LOCAL_PORT, linuxbr=BRIDGE_NAME, vni=VNID) mock_get_vpn_instance.assert_called_once_with( VPN_EXT_ID, consts.EVPN, mock.ANY, mock.ANY, GW_IP, mock.ANY, None, None, None, linuxbr=BRIDGE_NAME, vni=VNID) def test_plug_vif_to_vpn_without_forced_vni(self): with mock.patch.object(self.manager, "_get_vpn_instance", return_value=(mock.Mock(), False) ) as mock_get_vpn_instance, \ mock.patch.object(utils, "convert_route_targets"): self.manager.plug_vif_to_vpn(vpn_instance_id=VPN_EXT_ID, vpn_type=consts.EVPN, import_rt=[t.RT1], export_rt=[t.RT1], mac_address=MAC, ip_address=IP, gateway_ip=GW_IP, local_port=LOCAL_PORT, linuxbr=BRIDGE_NAME) mock_get_vpn_instance.assert_called_once_with( VPN_EXT_ID, consts.EVPN, mock.ANY, mock.ANY, GW_IP, mock.ANY, None, None, None, linuxbr=BRIDGE_NAME) def test_get_vpn_instance_with_forced_vni(self): instannce, _ = self.manager._get_vpn_instance(VPN_EXT_ID, consts.IPVPN, [], [], GW_IP, GW_MASK, None, None, vni=VNID) instannce.start() self.assertEqual(VNID, instannce.instance_label, "VPN instance label should be forced to VNID") def test_get_vpn_instance_without_forced_vni(self): instannce, _ = self.manager._get_vpn_instance(VPN_EXT_ID, consts.IPVPN, [], [], GW_IP, GW_MASK, None, None) instannce.start() self.assertIsNot(0, instannce.instance_label, "VPN instance label should be assigned locally") def test_forced_vni_same_vni_twice(self): instannce, _ = self.manager._get_vpn_instance(VPN_EXT_ID, consts.IPVPN, [], [], GW_IP, GW_MASK, None, None, vni=VNID) instannce.start() 
self.assertRaises(exceptions.APIAlreadyUsedVNI, self.manager._get_vpn_instance, VPN_EXT_ID_bis, consts.EVPN, [], [], GW_IP, GW_MASK, None, None, vni=VNID) # unregister first VPN instance (free the VNI) self.manager.unregister_vpn_instance(instannce) # this time, using the VNI should work instance2, _ = self.manager._get_vpn_instance(VPN_EXT_ID, consts.IPVPN, [], [], GW_IP, GW_MASK, None, None, vni=VNID) instance2.start() def test_instance_id_uniqueness(self): with mock.patch.object(manager.VPNManager, 'type2class', {consts.IPVPN: MockVPNInstance, consts.EVPN: MockVPNInstance }): vpn_instance_unplug_args = dict(vpn_type=consts.EVPN, mac_address=MAC, ip_address=IP) vpn_instance_plug_args = dict(vpn_type=consts.EVPN, import_rts=['64512:74'], export_rts=[], mac_address=MAC, ip_address_prefix=IP, gateway_ip=GW_IP, local_port=LOCAL_PORT) BASE_VPN_EXT = "extid-" for i in (1, 2, 3, 4, 5): self.manager.plug_vif_to_vpn( external_instance_id=BASE_VPN_EXT + str(i), **vpn_instance_plug_args) for i in (2, 4): self.manager.unplug_vif_from_vpn( external_instance_id=BASE_VPN_EXT + str(i), **vpn_instance_unplug_args) for i in (6, 7, 8): self.manager.plug_vif_to_vpn( external_instance_id=BASE_VPN_EXT + str(i), **vpn_instance_plug_args) instance_ids = [i.instance_id for i in self.manager.vpn_instances.values()] # ensure that each value is unique self.assertEqual(len(self.manager.vpn_instances.values()), len(set(instance_ids))) def test_instance_id_max(self): with mock.patch.object(manager.VPNManager, 'type2class', {consts.IPVPN: MockVPNInstance, consts.EVPN: MockVPNInstance }): self.manager.next_vpn_instance_id = 2**32 - 1 self.manager.plug_vif_to_vpn( external_instance_id="dummy1", vpn_type=consts.EVPN, mac_address=MAC, import_rts=[], export_rts=[], ip_address_prefix=IP, local_port=LOCAL_PORT) self.assertRaises( manager.MaxInstanceIDReached, self.manager.plug_vif_to_vpn, external_instance_id="dummy2", vpn_type=consts.EVPN, import_rts=[], export_rts=[], mac_address=MAC, 
ip_address_prefix=IP, local_port=LOCAL_PORT) @mock.patch('networking_bagpipe.bagpipe_bgp.engine.bgp_manager.Manager') def test_manager_stop(self, mocked_bgp_manager): instance, _ = self.manager._get_vpn_instance( "TEST_VPN_INSTANCE", consts.IPVPN, [t.RT1], [t.RT1], "192.168.0.1", 24, {}, {}) instance.start() self.manager.stop() self.assertTrue(not self.manager.vpn_instances) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9463053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/driver/0000775000175000017500000000000000000000000024766 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/driver/__init__.py0000664000175000017500000000000000000000000027065 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/driver/test_mech_bagpipe.py0000664000175000017500000000231100000000000030777 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from neutron.tests.unit.plugins.ml2 import test_plugin from neutron_lib import constants as n_consts class TestBaGpipeML2MechDriver(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['bagpipe'] def setUp(self): cfg.CONF.set_override('type_drivers', n_consts.TYPE_VXLAN, 'ml2') cfg.CONF.set_override('tenant_network_types', n_consts.TYPE_VXLAN, 'ml2') super().setUp() def test_setup(self): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9463053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/objects/0000775000175000017500000000000000000000000025124 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/objects/__init__.py0000664000175000017500000000000000000000000027223 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/objects/test_bgpvpn.py0000664000175000017500000003261500000000000030040 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random from unittest import mock import netaddr from oslo_utils import uuidutils from networking_bagpipe.objects import bgpvpn as bgpvpn_obj from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api from neutron_lib.api.definitions import bgpvpn as bgpvpn_api from neutron_lib.api.definitions import bgpvpn_routes_control as bgpvpn_rc_api from neutron_lib import constants from neutron_lib import context from neutron_lib.objects import registry as obj_reg from neutron_lib.utils import net as net_utils test_base.FIELD_TYPE_VALUE_GENERATOR_MAP[bgpvpn_obj.BGPVPNTypeField] = ( lambda: random.choice(bgpvpn_api.BGPVPN_TYPES) ) test_base.FIELD_TYPE_VALUE_GENERATOR_MAP[ bgpvpn_obj.BGPVPNPortAssociationRouteTypeField] = ( # do not generate bgpvpn type routes for now: lambda: bgpvpn_rc_api.PREFIX_TYPE) CIDR = "10.10.0.0/16" GW_IP = "10.10.0.1" GW_MAC = "ba:ad:00:00:ca:fe" TEST_RT = "64512:42" def _subnet_dict(gw_mac=None): return { 'id': mock.ANY, 'ip_version': 4, 'gateway_mac': gw_mac, 'cidr': net_utils.AuthenticIPNetwork(CIDR), 'gateway_ip': netaddr.IPAddress(GW_IP) } class _BPGVPNObjectsTestCommon: def _create_test_bgpvpn(self): bgpvpn = bgpvpn_obj.BGPVPN(self.context, route_targets=[TEST_RT], name='test-bgpvpn-U', type='l3') bgpvpn.create() return bgpvpn def _create_test_bgpvpn_id(self): return self._create_test_bgpvpn().id def _make_subnet(self, network_id): _subnet = obj_reg.new_instance( 'Subnet', self.context, network_id=network_id, ip_version=4, cidr=netaddr.IPNetwork(CIDR), gateway_ip=GW_IP) _subnet.create() return _subnet def _connect_router_network(self, router_id, network_id, subnet_id=None, gw_network=False): port = obj_reg.new_instance( 'Port', self.context, network_id=network_id, mac_address=netaddr.EUI( GW_MAC, dialect=netaddr.mac_unix_expanded), device_id='test_device_id', device_owner=constants.DEVICE_OWNER_ROUTER_INTF, status="DUMMY_STATUS", admin_state_up=True) if gw_network: port.device_owner = 
constants.DEVICE_OWNER_ROUTER_GW port.create() if subnet_id: allocation = obj_reg.new_instance( 'IPAllocation', self.context, port_id=port.id, subnet_id=subnet_id, network_id=network_id, ip_address=netaddr.IPNetwork(GW_IP)) allocation.create() port.fixed_ips = [allocation] port.update() router_if = obj_reg.new_instance( 'RouterPort', self.context, router_id=router_id, port_id=port.id) router_if.create() class BGPVPNTest(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, _BPGVPNObjectsTestCommon): _test_class = bgpvpn_obj.BGPVPN def test_get_objects_supports_extra_filtername(self): self.skipTest("no support for extra filtername") def test_get_object(self): bgpvpn = self._create_test_bgpvpn() bgpvpn_bis = bgpvpn_obj.BGPVPN.get_object(self.context, id=bgpvpn.id) self.assertEqual(bgpvpn, bgpvpn_bis) class BGPVPNNetAssociationTest(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, _BPGVPNObjectsTestCommon): _test_class = bgpvpn_obj.BGPVPNNetAssociation def setUp(self): test_base.BaseDbObjectTestCase.setUp(self) self.network_id = self._create_test_network_id() self.update_obj_fields( {'network_id': self.network_id, 'bgpvpn_id': self._create_test_bgpvpn_id}) self.subnet = self._make_subnet(self.network_id) def test_get_objects_queries_constant(self): self.skipTest("test not passing yet, remains to be investigated why") def test_all_subnets(self): for db_obj in self.objs: self.assertCountEqual(db_obj.all_subnets(self.network_id), [_subnet_dict()]) def test_subnets(self): for obj in self.objs: obj.create() self.assertCountEqual(obj.subnets, [_subnet_dict()]) # plug a router _router = obj_reg.new_instance('Router', self.context) _router.create() self._connect_router_network(_router.id, self.network_id, self.subnet.id) # check .subnets in associations, after refreshing for obj in self.objs: refreshed_obj = bgpvpn_obj.BGPVPNNetAssociation.get_object( self.context, id=obj.id) self.assertCountEqual(refreshed_obj.subnets, [_subnet_dict(GW_MAC)]) class 
BGPVPNRouterAssociationTest(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, _BPGVPNObjectsTestCommon): _test_class = bgpvpn_obj.BGPVPNRouterAssociation def setUp(self): test_base.BaseDbObjectTestCase.setUp(self) self.router_id = self._create_test_router_id() self.update_obj_fields( {'router_id': self.router_id, 'bgpvpn_id': self._create_test_bgpvpn_id, }) self.context = context.get_admin_context() def test_get_objects_queries_constant(self): self.skipTest("test not passing yet, remains to be investigated why") def test_all_subnets(self): for obj in self.objs: obj.create() network_id = self._create_test_network_id() subnet_ = self._make_subnet(network_id) # initially the network is not connected to the router for obj in self.objs: self.assertCountEqual(obj.all_subnets(network_id), []) self._connect_router_network(self.router_id, network_id, subnet_.id) # connect a gateway network gw_network_id = self._create_test_network_id() self._connect_router_network(self.router_id, gw_network_id, gw_network=True) # check .subnets in associations, after refreshing # (except gateway network that should not be present) for obj in self.objs: refreshed_obj = bgpvpn_obj.BGPVPNRouterAssociation.get_object( self.context, id=obj.id) self.assertCountEqual(refreshed_obj.all_subnets(network_id), [_subnet_dict(GW_MAC)]) self.assertCountEqual(refreshed_obj.all_subnets("dummy-uuid"), []) def test_get_objects_from_network_id(self): router_ = obj_reg.new_instance('Router', self.context) router_.create() self.project = uuidutils.generate_uuid() # put a network behind a router network_ = obj_reg.new_instance('Network', self.context) network_.create() subnet_ = self._make_subnet(network_.id) self._connect_router_network(router_.id, network_.id) bgpvpn_ = self._create_test_bgpvpn() router_assoc_ = bgpvpn_obj.BGPVPNRouterAssociation( self.context, project_id=self.project, router_id=router_.id, bgpvpn_id=bgpvpn_.id) router_assoc_.create() # unrelated router and BGPVPN router_2 = 
obj_reg.new_instance('Router', self.context) router_2.create() router_assoc_2 = bgpvpn_obj.BGPVPNRouterAssociation( self.context, project_id=self.project, router_id=router_2.id, bgpvpn_id=self._create_test_bgpvpn_id()) router_assoc_2.create() # test get_objects get_assocs = bgpvpn_obj.BGPVPNRouterAssociation.get_objects( self.context, network_id=network_.id) self.assertEqual(1, len(get_assocs)) self.assertEqual(get_assocs[0].bgpvpn.id, bgpvpn_.id) self.assertIn( subnet_.id, [s['id'] for s in get_assocs[0].all_subnets(network_.id)]) class BGPVPNPortAssociationTest(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, _BPGVPNObjectsTestCommon): _test_class = bgpvpn_obj.BGPVPNPortAssociation def setUp(self): test_base.BaseDbObjectTestCase.setUp(self) self.project = uuidutils.generate_uuid() self.port_id = self._create_test_port_id() self.update_obj_fields( {'port_id': self.port_id, 'bgpvpn_id': self._create_test_bgpvpn_id, 'routes': { 'bgpvpn_id': self._create_test_bgpvpn_id, }} ) def test_get_objects_queries_constant(self): self.skipTest("test not passing yet, remains to be investigated why") class BGPVPNPortAssociationRouteTest(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase, _BPGVPNObjectsTestCommon): _test_class = bgpvpn_obj.BGPVPNPortAssociationRoute def setUp(self): test_base.BaseDbObjectTestCase.setUp(self) self.project = uuidutils.generate_uuid() self.update_obj_fields( {'port_association_id': self._create_test_port_assoc_id, 'bgpvpn_id': self._create_test_bgpvpn_id}) self.context = context.get_admin_context() def _create_test_port_assoc(self): bgpvpn_id = self._create_test_bgpvpn_id() port_id = self._create_test_port_id() port_assoc = bgpvpn_obj.BGPVPNPortAssociation(self.context, project_id=self.project, port_id=port_id, bgpvpn_id=bgpvpn_id) port_assoc.create() return port_assoc def _create_test_port_assoc_id(self): return self._create_test_port_assoc().id def test_eq_hash_prefix(self): r1 = bgpvpn_obj.BGPVPNPortAssociationRoute( type='prefix', 
prefix=netaddr.IPNetwork('1.2.3.4')) r2 = bgpvpn_obj.BGPVPNPortAssociationRoute( type='prefix', prefix=netaddr.IPNetwork('1.2.3.4')) self.assertEqual(r1, r2) self.assertEqual(hash(r1), hash(r2)) def test_eq_hash_bgpvpn(self): bgpvpn = self._create_test_bgpvpn() r1 = bgpvpn_obj.BGPVPNPortAssociationRoute(type='bgpvpn', bgpvpn=bgpvpn) r2 = bgpvpn_obj.BGPVPNPortAssociationRoute(type='bgpvpn', bgpvpn=bgpvpn) self.assertEqual(r1, r2) self.assertEqual(hash(r1), hash(r2)) def test_neq_type(self): r1 = bgpvpn_obj.BGPVPNPortAssociationRoute( type='bgpvpn', bgpvpn_id='12345') r2 = bgpvpn_obj.BGPVPNPortAssociationRoute( type='prefix', prefix=netaddr.IPNetwork('1.2.3.4')) self.assertNotEqual(r1, r2) def test_neq_prefix(self): r1 = bgpvpn_obj.BGPVPNPortAssociationRoute( type='prefix', prefix=netaddr.IPNetwork('11.22.33.44')) r2 = bgpvpn_obj.BGPVPNPortAssociationRoute( type='prefix', prefix=netaddr.IPNetwork('1.2.3.4')) self.assertNotEqual(r1, r2) def test_neq_bgpvpn(self): bgpvpn1 = self._create_test_bgpvpn() bgpvpn2 = self._create_test_bgpvpn() r1 = bgpvpn_obj.BGPVPNPortAssociationRoute(type='bgpvpn', bgpvpn=bgpvpn1) r2 = bgpvpn_obj.BGPVPNPortAssociationRoute(type='bgpvpn', bgpvpn=bgpvpn2) self.assertNotEqual(r1, r2) def test_bgpvpn_route_get_object_access_bgpvpn(self): route_id = uuidutils.generate_uuid() route = bgpvpn_obj.BGPVPNPortAssociationRoute( self.context, id=route_id, port_association_id=self._create_test_port_assoc().id, type=bgpvpn_rc_api.BGPVPN_TYPE, bgpvpn_id=self._create_test_bgpvpn_id()) route.create() route_again = bgpvpn_obj.BGPVPNPortAssociationRoute.get_object( self.context, id=route_id ) self.assertEqual([TEST_RT], route_again.bgpvpn.route_targets) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9463053 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/privileged/0000775000175000017500000000000000000000000025625 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/privileged/__init__.py0000664000175000017500000000000000000000000027724 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/privileged/privsep_fixtures.py0000664000175000017500000000150000000000000031614 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from networking_bagpipe import privileged class PrivelegedFixture(fixtures.Fixture): def setUp(self): super().setUp() self.useFixture(fixtures.MockPatchObject( privileged.default_cmd, 'client_mode', False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/tests/unit/privileged/test_privileged_utils.py0000664000175000017500000000750500000000000032617 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_concurrency import processutils from neutron.tests import base from networking_bagpipe.privileged import privileged_utils from networking_bagpipe.tests.unit.privileged import privsep_fixtures class TestPrivilegedSysctl(base.BaseTestCase): def setUp(self): super().setUp() self.useFixture(privsep_fixtures.PrivelegedFixture()) @mock.patch('oslo_concurrency.processutils.execute') def test_sysctl_simple(self, mock_execute): mock_execute.return_value = ['', ''] ret = privileged_utils.sysctl('a.b.c', 0) mock_execute.assert_called_with('sysctl', '-w', 'a.b.c=0', check_exit_code=True) self.assertEqual(ret, 0) @mock.patch('oslo_concurrency.processutils.execute') def test_sysctl_failed(self, mock_execute): mock_execute.return_value = ['', 'error'] ret = privileged_utils.sysctl('a.b.c', 1) self.assertEqual(ret, 1) @mock.patch('oslo_concurrency.processutils.execute') def test_sysctl_failed_raise_exception(self, mock_execute): mock_execute.side_effect = processutils.ProcessExecutionError( 'Unexpected error') self.assertRaises(processutils.ProcessExecutionError, privileged_utils.sysctl, 'a.b.c', 1) @mock.patch('oslo_concurrency.processutils.execute') def test_modprobe_simple(self, mock_execute): mock_execute.return_value = ['', ''] privileged_utils.modprobe('foo_module') mock_execute.assert_called_once_with( 'modprobe', 'foo_module', check_exit_code=True) @mock.patch('oslo_concurrency.processutils.execute') def test_modprobe_failed(self, mock_execute): mock_execute.side_effect = processutils.ProcessExecutionError( 'Unexpected error') 
self.assertRaises( processutils.ProcessExecutionError, privileged_utils.modprobe, 'foo_module') @mock.patch('oslo_concurrency.processutils.execute') def test_bridge_simple(self, mock_execute): mock_execute.return_value = ['', ''] privileged_utils.bridge('fdb replace foo') mock_execute.assert_called_once_with( 'bridge', 'fdb', 'replace', 'foo', run_as_root=True) @mock.patch('oslo_concurrency.processutils.execute') def test_bridge_failed(self, mock_execute): mock_execute.side_effect = processutils.ProcessExecutionError( 'Unexpected error') self.assertRaises( processutils.ProcessExecutionError, privileged_utils.bridge, 'fdb replace foo') @mock.patch('oslo_concurrency.processutils.execute') def test_brctl_simple(self, mock_execute): mock_execute.return_value = ['', ''] privileged_utils.brctl('addif foo_bridge foo') mock_execute.assert_called_once_with( 'brctl', 'addif', 'foo_bridge', 'foo', check_exit_code=True, run_as_root=True) @mock.patch('oslo_concurrency.processutils.execute') def test_brctl_failed(self, mock_execute): mock_execute.side_effect = processutils.ProcessExecutionError( 'Unexpected error') self.assertRaises( processutils.ProcessExecutionError, privileged_utils.brctl, 'addif foo_bridge foo') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/networking_bagpipe/version.py0000664000175000017500000000127100000000000023412 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('networking-bagpipe') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.958305 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/0000775000175000017500000000000000000000000023044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/PKG-INFO0000644000175000017500000000453100000000000024142 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: networking-bagpipe Version: 22.0.0 Summary: Mechanism driver for Neutron ML2 plugin using BGP E-VPNs/IP VPNs as a backend Home-page: https://docs.openstack.org/networking-bagpipe/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.9 License-File: LICENSE Requires-Dist: netaddr>=0.7.18 Requires-Dist: neutron-lib>=2.19.0 Requires-Dist: oslo.db>=4.37.0 Requires-Dist: oslo.config>=5.2.0 Requires-Dist: oslo.concurrency>=3.26.0 Requires-Dist: oslo.i18n>=3.15.3 Requires-Dist: oslo.log>=3.36.0 Requires-Dist: oslo.messaging>=5.29.0 Requires-Dist: oslo.privsep>=2.3.0 
Requires-Dist: oslo.rootwrap>=5.8.0 Requires-Dist: oslo.serialization!=2.19.1,>=2.18.0 Requires-Dist: oslo.service!=1.28.1,>=1.24.0 Requires-Dist: oslo.versionedobjects>=1.35.1 Requires-Dist: pyroute2>=0.5.7 Requires-Dist: stevedore>=1.20.0 Requires-Dist: exabgp>=4.0.4 Requires-Dist: pecan>=1.3.2 Requires-Dist: neutron>=23.0.0 Requires-Dist: networking-bgpvpn>=12.0.0 Requires-Dist: networking-sfc>=10.0.0 ================== networking-bagpipe ================== Driver and agent code to use BaGPipe lightweight implementation of BGP-based VPNs as a backend for Neutron. * Free software: Apache license * Documentation: https://docs.openstack.org/networking-bagpipe/latest/ * Source: https://opendev.org/openstack/networking-bagpipe * Bugs: https://bugs.launchpad.net/networking-bagpipe * Release notes: https://docs.openstack.org/releasenotes/networking-bagpipe/ Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/networking-bagpipe.svg :target: https://governance.openstack.org/tc/reference/tags/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/SOURCES.txt0000664000175000017500000002720000000000000024731 0ustar00zuulzuul00000000000000.coveragerc .gitmodules .mailmap .pre-commit-config.yaml .pylintrc .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt pyproject.toml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini devstack/devstack-gate-rc devstack/gate_hook.sh devstack/override-defaults devstack/plugin.sh devstack/plugin.sh.bagpipe_bgp devstack/post_test_hook.sh devstack/pre_test_hook.sh devstack/settings devstack/settings.bagpipe_bgp devstack/gate-hooks/README devstack/gate-hooks/bagpipe devstack/gate-hooks/bgpvpn devstack/gate-hooks/dstat devstack/gate-hooks/go-env devstack/gate-hooks/gobgp devstack/gate-hooks/stack_base 
doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/overview.rst doc/source/_static/.placeholder doc/source/configuration/bagpipe-bgp.rst doc/source/configuration/dataplane-evpn-linux-vxlan.rst doc/source/configuration/dataplane-ipvpn-mpls-linux.rst doc/source/configuration/dataplane-ipvpn-mpls-ovs.rst doc/source/configuration/index.rst doc/source/configuration/neutron-agent.rst doc/source/configuration/neutron.rst doc/source/configuration/samples/bagpipe-bgp.rst doc/source/configuration/samples/dataplane-evpn-linux-vxlan.rst doc/source/configuration/samples/dataplane-ipvpn-mpls-linux.rst doc/source/configuration/samples/dataplane-ipvpn-mpls-ovs.rst doc/source/configuration/samples/neutron-agent.rst doc/source/contributor/index.rst doc/source/install/index.rst doc/source/user/applications.rst doc/source/user/bagpipe-bgp.rst doc/source/user/bgpvpn.blockdiag doc/source/user/design.rst doc/source/user/index.rst doc/source/user/overview.blockdiag doc/source/user/figures/bgpvpn_blockdiag.png doc/source/user/figures/ml2_blockdiag.png doc/source/user/figures/overview_blockdiag.png etc/README.txt etc/bagpipe-bgp/bgp.conf.template etc/bagpipe-bgp/rootwrap.conf etc/bagpipe-bgp/rootwrap.d/linux-vxlan.filters etc/bagpipe-bgp/rootwrap.d/mpls-ovs-dataplane.filters etc/oslo-config-generator/bagpipe-bgp.conf etc/oslo-config-generator/dataplane-evpn-linux-vxlan.conf etc/oslo-config-generator/dataplane-ipvpn-mpls-linux.conf etc/oslo-config-generator/dataplane-ipvpn-mpls-ovs.conf etc/oslo-config-generator/neutron-agent.conf etc/oslo-config-generator/neutron-sfc.conf networking_bagpipe/__init__.py networking_bagpipe/_i18n.py networking_bagpipe/opts.py networking_bagpipe/version.py networking_bagpipe.egg-info/PKG-INFO networking_bagpipe.egg-info/SOURCES.txt networking_bagpipe.egg-info/dependency_links.txt networking_bagpipe.egg-info/entry_points.txt networking_bagpipe.egg-info/not-zip-safe networking_bagpipe.egg-info/pbr.json networking_bagpipe.egg-info/requires.txt 
networking_bagpipe.egg-info/top_level.txt networking_bagpipe/agent/__init__.py networking_bagpipe/agent/agent_base_info.py networking_bagpipe/agent/bagpipe_bgp_agent.py networking_bagpipe/agent/bgpvpn/__init__.py networking_bagpipe/agent/bgpvpn/agent_extension.py networking_bagpipe/agent/bgpvpn/constants.py networking_bagpipe/agent/bgpvpn/rpc_client.py networking_bagpipe/agent/common/__init__.py networking_bagpipe/bagpipe_bgp/__init__.py networking_bagpipe/bagpipe_bgp/bgp_daemon.py networking_bagpipe/bagpipe_bgp/constants.py networking_bagpipe/bagpipe_bgp/fakerr.py networking_bagpipe/bagpipe_bgp/api/__init__.py networking_bagpipe/bagpipe_bgp/api/api.py networking_bagpipe/bagpipe_bgp/api/config.py networking_bagpipe/bagpipe_bgp/api/controllers.py networking_bagpipe/bagpipe_bgp/cli/__init__.py networking_bagpipe/bagpipe_bgp/cli/impex2dot.py networking_bagpipe/bagpipe_bgp/cli/looking_glass.py networking_bagpipe/bagpipe_bgp/cli/rest_attach.py networking_bagpipe/bagpipe_bgp/common/__init__.py networking_bagpipe/bagpipe_bgp/common/config.py networking_bagpipe/bagpipe_bgp/common/dataplane_utils.py networking_bagpipe/bagpipe_bgp/common/exceptions.py networking_bagpipe/bagpipe_bgp/common/log_decorator.py networking_bagpipe/bagpipe_bgp/common/looking_glass.py networking_bagpipe/bagpipe_bgp/common/net_utils.py networking_bagpipe/bagpipe_bgp/common/run_command.py networking_bagpipe/bagpipe_bgp/common/utils.py networking_bagpipe/bagpipe_bgp/engine/__init__.py networking_bagpipe/bagpipe_bgp/engine/bgp_manager.py networking_bagpipe/bagpipe_bgp/engine/bgp_peer_worker.py networking_bagpipe/bagpipe_bgp/engine/exa.py networking_bagpipe/bagpipe_bgp/engine/exabgp_peer_worker.py networking_bagpipe/bagpipe_bgp/engine/flowspec.py networking_bagpipe/bagpipe_bgp/engine/ipvpn.py networking_bagpipe/bagpipe_bgp/engine/route_table_manager.py networking_bagpipe/bagpipe_bgp/engine/tracker_worker.py networking_bagpipe/bagpipe_bgp/engine/worker.py networking_bagpipe/bagpipe_bgp/vpn/__init__.py 
networking_bagpipe/bagpipe_bgp/vpn/dataplane_drivers.py networking_bagpipe/bagpipe_bgp/vpn/identifier_allocators.py networking_bagpipe/bagpipe_bgp/vpn/manager.py networking_bagpipe/bagpipe_bgp/vpn/vpn_instance.py networking_bagpipe/bagpipe_bgp/vpn/evpn/__init__.py networking_bagpipe/bagpipe_bgp/vpn/evpn/linux_vxlan.py networking_bagpipe/bagpipe_bgp/vpn/evpn/ovs.py networking_bagpipe/bagpipe_bgp/vpn/ipvpn/__init__.py networking_bagpipe/bagpipe_bgp/vpn/ipvpn/mpls_linux_dataplane.py networking_bagpipe/bagpipe_bgp/vpn/ipvpn/mpls_ovs_dataplane.py networking_bagpipe/db/__init__.py networking_bagpipe/db/migration/__init__.py networking_bagpipe/db/migration/alembic_migrations/README networking_bagpipe/db/migration/alembic_migrations/env.py networking_bagpipe/db/migration/alembic_migrations/script.py.mako networking_bagpipe/db/migration/alembic_migrations/versions/CONTRACT_HEAD networking_bagpipe/db/migration/alembic_migrations/versions/EXPAND_HEAD networking_bagpipe/db/migration/alembic_migrations/versions/start_networking_bagpipe.py networking_bagpipe/db/migration/alembic_migrations/versions/2025.1/contract/796580a58032_remove_linux_bridge.py networking_bagpipe/db/migration/alembic_migrations/versions/liberty/__init__.py networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contract/0a2ee5cbb1a5_initial.py networking_bagpipe/db/migration/alembic_migrations/versions/liberty/contract/__init__.py networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand/__init__.py networking_bagpipe/db/migration/alembic_migrations/versions/liberty/expand/d4d4d7f03b21_initial.py networking_bagpipe/db/migration/alembic_migrations/versions/pike/expand/d2c2dcb6c2d4_defining_sfc_data_model.py networking_bagpipe/db/models/__init__.py networking_bagpipe/db/models/head.py networking_bagpipe/driver/__init__.py networking_bagpipe/driver/constants.py networking_bagpipe/driver/mech_bagpipe.py networking_bagpipe/objects/__init__.py networking_bagpipe/objects/bgpvpn.py 
networking_bagpipe/privileged/__init__.py networking_bagpipe/privileged/privileged_utils.py networking_bagpipe/tests/__init__.py networking_bagpipe/tests/common/__init__.py networking_bagpipe/tests/common/json_fixtures.py networking_bagpipe/tests/fullstack/__init__.py networking_bagpipe/tests/fullstack/base.py networking_bagpipe/tests/fullstack/test_bagpipe_ml2_connectivity.py networking_bagpipe/tests/fullstack/test_bgpvpn_connectivity.py networking_bagpipe/tests/fullstack/resources/__init__.py networking_bagpipe/tests/fullstack/resources/bagpipe_ml2/__init__.py networking_bagpipe/tests/fullstack/resources/bagpipe_ml2/config.py networking_bagpipe/tests/fullstack/resources/bgpvpn/__init__.py networking_bagpipe/tests/fullstack/resources/bgpvpn/client.py networking_bagpipe/tests/fullstack/resources/bgpvpn/config.py networking_bagpipe/tests/fullstack/resources/common/__init__.py networking_bagpipe/tests/fullstack/resources/common/config.py networking_bagpipe/tests/fullstack/resources/common/environment.py networking_bagpipe/tests/fullstack/resources/common/process.py networking_bagpipe/tests/functional/__init__.py networking_bagpipe/tests/functional/requirements.txt networking_bagpipe/tests/functional/db/__init__.py networking_bagpipe/tests/functional/db/test_migrations.py networking_bagpipe/tests/unit/__init__.py networking_bagpipe/tests/unit/agent/__init__.py networking_bagpipe/tests/unit/agent/base.py networking_bagpipe/tests/unit/agent/test_bagpipe_bgp_agent.py networking_bagpipe/tests/unit/agent/bgpvpn/__init__.py networking_bagpipe/tests/unit/agent/bgpvpn/test_agent_extension.py networking_bagpipe/tests/unit/agent/common/__init__.py networking_bagpipe/tests/unit/agent/common/constants.py networking_bagpipe/tests/unit/bagpipe_bgp/__init__.py networking_bagpipe/tests/unit/bagpipe_bgp/base.py networking_bagpipe/tests/unit/bagpipe_bgp/test_api.py networking_bagpipe/tests/unit/bagpipe_bgp/test_bgp_manager.py networking_bagpipe/tests/unit/bagpipe_bgp/test_config.py 
networking_bagpipe/tests/unit/bagpipe_bgp/test_dataplane_utils.py networking_bagpipe/tests/unit/bagpipe_bgp/test_engine_objects.py networking_bagpipe/tests/unit/bagpipe_bgp/test_identifier_allocators.py networking_bagpipe/tests/unit/bagpipe_bgp/test_ipvpn_objects.py networking_bagpipe/tests/unit/bagpipe_bgp/test_route_table_manager.py networking_bagpipe/tests/unit/bagpipe_bgp/test_tracker_worker.py networking_bagpipe/tests/unit/bagpipe_bgp/test_vpn_instance.py networking_bagpipe/tests/unit/bagpipe_bgp/test_vpn_manager.py networking_bagpipe/tests/unit/bagpipe_bgp/evpn/__init__.py networking_bagpipe/tests/unit/bagpipe_bgp/evpn/test_ovs.py networking_bagpipe/tests/unit/bagpipe_bgp/ipvpn/__init__.py networking_bagpipe/tests/unit/bagpipe_bgp/ipvpn/mpls_ovs_dataplane.py networking_bagpipe/tests/unit/driver/__init__.py networking_bagpipe/tests/unit/driver/test_mech_bagpipe.py networking_bagpipe/tests/unit/objects/__init__.py networking_bagpipe/tests/unit/objects/test_bgpvpn.py networking_bagpipe/tests/unit/privileged/__init__.py networking_bagpipe/tests/unit/privileged/privsep_fixtures.py networking_bagpipe/tests/unit/privileged/test_privileged_utils.py releasenotes/notes/.placeholder releasenotes/notes/Remove-LinuxBridge-related-code-2e4a7d76478c120c.yaml releasenotes/notes/bagpipe_bgp-00387f95d7c4ed9e.yaml releasenotes/notes/bgpvpn-l2-ovs-6898d9ee5dbcf77f.yaml releasenotes/notes/bgpvpn-routes-control-660a16ff9b1c24ca.yaml releasenotes/notes/drop-py27-support-cd16e5d82985d9e9.yaml releasenotes/notes/drop-python-3-6-and-3-7-8f08c90689525b35.yaml releasenotes/notes/improve_fallback-96e524daf96ebcc1.yaml releasenotes/notes/ipvpn_ovs-2eab0293cb97585b.yaml releasenotes/notes/linux_mpls-1c4fd22486ad5733.yaml releasenotes/notes/privsep-sysctl-a6321b31de29fce1.yaml releasenotes/notes/remove-bagpipe-bgp-deprecated-action-cli-option-dfafa6b05d487e0c.yaml releasenotes/notes/sfc-2000351597a8c160.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst 
releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder samples/gobgp.conf samples/bagpipe-bgp/basic samples/bagpipe-bgp/chain-example1 samples/bagpipe-bgp/chain-example1bis samples/bagpipe-bgp/chain-example2 samples/bagpipe-bgp/chain-example3 samples/bagpipe-bgp/chain-redirect-lb samples/bagpipe-bgp/chain-redirect-lb2 samples/bagpipe-bgp/chain-traffic-redirect samples/bagpipe-bgp/evpn2ipvpn samples/bagpipe-bgp/examples.md samples/bagpipe-bgp/generic-functions samples/bagpipe-bgp/setup-cross-routing samples/bagpipe-bgp/setup-cross-routing-per-interface samples/init.d/bagpipe-bgp samples/init.d/bagpipe-fakerr samples/systemd/system/bagpipe-bgp.service tools/generate_config_file_samples.sh././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/dependency_links.txt0000664000175000017500000000000100000000000027112 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/entry_points.txt0000664000175000017500000000567500000000000026357 0ustar00zuulzuul00000000000000[bagpipe.dataplane.evpn] DummyDataplaneDriver = networking_bagpipe.bagpipe_bgp.vpn.evpn:DummyDataplaneDriver bagpipe.bgp.vpn.evpn.DummyDataplaneDriver = 
networking_bagpipe.bagpipe_bgp.vpn.evpn:DummyDataplaneDriver dummy = networking_bagpipe.bagpipe_bgp.vpn.evpn:DummyDataplaneDriver ovs = networking_bagpipe.bagpipe_bgp.vpn.evpn.ovs:OVSDataplaneDriver [bagpipe.dataplane.ipvpn] DummyDataplaneDriver = networking_bagpipe.bagpipe_bgp.vpn.ipvpn:DummyDataplaneDriver bagpipe.bgp.vpn.ipvpn.DummyDataplaneDriver = networking_bagpipe.bagpipe_bgp.vpn.ipvpn:DummyDataplaneDriver bagpipe.bgp.vpn.ipvpn.mpls_linux_dataplane.MPLSLinuxDataplaneDriver = networking_bagpipe.bagpipe_bgp.vpn.ipvpn.mpls_linux_dataplane:MPLSLinuxDataplaneDriver bagpipe.bgp.vpn.ipvpn.mpls_ovs_dataplane.MPLSOVSDataplaneDriver = networking_bagpipe.bagpipe_bgp.vpn.ipvpn.mpls_ovs_dataplane:MPLSOVSDataplaneDriver dummy = networking_bagpipe.bagpipe_bgp.vpn.ipvpn:DummyDataplaneDriver linux = networking_bagpipe.bagpipe_bgp.vpn.ipvpn.mpls_linux_dataplane:MPLSLinuxDataplaneDriver mpls_linux_dataplane.MPLSLinuxDataplaneDriver = networking_bagpipe.bagpipe_bgp.vpn.ipvpn.mpls_linux_dataplane:MPLSLinuxDataplaneDriver mpls_ovs_dataplane.MPLSOVSDataplaneDriver = networking_bagpipe.bagpipe_bgp.vpn.ipvpn.mpls_ovs_dataplane:MPLSOVSDataplaneDriver ovs = networking_bagpipe.bagpipe_bgp.vpn.ipvpn.mpls_ovs_dataplane:MPLSOVSDataplaneDriver [console_scripts] bagpipe-bgp = networking_bagpipe.bagpipe_bgp.bgp_daemon:daemon_main bagpipe-bgp-cleanup = networking_bagpipe.bagpipe_bgp.bgp_daemon:cleanup_main bagpipe-fakerr = networking_bagpipe.bagpipe_bgp.fakerr:main bagpipe-impex2dot = networking_bagpipe.bagpipe_bgp.cli.impex2dot:main bagpipe-looking-glass = networking_bagpipe.bagpipe_bgp.cli.looking_glass:main bagpipe-rest-attach = networking_bagpipe.bagpipe_bgp.cli.rest_attach:main [neutron.agent.l2.extensions] bagpipe = networking_bagpipe.agent.bagpipe_ml2.agent_extension:BagpipeML2AgentExtension bagpipe_bgpvpn = networking_bagpipe.agent.bgpvpn.agent_extension:BagpipeBgpvpnAgentExtension [neutron.db.alembic_migrations] networking-bagpipe = networking_bagpipe.db.migration:alembic_migrations 
[neutron.ml2.mechanism_drivers] bagpipe = networking_bagpipe.driver.mech_bagpipe:BaGPipeMechanismDriver [oslo.config.opts] networking_bagpipe.api = networking_bagpipe.opts:list_api_opts networking_bagpipe.bagpipe_bgp_agent = networking_bagpipe.opts:list_bagpipe_bgp_agent_opts networking_bagpipe.bgp_common = networking_bagpipe.opts:list_bgp_common_opts networking_bagpipe.dataplane.evpn = networking_bagpipe.opts:list_dataplane_driver_evpn_opts networking_bagpipe.dataplane.ipvpn = networking_bagpipe.opts:list_dataplane_driver_ipvpn_opts networking_bagpipe.dataplane.ipvpn.mpls_linux = networking_bagpipe.opts:list_dataplane_driver_ipvpn_mpls_linux_opts networking_bagpipe.dataplane.ipvpn.mpls_ovs = networking_bagpipe.opts:list_dataplane_driver_ipvpn_mpls_ovs_opts networking_bagpipe.run_command = networking_bagpipe.opts:list_run_command_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/not-zip-safe0000664000175000017500000000000100000000000025272 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/pbr.json0000664000175000017500000000005600000000000024523 0ustar00zuulzuul00000000000000{"git_version": "5e24f34", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/requires.txt0000664000175000017500000000064100000000000025445 0ustar00zuulzuul00000000000000netaddr>=0.7.18 neutron-lib>=2.19.0 oslo.db>=4.37.0 oslo.config>=5.2.0 oslo.concurrency>=3.26.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.privsep>=2.3.0 oslo.rootwrap>=5.8.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 oslo.versionedobjects>=1.35.1 pyroute2>=0.5.7 stevedore>=1.20.0 exabgp>=4.0.4 
pecan>=1.3.2 neutron>=23.0.0 networking-bgpvpn>=12.0.0 networking-sfc>=10.0.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591455.0 networking_bagpipe-22.0.0/networking_bagpipe.egg-info/top_level.txt0000664000175000017500000000002300000000000025571 0ustar00zuulzuul00000000000000networking_bagpipe ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/pyproject.toml0000664000175000017500000000014500000000000020410 0ustar00zuulzuul00000000000000[build-system] requires = ["pbr>=5.7.0", "setuptools>=64.0.0", "wheel"] build-backend = "pbr.build" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.8903065 networking_bagpipe-22.0.0/releasenotes/0000775000175000017500000000000000000000000020165 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1743591455.9503052 networking_bagpipe-22.0.0/releasenotes/notes/0000775000175000017500000000000000000000000021315 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000023566 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/Remove-LinuxBridge-related-code-2e4a7d76478c120c.yaml0000664000175000017500000000032500000000000032333 0ustar00zuulzuul00000000000000--- upgrade: - | The ``linuxbridge`` Ml2 driver is removed from core ``Neutron``, and from networking-bagpipe. 
See the Neutron change for details: https://review.opendev.org/c/openstack/neutron/+/927216 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/bagpipe_bgp-00387f95d7c4ed9e.yaml0000664000175000017500000000064000000000000026674 0ustar00zuulzuul00000000000000--- features: - | This release integrates BaGPipe-BGP: rather than depending on the old bagpipe-bgp version found on github, the recent version of this component is developped in the networking_bagpipe package itself. This version has been adapted to leverage Openstack libraries for logging, configuration, driver loading, and only rely on packages found in generic Openstack requirements ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/bgpvpn-l2-ovs-6898d9ee5dbcf77f.yaml0000664000175000017500000000026000000000000027226 0ustar00zuulzuul00000000000000--- features: - | bagpipe-bgp now supports E-VPN with OVS thanks to this new dataplane driver, BGPVPNs of type L2 can now be supported, with both linuxbridge and OVS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/bgpvpn-routes-control-660a16ff9b1c24ca.yaml0000664000175000017500000000043600000000000030763 0ustar00zuulzuul00000000000000--- features: - | the project now supports the features required for the `bgpvpn-routes-control` API extension, including: control of local_pref, control of per port routes, control of redistribution of routes between BGPVPN with next-hop modified to point to a port. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/drop-py27-support-cd16e5d82985d9e9.yaml0000664000175000017500000000034200000000000027721 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of networking-bagpipe to support python 2.7 is OpenStack Train. The minimum version of Python now supported by networking-bagpipe is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/drop-python-3-6-and-3-7-8f08c90689525b35.yaml0000664000175000017500000000020100000000000030040 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/improve_fallback-96e524daf96ebcc1.yaml0000664000175000017500000000122300000000000030063 0ustar00zuulzuul00000000000000--- fixes: - | The IPVPN bgpvpn/router fallback mechanism has been improved. This mechanism allows traffic not matching any VRF route in the distributed BGPVPN implementation of bagpipe, to "fallback" and reach a Neutron router connected on the network. The implementation has been simplified and the risk removed of not reaching VRF destinations because of a stale ARP entry for the gateway IP. This improvement comes with the requirement of using the OpenVSwitch security group firewall driver on any node having both the l3agent and VMs with a need to reach BGPVPN destinations (e.g. single node setup or DVR setup). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/ipvpn_ovs-2eab0293cb97585b.yaml0000664000175000017500000000041700000000000026444 0ustar00zuulzuul00000000000000--- features: - | Rewrite of the IPVPN dataplane driver for OVS, mainly with the objective of implementing ECMP using OVS `select` groups instead of the `multipath` action (which requires maintaining per-flow state in the datapath, losing wildcarding). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/linux_mpls-1c4fd22486ad5733.yaml0000664000175000017500000000047500000000000026535 0ustar00zuulzuul00000000000000--- features: - | Base support was added to allow the use of 'bagpipe' driver for networking-bgpvpn with the linuxbridge compute node agent, using the kernel network stack MPLS implementation. This is usable although not fully complete yet (see bug `1627902 `_). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/privsep-sysctl-a6321b31de29fce1.yaml0000664000175000017500000000026300000000000027474 0ustar00zuulzuul00000000000000--- security: - | Change sysctl execution to use oslo.privsep and add necessary filters to rootwrap.d. Privsep daemons are started now for sysctl command execution. 
././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=networking_bagpipe-22.0.0/releasenotes/notes/remove-bagpipe-bgp-deprecated-action-cli-option-dfafa6b05d487e0c.yaml 22 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/remove-bagpipe-bgp-deprecated-action-cli-option-dfafa6b0000664000175000017500000000017400000000000033625 0ustar00zuulzuul00000000000000--- upgrade: - | bagpipe-bgp's old daemon CLI option ``action`` deprecated since Pike is now removed permanently. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/notes/sfc-2000351597a8c160.yaml0000664000175000017500000000044700000000000024661 0ustar00zuulzuul00000000000000--- features: - | networking-bagpipe now provides a driver for the networking-sfc project. Using the `bagpipe` sfc driver will result in the use of BGPVPN stiching route redistribution, and BGP flowspec for the realisation of a service chain defined by the networking-sfc API. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.954305 networking_bagpipe-22.0.0/releasenotes/source/0000775000175000017500000000000000000000000021465 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000021000000000000022735 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. 
release-notes:: :branch: unmaintained/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000022737 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000022737 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/2024.2.rst0000664000175000017500000000020200000000000022740 0ustar00zuulzuul00000000000000=========================== 2024.2 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.954305 networking_bagpipe-22.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000023113 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000025364 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.954305 networking_bagpipe-22.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000023622 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000026073 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/conf.py0000664000175000017500000002147500000000000022775 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Networking-bagpipe Release Notes documentation build configuration file # # This file is execfile()d with the current directory set to its # containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options openstackdocs_repo_name = 'openstack/networking-bagpipe' openstackdocs_auto_name = False openstackdocs_bug_project = 'networking-bagpipe' openstackdocs_bug_tag = '' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Networking-bagpipe Release Notes' copyright = '2016, Networking-bagpipe Developers' # Release notes are version independent # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'NetworkingBagpipeReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
# 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'NetworkingBagpipeReleaseNotes.tex', 'Networking-bagpipe Release Notes Documentation', 'Networking-bagpipe Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'networkingbagpipereleasenotes', 'Networking-bagpipe Release Notes Documentation', ['Networking-bagpipe Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'NetworkingBagpipeReleaseNotes', 'Networking-bagpipe Release Notes Documentation', 'Networking-bagpipe Developers', 'NetworkingBagpipeReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. 
# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/index.rst0000664000175000017500000000046000000000000023326 0ustar00zuulzuul00000000000000================================ Networking-bagpipe Release Notes ================================ .. toctree:: :maxdepth: 1 unreleased 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/newton.rst0000664000175000017500000000022000000000000023523 0ustar00zuulzuul00000000000000============================== Newton Series Release Notes ============================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000023301 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000023147 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. 
release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000023514 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000023341 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000023334 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000023340 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015300000000000024345 0ustar00zuulzuul00000000000000============================ Current Series Release Notes ============================ .. 
release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000023543 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000024031 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000023647 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000023142 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000023146 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. 
release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000023003 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/requirements.txt0000664000175000017500000000251000000000000020756 0ustar00zuulzuul00000000000000# Requirements lower bounds listed here are our best effort to keep them up to # date but we do not test them so no guarantee of having them all correct. If # you find any incorrect lower bounds, let us know or propose a fix. netaddr>=0.7.18 # BSD neutron-lib>=2.19.0 # Apache-2.0 oslo.db>=4.37.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.privsep>=2.3.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.versionedobjects>=1.35.1 # Apache-2.0 pyroute2>=0.5.7;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) stevedore>=1.20.0 # Apache-2.0 exabgp>=4.0.4 # BSD pecan>=1.3.2 # BSD # OpenStack CI will install the following projects from git # if they are in the required-projects list for a job: neutron>=23.0.0 # Apache-2.0 networking-bgpvpn>=12.0.0 # Apache-2.0 networking-sfc>=10.0.0 # Apache-2.0 # The comment below indicates this project repo is current with neutron-lib # and should receive neutron-lib consumption patches as they are released # in neutron-lib. It also implies the project will stay current with TC # and infra initiatives ensuring consumption patches can land. 
# neutron-lib-current ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.954305 networking_bagpipe-22.0.0/samples/0000775000175000017500000000000000000000000017140 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1743591455.958305 networking_bagpipe-22.0.0/samples/bagpipe-bgp/0000775000175000017500000000000000000000000021315 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/basic0000775000175000017500000000171400000000000022327 0ustar00zuulzuul00000000000000#!/bin/bash ip_netns_a=2.2.2.1 ip_netns_b=2.2.2.2 source $(dirname $0)/generic-functions clean_start for type in ipvpn evpn; do echo echo "---------------------- $type --------------------" echo netnsa="tst${type}a" netnsb="tst${type}b" r_a ip netns delete $netnsa r_a bagpipe-rest-attach --attach --port netns --network-type $type --vpn-instance-id $netnsa --ip $ip_netns_a r_b ip netns delete $netnsb r_b bagpipe-rest-attach --attach --port netns --network-type $type --vpn-instance-id $netnsb --ip $ip_netns_b wait_ready r_a ip netns exec $netnsa ping -c 3 -W 4 $ip_netns_b # r_a bagpipe-rest-attach --detach --port netns --network-type $type --vpn-instance-id $netnsa --ip $ip_netns_a # r_b bagpipe-rest-attach --detach --port netns --network-type $type --vpn-instance-id $netnsb --ip $ip_netns_b done echo echo "---------------------- logs --------------------" echo r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/chain-example10000775000175000017500000000720200000000000024040 0ustar00zuulzuul00000000000000#!/bin/bash # # This script will setup network namespaces SVM1 ("service VM1") and SVM2 so that # traffic 
between net1 (to which VM1 is attached) and net2 (to which VM2 is attached) # goes through SMV1 and SVM2: # # VM1-------net1 SVM1 --- SVM2 net2-----VM2 # .1 11.0.0. 12.0.0 .1 # # Route targets.... # # net1: :1 # net2: :2 # # - chain for traffic from net1 to net2: # hop12_0: first hop: net1 to SVM1 # hop12_1: second hop: SVM1 to SVM2 as=64512 hop12_0=$as:120 hop12_1=$as:121 ## ## - chain for traffic from net2 to net1: ## hop21_0: first hop: net2 to SVM2 ## hop21_1: second hop: SVM2 to SVM1 hop21_0=$as:210 hop21_1=$as:211 source $(dirname $0)/generic-functions clean_start clean_netns_all net1vm1 svm1 svm2 net2vm2 ## ## VM1: sur A ## r_a bagpipe-rest-attach --attach --port netns:to-vm1 --network-type ipvpn --vpn-instance-id net1vm1 --ip 11.0.0.1 --rt $as:1 --import-rt $hop12_0 ## SVM1: sur B ## ## [vrf:x1][if:to-svm1-x1]---[x1][netns:svm1][h0]---[to-svm1-h0][vrf:h01] ## ## ## x1:100.0.0.0/24 ## h01:30.0.0.0/24 ## h02:40.0.0.0/24 ## x2:200.0.0.0/24 ## attach x1: r_b bagpipe-rest-attach --attach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 \ --import $as:1 \ --readv-from-rt $hop12_1 --readv-to-rt $hop12_0 ## attach h0: #r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ # --import-rt $hop12_1 \ # --readv-from-rt $as:1 --readv-to-rt $hop21_1 r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ --import-rt $hop12_1 \ r_b bagpipe-rest-attach --attach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 11.0.0.0/24 --advertise-subnet --export-rt $hop21_1 --import-rt $hop12_1 r_b ./setup-cross-routing svm1 x1 100.0.0.254 h0 30.0.0.254 ## SVM2: sur A # # [vrf:h02][to-svm2-h0]---[h0][netns:svm2][x2]---[if:to-svm2-x2][vrf:x2] ## attach h0: #r_a bagpipe-rest-attach --attach 
--port netns:to-svm2-h0 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm2 --if2vpn h0 \ # --import-rt $hop21_1 \ # --readv-from-rt $as:2 --readv-to-rt $hop12_1 r_a bagpipe-rest-attach --attach --port netns:to-svm2-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm2 --if2vpn h0 \ --import-rt $hop21_1 \ r_a bagpipe-rest-attach --attach --port to-svm2-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 12.0.0.0/24 --advertise-subnet --export-rt $hop12_1 --import-rt $hop21_1 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm2-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm2 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $hop21_1 --readv-to-rt $hop21_0 r_a ./setup-cross-routing svm2 x2 200.0.0.254 h0 40.0.0.254 ## VM2: sur B ## r_b bagpipe-rest-attach --attach --port netns:to-vm2 --network-type ipvpn --vpn-instance-id net2vm2 --ip 12.0.0.1 --rt $as:2 --import-rt $hop21_0 # # Test # wait_ready r_a ip netns exec net1vm1 ping 12.0.0.1 -c 3 r_a bagpipe-looking-glass vpns instances net1vm1 dataplane flows r_b bagpipe-looking-glass vpns instances x1 dataplane flows r_b bagpipe-looking-glass vpns instances h01 dataplane flows r_a bagpipe-looking-glass vpns instances h02 dataplane flows r_a bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2 dataplane flows r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/chain-example1bis0000775000175000017500000000657300000000000024550 0ustar00zuulzuul00000000000000#!/bin/bash # # Destination-based setup with two-directions traffic combined and default routes announced at # intermediate hops (Ingress VRF between hops configured to statically announce a 0.0.0.0/0) # # This script will setup network namespaces 
SVM1 ("service VM1") and SVM2 so that # traffic between net1 (to which VM1 is attached) and net2 (to which VM2 is attached) # goes through SMV1 and SVM2: # # VM1-------net1 SVM1 --- SVM2 net2-----VM2 # .1 11.0.0. 12.0.0 .1 # # Route targets.... # # net1: :1 # net2: :2 # # - chain for traffic from net1 to net2: # hop12_0: first hop: net1 to SVM1 # hop12_1: second hop: SVM1 to SVM2 as=64512 hop12_0=$as:120 hop12_1=$as:121 ## ## - chain for traffic from net2 to net1: ## hop21_0: first hop: net2 to SVM2 ## hop21_1: second hop: SVM2 to SVM1 hop21_0=$as:210 hop21_1=$as:211 source $(dirname $0)/generic-functions clean_start clean_netns_all net1vm1 svm1 svm2 net2vm2 ## ## VM1: sur A ## r_a bagpipe-rest-attach --attach --port netns:to-vm1 --network-type ipvpn --vpn-instance-id net1vm1 --ip 11.0.0.1 --rt $as:1 --import-rt $hop12_0 ## SVM1: sur B ## ## [vrf:x1][if:to-svm1-x1]---[x1][netns:svm1][h0]---[to-svm1-h0][vrf:h01] ## ## ## x1:100.0.0.0/24 ## h01:30.0.0.0/24 ## h02:40.0.0.0/24 ## x2:200.0.0.0/24 ## attach x1: r_b bagpipe-rest-attach --attach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 \ --import $as:1 \ --readv-from-rt $as:2 --readv-to-rt $hop12_0 ## attach h0: r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ --import-rt $hop12_1 \ r_b bagpipe-rest-attach --attach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 0.0.0.0/0 --advertise-subnet --export-rt $hop21_1 --import-rt $hop12_1 r_b ./setup-cross-routing svm1 x1 100.0.0.254 h0 30.0.0.254 ## SVM2: sur A # # [vrf:h02][to-svm2-h0]---[h0][netns:svm2][x2]---[if:to-svm2-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm2-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm2 --if2vpn h0 \ --import-rt $hop21_1 \ r_a 
bagpipe-rest-attach --attach --port to-svm2-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet --export-rt $hop12_1 --import-rt $hop21_1 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm2-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm2 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $as:1 --readv-to-rt $hop21_0 r_a ./setup-cross-routing svm2 x2 200.0.0.254 h0 40.0.0.254 ## VM2: sur B ## r_b bagpipe-rest-attach --attach --port netns:to-vm2 --network-type ipvpn --vpn-instance-id net2vm2 --ip 12.0.0.1 --rt $as:2 --import-rt $hop21_0 # # Test # wait_ready r_a ip netns exec net1vm1 ping 12.0.0.1 -c 3 r_a bagpipe-looking-glass vpns instances net1vm1 dataplane flows r_b bagpipe-looking-glass vpns instances x1 dataplane flows r_b bagpipe-looking-glass vpns instances h01 dataplane flows r_a bagpipe-looking-glass vpns instances h02 dataplane flows r_a bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2 dataplane flows r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/chain-example20000775000175000017500000000556200000000000024050 0ustar00zuulzuul00000000000000#!/bin/bash # # This script will setup network namespaces SVM1 ("service VM1") and SVM2 so that # traffic between net1 (to which VM1 is attached) and net2 (to which VM2 is attached) # goes through SMV1 and SVM2: # # VM1-------net1 SVM1 --- SVM2 net2-----VM2 # .1 11.0.0. 12.0.0 .1 # # Route targets.... 
# # net1: :1 # net2: :2 # # - chain for traffic from net1 to net2: # hop12_0: first hop: net1 to SVM1 # hop12_1: second hop: SVM1 to SVM2 as=64512 hop12_0=$as:120 hop12_1=$as:121 ## ## - chain for traffic from net2 to net1: ## hop21_0: first hop: net2 to SVM2 ## hop21_1: second hop: SVM2 to SVM1 hop21_0=$as:210 hop21_1=$as:211 source $(dirname $0)/generic-functions clean_start clean_netns_all net1vm1 svm1 svm2 net2vm2 ## ## VM1: sur A ## r_a bagpipe-rest-attach --attach --port netns:to-vm1 --network-type ipvpn --vpn-instance-id net1vm1 --ip 11.0.0.1 --rt $as:1 --import-rt $hop12_0 ## SVM1: sur B ## ## [vrf:x1][if:to-svm1-x1]---[x1][netns:svm1][h0]---[to-svm1-h0][vrf:h01] ## ## ## x1:100.0.0.0/24 ## h01:30.0.0.0/24 ## h02:40.0.0.0/24 ## x2:200.0.0.0/24 ## attach x1: r_b bagpipe-rest-attach --attach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 \ --import $as:1 \ --readv-from-rt $hop12_1 --readv-to-rt $hop12_0 ## attach h0: r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ --import-rt $hop12_1 \ --readv-from-rt $as:1 --readv-to-rt $hop21_1 r_b ./setup-cross-routing svm1 x1 100.0.0.254 h0 30.0.0.254 ## SVM2: sur A # # [vrf:h02][to-svm2-h0]---[h0][netns:svm2][x2]---[if:to-svm2-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm2-h0 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm2 --if2vpn h0 \ --import-rt $hop21_1 \ --readv-from-rt $as:2 --readv-to-rt $hop12_1 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm2-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm2 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $hop21_1 --readv-to-rt $hop21_0 r_a ./setup-cross-routing svm2 x2 200.0.0.254 h0 40.0.0.254 ## VM2: sur B ## r_b bagpipe-rest-attach --attach --port netns:to-vm2 --network-type ipvpn --vpn-instance-id net2vm2 --ip 
12.0.0.1 --rt $as:2 --import-rt $hop21_0 # # Test # wait_ready r_a ip netns exec net1vm1 ping 12.0.0.1 -c 3 r_a bagpipe-looking-glass vpns instances net1vm1 dataplane flows r_b bagpipe-looking-glass vpns instances x1 dataplane flows r_b bagpipe-looking-glass vpns instances h01 dataplane flows r_a bagpipe-looking-glass vpns instances h02 dataplane flows r_a bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2 dataplane flows r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/chain-example30000775000175000017500000000660500000000000024050 0ustar00zuulzuul00000000000000#!/bin/bash # # This is an example of a service chain between two IP VPNs # for which we do not modify the RT import/export lists to # establish chaining. # # This script will setup network namespaces SVM1 ("service VM1") and SVM2 so that # traffic between net1 (to which VM1 is attached) and net2 (to which VM2 is attached) # goes through SMV1 and SVM2: # # VM1-------net1 SVM1 --- SVM2 net2-----VM2 # .1 11.0.0. 12.0.0 .1 # # Route targets.... 
# # net1: :1 # net2: :2 # # - chain for traffic from net1 to net2: # hop12_0: first hop: net1 to SVM1 # hop12_1: second hop: SVM1 to SVM2 as=64512 hop12_0=$as:120 hop12_1=$as:121 ## ## - chain for traffic from net2 to net1: ## hop21_0: first hop: net2 to SVM2 ## hop21_1: second hop: SVM2 to SVM1 hop21_0=$as:210 hop21_1=$as:211 source $(dirname $0)/generic-functions clean_start clean_netns_all net1vm1 svm1 svm2 net2vm2 ## ## VM1: sur A ## r_a bagpipe-rest-attach --attach --port netns:to-vm1 --network-type ipvpn --vpn-instance-id net1vm1 --ip 11.0.0.1 --rt $as:1 ## SVM1: sur B ## ## [vrf:x1][if:to-svm1-x1]---[x1][netns:svm1][h0]---[to-svm1-h0][vrf:h01] ## ## ## x1:100.0.0.0/24 ## h01:30.0.0.0/24 ## h02:40.0.0.0/24 ## x2:200.0.0.0/24 ## attach x1: r_b bagpipe-rest-attach --attach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 \ --import $as:1 \ --readv-from-rt $hop12_1 --readv-to-rt $as:1 ## attach h0: r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ --import-rt $hop12_1 \ --readv-from-rt $as:1 --readv-to-rt $hop21_1 r_b bagpipe-rest-attach --attach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 11.0.0.0/24 --advertise-subnet --export-rt $hop21_1 --import-rt $hop12_1 r_b ./setup-cross-routing svm1 x1 100.0.0.254 h0 30.0.0.254 ## SVM2: sur A # # [vrf:h02][to-svm2-h0]---[h0][netns:svm2][x2]---[if:to-svm2-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm2-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm2 --if2vpn h0 \ --import-rt $hop21_1 \ --readv-from-rt $as:2 --readv-to-rt $hop12_1 r_a bagpipe-rest-attach --attach --port to-svm2-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 12.0.0.0/24 --advertise-subnet --export-rt $hop12_1 --import-rt 
$hop21_1 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm2-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm2 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $hop21_1 --readv-to-rt $as:2 r_a ./setup-cross-routing svm2 x2 200.0.0.254 h0 40.0.0.254 ## VM2: sur B ## r_b bagpipe-rest-attach --attach --port netns:to-vm2 --network-type ipvpn --vpn-instance-id net2vm2 --ip 12.0.0.1 --rt $as:2 # # Test # wait_ready r_a ip netns exec net1vm1 ping 12.0.0.1 -c 3 r_a bagpipe-looking-glass vpns instances net1vm1 dataplane flows r_b bagpipe-looking-glass vpns instances x1 dataplane flows r_b bagpipe-looking-glass vpns instances h01 dataplane flows r_a bagpipe-looking-glass vpns instances h02 dataplane flows r_a bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2 dataplane flows r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/chain-redirect-lb0000775000175000017500000001653100000000000024525 0ustar00zuulzuul00000000000000#!/bin/bash # # 5-tuple classified chain setup with two-directions traffic combined and default routes announced at # intermediate hops (Ingress VRF between hops configured to statically announce a 0.0.0.0/0) # # This script will setup network namespaces SVM1 ("service VM1") and SVM2 so that # traffic between net1 (to which VM1 is attached) and net2 (to which VM2 is attached) # goes through SMV1 and SVM2 based on a 5-tuple classification: # # destination |-------| # port 80 |-| SVM20 | # -------------> |------| | |-------| -------------> # VM1-------net1 | SVM1 |--| net2-----VM2 # .1 11.0.0 <------------- |------| | |-------| <------------- 12.0.0 .1 # |-| SVM21 | source # |-------| port 80 # # Route targets.... 
# # net1: :1 # net2: :2 # # - chain for traffic from net1 to net2: # hop12_0: first hop: net1 to SVM1 # hop12_1: second hop: SVM1 to SVM2 as=64512 hop12_0=$as:120 hop12_1=$as:121 ## ## - chain for traffic from net2 to net1: ## hop21_0: first hop: net2 to SVM2 ## hop21_1: second hop: SVM2 to SVM1 hop21_0=$as:210 hop21_1=$as:211 ## ## - traffic from net1 to net2 redirection to chain based on a classifier: ## hop_redirect: redirect hop redirect_hop12_0=$as:300 redirect_hop21_0=$as:400 source $(dirname $0)/generic-functions clean_start clean_netns_all net1vm1 svm1 svm2 net2vm2 ## ## VM1: sur A ## ## [netns:net1vm1][tovpn]---[to-vm1][vrf:net1vm1] ## r_a bagpipe-rest-attach --attach --port netns:to-vm1 --network-type ipvpn --vpn-instance-id net1vm1 --ip 11.0.0.1 --rt $as:1 --import-rt $hop12_0 ## SVM1: sur B ## ## [vrf:x1][if:to-svm1-x1]---[x1][netns:svm1][h0]---[to-svm1-h0][vrf:h01] ## ## ## x1:100.0.0.0/24 ## h01:30.0.0.0/24 ## h02:40.0.0.0/24 ## x2:200.0.0.0/24 ## attach x1 and configure vrf:x1 to attract traffic destined for TCP port 80: ## ## [vrf:net1vm1]--->[vrf:redirect_hop12_0] r_b bagpipe-rest-attach --attach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 \ --import $as:1 \ --readv-from-rt $as:2 --readv-to-rt $redirect_hop12_0 \ --redirect-rt $hop12_0 --destination-port 80 ## attach h0: r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ --import-rt $hop12_1 \ r_b bagpipe-rest-attach --attach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 0.0.0.0/0 --advertise-subnet --export-rt $hop21_1 --import-rt $hop12_1 r_b ./setup-cross-routing svm1 x1 100.0.0.254 h0 30.0.0.254 ## SVM20: sur A # # [vrf:h02][to-svm20-h0]---[h0][netns:svm20][x2]---[if:to-svm20-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm20-h0 --mac 
52:54:20:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm20 --if2vpn h0 \ --import-rt $hop21_1 r_a bagpipe-rest-attach --attach --port to-svm20-h0 --mac 52:54:20:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --export-rt $hop12_1 --import-rt $hop21_1 \ --lb-consistent-hash-order 0 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm20-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm20 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $as:1 --readv-to-rt $redirect_hop21_0 \ --redirect-rt $hop21_0 --source-port 80 \ --lb-consistent-hash-order 0 r_a ./setup-cross-routing svm20 x2 200.0.0.254 h0 40.0.0.254 ## SVM21: sur B # # [vrf:h02][to-svm21-h0]---[h0][netns:svm21][x2]---[if:to-svm21-x2][vrf:x2] ## attach h0: r_b bagpipe-rest-attach --attach --port netns:to-svm21-h0 --mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.2/24 --netns svm21 --if2vpn h0 \ --import-rt $hop21_1 r_b bagpipe-rest-attach --attach --port to-svm21-h0 --mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --export-rt $hop12_1 --import-rt $hop21_1 \ --lb-consistent-hash-order 1 ## attach x2: r_b bagpipe-rest-attach --attach --port netns:to-svm21-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.2/24 --netns svm21 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $as:1 --readv-to-rt $redirect_hop21_0 \ --redirect-rt $hop21_0 --source-port 80 \ --lb-consistent-hash-order 1 r_b ./setup-cross-routing svm21 x2 200.0.0.253 h0 40.0.0.253 ## VM2: sur B ## r_b bagpipe-rest-attach --attach --port netns:to-vm2 --network-type ipvpn --vpn-instance-id net2vm2 --ip 12.0.0.1 --rt $as:2 --import-rt $hop21_0 # # Test # wait_ready ## Attended result: 100% packet loss r_a ip netns exec net1vm1 ping 12.0.0.1 -c 5 -W 1 ## Attended result: Connection to 12.0.0.1 80 port [tcp/http] succeeded! 
r_b ip netns exec net2vm2 screen -m -d "sh -c \"echo -n '\\n\\n---- SUCCESS !!! ----\\n\\n' | nc -l -p 80 -q 1\"" r_a ip netns exec net1vm1 nc -nv 12.0.0.1 80 sleep 5 r_b ip netns exec net2vm2 screen -m -d "sh -c \"echo -n '\\n\\n---- SUCCESS !!! ----\\n\\n' | nc -l -p 80 -q 1\"" r_a ip netns exec net1vm1 nc -nv 12.0.0.1 80 r_a bagpipe-looking-glass vpns instances net1vm1 dataplane flows r_a bagpipe-looking-glass vpns instances redirect-to-ipvpn-64512_300 dataplane flows r_b bagpipe-looking-glass vpns instances x1 dataplane flows r_b bagpipe-looking-glass vpns instances h01 dataplane flows r_a bagpipe-looking-glass vpns instances h02 dataplane flows r_a bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances h02 dataplane flows r_b bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances redirect-to-ipvpn-64512_400 dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2 dataplane flows r_b bagpipe-rest-attach --detach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 r_b bagpipe-rest-attach --detach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 0.0.0.0/0 --advertise-subnet r_b bagpipe-rest-attach --detach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 r_a bagpipe-rest-attach --detach --port to-svm20-h0 --mac 52:54:20:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --lb-consistent-hash-order 0 r_a bagpipe-rest-attach --detach --port netns:to-svm20-h0 --mac 52:54:20:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm20 --if2vpn h0 r_a bagpipe-rest-attach --detach --port netns:to-svm20-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm20 --if2vpn x2 r_b bagpipe-rest-attach --detach --port to-svm21-h0 
--mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --lb-consistent-hash-order 1 r_b bagpipe-rest-attach --detach --port netns:to-svm21-h0 --mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.2/24 --netns svm21 --if2vpn h0 r_b bagpipe-rest-attach --detach --port netns:to-svm21-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.2/24 --netns svm21 --if2vpn x2 r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/chain-redirect-lb20000775000175000017500000002230400000000000024602 0ustar00zuulzuul00000000000000#!/bin/bash # # 5-tuple classified chain setup with two-directions traffic combined and default routes announced at # intermediate hops (Ingress VRF between hops configured to statically announce a 0.0.0.0/0) # # This script will setup network namespaces SVM1 ("service VM1") and SVM2 so that # traffic between net1 (to which VM1 is attached) and net2 (to which VM2 is attached) # goes through SMV1 and SVM2 based on a 5-tuple classification: # # |-------| # |-| SVM20 | \ # destination | |-------| | # port 80 | | # -------------> |------| | |-------| | -------------> # VM1-------net1 | SVM1 |--|-| SVM21 | > net2-----VM2 # .1 11.0.0 <------------- |------| | |-------| | <------------- 12.0.0 .1 # | | source # | |-------| | port 80 # |-| SVM22 | / # |-------| # # Route targets.... 
# # net1: :1 # net2: :2 # # - chain for traffic from net1 to net2: # hop12_0: first hop: net1 to SVM1 # hop12_1: second hop: SVM1 to SVM2 as=64512 hop12_0=$as:120 hop12_1=$as:121 ## ## - chain for traffic from net2 to net1: ## hop21_0: first hop: net2 to SVM2 ## hop21_1: second hop: SVM2 to SVM1 hop21_0=$as:210 hop21_1=$as:211 ## ## - traffic from net1 to net2 redirection to chain based on a classifier: ## hop_redirect: redirect hop redirect_hop12_0=$as:300 redirect_hop21_0=$as:400 source $(dirname $0)/generic-functions clean_start clean_netns_all net1vm1 svm1 svm2 net2vm2 ## ## VM1: sur A ## ## [netns:net1vm1][tovpn]---[to-vm1][vrf:net1vm1] ## r_a bagpipe-rest-attach --attach --port netns:to-vm1 --network-type ipvpn --vpn-instance-id net1vm1 --ip 11.0.0.1 --rt $as:1 --import-rt $hop12_0 ## SVM1: sur B ## ## [vrf:x1][if:to-svm1-x1]---[x1][netns:svm1][h0]---[to-svm1-h0][vrf:h01] ## ## ## x1:100.0.0.0/24 ## h01:30.0.0.0/24 ## h02:40.0.0.0/24 ## x2:200.0.0.0/24 ## attach x1 and configure vrf:x1 to attract traffic destined for TCP port 80: ## ## [vrf:net1vm1]--->[vrf:redirect_hop12_0] r_b bagpipe-rest-attach --attach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 \ --import $as:1 \ --readv-from-rt $as:2 --readv-to-rt $redirect_hop12_0 \ --redirect-rt $hop12_0 --destination-port 80 ## attach h0: r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ --import-rt $hop12_1 \ r_b bagpipe-rest-attach --attach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 0.0.0.0/0 --advertise-subnet --export-rt $hop21_1 --import-rt $hop12_1 r_b ./setup-cross-routing svm1 x1 100.0.0.254 h0 30.0.0.254 ## SVM2x: sur A # ## SVM22 # [vrf:h02][to-svm22-h0]---[h0][netns:svm22][x2]---[if:to-svm22-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm22-h0 --mac 
52:54:22:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.3/24 --netns svm22 --if2vpn h0 \ --import-rt $hop21_1 r_a bagpipe-rest-attach --attach --port to-svm22-h0 --mac 52:54:22:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --export-rt $hop12_1 --import-rt $hop21_1 \ --lb-consistent-hash-order 2 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm22-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.3/24 --netns svm22 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $as:1 --readv-to-rt $redirect_hop21_0 \ --redirect-rt $hop21_0 --source-port 80 \ --lb-consistent-hash-order 2 r_a ./setup-cross-routing svm22 x2 200.0.0.254 h0 40.0.0.254 ## SVM21 # [vrf:h02][to-svm21-h0]---[h0][netns:svm21][x2]---[if:to-svm21-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm21-h0 --mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.2/24 --netns svm21 --if2vpn h0 \ --import-rt $hop21_1 r_a bagpipe-rest-attach --attach --port to-svm21-h0 --mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --export-rt $hop12_1 --import-rt $hop21_1 \ --lb-consistent-hash-order 1 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm21-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.2/24 --netns svm21 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $as:1 --readv-to-rt $redirect_hop21_0 \ --redirect-rt $hop21_0 --source-port 80 \ --lb-consistent-hash-order 1 r_a ./setup-cross-routing svm21 x2 200.0.0.254 h0 40.0.0.254 ## SVM20 # [vrf:h02][to-svm20-h0]---[h0][netns:svm20][x2]---[if:to-svm20-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm20-h0 --mac 52:54:20:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm20 --if2vpn h0 \ --import-rt $hop21_1 r_a bagpipe-rest-attach --attach --port to-svm20-h0 --mac 52:54:20:99:99:99 --network-type ipvpn 
--vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --export-rt $hop12_1 --import-rt $hop21_1 \ --lb-consistent-hash-order 0 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm20-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm20 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $as:1 --readv-to-rt $redirect_hop21_0 \ --redirect-rt $hop21_0 --source-port 80 \ --lb-consistent-hash-order 0 r_a ./setup-cross-routing svm20 x2 200.0.0.254 h0 40.0.0.254 ## VM2: sur B ## r_b bagpipe-rest-attach --attach --port netns:to-vm2 --network-type ipvpn --vpn-instance-id net2vm2 --ip 12.0.0.1 --rt $as:2 --import-rt $hop21_0 # # Test # wait_ready ## Attended result: 100% packet loss r_a ip netns exec net1vm1 ping 12.0.0.1 -c 5 -W 1 ## Attended result: Connection to 12.0.0.1 80 port [tcp/http] succeeded! r_b ip netns exec net2vm2 screen -m -d "sh -c \"echo -n '\\n\\n---- SUCCESS !!! ----\\n\\n' | nc -l -p 80 -q 1\"" r_a ip netns exec net1vm1 nc -nv 12.0.0.1 80 sleep 5 r_b ip netns exec net2vm2 screen -m -d "sh -c \"echo -n '\\n\\n---- SUCCESS !!! ----\\n\\n' | nc -l -p 80 -q 1\"" r_a ip netns exec net1vm1 nc -nv 12.0.0.1 80 sleep 5 r_b ip netns exec net2vm2 screen -m -d "sh -c \"echo -n '\\n\\n---- SUCCESS !!! 
----\\n\\n' | nc -l -p 80 -q 1\"" r_a ip netns exec net1vm1 nc -nv 12.0.0.1 80 r_a bagpipe-looking-glass vpns instances net1vm1 dataplane flows r_a bagpipe-looking-glass vpns instances redirect-to-ipvpn-64512_300 dataplane flows r_b bagpipe-looking-glass vpns instances x1 dataplane flows r_b bagpipe-looking-glass vpns instances h01 dataplane flows r_a bagpipe-looking-glass vpns instances h02 dataplane flows r_a bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances redirect-to-ipvpn-64512_400 dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2 dataplane flows r_b bagpipe-rest-attach --detach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 r_b bagpipe-rest-attach --detach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 0.0.0.0/0 --advertise-subnet r_b bagpipe-rest-attach --detach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 r_a bagpipe-rest-attach --detach --port to-svm20-h0 --mac 52:54:20:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --lb-consistent-hash-order 0 r_a bagpipe-rest-attach --detach --port netns:to-svm20-h0 --mac 52:54:20:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm20 --if2vpn h0 r_a bagpipe-rest-attach --detach --port netns:to-svm20-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm20 --if2vpn x2 \ --lb-consistent-hash-order 0 r_a bagpipe-rest-attach --detach --port to-svm22-h0 --mac 52:54:22:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --lb-consistent-hash-order 2 r_a bagpipe-rest-attach --detach --port netns:to-svm22-h0 --mac 52:54:22:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.3/24 --netns svm20 --if2vpn h0 r_a bagpipe-rest-attach --detach --port 
netns:to-svm22-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.3/24 --netns svm22 --if2vpn x2 \ --lb-consistent-hash-order 2 r_a bagpipe-rest-attach --detach --port to-svm21-h0 --mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet \ --lb-consistent-hash-order 1 r_a bagpipe-rest-attach --detach --port netns:to-svm21-h0 --mac 52:54:21:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.2/24 --netns svm21 --if2vpn h0 r_a bagpipe-rest-attach --detach --port netns:to-svm21-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.2/24 --netns svm21 --if2vpn x2 \ --lb-consistent-hash-order 1 r_a bagpipe-looking-glass vpns instances r_b bagpipe-looking-glass vpns instances r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/chain-traffic-redirect0000775000175000017500000001112200000000000025535 0ustar00zuulzuul00000000000000#!/bin/bash # # 5-tuple classified chain setup with two-directions traffic combined and default routes announced at # intermediate hops (Ingress VRF between hops configured to statically announce a 0.0.0.0/0) # # This script will setup network namespaces SVM1 ("service VM1") and SVM2 so that # traffic between net1 (to which VM1 is attached) and net2 (to which VM2 is attached) # goes through SMV1 and SVM2 based on a 5-tuple classification: # # destination # port 80 # -------------> |------| |------| -------------> # VM1-------net1 | SVM1 |----| SVM2 | net2-----VM2 # .1 11.0.0 <------------- |------| |------| <------------- 12.0.0 .1 # source # port 80 # # Route targets.... 
# # net1: :1 # net2: :2 # # - chain for traffic from net1 to net2: # hop12_0: first hop: net1 to SVM1 # hop12_1: second hop: SVM1 to SVM2 as=64512 hop12_0=$as:120 hop12_1=$as:121 ## ## - chain for traffic from net2 to net1: ## hop21_0: first hop: net2 to SVM2 ## hop21_1: second hop: SVM2 to SVM1 hop21_0=$as:210 hop21_1=$as:211 ## ## - traffic from net1 to net2 redirection to chain based on a classifier: ## hop_redirect: redirect hop redirect_hop12_0=$as:300 redirect_hop21_0=$as:400 source $(dirname $0)/generic-functions clean_start clean_netns_all net1vm1 svm1 svm2 net2vm2 ## ## VM1: sur A ## ## [netns:net1vm1][tovpn]---[to-vm1][vrf:net1vm1] ## r_a bagpipe-rest-attach --attach --port netns:to-vm1 --network-type ipvpn --vpn-instance-id net1vm1 --ip 11.0.0.1 --rt $as:1 --import-rt $hop12_0 ## SVM1: sur B ## ## [vrf:x1][if:to-svm1-x1]---[x1][netns:svm1][h0]---[to-svm1-h0][vrf:h01] ## ## ## x1:100.0.0.0/24 ## h01:30.0.0.0/24 ## h02:40.0.0.0/24 ## x2:200.0.0.0/24 ## attach x1 and configure vrf:x1 to attract traffic destined for TCP port 80: ## ## [vrf:net1vm1]--->[vrf:redirect_hop12_0] r_b bagpipe-rest-attach --attach --port netns:to-svm1-x1 --network-type ipvpn --vpn-instance-id x1 --ip 100.0.0.1/24 --netns svm1 --if2vpn x1 \ --import $as:1 \ --readv-from-rt $as:2 --readv-to-rt $redirect_hop12_0 \ --redirect-rt $hop12_0 --destination-port 80 ## attach h0: r_b bagpipe-rest-attach --attach --port netns:to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 30.0.0.1/24 --netns svm1 --if2vpn h0 \ --import-rt $hop12_1 \ r_b bagpipe-rest-attach --attach --port to-svm1-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h01 --ip 0.0.0.0/0 --advertise-subnet --export-rt $hop21_1 --import-rt $hop12_1 r_b ./setup-cross-routing svm1 x1 100.0.0.254 h0 30.0.0.254 ## SVM2: sur A # # [vrf:h02][to-svm2-h0]---[h0][netns:svm2][x2]---[if:to-svm2-x2][vrf:x2] ## attach h0: r_a bagpipe-rest-attach --attach --port netns:to-svm2-h0 --mac 
52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 40.0.0.1/24 --netns svm2 --if2vpn h0 \ --import-rt $hop21_1 \ r_a bagpipe-rest-attach --attach --port to-svm2-h0 --mac 52:54:00:99:99:99 --network-type ipvpn --vpn-instance-id h02 --ip 0.0.0.0/0 --advertise-subnet --export-rt $hop12_1 --import-rt $hop21_1 ## attach x2: r_a bagpipe-rest-attach --attach --port netns:to-svm2-x2 --network-type ipvpn --vpn-instance-id x2 --ip 200.0.0.1/24 --netns svm2 --if2vpn x2 \ --import $as:2 \ --readv-from-rt $as:1 --readv-to-rt $redirect_hop21_0 \ --redirect-rt $hop21_0 --source-port 80 r_a ./setup-cross-routing svm2 x2 200.0.0.254 h0 40.0.0.254 ## VM2: sur B ## r_b bagpipe-rest-attach --attach --port netns:to-vm2 --network-type ipvpn --vpn-instance-id net2vm2 --ip 12.0.0.1 --rt $as:2 --import-rt $hop21_0 # # Test # wait_ready ## Attended result: 100% packet loss r_a ip netns exec net1vm1 ping 12.0.0.1 -c 5 -W 1 ## Attended result: Connection to 12.0.0.1 80 port [tcp/http] succeeded! r_b ip netns exec net2vm2 screen -m -d "sh -c \"echo -n '\\n\\n---- SUCCESS !!! 
----\\n\\n' | nc -l -p 80 -q 1\"" r_a ip netns exec net1vm1 nc -nv 12.0.0.1 80 r_a bagpipe-looking-glass vpns instances net1vm1 dataplane flows r_a bagpipe-looking-glass vpns instances redirect-to-ipvpn-64512_300 dataplane flows r_b bagpipe-looking-glass vpns instances x1 dataplane flows r_b bagpipe-looking-glass vpns instances h01 dataplane flows r_a bagpipe-looking-glass vpns instances h02 dataplane flows r_a bagpipe-looking-glass vpns instances x2 dataplane flows r_b bagpipe-looking-glass vpns instances redirect-to-ipvpn-64512_400 dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2 dataplane flows r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/evpn2ipvpn0000775000175000017500000000276100000000000023360 0ustar00zuulzuul00000000000000#!/bin/bash # # This script will setup traffic between E-VPN net1 (to which VM1 is attached) # and E-VPN net2 (to which VM2 is attached) goes through an IP-VPN: # # VM1-------net1 <---- IP-VPN ----> net2-------VM2 # .1 11.0.0. 
12.0.0 .1 # # Route targets: # # net1: :1 # net2: :2 # IP-VPN: :10 as=64512 source $(dirname $0)/generic-functions clean_start ## ## VM1: sur A ## ## attach on E-VPN: r_a bagpipe-rest-attach --attach --netns net1vm1 --port netns:to-vm1 --network-type evpn --vpn-instance-id net1vm1_evpn --ip 11.0.0.1 --rt $as:1 ## attach on IP-VPN: MAC_VM1=$(r_a ip netns exec net1vm1 cat /sys/class/net/tovpn/address) r_a bagpipe-rest-attach --attach --port evpn:net1vm1_evpn --network-type ipvpn --vpn-instance-id net1vm1_ipvpn --ip 11.0.0.1 --mac $MAC_VM1 --rt $as:10 ## ## VM2: sur B ## ## attach on E-VPN: r_b bagpipe-rest-attach --attach --netns net2vm2 --port netns:to-vm2 --network-type evpn --vpn-instance-id net2vm2_evpn --ip 12.0.0.1 --rt $as:2 ## attach on IP-VPN: MAC_VM2=$(r_b ip netns exec net2vm2 cat /sys/class/net/tovpn/address) r_b bagpipe-rest-attach --attach --port evpn:net2vm2_evpn --network-type ipvpn --vpn-instance-id net2vm2_ipvpn --ip 12.0.0.1 --mac $MAC_VM2 --rt $as:10 # # Test # wait_ready r_a ip netns exec net1vm1 ping 12.0.0.1 -c 3 r_a bagpipe-looking-glass vpns instances net1vm1_ipvpn dataplane flows r_b bagpipe-looking-glass vpns instances net2vm2_ipvpn dataplane flows r_both bagpipe-looking-glass logs clean_stop ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1743591409.0 networking_bagpipe-22.0.0/samples/bagpipe-bgp/examples.md0000664000175000017500000000102200000000000023450 0ustar00zuulzuul00000000000000Examples ======== Each example script is executed as