././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/0000755000175000017500000000000000000000000017674 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/.coveragerc0000644000175000017500000000015100000000000022012 0ustar00jamespagejamespage00000000000000[run] branch = True source = networking_odl omit = networking_odl/tests/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/.mailmap0000644000175000017500000000121000000000000021307 0ustar00jamespagejamespage00000000000000# Format is: # # lawrancejing Jiajun Liu Zhongyue Luo Kun Huang Zhenguo Niu Isaku Yamahata Isaku Yamahata Morgan Fainberg Michel Peterson ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/.pylintrc0000644000175000017500000000627700000000000021555 0ustar00jamespagejamespage00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. # ignore=.git,tests [MESSAGES CONTROL] # NOTE(gus): This is a long list. 
A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise locally-disabled, # "E" Error for important programming issues (likely bugs) access-member-before-definition, no-member, no-method-argument, no-self-argument, # "W" Warnings for stylistic problems or minor programming issues abstract-method, arguments-differ, attribute-defined-outside-init, bad-builtin, bad-indentation, broad-except, cyclic-import, dangerous-default-value, deprecated-lambda, expression-not-assigned, fixme, global-statement, no-init, non-parent-init-called, protected-access, redefined-builtin, redefined-outer-name, signature-differs, star-args, super-init-not-called, unpacking-non-sequence, unused-argument, unused-import, unused-variable, # "C" Coding convention violations bad-continuation, invalid-name, missing-docstring, superfluous-parens, # "R" Refactor recommendations abstract-class-little-used, abstract-class-not-used, duplicate-code, interface-not-implemented, no-self-use, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-public-methods, too-many-return-statements, too-many-statements, # new for python3 version of pylint chained-comparison, consider-using-dict-comprehension, consider-using-in, consider-using-set-comprehension, unnecessary-pass, useless-object-inheritance, self-cls-assignment, no-else-return, [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are 
ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [CLASSES] # List of interface methods to ignore, separated by a comma. ignore-iface-methods= [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use oslo_serialization.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/.stestr.conf0000644000175000017500000000011400000000000022141 0ustar00jamespagejamespage00000000000000[DEFAULT] test_path=${OS_TEST_PATH:-./networking_odl/tests/unit} top_dir=./ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/.zuul.d/0000755000175000017500000000000000000000000021173 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/.zuul.d/jobs.yaml0000644000175000017500000002057200000000000023022 0ustar00jamespagejamespage00000000000000- job: name: networking-odl-config-job description: Fake job to hold configuration settings for jobs vars: odl_version_map: oxygen: &oxygen oxygen-latest fluorine: &fluorine fluorine-snapshot-0.9 neon: &neon neon-latest sodium: &sodium sodium-snapshot-0.11 common_devstack_vars: 
&devstack_vars devstack_localrc: ODL_TIMEOUT: 60 ODL_RELEASE: latest-snapshot # Set here which ODL openstack service provider to use ODL_NETVIRT_KARAF_FEATURE: odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack,odl-neutron-logger,odl-neutron-hostconfig-ovs # Switch to using the ODL's L3 implementation ODL_L3: True # public network connectivity ODL_PROVIDER_MAPPINGS: public:br-ex PUBLIC_PHYSICAL_NETWORK: public PUBLIC_BRIDGE: br-ex Q_USE_PUBLIC_VETH: False # Enable debug logs for odl ovsdb ODL_NETVIRT_DEBUG_LOGS: True # Database MYSQL_PASSWORD: secretmysql DATABASE_QUERY_LOGGING: True OS_LOG_PATH: '{{ zuul.executor.log_root }}' IS_GATE: True devstack_services: &devstack_services c-api: True c-bak: True c-sch: True c-vol: True cinder: True dstat: True g-api: True g-reg: True horizon: False key: True mysql: True n-api-meta: True n-api: True n-cauth: False n-cond: True n-cpu: True n-crt: True n-novnc: False n-obj: True n-sch: True neutron: True neutron-agent: False neutron-api: True neutron-dhcp: True neutron-l3: False neutron-metadata-agent: True neutron-qos: True placement-api: True placement-client: False q-agt: False q-dhcp: False q-l3: False q-meta: False q-svc: False rabbit: True devstack_plugins: &devstack_plugins networking-odl: https://opendev.org/openstack/networking-odl neutron: https://opendev.org/openstack/neutron - job: name: networking-odl-tempest-base parent: devstack-tempest description: | Base job for tempest-based tests pre-run: playbooks/tempest/pre.yaml run: playbooks/tempest/run.yaml post-run: playbooks/tempest/post.yaml voting: false required-projects: - openstack/networking-odl - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack/neutron-fwaas - openstack/neutron-tempest-plugin roles: - zuul: openstack/devstack timeout: 9000 irrelevant-files: &irrelevant_files - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ vars: <<: *devstack_vars tox_envlist: 
all-plugin zuul_copy_output: '{{ devstack_log_dir }}/screen-karaf.log': 'logs' extensions_to_txt: log: True ini: True devstack_plugins: <<: *devstack_plugins neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git # Only scenario tests and some compute API tests actually verify ODL is working # Any API networking tests don't bring up any VMs and just verify that stuff happened in the # Neutron DB, so they don't actually fail even if ODL is not running at all. tempest_test_regex: tempest\.(api.compute|scenario|thirdparty)|neutron_tempest_plugin.scenario tempest_test_blacklist: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/tempest-blacklist.txt" - job: name: networking-odl-tempest-oxygen parent: networking-odl-tempest-base vars: devstack_localrc: ODL_RELEASE: *oxygen - job: name: networking-odl-tempest-fluorine parent: networking-odl-tempest-base vars: devstack_localrc: ODL_RELEASE: *fluorine - job: name: networking-odl-devstack-base parent: devstack description: | Base job for devstack-based tests pre-run: playbooks/devstack/pre.yaml required-projects: - openstack/networking-odl - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack/neutron-fwaas roles: - zuul: openstack/devstack timeout: 9000 irrelevant-files: *irrelevant_files vars: <<: *devstack_vars run_devstack: True - job: name: networking-odl-devstack-base-tox parent: networking-odl-devstack-base pre-run: playbooks/devstack-tox/pre.yaml run: playbooks/devstack-tox/run.yaml post-run: playbooks/devstack-tox/post.yaml description: | Base job for devstack tests that use a tox environment - job: name: networking-odl-functional-base parent: networking-odl-devstack-base-tox pre-run: playbooks/functional/pre.yaml description: | Base job for functional tests timeout: 1800 roles: - zuul: openstack/devstack vars: tox_envlist: functional zuul_copy_output: '{{ devstack_log_dir }}/functional-logs': 'logs' '{{ devstack_log_dir }}/screen-karaf.log': 
'logs' extensions_to_txt: log: True devstack_localrc: HOST_IP: 127.0.0.1 UNSTACK_KEEP_ODL: True run_devstack: False - job: name: networking-odl-functional-neon parent: networking-odl-functional-base vars: devstack_localrc: ODL_RELEASE: *neon - job: name: networking-odl-functional-sodium parent: networking-odl-functional-base vars: devstack_localrc: ODL_RELEASE: *sodium - job: name: networking-odl-rally-neon parent: rally-task-at-devstack vars: devstack_localrc: ODL_RELEASE: *neon devstack_plugins: rally-openstack: https://opendev.org/openstack/rally-openstack networking-odl: https://opendev.org/openstack/networking-odl rally_task: rally-jobs/odl.yaml timeout: 7500 required-projects: - openstack/devstack - openstack/devstack-gate - openstack/networking-odl - openstack/rally - openstack/rally-openstack irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^neutron/tests/unit/.*$ - ^releasenotes/.*$ - ^tools/.*$ - ^tox.ini$ - job: name: networking-odl-rally-sodium parent: rally-task-at-devstack vars: devstack_localrc: ODL_RELEASE: *sodium devstack_plugins: rally-openstack: https://opendev.org/openstack/rally-openstack networking-odl: https://opendev.org/openstack/networking-odl rally_task: rally-jobs/odl.yaml timeout: 7500 required-projects: - openstack/devstack - openstack/devstack-gate - openstack/networking-odl - openstack/rally - openstack/rally-openstack irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - ^neutron/locale/.*$ - ^neutron/tests/unit/.*$ - ^releasenotes/.*$ - ^tools/.*$ - ^tox.ini$ # >>> LEGACY JOBS TO REPLACE - job: name: networking-odl-grenade parent: legacy-dsvm-base run: playbooks/legacy/grenade-dsvm-networking-odl/run.yaml post-run: playbooks/legacy/grenade-dsvm-networking-odl/post.yaml timeout: 9000 required-projects: - openstack/grenade - openstack/devstack-gate - openstack/networking-odl - job: name: networking-odl-tempest-oxygen-multinode parent: legacy-dsvm-base-multinode run: 
playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapshot/run.yaml post-run: playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapshot/post.yaml timeout: 10800 required-projects: - openstack/devstack-gate - openstack/networking-odl - openstack/tempest nodeset: legacy-ubuntu-xenial-2-node - job: name: networking-odl-tempest-fluorine-multinode parent: legacy-dsvm-base-multinode run: playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snapshot/run.yaml post-run: playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snapshot/post.yaml timeout: 10800 required-projects: - openstack/devstack-gate - openstack/networking-odl - openstack/tempest nodeset: legacy-ubuntu-xenial-2-node # <<< LEGACY JOBS TO REPLACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/.zuul.d/project.yaml0000644000175000017500000001006300000000000023525 0ustar00jamespagejamespage00000000000000- project: templates: - openstack-python3-ussuri-jobs-neutron - release-notes-jobs-python3 - periodic-stable-jobs-neutron - publish-openstack-docs-pti - check-requirements check: jobs: - openstack-tox-pep8: required-projects: - openstack/ceilometer - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack-tox-cover: required-projects: - openstack/ceilometer - openstack/neutron - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack-tox-docs: required-projects: - openstack/ceilometer - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack-tox-lower-constraints: required-projects: - openstack/ceilometer - openstack/neutron - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack-tox-py36: required-projects: - 
openstack/ceilometer - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - networking-odl-grenade: voting: false irrelevant-files: - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - networking-odl-tempest-oxygen - networking-odl-tempest-fluorine - networking-odl-tempest-oxygen-multinode: voting: false irrelevant-files: - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - networking-odl-tempest-fluorine-multinode: voting: false irrelevant-files: - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - networking-odl-rally-neon: voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ - networking-odl-rally-sodium: voting: false irrelevant-files: - ^.*\.rst$ - ^doc/.*$ - ^releasenotes/.*$ - networking-odl-functional-neon - networking-odl-functional-sodium gate: jobs: - openstack-tox-pep8: required-projects: - openstack/ceilometer - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack-tox-docs: required-projects: - openstack/ceilometer - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack-tox-lower-constraints: required-projects: - openstack/ceilometer - openstack/neutron - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - openstack-tox-py36: required-projects: - openstack/ceilometer - openstack/neutron-fwaas - openstack/networking-l2gw - openstack/networking-sfc - openstack/networking-bgpvpn - networking-odl-functional-neon - networking-odl-functional-sodium ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/AUTHORS0000644000175000017500000002615400000000000020754 
0ustar00jamespagejamespage00000000000000AKamyshnikova Aaron Rosen Achuth Maniyedath Achuth Maniyedath Adam Harwell Akihiro MOTOKI Akihiro Motoki Aleks Chirko Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alon Kochba Amir Sadoughi Andre Pech Andreas Jaeger Andreas Jaeger Angus Lees Anh Tran Anil Vishnoi Ankur Gupta Ann Kamyshnikova Armando Migliaccio Arvind Somy Arvind Somya Ashik Alias Assaf Muller Atsushi SAKAI Barak Dabush Bernard Cafarelli Bhuvan Arumugam Bob Kukura Bob Melander Boden R Brad Hall Brant Knudson Brian Waldon Cao Xuan Hoang Carl Baldwin Cedric Brandily Chang Bo Guo Christian Berendt Chuck Short Clark Boylan Clint Byrum Corey Bryant Cédric Ollivier Dan Prince Dan Wendlandt Davanum Srinivas Dave Lapsley Dave Tucker Deepak N Deepthi V V Dirk Mueller Dong Jun Doug Hellmann Doug Hellmann Doug Wiegley Ed Warnicke Edan David Edgar Magana Elod Illes Elod Illes Emilien Macchi Eugene Nikanorov Federico Federico Ressi Flavio Fernandes Flavio Percoco Frederick F. Kautz IV Gary Kotton Gary Kotton Gauvain Pocentek Gordon Chung Guilherme Salgado Guo Ruijing Guoshuai Li Han Manjong Hareesh Puthalath He Jie Xu Hemanth Ravi Henry Gessau Henry Gessau Henry Gessau HenryVIII Hirofumi Ichihara Ian Wienand Ignacio Scopetta Igor Duarte Cardoso Ihar Hrachyshka Ionuț Arțăriși Irena Berezovsky Isaku Yamahata Isaku Yamahata Isaku Yamahata JJ Asghar Jacek Swiderski Jaime Caamaño Ruiz Jakub Libosvar James E. Blair James E. Blair James E. Blair James Page Jamo Luhrsen Jason Kölker Jay Pipes Jeremy Liu Jeremy Stanley Jiajun Liu Joe Gordon Joe Heck John Dunning Jon Schlueter Jordan Tardif Josh Juan Vidal Juliano Martinez Julien Danjou Justin Lund Keshava Bharadwaj Kevin Benton Kevin L. 
Mitchell Koby Aizer Kris Lindgren Kun Huang Kyle Mestery Kyle Mestery Lajos Katona Luis Tomas Bolivar Luke Gorrie Luong Anh Tuan Major Hayden Manjeet Singh Bhatia Manuel Buil MaoyangLiu Marcelo Amaral Marcus G K Williams Mark McClain Mark McClain Mark McLoughlin Maru Newby Maru Newby Mate Lakat Matt Riedemann Matthew Treinish Matthew Treinish Michel Peterson Miguel Angel Ajo Mike Kolesnik Mohammad Banikazemi Mohammed Zaheeruddin Malick Monty Taylor Morgan Fainberg Moshe Levi Motohiro OTSUKA N Vivekanandan Nachi Ueno Nachi Ueno Nader Lahouti Nam Nguyen Hoai Nguyen Hai Truong Nikolas Hermanns Oleg Bondarev OpenStack Release Bot Patrick Laurin Paul Michali Pramod Pramod Praneet Bachheti Prince Nana Rajaram Mallya Rajiv Kumar Rajiv Kumar Rajiv Kumar Ralf Haferkamp Reedip Rich Curran Ritu Sood Robert Kukura Roman Podoliaka Romil Gupta Ronald Bradford Rui Zang Russell Bryant Ryota MIBU Sai Sindhur Malleni Salvatore Orlando Salvatore Orlando Sam Hague Samer Deeb Santhosh Santhosh Kumar Sascha Peilicke Sascha Peilicke Sascha Peilicke Sean Dague Sean Dague Sean M. Collins Sean M. 
Collins Sean McGinnis Sergey Lukjanov Sergey Skripnick Shiv Haris Somik Behera Somik Behera SongmingYan Sridhar Gaddam Sukhdev Sumit Naiksatam Sushil Kumar Swaminathan Vasudevan Swapnil Kulkarni (coolsvap) Sylvain Afchain Sławek Kapłoński Terry Wilson Thierry Carrez Thomas Bechtold Thomas Morin Tim Miller Tim Rozet Tony Breeds Trinath Somanchi Tuan Do Anh Tyler Smith Victor Pickard Vieri <15050873171@163.com> Vikram Hosakote Vishal Thapar Vivekanandan Narasimhan Vu Cong Tuan Waldemar Znoinski Weidong Shao Wu Wenxiang XieYingYun YAMAMOTO Takashi YAMAMOTO Takashi Yaguang Tang Yalei Wang Yibo Cai Ying Liu Yinon Yong Sheng Gong Yong Sheng Gong Yoshihiro Kaneko Zang MingJie Zhenguo Niu ZhiQiang Fan ZhiQiang Fan Zhongyue Luo alexpilotti armando-migliaccio armando-migliaccio avnish brandonzhao caoyuan chengebj5238 da52700 elajkat fumihiko kakuma gecong1973 gengchc2 gong yong sheng gongysh gongysh gordon chung juraj.linkes justin Lund karthik.prasad lawrancejing lijunjie lingyongxu liu-sheng liuqing llg8212 loooosy luke.li mailravi02 mark mcclain mathieu-rohon mzmalick oshvartz qinchunhua rajat29 rajiv reedip rohitagarwalla ronak root rtmdk shihanzhang shubhendu sukhdev sunyandi trinaths vikram.choudhary vinkesh banka wangqi xhzhf xuanyandong xurong00037997 yaxuanwang zhang.lei zhangdebo zhangyanxian zhhuabj zhulingjie ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/CONTRIBUTING.rst0000644000175000017500000000103000000000000022327 0ustar00jamespagejamespage00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through 
GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/networking-odl ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/ChangeLog0000644000175000017500000021575500000000000021465 0ustar00jamespagejamespage00000000000000CHANGES ======= * Fix gate failure 16.0.0.0b1 ---------- * Remove references for unittest2 * Additional python2 removal cleanups * Remove networking-odl python2 jobs * Try deinit odl\_features in TestOdlFeaturesNoFixture setUpClass * Change function.func\_doc to function.\_\_doc\_\_ * Switch to Ussuri jobs * Remove the remaining neutron-lbaas related constants * Update master for stable/train 15.0.0 ------ * Bump pylint version to one that supports python3.7 15.0.0.0b1 ---------- * Remove unneeded Zuul branch matcher * use callback payloads for ROUTER\_CONTROLLER events * PDF documentation build * Fix double-digit ODL version number handling for devstack * Make functional jobs use the latest ODL versions * Make rally jobs use the latest ODL versions * Update api-ref location * Add Python 3 Train unit tests * py37: fix regex unknown escapes * Replace git.openstack.org URLs with opendev.org URLs * Make functional jobs using py3 * Convert filter\_metadata\_diff result to list * set\_ovs\_hostconfigs on py3 fails with TypeError * Update master for stable/stein * Blacklist bandit, bump neutron-lib and retire neutron-lbaas * stop using common db mixin * OpenDev Migration Patch * use trunk constants from neutron-lib * Fix HOST\_IP fetching method in local.conf.example * Replace openstack.org git:// URLs with https:// 14.0.0 ------ * Fix the misspelling of "available" * Use latest networking-sfc release * Add bgpvpn-vni as a supported extension for ODL BGPVPN driver * Use latest Oxygen release instead of snapshot 14.0.0.0b1 ---------- * Change openstack-dev to openstack-discuss * Remove unused ryu from lower constraints * fix 
typo mistakes * Add the project source code repository in README * [Trivial fix] Correct spelling error * [Trivial fix] Correct spelling error * Dont disable services that are not enabled * devstack: enable flow based tunnels for sfc * use context manager from neutron-lib * Deprecate the ceilometer driver * Use extras for ceilometer dependency * Remove test-requirements from functional tox env * Reorder Zuul jobs for better organization * Remove the duplicated doc8 executa * Remove openstack-tox-py35-with-neutron-lib-master * Increment versioning with pbr instruction * use include\_tasks instead of include * Remove extra publish-openstack-python-branch-tarball job * fix tox python3 overrides * Use constraints in tox venv * add local tox targets for pep8, py27 and py3 * opt in for neutron-lib consumption patches * Implement Baked Query * use is\_retriable from neutron-lib * Get ceilometer from pypi * Update Zuul config * Revert "pseudo port binding: teach agent aliveness" * Fix PEP8 tox environment * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * Removing unused client methods * Update reno for stable/rocky * Removing the fullstack tests 13.0.0 ------ * Remove dns-integration from NETWORK\_API\_EXTENSIONS * Ceilometer meters for ODL should use project id of admin * Narrow down tempest tests to relevant tests * Assure that PeriodicTask passes context as args * Consume DB retry decorator from neutron-lib 13.0.0.0b3 ---------- * update requirements for neutron-lib 1.18.0 * ODL l3 service provider * Enabled feature negotation for qos rules * Add docs requirements to venv * Remove outdated disclaimer from setup.cfg * Remove fullstack from the gate * Add Fluorine functional to the gate * Skip tempest test test\_snat\_external\_ip * Fixate pylint version, fullstack non-voting * use retry\_db\_errors from neutron-lib * Remove the v1 drivers * Add release notes link in README * Remove vpnservice conditional 
from pre test hook * Adopt the new enginefacade in networking-odl * Fixes for the CI * ODL feature fetch: parse configs * Remove untested ODL releases * Remove mock for odl\_features.init() * use CORE constant from neutron-lib plugins * Update README reference to latest OpenDaylight version * Remove old OpenDaylight build references * Remove nitrogen references as it is EOL * Clean over verbose import * Removing unused variable * Retry journal recording in L3 * Cleanup l3\_odl\_v2 code * Remove attribute db\_session from ODLBaseDbTestCase * Remove unnecesary cleanup * Simplify retry testing functions to only accept context * Remove session parameter from \_test\_reset\_processing\_rows * Update DB functions to handle contexts * Abstract how retriable methods are tested 13.0.0.0b2 ---------- * Add flourine jobs for the CI * Trivial: Update pypi url to new url * Make oxygen-functional/fullstack voting * Retire carbon jobs for rocky * Remove vagrant scripts from the repo * Make carbon jobs non-voting * use neutron-lib plugin utils * Add more information to the ovs flows dump * Add zuul role show-odl-info to fullstack * Change default loopingcall interval for tests * Update references of neutron services * Clean old output before new doc builds 13.0.0.0b1 ---------- * Enabling ML2 for new full sync and recovery mechanism * Follow the new PTI for document build * Revert temporary patch that disables linter errors locally * use rpc Connection rather than create\_connection * Add lower-constraints job * Add 'flat' network type to host config docs * Fix pep8 errors * Remove tox\_install.sh helper * Use ALIAS instead of LABEL for BGPVPN API * Remove usage of the reserved keyword id * Updated from global requirements * Better defaults for the debug context * fix a typo * Allow Tempest CI to add txt extensions to ini files * Fix CI issues that block the gate * use callback payloads for PRECOMMIT\_UPDATE events * Fix tox installation of neutron * Updated from global 
requirements * use plugin common utils from neutron-lib * devstack: update carbon definition for carbon SR-3 * Updated from global requirements * Imported Translations from Zanata * Remove incorrect DB retry decorators * ODL DHCP Port to be created only for IPv4 subnets * use common agent topics from neutron-lib * Fix log format for oxygen logging * Update mailmap * Switch to a blacklist file for test excludes * Log exceptions on security group callbacks * devstack: add nitrogen SR2 definition * Make fullstack and functional native Zuul v3 jobs * reno: Remove remote names from branch specifiers * Enable hacking-extensions H204, H205 * Imported Translations from Zanata * Update reno for stable/queens 12.0.0 ------ * Add version specific debug tox environments * Move zuul\_copy\_output to be a job variable * Move extensions\_to\_txt to the job defintion * Zuul: Remove project name * Replace Chinese quotes to English quotes * Fix OpenDaylight setup with oxygen * Use Zuul v3 fetch-subunit-output * Remove branches filter from jobs * Wait for worker start before testing in JournalPeriodicProcessorTest * devstack: update local.conf.example to use lib/neutron * Remove unit tests for ML2 Mechanism driver * Fix unit tests for ML2 Mechanism driver * Add devstack base jobs specific to the project * Add neutron's tempest plugin to the CI * Updated from global requirements * Fix missing parentid on rule delete journal record * zuulv3: add jobs for ODL oxygen master branch * Fixes SSL websocket disconnects with client 12.0.0.0b3 ---------- * Updated from global requirements * Fixes websocket to use TLS when ODL NB is TLS * Make tempest native Zuul v3 jobs * Improve IPC and forking reliability in tests * Fix missing variables in devstack/override-defaults * use multiprovidernet api definition from neutron-lib * Remove redundant exception * Base mechanism for recovery * Correct link address * modify spelling error of variable * Updated from global requirements * Pass 
binding:profile attribute as a string * Updated from global requirements * keystone spelling errors * Fix broken if in devstack/functions * Add show-odl-info role * Don't set use\_stderr = False for tests * devstack: remove nitrogen snapshot 0.7.1 * Fix dependency calculation when two fixed IPs under same subnet * Switch to get\_writer\_session * Remove "-y" option for package install command * Base mechanism for full sync * Don't truncate subnetpools from subnet filters * use callback payloads for \_SPAWN events * Add OS\_DEBUG to passenv of tox * Add default timeout for tests triggered by tox * Capture logging while running tests * Modify JournalPeriodicProcessor settings on tests at setUp * Add helper function for JournalPeriodicProcessor * Force maintenance task when it is started * Add pidfile to the JournalPeriodicProcessor worker * Force maintenance task on HUP on the worker * Add forced processing to the PeriodicTask * Move maintenance task to a worker * Add SIGHUP handling to journal periodic processing * Stoppable sync thread on OpenDaylightJournalThread * Use nitrogen snapshot for grenade job * Updated from global requirements * Disable cinder for grenade Job * Imported Translations from Zanata * Raise an exception for unsupported vif * Follow raising-format-tuple check * Fix 3rd party import order * Fix Inconsistent return statements * Fix unit tests py27, py35 * Add hacking to enforce the config fixture over direct overrides * Add a NOOP function * Removing JournalCleanup class 12.0.0.0b2 ---------- * Design for full sync and recovery of resources * Correct missleading example of l2gw in readme * Fix tests that were changing settings but not reverting * Make test\_periodic\_task.test\_back\_to\_back\_job more consistent * Add command line tool to analyze logs * Reduce timer for periodic task tests * devstack: add nitrogen-0.7.2 snapshot definition * devstack: add nitrogen-SR1 release definition * Switch to lib/neutron * Add pre\_test\_grenade\_hook * 
Add hacking to enforce the config fixture * Override settings with a config fixture * Added raw flag to regexp strings missing it * Enable networking-odl only once * Use requests.codes.XX constants instead of hardcoded constants * Update sample config to use v2 for l3 * Remove setting of version/release from releasenotes * Fix on PeriodicTask locking mechanism * Updated from global requirements * use l3 ext gw mode api def from neutron-lib * use l3 api def from neutron-lib * Remove JVM memory limitations by default * Cleanup of OpenDaylight on ./clean.sh * Initializing logging for set ovs hostconfig command * devstack: add error check to \_wget and \_xpath * use qos api def from neutron-lib * Do not use “-y” for package install * Rename Zuul jobs according to naming conventions * Remove boron job * Ignore rally nitrogen job for ocata branch * Revert skip of test l3 test case * tox/pep8: add bandit check * use command line arguments in the main method * Updated from global requirements * Log additional info about entries * Fix exception handling in journal * Zuul: add file extension to playbook path * Delete completed rows immediately when retention=0 * Have create\_pending\_row return the entry * Reusing context defined in base class * UT for testing urls for all the objects * Fixes error handling of DB calls * use ml2 driver api from neutron-lib * Ignore I202 in pep8 (flake8), skip Testodll3 * Fix to use . 
to source script files * Deprecated the V1 drivers 12.0.0.0b1 ---------- * use external net api def from lib * Imported Translations from Zanata * use addr pairs api def from lib * Fixes URL path for SFC v2 Driver * Correction in dependency calculation for port pair group * Correction in dependency calculation for port pair chains * Adding Zuul v3 migrated legacy jobs * Remove SCREEN\_LOGDIR from devstack * unblock fullstack/functional tests * Fixed vhost user prefix in test code & doc * devstack: add oxygen name * devstack: add carbon SR2 definition * devstack: update nitrogen snapshot 0.7.0 -> 0.7.1 * devstack: add nitrogen release definition * Added ODL installation flag * Updated from global requirements * consume common constants from lib * Trivial Fix: correct typo artifcat to artifact * devstack: teach how to handle latest ODL release * devstack: add oxygen-snapshot release definition * use new payload objects for \*\_INIT callbacks * Updated from global requirements * Updated from global requirements * Don't sleep on exception * Delete FWaaS * tests: fix ml2 plugin config path * change testrepository to stestr * devstack: set ODL\_GATE\_SERVICE\_PROVIDER to fullstack/functional tests * devstack: save ODL configuration file for debug * devstack: symlink odl logfile to $BASET/logs * odl-releases/README.rst: add nitrogen RC3 example * devstack: rename local NEXUSPATH to \_NEXUSPATH * fullstack: remove fullstack+carbon workaround * full/functest: remove screen usage * Cleanup registered plugins * fullstack: wait for network-topology/netvirt * devstack: setting ovsdb manager last * Removed unnecessary code * Updated from global requirements * Fix to use . 
to source script files * Update stable networking-odl release to pike * use synchronized decorator from neutron-lib * fullstack: load nicira extension early for carbon * devstack: show install ODL features * Full Sync: Moved resource fetching into drivers * devstack: add xpath into required packages * Carbon tempest CI fix: let ODL create br-int * bashate devstack shell scripts * Remove WebsocketBadStatusException * Fix: retry journal.record on dependency deleted * fullstack: skip test\_VM\_connectivity temporally * Update rows one by one in journal cleanup * Delete completed rows one by one in during cleanup * fullstack: don't install mysql/postgres * devstack: dump more odl restconf info for debug * Fixes db.delete\_row deletion by row\_id * devstack: dump group for debug * fullstack: test arping in addition to ping * pseudo port binding: teach agent aliveness * devstack: fix URL for karaf distribution * Update the documentation link for doc migration * Add reference deployment guide * Fix gate issue: br-int not getting controller * hardware offload support for openvswitch * Full Sync: Correction in bgpvpn assoc variable * Fix: tests were no longer avoiding journal calls * Updated from global requirements * Fixes SFCv2 full sync errors * devstack: try metadata in parent dir * devstack: examples of odl release definitions * hacking: check string for Opendaylight and noqa support * devstack: stop odl server after test * Fix in documentation on how to enable BGPVPN * db migration: create\_at in opendaylightjournal removed * Use maintenance interval for maintenance task * Fixes to PEP8 checks when running test with tox * fullstack: use v2driver * fullstack/functional test: setup neutron log * Update reno for stable/pike * update sample ml2\_conf\_odl.ini * devstack: remove boron snapshot release definition * Add configuration reference * tox.ini: ignore \*~ * devstack: remove useless cat * devstack: revise ODL log level * devstack: use karaf for Nitrogen snapshot * 
[Gate] Reduce SSH timeout for gate jobs * Remove WebTest from test requirements 11.0.0.0rc1 ----------- * pseudo agent: pre-populate agentdb if missing * add function disassociate\_floatingips to refresh floatingip information when delete port * [Gate] Reduce test load on tempest jobs * use neutron-lib for callbacks * Functional Test for OpenDaylight DHCP Service * Update subport status for trunk ports * Add Flag to support OpenDaylight DHCP Service in Devstack * Addition of driver class for lbaas driver * use common.utils.get\_odl\_url * unit: commit session after precommit * Enabling support for DHCP Service on OpenDaylight Controller * unit: use self.db\_context * test\_l3\_odl\_v2.py: use correct context/session * test\_mechanism\_odl\_v2: use given session * set-ovs-hostconfig: enable 'flat' by default * Updated from global requirements 11.0.0.0b3 ---------- * Move journal periodic processing to a worker * unit: fix up merge botch * Use neutron\_lib for qos driver\_base * Enabling support for DHCP Service on OpenDaylight Controller * full\_sync: use given session * recovery: use given session * pseudo agent port binding: use neutron worker * journal: use context instead of session for dhcp port service * Enable Placement-api for grenade job * Allowing lock to be applied per task basis * Rearranging the documentaion layout * pseudo agent port binding: log owner and device\_id * Add test\_connectivity test case in fakemachines * unit test: mock.patch before super.setUp * Updated from global requirements * Update URLs in documents according to document migration * Enable members of lbaas to use custom url builder * journal: partially implement sg/sgrule dependency * Use v2 driver for mechanism and l3 * Load port status update worker in V1 driver * Change dependency validation to calculation * Create journal dependencies table * unit: consolidate mocking start\_odl\_sync\_thread * devstack: add carbon SR1 release definition * Support for recovery of all resources 
* enable warning-is-error for sphinx build * Bug 1704057: port status upate: missing add of provisioning component * OpenDaylight Ceilometer Driver * journal: sleep when error * Use new Netvirt for dsvm-fullstack * [Gate] Remove releases older than Boron * Revert "Update Full stack config" * Update full stack test * Change journal entry selection to optimistic locking * Enable full-sync for the bgpvpn, lbaas, qos, sfc, trunk driver * Update Full stack config * functional tests: don't run journal timer * Allow user to specify own method to make url * Select new entry when validation fails * Fetching of session using get\_session is deprecated * Substantially improve SFC support documentation * tox\_install\_project.sh: Use git clone --depth 1 * pep8: use import-order-style * hacking: enable H106, H203 and H904 * new testenv to check Opendaylight * hacking: enforce OpenDaylight instead of Opendaylight * Switch from oslosphinx to openstackdocstheme * Capitalize D in OpenDaylight * fix up of I4a526ee84784ca6ff8061692437a8c874bb33d6a * unit test: precommit is called without commit * devstackgaterc: Enable n-api-meta * Initialize odl\_features in mech\_driver\_v1 * make odl\_features default to empty feature set * tox: enable pylint * Run OdlPortStatusUpdate only in one worker * Retrieve and process port status updates from ODL * Revert "devstack: bug workaround 1698129" * tox: add bash to externals for pep8 and bashate * Updated from global requirements * eliminate portbinding by ODL networking topology * Utilty for determining ODL neutron features * Correction in Resource URL Mapping * test\_l3\_odl: use odl l3 plugin, not neutron's * remove unused code * Use UUID for SG test * Use port bindings * use service type constants from neutron\_lib plugins * Send port[fixed\_ips] update to ODL Controller * devstack: add Boron SR4 release definition * mech\_driver\_v2: remove update\_security\_group work around * enable test\_security\_group\_update * Fix vhost string 
comparison * Fix config for grenade * V2.0 Driver for LBaaS V2.0 * Moving \_make\_odl\_url method to common utils * try tempest tests with floatingip and others * unbreak gate * Spec for blueprint neutron-port-dhcp * Using assertFalse(A) instead of assertEqual(False, A) * Revert "Allowing lock to be applied per operation basis" * tox: enable bashate * tox: generate config with pep8 11.0.0.0b2 ---------- * Updated from global requirements * Rename argument object\_id to object\_ids for \_no\_older\_operations * Add precommit calls to qos * devstack: use neutron hostconfig-ovs from 0.6.0 carbon * Added decorator for bgpvpn, ml2 and l2gateway for postcommit * The local.conf.example file in the master branch of networking-odl repo does not install the DLUX UI Karaf features needed for the ODL GUI * devstack: add release definition of 0.6.0 carbon * Updated from global requirements * Allowing lock to be applied per operation basis * Added decorator to assign postcommit method * Set Initial Status for FIP down * Add date and organization to copyright of script.py.mako * Updated from global requirements * Replaced neutron command with OpenStack commands * Updated from global requirements * Adding Websocket client for ODL * use MechanismDriver from neutron-lib * Updated from global requirements * Log exception when journal entry processing fails * Don't call journal's run\_sync\_thread in unit tests * don't use run\_process for odl * logging method call in ml2 driver * Logging method call for sfc driver * Add direction to known bandwidth\_limit\_rules parameters * Logging method call for trunk driver * Send MAC updates to ODL for SRIOV PFs * use requests.session to avoid http open/close * Stop translating log messages * consume neutron-lib callbacks * Removed extra call to \_fake\_trunk\_payload * Correct SUPPORTED\_RULES of QoS driver * BGPVPN V2 Driver - Moves journal call to precommit * Migrate neutron.plugins.common to neutron-lib * Remove notification\_driver from 
docs * Add functional tests for QoS * Remove QoS V1 driver * Adapt new driver base for QoS * Disable new N537 hacking check from next neutron-lib * enable new netvirt * release note on version bump to 11 * devstack: add release definitions for nitrogen * bgpvpn: update v2 driver to use precommit * devstack: odl\_snapshot\_full\_version misargument * Updated from global requirements * Updated from global requirements 11.0.0.0b1 ---------- * Added quickstart guide for networking-odl * Update sample conf * devstack: add boron SR3 release definition * Add Initialize Parent when OpenDaylightL3RouterPlugin is initialized * Fixing a typo in function and variable name * Updated from global requirements * remove workarounds in devstackgaterc * Fix unit tests * Remove subunit-trace fork * Revert "test-requirement: avoid sqlalchemy 1.1.5+" * Add unit test for sqltestcase * Simplify the query by using filter\_by * Send only data to dependency generators * test-requirement: avoid sqlalchemy 1.1.5+ * tox.ini: pass OS\_TEST\_DBAPI\_ADMIN\_CONNECTION * Restructure of qos driver * full\_sync: sync router before port * full sync: correct sync order of resource * secgroup: convert icmpv6 variant name into icmpv6 * Propose spec for dependency validations move * Correcting links in documentation * OVS connects to ODL using IP instead of hostname * Missing 's' in error message string * Removed old or un necessary configurations * Fix to correct Opendaylight trunk driver registration * Fix typos in set\_ovs\_hostconfigs.py * Updated from global requirements * Fix call to xpath which causes ODL download to fail on CentOS * Remove unused logging import * port binding: trim port\_prefix + PORT\_ID to 14 length * Functional tests for L2Gateway V2 Driver * Refactor journal main loop * Remove references of V1 driver * Use journal.record everywhere * bug work around: disable several test cases * spec: move completed spec to completed directory * Fix N536 hacking check from neutron-lib * Make 
ml2\_context optional in journal.record * tox: remove sitepackage=True * Functional tests for BGPVPN V2 Driver * Adapt new api from db\_api * Add deprecating warning for qos v1 driver * Fix ODL URL creation logic * Switch to neutron\_lib for context * Revert "odl bug: skip test\_port\_security\_macspoofing\_port" * TrivialFix: Move portbindings to neutron-lib * Updated from global requirements * odl bug: skip test\_port\_security\_macspoofing\_port * devstack: route for floatingip/ipv6 public range * Updated from global requirements * devstack: use localrc\_set and use local.conf * Fix neutron-odl-ovs-hostconfig failure on compute * devstack: enable placement-client for subnode * Enable placement-api for compute node to fix multinode tempest failure * tox.ini: allows to pass TRACE\_FAILONLY to ostestr * Drop MANIFEST.in - pbr doesn't need it * pylint: update .pylint * Revert "Add a method to query operations" * Update reno for stable/ocata * Adding a threshold for coverage 4.0.0 ----- * QoS V2 driver for ODL * OpenDaylight BGPVPN Version 2 Driver * put back TrunkDependencyValidationsTestCase * refactor test\_dependency\_validations * OpenStack Networking-SFC Ver.2 driver for ODL * Fix typos (sunbet -> subnet) * test\_dependency: sort retrieved journal rows * l2gw/dependency validator: missing comma * Add a method to query operations * Add/Update hostconfig examples for OVS-DPDK and VPP * devstack: show ODL neutron northbound data * delete sg rule on sg deletion on ODL neutron northbound * Updated from global requirements * L2Gateway version 2 driver for OpenDaylight * Fix typo in doc/source/installation.rst * devstack: configure external net for new netvirt * Remove support for py34 * tox: pass OS\_POST\_MORTEM\_DEBUGGER env * pseudo agent: don't set start\_flag * avoid ovsdb port conflict * Typo fix: choses to chooses * ODL Drivers for Vlan Aware VMs * Update link reference in README.rst * Fix typo in doc/source/specs/journal-recovery.rst * devstack: skip several 
test cases with v2driver and old netvirt * Typo fix: binded => bound * Fix typo in maintenance.rst * Update hacking version * Enable smoke tests on grenade job * Revert "devstack: disable metadata for rally" * Simplify dependency validations * Remove the register\_validator method * devstack: enable placement-api * Remove SG validations * devstack: remove trailing - in ODL\_GATE\_SERVICE\_PROVIDER * Remove link to modindex * Disable some tempest tests temporarily * devstack: show info even after tempest fails * Replace six.iteritems/itervalues with dict.items()/values() * Use neutron-lib portbindings api-def * Use neutron-lib provider net api-def * Use V2 driver by default in devstack * Updated from global requirements * devstack: make new netvirt default for ODL boron+ * devstack: check latest revision of ODL snapshot * Split tempest tests for V2 driver * run functional/fullstack tests with v2driver * devstack: run rally with v2driver * devstack: run rally with new netvirt * Revert "use osc-lib git master branch" * Fix failing stack on compute node * use osc-lib git master branch * devstack: make pseudo agent port binding as default * using sys.exit(main()) instead of main() * Update lbaas-driver-v2 releasenotes * fix some issues in legacy netvirt with mulitnode in carbon * java: update oracle java 8 jdk version * devstack: update NETWORK\_API\_EXTENSIONS * create Openstack with ODL by vagrant * remove unused self.url from qos driver * neutron-lib: use L3 constant from neutron-lib * fullstack - use configure\_for\_func\_testing.sh to setup env * functional test: documentation and relnotes * gitignore: ignore vagrant generated dir * Removes unnecessary utf-8 encoding * odl-release: update boron definition * Upgrade script for networking-odl * H803 hacking have been deprecated * fullstack: increase check\_flow\_existance retry times and intervals * Refactor config code for v2 tests * Show team and repository tags * devstack: disable metadata for rally * Remove 
q-dhcp from compute node q-dhcp service in compute node causes metadata proxy failure as: checking http://169.254.169.254/2009-04-04/instance-id failed 1/20: up 11.03. request failed failed 2/20: up 23.35. request failed * doc: unbreak build\_sphinx * functional: vagrantfile for functional test * configure\_for\_func\_testing: don't install rabbitmq * configure\_for\_func\_testing: don't use realpath * devstack: refactor install\_opendaylight * Enable networking-odl compute mode in subnode * Consolidate qos v1 driver classes * devstack multinode: disable some test cases * odl client: remove unnecessary except and log * increase odl http timeout(experiment) * Add seqnum to dependency checks * Fix typo in devstack/settings.odl * Fix devstack for fedora 25 * functional test: configure opendaylight * devstack: improve odl-release definition * Fix typo * devstack: tempest CI fails * mech driver v2: build dict for sg on update * Configure L3 for grenade job * Fix the update of qos-policy * Show team and repo badges on README * devstack: reorder mech driver for debug * Use system subunit command if python-subunit is installed globally * db: use neutron\_lib.db.model\_base * functional test: install acl package * Disable live migration tests * devstack: enable scenario test\_security\_groups\_basic\_ops * Fix fullstack CI * devstack: make tempest timeout longer * Update reno for stable/newton * Forgot to reference arch in docs index 3.1.0 ----- * Added basic L3 functional tests * Run ovs appctl execution of flows more times * devstack: show related info for debug * Fixed None reference in SG code in V2 * Add security groups basic functional tests * Add utils.neutronify * devstack: enable scenario test\_network\_basic * devstack: enable scenario test\_minium\_basic * devstack: exclude scenario tests known to fail * devstack: option for conntrack for old netvirt * prevent initial networks create on subnode * devstack: Stop setting route pointing back to tenant router * 
devstack: enable odl-neutron-logger by default * Switch to using plugins directory in lieu of neutron manager * devstack: disable configuring neutron on compute nodes * Added ML2 basic functional tests * Added FLAT type network * Complement the implementation of odl lbaas driver\_v2 * devstack: add odl-neutron-logger to ODL karaf feature * devstack: update beryllium release definition * Scripts to enable fullstack testing in gate * devstack: enable stably passing scenario tests * devstack: remove source devstackgaterc * fullstack test for networking-odl * devstack: enable c-api,c-bak,c-sch,c-vol,cinder * Fixed link to drivers architecture * Add installation guide for networking-odl * devstack: run tests with tempest run command * add dsvm-functional tests tox and gate\_hook * Allow forwarding of OS\_FAIL\_ON\_MISSING\_DEPS to test envs * Added initial reference architecture * Updated from global requirements 3.0.0 ----- * update Boron release definitions since SR1 release * devstack: create public network connectivity * mech v2: bug/1546910 work around * Excluding Tempest from Rally job to avoid failure * Updated from global requirements * Fix the implementation of ODLMemberManager in lbaas driver\_v2 * devstack: enable force config drive * Fix set\_ovs\_hostconfigs exit issue * Fix few typographical errors * Add a reference to Neutron Devref and Apiref * Remove last vestiges of oslo-incubator * Add grenade plugin * secgroup: pushdown default secgroup rules to ODL * Fix up some documentation quirks * sort values for OVERRIDE\_ENABLED\_SERVICES * Updated from global requirements * follow up for https://review.openstack.org/#/c/268820/ * Journal recovery for syncing with ODL * Add developer docs for ODL drivers * Added maintenance devref * tools: catch up neutron-lbaas change * releasenotes: deprecate lbaasv1 driver * releasenotes: vlan-transparency * Adding a line space for proper rendering of doc * Remove call to configure\_neutron\_odl in case of odl-compute and 
non-pseudo-agent port binding * Change import statement to not rename the module * alembic: db migration fails * OpenStack Networking-SFC driver for OpenDaylight * Updated from global requirements * Journal recovery release notes added * Full sync release notes added * Maintenace thread release notes added * Modify the "create" and "update" method in lbaas driver\_v2 * Release notes for psuedo agent port binding * Add instructions to enable qos * Release notes for Host Config * Imported Translations from Zanata * add test\_migrations test * tox: use ostestr * stop doing any magic cloning of neutron during CI * devstack: exclude tests that uses ssh * bug/1614766 work around * Wrapper method for client#sendjson * Bug 1608659 - pseudo\_agentdb\_binding AttributeError * transparent\_vlan support * trivial fix * devstack: call odl-ovs-hostconfig conditionally * Remove dependency from neutron and move main() to the end * Improve README * journal: filter.py: AttributeError: 'NoneType' * Fix typo in DB migration script * journal: created\_at of second is too coarse * Enable release notes translation * Updated from global requirements * devstack: setup hostconfig on compute node * Cloud admin script auto-config hostconfig defaults * Fix a typo in documentation * Add E123,E125 check and Solve several problems * Add Apache 2.0 license to source file * Fix a typo in override-defaults * Updated from global requirements * remove add/remove router interface to ODL * Updated from global requirements * Include alembic migrations in module * security group rule: convert unknown protocol name * Update homepage with developer documentation page * simplify odl release definition * use v2 driver for v2 test * rest client: move parameter check to rest client * update odl release definitions for Boron 0.5.0 * Journal recovery for basic scenarios * unbreak test\_l3\_odl failure * sqlite: datetime should be second precision * test: journal/maintenance db clean up properly * 
test\_maintenance: cleanup looping call * populate json with both 'project\_id', 'tenant\_id' * models: use neutron\_lib.db.model\_base * more bug work around of Moxy bug of 475475 * Sync Security Groups and SG Rules before other resources * journal.record should use callers plugin context * Revert "Temporary fix for gate" * tests/unit: consolidate journal db setup logic * Add qos extension to devstack override defaults * Temporary fix for gate * Treat ODL's 404 hostconfigs, as an empty list On pseudo agent db, when odl response with 404 not\_found for hostconfigs, treat it as an empty list rather then an error. Issue a debug log in this flow * ml2 v1 driver: work around full\_sync * Updated from global requirements * nuke lbaasv1 driver * devstack: make NEXUSPATH configurable * ODL QoS driver of v1 Type * Pass OptGroup variable for RequiredOptError * Remove reference to neutron.i18n * Fix the members name in OpenDaylightLbaasDriverV2 * Change the OpenDaylightManager url\_path * wrap\_db\_retry: retry\_on\_request was deprecated * add the synchronization between neutron and ODL in driver\_v1 * use carbon snapshot as default odl-release * devstack: add carbon snapshot to odl-releases * Enable DeprecationWarning in test environments * test\_pseudo\_agentdb\_binding.py: adopt neutron\_lib * journal: port::securitygroup needs only ids * db: add Mitaka tag for alembic migration revisions * devstack: refactor ODL\_L3 logic depending on used feature * Revert "devstack: setup hostconfig in ovsdb" * Updated from global requirements * pseudo agent, devstack: hostconf\_uri is set to '' * Run set\_ovs\_hostconfigs as root with neutron-utils * devstack:ovsdbd doesn't understand localhost * Add Python 3.5 classifier and venv * pseudo agent fails to load with unit test * devstack: remove the definition of lithium snapshot * devstack: setup hostconfig in ovsdb * legacy\_port\_binding: teach VNIC type * pep8, unittest: unbreak gate failure * tox: add doc8 check * devstack: the 
definition of beryllium snapshot 0.4.4 * devstack: add definition of beryllium SR3 * devstack: add more gate jobs * settings.odl: eliminate 0.4.2-snapshot which was deleted * Add releasenotes support with reno * various update for tox.ini * devstack: configurable ODL repositories paths * devstack: add specific ODL snapshot functionality * devstack: allow to enable v2 driver * devstack: remove beryllium snapshot 0.4.2 * devstack: load neutron-northbound-service first * refactor odl-release definition * Replace assertEqual(None, \*) with assertIsNone in tests * Remove discover from test-requirements * Fix bug in call to get\_network - missing network\_id param * devstack: remove optional bridge configuration * Fix the order of arguments in assertEqual * Support for ovs-dpdk, vpp in port binding * Updated from global requirements * fix \_enrich\_port() to return the modified "data" * Add name property to ENUM type * Fix devstack README indentation * Corrects pep8 failure in set\_ovs\_hostconfigs.py * Add \_\_ne\_\_ built-in function * Fix tox unit test issue * Switch-agnostic ODL port binding controller * Add Spec for QoS driver * Adding Host Config doc * devstack: support bridge configuration, for vpnservice-openstack * make dependency validator dynamically registerable * create dir, doc/specs, for spec * Updated from global requirements * Fix db error when running python34 Unit tests * adopt neutron\_lib for constants and exceptions * Remove unused Params * Fixed test\_mechanism\_odl.py due to functions map deprecation in ML2 * Full Sync for L3 resources * Add a hook for test debug * more bug work around of Java MOXy bug of 475475 * Use already defined constants * Revert "Workaround to fix gate py27 and py34 issue." 
* Spec for journal recovery * Updated from global requirements * drop unnecessary exec permission * Refactor SG callbacks * beryllium SR2 definition * Simplify filtering logic * ODL v2: Full sync resources * Complete port details by journal instead of mech driver * [Trivial Fix] Correct log.debug() format * Add cleanup operation to maintenance thread * Journal entries can get stuck forever causing busy wait * Add journal maintenance thread * Updated from global requirements 2.0.0 ----- * Provide driver in ODL for L2Gateway * Workaround to fix gate py27 and py34 issue * Switch to using hacking checks from neutron-lib * Fix race between event write and thread processing * ODL v2: Fix delay in sync pending rows * move beryllium snapshot to 4.2 from 4.1 * Clean up odl releases definition * Remove useless argument * Reduce update db row code duplication * Service Function Chaining Driver for OpenDaylight * correct config help message format * Updated from global requirements * Make port binding implementation configurable * Fix journal row locking * ODL v2: Fix multiple updates race * Reduce dependency validations code duplication * Replace operation magic strings by constants * Move validations to seperate module * Moved to package networking\_odl.journal * tox.ini: show-source and ignore in hacking are unnessary * Fix N231 error about '\_' using with \_i18n lib * devstack: allow to override ML2\_L3\_PLUGIN * Preserve existing environment when adding JAVA ppa * Reduce test code duplication * devstack: add odl beryllium 0.4.1 SR1 definition * devstack: remove stale snapstho definition * devstack: refactor release definition * Remove unused method * devstack: switch default odl version to beryllium snapshot * ODL v2: Improve L3 validation * Install OpenJDK using yum\_install * No need to convert mac address to upper case anymore * Install networking-odl in develop mode * Cleanup unused oslo-incubator code * devstack: release definition for lithium 0.3.4-SR4 and 0.3.5 
snapshot * Migrate to oslo.context from Oslo incubator * Select current java by setting PATH variable * Setup using the last Oracle JDK 8 * Prevent unit tests from accidentally connecting to OpenDaylight * Introduce security group callback PRECOMMIT functionality * Improve validation in the V2 mechanism driver * ODL v2: Security Group support * Revert "bug work around of bug #1545218" * devstack: add odl release definition for lithium 0.3.4 snapshot * Pass all tests with Python 3.4 and Tox * ODL v2: Assign row back to pending after validation failure * devstack: add definition of lithium SR[123] * Fix the coverage issue * devstack/settings.odl: document definitions of new release/snapshot * Fix link address typo error in beryllium-0.4.0 * Lightweight testing to test neutron/networking-odl without ODL * Q\_ML2\_PLUGIN\_MECHANISM\_DRIVERS should not always be overrode * devstack: add odl release definition for Beryllium 0.4.1 snapshot * Nit: Occurances of Openstack * bug work around of Java MOXy bug of 475475 * bug work around of bug #1545218 * devstack: add odl release definition for Beryllium release * devstack: add odl release definition for boron snapshot * Opendaylight L3 service plugin refactor to handle out of sync issues * Timestamps out of sync in the V2 driver * Move the IP address from physical interface to the OVS physical bridge * Enable vhost-user ports on supported platforms * Show text of response message when failed * Improve Testing.rst * add snapshots to the list of directories that need to be removed * Updated from global requirements * floatingip's status doesn't change on disassociation * drop unnecessary executable permission * mock shouldn't return global value * Add rally-jobs directory * Add fixed\_ips fields to update port operation * mech\_driver: don't send post request to create none resource * Instance creation fails with the new V2 driver * drop unnecessary executable permission * devstack: use odl lithium snapshot 0.3.3 instead of 
0.3.1 * devstack: make odl logging friendly for gate job * Allow skipping installation of Open vSwitch * Fix the typo in message correctly * Fix the format of README.rst for devstack * Reinstate PUBLIC\_BRIDGE as a way of adding interface to PUBLIC\_BRIDGE * Correct typo in comment * ODL internal error with allowed\_address\_pairs * Update Oracle's JDK url to 1.8.0\_66 * Pass environment variable of proxy to tox * Fix the odl-router entry point * Don't use install\_package when handling failures * Use existing java env * Add ODL\_OVS\_MANAGERS to support clustering * Use ODL Provider Mappings to instruct ODL to add port to bridges * Detect and setup required java version in devstack * Use ODL stable/lithium (aka SR3) by default * Updated from global requirements * OpenDaylightTestCase replaces sendjson permenently instead of mock * Fix up issues after decomposition * Opendaylight driver refactor to handle out of sync issues * Correct the developers guide link * Decompose mechanism driver out of neutron completely * Add ODL\_BOOT\_WAIT\_URL to odl-releases/beryllium-snapshot-0.4.0 * Update import oslo\_serialization/utils for config-ref generation * remove unnecessary use\_stderr=True in \_\_init\_\_.py * Update import of oslo.config * db: prepare scripts for subproject db tables * make tempest.api.network.test\_extensions.ExtensionsTestJSON pass * tox.ini: Fix cover by giving the source directory explicitly * .coveragerc: Fix paths * Stale OF entries retained in br-int * client: consolidate odl client creation * Change ignore-errors to ignore\_errors 1.0.1 ----- * requirements: Move neutron requirement into tox.ini 1.0.0 ----- * Updated from global requirements * update the args when init SubnetContext obj * odl client: gracefully ignore 404 when deleting * mark out-of-sync when failure in sync\_from\_callback * Updated from global requirements * l3\_odl: put request for remove\_interface, not delete * Support delegation of bind\_port to networking-odl backend 
driver * Tweak CI configuration a bit * Make ODL\_NETVIRT\_DEBUG\_LOGS disabled by default, enabled in pre\_test\_hook.sh * Updated from global requirements * Switch to using Opendaylight L3 in the gate * Major overhaul of plugin.sh and ODL settings * Fix unit tests * Remove quantum untracked files from .gitignore * Corrected URL information * py34: Add support for python34 jobs * Updated from global requirements * Updated from global requirements * Misc fixes for networking ODL in devstack * Wipe out the journal directory, in addition to data directory * Use url to check if ODL is fully initialized * mech\_driver: full\_sync uses unrelated context for resources * Updated from global requirements * IS\_GATE should be disabled by default, set to True in devstack/pre\_test\_hook.sh * workaround: l3 plugin misses dvr\_deletens\_if\_no\_port method * lbaas: Fix incorrect url path * l3\_odl: delete\_router results in exception * requirements: Move neutron-[fwaas,lbaas] requirement into tox.ini * define ODL\_NAME unconditionally * Add Beryllium and Lithium Stable support * Adding unittest for l3 to create,remove,update a router, a floatingip and an interface * devstack: Add devstackgaterc file * Overhaul pre\_test\_hook * Updated from global requirements * Updated from global requirements * Refactoring post infra changes * Bug 1466917: Explicitly set link up PUBLIC\_INTERFACE after br-ex add * Update version for Liberty 1.0.0a0 ------- * We should whitelist bash rather than sh * Add pre\_test\_hook.sh script * Replace stackforge with openstack * Be kind, and tell us where it went wrong if you please * Add capability to save more than 10 logfiles per CI run * Updated from global requirements * mech\_driver: don't pass empty string as tenant\_id to ODL * Update .gitreview file for project rename * Send mtu and vlan\_transparent for network operations * Give the JVM more juice * Add logic to pass sg and sg-rules to ODL * Use latest Lithium daily build, move 
ODL\_NETVIRT\_KARAF\_FEATURE * Get upstream CI job working * devstack: unzip -u may wait for user input * devstack/settings.odl: syntax error * import error by l3\_odl * devstack: OFFLINE=True is ignored * install\_opendaylight: eliminate unused local variable, \_pwd * Make karaf file available as log artifact * Use latest Helium from daily build instead of unstable Lithium * De-clutter plugin.sh from release specific logic * Add create br-ex in odl\_compute nodes * Use ODL\_RELEASE instead of version number to determine release * Fix setting up of ODL package variables * Update version to 2015.1.2 to make pbr happy * Add logic to use the latest Lithium release * Revert "Add logic to pass sg and sg-rules to ODL" * Fix incorrect add/remove router\_interface calls * Add logic to pass sg and sg-rules to ODL * Add an example local.conf * Expose Karaf feature used by Opendaylight's net-virt * Send router\_interface add/remove calls to ODL * Switch from neutron.common.log.log to oslo\_log.helpers.log\_method\_call * Fix broken unit tests for networking-odl 2015.1.1 -------- * Add instructions for configuring LBaaS V2 with ODL * First cut at LBaaS V2 driver for ODL * Remove session\_timeout parameter * Add new LBaaS V2 API shim * Bump version to 2015.1.1 * Update to distribution-karaf-0.2.3-Helium-SR3 * Add unit tests for L3, LBaaS, and FWaaS * Update L3, LBaaS, and FWaaS code * Remove vlan\_transparent and mtu for network APIs * Correct test\_update\_port\_mac test * Sync the latest oslo incubated libraries and use oslo.log * Revert "Limit the tempest tests we run" * Update ODL port logic to work with Lithium (cont.) 
* Limit the tempest tests we run * Update ODL port logic to work with Lithium * Wipe out the data directory * Allow the java memory parameters to be configurable * Fix karaf logging * Fix the check/merge jobs for ODL * Update oslotest version in test-requirements.txt * Fix documentation files * Few corrections in devstack/settings in networking-odl * Use git.openstack.org URL in devstack README * Fix plugin.sh to handle OFFLINE=True mode * Add odl-router entrypoint * ODL\_MODE for plugin settings * Use HTTP BASIC AUTH exclusively (no longer use JSESSIONID) * Add pluggable devstack for networking-odl * Fix oslo imports * Use in-module ODL driver * Fix the syntax of the cache module * This adds existing L3, LBaas and FWaaS drivers * Re-enable check for @author tag * Bump hacking and allow author tags * Fix unit tests for networking-odl * Thin networking-odl driver * Establish a successful baseline for CI jobs * Fixups post split commit * Rename module * Updated from global requirements * Move classes out of l3\_agent.py * Prettify tox output for functional tests * Services split, pass 2 * Remove TODO for H404 * Updated from global requirements * Use comments rather than no-op string statements * Workflow documentation is now in infra-manual * tox.ini: Prevent casual addition of bash dependency * Updated from global requirements * Get rid of py26 references: OrderedDict, httplib, xml testing * Updated the README.rst * pretty\_tox.sh: Portablity improvement * test\_dhcp\_agent: Fix no-op tests * Enable undefined-loop-variable pylint check * Fix incorrect exception order in \_execute\_request * Migrate to oslo.i18n * Migrate to oslo.middleware * Migrate to oslo.utils * Remove Python 2.6 classifier * Remove ryu plugin * Updated from global requirements * Show progress output while running unit tests * enable H401 hacking check * enable H237 check * Updated from global requirements * Updated from global requirements * Update i18n translation for neutron.agents log msg's * 
enable F812 check for flake8 * enable F811 check for flake8 * Support pudb as a different post mortem debugger * switch to oslo.serialization * Add rootwrap filters for ofagent * Remove openvswitch core plugin entry point * Updated from global requirements * Use correct base class for unit tests for ML2 drivers * Updated from global requirements * enable F402 check for flake8 * enable E713 in pep8 tests * Hyper-V: Remove useless use of "else" clause on for loop * Enable no-name-in-module pylint check * Updated from global requirements * Remove duplicate import of constants module * Switch run-time import to using importutils.import\_module * Enable assignment-from-no-return pylint check * tox.ini: Avoid using bash where unnecessary * Empty files should not contain copyright or license * Remove single occurrence of lost-exception warning * Updated fileutils and its dependencies * remove E251 exemption from pep8 check * mock.assert\_called\_once() is not a valid method * Add pylint tox environment and disable all existing warnings * Updated from global requirements * Ignore top-level hidden dirs/files by default * Remove some duplicate unit tests * Drop sslutils and versionutils modules * Removed kombu from requirements * Updated from global requirements * Updated from global requirements * Remove sslutils from openstack.common * remove linuxbridge plugin * Open Kilo development * Implement ModelsMigrationsSync test from oslo.db * Fix entrypoint of OneConvergencePlugin plugin * Set dsvm-functional job to use system packages * Separate Configuration from Freescale SDN ML2 mechanism Driver * Remove @author(s) from copyright statements * Stop ignoring 400 errors returned by ODL * Updated from global requirements * Adds ipset support for Security Groups * Add requests\_mock to test-requirements.txt * Removed kombu from requirements * Supply missing cisco\_cfg\_agent.ini file * Updated from global requirements * Work toward Python 3.4 support and testing * Revert "Cisco 
DFA ML2 Mechanism Driver" * Big Switch: Separate L3 functions into L3 service * Remove reference to cisco\_cfg\_agent.ini from setup.cfg again * Adds router service plugin for CSR1kv * Support for extensions in ML2 * Cisco DFA ML2 Mechanism Driver * Adding mechanism driver in ML2 plugin for Nuage Networks * Fix state\_path in tests * Remove ovs dependency in embrane plugin * Use lockutils module for tox functional env * Updated from global requirements * Add unit tests covering single operations to ODL * Add specific docs build option to tox * Fix bigswitch setup.cfg lines * Remove auto-generation of db schema from models at startup * Updated from global requirements * Use jsonutils instead of stdlib json * Opencontrail plug-in implementation for core resources * Add delete operations for the ODL MechanismDriver * Add a tox test environment for random hashseed testing * Updated from global requirements * Remove reference to cisco\_cfg\_agent.ini from setup.cfg * Removed configobj from test requirements * Updated from global requirements * Functional tests work fine with random PYTHONHASHSEED * Set python hash seed to 0 in tox.ini * Configuration agent for Cisco devices * Updated from global requirements * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2 * This patch changes the name of directory from mech\_arista to arista * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1 * Allow to import \_LC, \_LE, \_LI and \_LW functions directly * Make readme reference git.openstack.org not github * Bump hacking to version 0.9.2 * Use auth\_token from keystonemiddleware * Remove reference to setuptools\_git * Add a gate-specific tox env for functional tests * Add CONTRIBUTING.rst * Updated from global requirements * Updated from global requirements * Updated from global requirements * Fix example for running individual tests * Switch to using of oslo.db * remove unsupported middleware * Add config for performance gate job * Synced log 
module and its dependencies from olso-incubator * don't ignore rules that are already enforced * Updated from global requirements * Updated from global requirements * ofagent: move main module from ryu repository * Remove the useless vim modelines * Removed 'rpc' and 'notifier' incubator modules * Use openstack.common.lockutils module for locks in tox functional tests * Port to oslo.messaging * Updated from global requirements * Ignore emacs checkpoint files * Added missing core\_plugins symbolic names * remove pep8 E122 exemption and correct style * remove E112 hacking exemption and fix errors * Updated from global requirements * Freescale SDN Mechanism Driver for ML2 Plugin * Remove run-time version checking for openvswitch features * Added missing plugin .ini files to setup.cfg * Updated from global requirements * Synced jsonutils from oslo-incubator * Cisco APIC ML2 mechanism driver, part 2 * NSX: get rid of the last Nicira/NVP bits * Allow vlan type usage for OpenDaylight ml2 * Add missing translation support * Add mailmap entry * Ensure core plugin deallocation after every test * Updated from global requirements * Remove explicit dependency on amqplib * Remove duplicate module-rgx line in .pylintrc * Fix H302 violations * Updated from global requirements * Improve ODL ML2 Exception Handling * Updated from global requirements * Exclude .ropeproject from flake8 checks * Enable flake8 E711 and E712 checking * Enforce required config params for ODL driver * Updated from global requirements * Sync service and systemd modules from oslo-incubator * Move bash whitelisting to pep8 testenv * ML2: ODL driver sets port status * Fix Jenkins translation jobs * ignore build directory for pep8 * Enable hacking H301 check * Updated from global requirements * Remove last parts of Quantum compatibility shim * Open Juno development * Start using oslosphinx theme for docs * Updated from global requirements * ML2: Remove validate\_port\_binding() and unbind\_port() * add HEAD 
sentinel file that contains migration revision * Bugfix and refactoring for ovs\_lib flow methods * Updated from global requirements * Updated from global requirements * Updated from global requirements * One Convergence Neutron Plugin l3 ext support * One Convergence Neutron Plugin Implementation * BigSwitch: Add SSL Certificate Validation * Updated from global requirements * Add OpenDaylight ML2 MechanismDriver * Implementaion of Mechanism driver for Brocade VDX cluster of switches * Implement Mellanox ML2 MechanismDriver * Implement OpenFlow Agent mechanism driver * Finish off rebranding of the Nicira NVP plugin * BigSwitch: Add agent to support neutron sec groups * Adds the new IBM SDN-VE plugin * Updated from global requirements * Developer documentation * Rename Neutron core/service plugins for VMware NSX * Updated from global requirements * Sync minimum requirements * Copy cache package from oslo-incubator * Remove dependent module py3kcompat * Add migration support from agent to NSX dhcp/metadata services * Remove psutil dependency * LBaaS: move agent based driver files into a separate dir * mailmap: update .mailmap * Return request-id in API response * Prepare for multiple cisco ML2 mech drivers * Support building wheels (PEP-427) * Use oslo.rootwrap library instead of local copy * Enables BigSwitch/Restproxy ML2 VLAN driver * Add an explicit tox job for functional tests * Base ML2 bulk support on the loaded drivers * Enable hacking H233 rule * Update RPC code from oslo * Configure plugins by name * Update lockutils and fixture in openstack.common * Rename nicira configuration elements to match new naming structure * Remove unused imports * Rename check\_nvp\_config utility tool * Corrects broken format strings in check\_i18n.py * Updates tox.ini to use new features * Updated from global requirements * Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2 * Add fwaas\_driver.ini to setup.cfg * Add vpnaas and debug filters to setup.cfg * Updates 
.gitignore * Update Zhenguo Niu's mailmap * Replace stubout with fixtures * Ensure get\_pid\_to\_kill works with rootwrap script * Updated from global requirements * Cleanup HACKING.rst * Fix import log\_handler error with publish\_errors set * Updated from global requirements * Updated from global requirements * Cleanup and make HACKING.rst DRYer * Add support for managing async processes * Remove obsolete redhat-eventlet.patch * Open Icehouse development * Updated from global requirements * Require oslo.config 1.2.0 final * Use built-in print() instead of print statement * Add l2 population base classes * Fix message i18n error * Install metering\_agent.ini and vpn\_agent.ini * fix conversion type missing * Enclose command args in with\_venv.sh * ML2 Mechanism Driver for Cisco Nexus * Reference driver implementation (IPsec) for VPNaaS * Implement ML2 port binding * Arista ML2 Mechanism driver * ML2 Mechanism Driver for Tail-f Network Control System (NCS) * Default to not capturing log output in tests * Add Neutron l3 metering agent * Update mailmap * Fix wrong example in HACKING.rst * Bumps hacking to 0.7.0 * remove binaries under bin * Fixes Windows setup dependency bug * Restore Babel to requirements.txt * Remove DHCP lease logic * Remove last vestiges of nose * Updated from global requirements * Ignore pbr\*.egg directory * Fix H102, H103 Apache 2.0 license hacking check error * Remove openstack.common.exception usage * Adds Babel dependency missing from 555d27c * Fix the alphabetical order in requirement files * Remove comments from requirements.txt (workaround pbr bug) * remove netifaces dependency of ryu-agent * Add gre tunneling support for the ML2 plugin * Add VXLAN tunneling support for the ML2 plugin * xenapi - rename quantum to neutron * Fix issue with pip installing oslo.config-1.2.0 * Initial Modular L2 Mechanism Driver implementation * Add cover/ to .gitignore * fix some missing change from quantum to neutron * git remove old non-working packaging 
files * Rename Quantum to Neutron * Rename quantum to neutron in .gitreview * Sync install\_venv\_common from oslo * Update to use OSLO db * Require greenlet 0.3.2 (or later) * Remove single-version-externally-managed in setup.cfg * Fix single-version-externally-mananged typo in setup.cfg * Allow use of lowercase section names in conf files * Require pbr 0.5.16 or newer * Update to the latest stevedore * Rename agent\_loadbalancer directory to loadbalancer * Remove unit tests that are no longer run * Update with latest OSLO code * Remove explicit distribute depend * Fix and enable H90x tests * Remove generic Exception when using assertRaises * Add \*.swo/swp to .gitignore * python3: Introduce py33 to tox.ini * Rename README to README.rst * Rename requires files to standard names * Initial Modular L2 plugin implementation * Revert dependency on oslo.config 1.2.0 * Perform a sync with oslo-incubator * Require oslo.config 1.2.0a2 * update mailmap * Revert "Fix ./run\_tests.sh --pep8" * Move to pbr * Docstrings formatted according to pep257 * relax amqplib and kombu version requirements * Fix ./run\_tests.sh --pep8 * blueprint mellanox-quantum-plugin * Update flake8 pinned versions * Let the cover venv run individual tests * Copy the RHEL6 eventlet workaround from Oslo * Remove locals() from strings substitutions * Enable automatic validation of many HACKING rules * Shorten the path of the nicira nvp plugin * Allow pdb debugging in manually-invoked tests * Reformat openstack-common.conf * Switch to flake8 from pep8 * Parallelize quantum unit testing: * blueprint cisco-single-config * Add lbaas\_agent files to setup.py * Add VIRTUAL\_ENV key to enviroment passed to patch\_tox\_env * Pin SQLAlchemy to 0.7.x * Sync latest Oslo components for updated copyright * drop rfc.sh * Replace "OpenStack LLC" with "OpenStack Foundation" * First havana commit * remove references to netstack in setup.py * Switch to final 1.1.0 oslo.config release * Update to Quantum Client 2.2.0 * 
Update tox.ini to support RHEL 6.x * Switch to oslo.config * Add common test base class to hold common things * Pin pep8 to 1.3.3 * Add initial testr support * LBaaS Agent Reference Implementation * Bump python-quantumclient version to 2.1.2 * Add scheduling feature basing on agent management extension * Remove compat cfg wrapper * Unpin PasteDeploy dependency version * Use testtools instead of unittest or unittest2 * Add midonet to setup.py * Sync latest install\_venv\_common.py with olso * Add check-nvp-config utility * Add unit test for ryu-agent * Use oslo-config-2013.1b3 * Adds Brocade Plugin implementation * Synchronize code from oslo * PLUMgrid quantum plugin * Update .coveragerc * Allow tools/install\_venv\_common.py to be run from within the source directory * Updated to latest oslo-version code * Use install\_venv\_common.py from oslo * Cisco plugin cleanup * Use babel to generate translation file * Update WebOb version to >=1.2 * Update latest OSLO * Adding multi switch support to the Cisco Nexus plugin * Adds support for deploying Quantum on Windows * Latest OSLO updates * Port to argparse based cfg * Add migration support to Quantum * Undo change to require WebOb 1.2.3, instead, require only >=1.0.8 * .gitignore cleanup * Upgrade WebOb to 1.2.3 * Logging module cleanup * Add OVS cleanup utility * Add tox artifacts to .gitignore * Add restproxy.ini to config\_path in setup.py * Add script for checking i18n message * l3 agent rpc * Add metadata\_agent.ini to config\_path in setup.py * Remove \_\_init\_\_.py from bin/ and tools/ * add metadata proxy support for Quantum Networks * Use auth\_token middleware in keystoneclient * Add QUANTUM\_ prefix for env used by quantum-debug * Make tox.ini run pep8 checks on bin * Explicitly include versioninfo in tarball * Import lockutils and fileutils from openstack-common * Updated openstack-common setup and version code * Ensure that the anyjson version is correct * Add eventlet\_backdoor and threadgroup from 
openstack-common * Add loopingcall from openstack-common * Added service from openstack-common * Drop lxml dependency * Add uuidutils module * Import order clean-up * pin sqlalchemy to 0.7 * Correct Intended Audience * Add OpenStack trove classifier for PyPI * Improve unit test times * l3\_nat\_agent was renamed to l3\_agent and this was missed * Support for several HA RabbitMQ servers * add missing files from setup.py * Create .mailmap file * Lower webob dep from v1.2.0 to v1.0.8 * Implements agent for Quantum Networking testing * Create utility to clean-up netns * Update rootwrap; track changes in nova/cinder * Execute unit tests for Cisco plugin with Quantum tests * Add lease expiration script support for dnsmasq * Add nosehtmloutput as a test dependency * quantum l3 + floating IP support * Updates pip requirements * NEC OpenFlow plugin support * remove old gflags config code * RPC support for OVS Plugin and Agent * Initial implemention of MetaPlugin * RPC support for Linux Bridge Plugin and Agent * Exempt openstack-common from pep8 check * fix bug lp:1025526,update iniparser.py to accept empty value * Introduce files from openstack common * fix bug lp:1019230,update rpc from openstack-common * implement dhcp agent for quantum * Use setuptools git plugin for file inclusion * Remove paste configuration details to a seperate file. blueprint use-common-cfg * Implements the blueprint use-common-cfg for the quantum service. 
More specifically uses global CONF for the quantum.conf file * Add authZ through incorporation of policy checks * Bug #1013967 - Quantum is breaking on tests with pep 1.3 * Use openstack.common.exception * API v2: mprove validation of post/put, rename few attributes * Add API v2 support * Fix up test running to match jenkins expectation * Add build\_sphinx options * Quantum should use openstack.common.jsonutils * Remove hardcoded version for pep8 from tools/test-requires * Quantum should use openstack.common.importutils * PEP8 fixes * Bug #1002605 * Parse linuxbridge plugins using openstack.common.cfg * Add HACKING.rst to tarball generation bug 1001220 * Include AUTHORS in release package * Change Resource.\_\_call\_\_() to not leak internal errors * Removed simplejson from pip-requires * Remove dependency on python-quantumclient * Add sphinx to the test build deps * Add HACKING.rst coding style doc * bug 963152: add a few missing files to sdist tarball * Fix path to python-quantumclient * Split out pip requires and aligned tox file * Fix missing files in sdist package [bug 954906] * Downgraded required version of WebOb to 1.0.8 * more files missing in sdist tarball * make sure pip-requires is included in setup.py sdist * remove pep8 and strict lxml version from setup.py * plugin: introduce ryu plugin * bug 934459: pip no longer supports -E * blueprint quantum-ovs-tunnel-agent * Initial commit: nvp plugin * Cleanup the source distribution * blueprint quantum-linux-bridge-plugin * Remove quantum CLI console script * Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires) * Make tox config work * Pin versions to standard versions * Split out quantum.client and quantum.common * Quantum was missing depend on lxml * moving batch config out of quantum-server repo * Getting ready for the client split * Removed erroneous print from setup.py * Base version.py on glance * Fix lp bug 897882 * Install a good version of pip in the 
venv * Rename .quantum-venv to .venv * Remove plugin pip-requires * Bug #890028 * Fix for bug 900316 * Second round of packaging changes * Changes to make pip-based tests work with jenkins * Fix for bug 888811 * Fix for Bug #888820 - pip-requires file support for plugins * blueprint quantum-packaging * Add .gitreview config file for gerrit * Add code-coverage support to run\_tests.sh (lp860160) 2011.3 ------ * Add rfc.sh to help with gerrit workflow * merge tyler's unit tests for cisco plugin changes lp845140 * merge salv's no-cheetah CLI branch lp 842190 * merge sumit's branch for lp837752 * Merging latest from lp:quantum * Merging lo:~salvatore-orlando/quantum/quantum-api-auth * Updating CLI for not using Cheetah anymore. Now using a mechanism based on Python built-in templates * Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions * Merging from Cisco branch * Merging from lp:quantum * merge cisco consolidated plugin changes * Merging lp:~salvatore-orlando/quantum/bug834449 * merge trunk * Merging from lp:quantum * merge salvatore's new cli code * Addressing comments from Dan * Merging from quantum * merge cisco extensions branch * Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review * Syncing with Cisco extensions branch * Merging from Sumit's branch, import ordering related changes * Merging the Cisco branch * Finishing cli work Fixing bug with XML deserialization * Merging lp:~salvatore-orlando/quantum/quantum-api-alignment * merge latest quantum branch and resolve conflicts * Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical) * PEP8 fixes for setup.py * Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler * Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence * 
Merging lp:quantum * merging with lp:quantum * Making Keystone version configurable * Merging branch: lp:~danwent/quantum/test-refactor * Syncing with lp:quantum * Merging fixes and changes batch-config script. Thanks lp:danwent ! * Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum * merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions * merge trunk * Pulling in changes from lp:quantum * Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin * Merging from Sumit's branch pylint fixes and incorporating review comments * Mergin from cisco brach * Merging from lp:quantum * Introducting cheetah Updating list\_nets in CLI Writing unit tests for list\_nets Stubbing out with FakeConnection now * Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work! * lp Bug#824145 : Adding a setup script for quantum * skeleton for cli unit tests * merge trunk * Merged quantum trunk * - Adding setup script * force batch\_config.py to use json, as XML has issues (see bug: 798262) * update batch\_config.py to use new client lib, hooray for deleting code * Merging changes addressing Bug # 802772. Thanks lp:danwent ! * Merging bugfix for Bug 822890 - Added License file for Quantum code distribution * L2 Network Plugin Framework merge * Adding Apache Version 2.0 license file. 
This is the official license agreement under which Quantum code is available to the Open Source community * merge * merge heckj's pip-requires fixes * updates to pip-requires for CI * Merged quantum trunk * Merging changes from lp:quantum * Completing API spec alignment Unit tests aligned with changes in the API spec * Merging the brand new Quantum-client-library feature * Merging lp:quantum updates * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * Merged from trunk * merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db\_test\_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network\_models/db and ucs\_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db\_conn.ini - updated database name from cisco\_naas to quantum\_l2network unit test cases ran successfully and pep8 checks done again * merge branch for to fix bug817826 * Merging the latest changes from lp:quantum * fix bug 817826 and similar error in batch\_config.py * merge 
Salvatore's api branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge * Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin * Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419 * Merging branch lp:~netstack/quantum/quantum-unit-tests * Merged from quantum trunk * Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs * Adding Routes>=1.12.3 to tools/pip-requires * Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM! * more pep8 goodness * refactor batch\_config, allow multiple attaches with the empty string * merge and pep8 cleanup * Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum\_testing\_framework , which has now been merged into lp:network-service * Merging pep8 and functional test related changes lp:~santhom/network-service/quantum\_testing\_framework branch * add example to usage string for batch\_config.py * Bug fixes and clean-up, including supporting libvirt * Santhosh/Vinkesh | Added the testing framework. 
Moved the smoketest to tests/functional * Pushing initial started code based on Glance project and infrstructure work done by the melange team * Merging in latest changes from lp:quantum ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/HACKING.rst0000644000175000017500000000256300000000000021500 0ustar00jamespagejamespage00000000000000Neutron Style Commandments ======================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Neutron Specific Commandments -------------------------- - [N319] Validate that debug level logs are not translated - [N320] Validate that LOG messages, except debug ones, have translations - [N321] Validate that jsonutils module is used instead of json - [N322] We do not use @authors tags in source files. We have git to track authorship. - [N323] Detect common errors with assert_called_once_with Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. In the Neutron test suite, this should be done by inheriting from neutron.tests.base.BaseTestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/LICENSE0000644000175000017500000002363700000000000020714 0ustar00jamespagejamespage00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/PKG-INFO0000644000175000017500000000430400000000000020772 0ustar00jamespagejamespage00000000000000Metadata-Version: 2.1 Name: networking-odl Version: 16.0.0.0b2.dev1 Summary: OpenStack Networking Home-page: https://docs.openstack.org/networking-odl/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ========================== Welcome to networking-odl! ========================== .. Team and repository tags .. image:: http://governance.openstack.org/badges/networking-odl.svg :target: http://governance.openstack.org/reference/tags/index.html .. Change things from this point on Summary ------- OpenStack networking-odl is a library of drivers and plugins that integrates OpenStack Neutron API with OpenDaylight Backend. For example it has ML2 driver and L3 plugin to enable communication of OpenStack Neutron L2 and L3 resources API to OpenDayLight Backend. 
To report and discover bugs in networking-odl the following link can be used: https://bugs.launchpad.net/networking-odl Any new code submission or proposal must follow the development guidelines detailed in HACKING.rst and for further details this link can be checked: https://docs.openstack.org/networking-odl/latest/ The OpenDaylight homepage: https://www.opendaylight.org/ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/networking-odl/ The project source code repository is located at: https://opendev.org/openstack/networking-odl Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 Provides-Extra: ceilometer Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/README.rst0000644000175000017500000000217500000000000021370 0ustar00jamespagejamespage00000000000000========================== Welcome to networking-odl! ========================== .. Team and repository tags .. image:: http://governance.openstack.org/badges/networking-odl.svg :target: http://governance.openstack.org/reference/tags/index.html .. Change things from this point on Summary ------- OpenStack networking-odl is a library of drivers and plugins that integrates OpenStack Neutron API with OpenDaylight Backend. For example it has ML2 driver and L3 plugin to enable communication of OpenStack Neutron L2 and L3 resources API to OpenDayLight Backend. 
To report and discover bugs in networking-odl the following link can be used: https://bugs.launchpad.net/networking-odl Any new code submission or proposal must follow the development guidelines detailed in HACKING.rst and for further details this link can be checked: https://docs.openstack.org/networking-odl/latest/ The OpenDaylight homepage: https://www.opendaylight.org/ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/networking-odl/ The project source code repository is located at: https://opendev.org/openstack/networking-odl ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130284.0 networking-odl-16.0.0.0b2.dev1/RELEASENOTES.rst0000644000175000017500000000073700000000000022326 0ustar00jamespagejamespage00000000000000============== networking-odl ============== .. _networking-odl_16.0.0.0b1: 16.0.0.0b1 ========== .. _networking-odl_16.0.0.0b1_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/drop-py27-support-3bc8094e1823cfcf.yaml @ b'358da8623ad8cb8df40f2a93cea64c3ce66305db' - Python 2.7 support has been dropped. Last release of networking-odl to support python 2.7 is OpenStack Train. The minimum version of Python now supported by networking-odl is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/TESTING.rst0000644000175000017500000001503400000000000021546 0ustar00jamespagejamespage00000000000000Testing Networking-odl + neutron ================================ Overview -------- The unit tests (networking_odl/tests/unit/) are meant to cover as much code as possible and should be executed without the service running. They are designed to test the various pieces of the neutron tree to make sure any new changes don't break existing functionality. # TODO (Manjeet): Update functional testing doc. 
Development process ------------------- It is expected that any new changes that are proposed for merge come with tests for that feature or code area. Ideally any bugs fixes that are submitted also have tests to prove that they stay fixed! In addition, before proposing for merge, all of the current tests should be passing. Virtual environments ~~~~~~~~~~~~~~~~~~~~ Testing OpenStack projects, including Neutron, is made easier with `DevStack `_. Create a machine (such as a VM or Vagrant box) running a distribution supported by DevStack and install DevStack there. For example, there is a Vagrant script for DevStack at https://github.com/bcwaldon/vagrant_devstack. .. note:: If you prefer not to use DevStack, you can still check out source code on your local machine and develop from there. Running unit tests ------------------ There are two mechanisms for running tests: tox, and nose. Before submitting a patch for review you should always ensure all test pass; a tox run is triggered by the jenkins gate executed on gerrit for each patch pushed for review. With these mechanisms you can either run the tests in the standard environment or create a virtual environment to run them in. By default after running all of the tests, any pep8 errors found in the tree will be reported. With `nose` ~~~~~~~~~~~ You can use `nose`_ to run individual tests, as well as use for debugging portions of your code:: . .venv/bin/activate pip install nose nosetests There are disadvantages to running Nose - the tests are run sequentially, so race condition bugs will not be triggered, and the full test suite will take significantly longer than tox & testr. The upside is that testr has some rough edges when it comes to diagnosing errors and failures, and there is no easy way to set a breakpoint in the Neutron code, and enter an interactive debugging session while using testr. .. 
_nose: https://nose.readthedocs.org/en/latest/index.html With `tox` ~~~~~~~~~~ Networking-odl, like other OpenStack projects, uses `tox`_ for managing the virtual environments for running test cases. It uses `Testr`_ for managing the running of the test cases. Tox handles the creation of a series of `virtualenvs`_ that target specific versions of Python (2.6, 2.7, 3.3, etc). Testr handles the parallel execution of series of test cases as well as the tracking of long-running tests and other things. Running unit tests is as easy as executing this in the root directory of the Neutron source code:: tox Running tests for syntax and style check for written code:: tox -e pep8 For more information on the standard Tox-based test infrastructure used by OpenStack and how to do some common test/debugging procedures with Testr, see this wiki page: https://wiki.openstack.org/wiki/Testr .. _Testr: https://wiki.openstack.org/wiki/Testr .. _tox: http://tox.readthedocs.org/en/latest/ .. _virtualenvs: https://pypi.org/project/virtualenv/ Tests written can also be debugged by adding pdb break points. Normally if you add a break point and just run the tests with normal flags they will end up in failing. There is debug flag you can use to run after adding pdb break points in the tests. Set break points in your test code and run:: tox -e debug networking_odl.tests.unit.db.test_db.DbTestCase.test_validate_updates_same_object_uuid The package oslotest was used to enable debugging in the tests. For more information see the link: https://docs.openstack.org/oslotest/latest/user/features.html Running individual tests ~~~~~~~~~~~~~~~~~~~~~~~~ For running individual test modules or cases, you just need to pass the dot-separated path to the module you want as an argument to it. For executing a specific test case, specify the name of the test case class separating it from the module path with a colon. 
For example, the following would run only the TestUtils tests from networking_odl/tests/unit/common/test_utils.py :: $ tox -e py37 networking_odl.tests.unit.common.test_utils.TestUtils Adding more tests ~~~~~~~~~~~~~~~~~ There might not be full coverage yet. New patches for adding tests which are not there are always welcome. To get a grasp of the areas where tests are needed, you can check current coverage by running:: $ tox -e cover Debugging --------- It's possible to debug tests in a tox environment:: $ tox -e venv -- python -m testtools.run [test module path] Tox-created virtual environments (venv's) can also be activated after a tox run and reused for debugging:: $ tox -e venv $ . .tox/venv/bin/activate $ python -m testtools.run [test module path] Tox packages and installs the neutron source tree in a given venv on every invocation, but if modifications need to be made between invocation (e.g. adding more pdb statements), it is recommended that the source tree be installed in the venv in editable mode:: # run this only after activating the venv $ pip install --editable . Editable mode ensures that changes made to the source tree are automatically reflected in the venv, and that such changes are not overwritten during the next tox run. Running functional tests ------------------------ Neutron defines different classes of test cases. One of them is functional test. It requires pre-configured environment. But it's lighter than running devstack or openstack deployment. For definitions of functional tests, please refer to: https://docs.openstack.org/neutron/latest/contributor/index.html The script is provided to setup the environment. 
At first make sure the latest version of pip command:: # ensure you have the latest version of pip command # for example on ubuntu $ sudo apt-get install python-pip $ sudo pip --upgrade pip And then run functional test as follows:: # assuming devstack is setup with networking-odl $ cd networking-odl $ ./tools/configure_for_func_testing.sh /path/to/devstack $ tox -e dsvm-functional For setting up devstack, please refer to neutron documentation: * https://wiki.openstack.org/wiki/NeutronDevstack * https://docs.openstack.org/neutron/latest/contributor/index.html * https://docs.openstack.org/neutron/latest/contributor/testing/testing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/babel.cfg0000644000175000017500000000002100000000000021413 0ustar00jamespagejamespage00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/bindep.txt0000644000175000017500000000051000000000000021672 0ustar00jamespagejamespage00000000000000# This overrides the default fallback that can be located at: # https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/bindep-fallback.txt isc-dhcp-client [platform:ubuntu] netcat-openbsd [platform:ubuntu] iputils-arping [platform:ubuntu test] dhclient [platform:fedora] arping [platform:fedora test] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/devstack/0000755000175000017500000000000000000000000021500 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/README.rst0000644000175000017500000001720200000000000023171 0ustar00jamespagejamespage00000000000000====================== Enabling in Devstack 
====================== 1. Download DevStack 2. Copy the sample local.conf over:: cp devstack/local.conf.example local.conf 3. Optionally, to manually configure this: Add this repo as an external repository:: > cat local.conf [[local|localrc]] enable_plugin networking-odl http://opendev.org/openstack/networking-odl 4. Optionally, to enable support for OpenDaylight L3 router functionality, add the below:: > cat local.conf [[local|localrc]] ODL_L3=True .. note:: This is only relevant when using old netvirt (ovsdb based, default). 5. If you need to route the traffic out of the box (e.g. br-ex), set ODL_PROVIDER_MAPPINGS to map the physical provider network to device mapping, as shown below:: > cat local.conf [[local|localrc]] ODL_L3=True ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2} # for old netvirt (ovsdb based) ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth2} # for new netvirt (vpnservice based) 7. run ``stack.sh`` 8. Note: In a multi-node devstack environment, for each compute node you will want to add this to the local.conf file:: > cat local.conf [[local|localrc]] enable_plugin networking-odl http://opendev.org/openstack/networking-odl ODL_MODE=compute 9. Note: In a node using a release of Open vSwitch provided from another source than your Linux distribution you have to enable in your local.conf skipping of OVS installation step by setting *SKIP_OVS_INSTALL=True*. For example when stacking together with `networking-ovs-dpdk `_ Neutron plug-in to avoid conflicts between openvswitch and ovs-dpdk you have to add this to the local.conf file:: > cat local.conf [[local|localrc]] enable_plugin networking-ovs-dpdk http://opendev.org/openstack/networking-ovs-dpdk enable_plugin networking-odl http://opendev.org/openstack/networking-odl SKIP_OVS_INSTALL=True 10. 
Note: Optionally, to use the new netvirt implementation (netvirt-vpnservice-openstack), add the following to the local.conf file (only allinone topology is currently supported by devstack, since tunnel endpoints are not automatically configured). For tunnel configurations after loading devstack, please refer to this guide https://wiki.opendaylight.org/view/Netvirt:_L2Gateway_HowTo#Configuring_Tunnels:: > cat local.conf [[local|localrc]] ODL_NETVIRT_KARAF_FEATURE=odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-vpnservice-openstack ODL_BOOT_WAIT_URL=restconf/operational/network-topology:network-topology/ # Workaround since netvirt:1 no longer exists in DS! 11. Note: To enable Quality Of Service (QoS) with OpenDaylight Backend, add the following lines in neutron.conf:: > in /etc/neutron/neutron.conf service_plugins = qos, odl-router enable qos extension driver in ml2 conf:: > in /etc/neutron/plugins/ml2/ml2_conf.ini extensions_drivers = qos, port_security restart neutron service neutron-api 12. Note: legacy netvirt specific options - OVS conntrack support :variable: ODL_LEGACY_NETVIRT_CONNTRACK By default it's False for compatibility and version requirements. - version requirement :ODL version: Boron release or later. (ODL legacy netvirt support is from Beryllium. But networking-odl devstack supports Boron+) :OVS version: 2.5 or later enable OVS conntrack support:: > cat local.conf [[local|localrc]] ODL_LEGACY_NETVIRT_CONNTRACK=True 13. Note: To enable Vlan Aware VMs (Trunk) with OpenDaylight Backend, make the following entries in local.conf:: > cat local.conf [[local|localrc]] Q_SERVICE_PLUGIN_CLASSES=trunk 14. Enabling L2Gateway Backend for OpenDaylight - The package networking-l2gw must be installed as a pre-requisite. 
So include in your localrc (or local.conf) the following:: enable_plugin networking-l2gw http://opendev.org/openstack/networking-l2gw enable_service l2gw-plugin NETWORKING_L2GW_SERVICE_DRIVER=L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.OpenDaylightL2gwDriver:default - Now stack up Devstack and after stacking completes, we are all set to use l2gateway-as-a-service with OpenDaylight. 15. Note: To enable Service Function Chaining support driven by networking-sfc, the following steps have to be taken: - local.conf should contain the following lines:: # enable our plugin: enable_plugin networking-odl https://github.com/openstack/networking-odl.git # enable the networking-sfc plugin: enable_plugin networking-sfc https://github.com/openstack/networking-sfc.git # enable the odl-netvirt-sfc Karaf feature in OpenDaylight ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-sfc # enable the networking-sfc OpenDaylight driver pair [[post-config|$NEUTRON_CONF]] [sfc] drivers = odl_v2 [flowclassifier] drivers = odl_v2 - A special commit of Open vSwitch should be compiled and installed (containing compatible NSH OpenFlow support). This isn't done automatically by networking-odl or DevStack, so the user has to manually install. Please follow the instructions in: https://wiki.opendaylight.org/view/Service_Function_Chaining:Main#Building_Open_vSwitch_with_VxLAN-GPE_and_NSH_support - Fluorine is the recommended and latest version of OpenDaylight to use, you can specify it by adding the following to local.conf:: ODL_RELEASE=fluorine-snapshot-0.9.0 - To clarify, OpenDaylight doesn't have to be running/installed before stacking with networking-odl (and it shouldn't). The networking-odl DevStack plugin will download and start OpenDaylight automatically. However, it will not fetch the correct Open vSwitch version, so the instructions above and the usage of ``SKIP_OVS_INSTALL`` are important. 16. 
To enable BGPVPN driver to use with OpenDaylight controller Include the following lines in your localrc (or local.conf):: enable_plugin networking-bgpvpn https://opendev.org/openstack/networking-bgpvpn.git [[post-config|$NETWORKING_BGPVPN_CONF]] [service_providers] service_provider=BGPVPN:OpenDaylight:networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver:default and then stack up your devstack. 17. To enable DHCP Service in OpenDaylight deployments with Openstack, please use:: [[local|localrc]] ODL_DHCP_SERVICE=True 18. To enable ODL with OVS hardware Offload support please use:: [[local|localrc]] ODL_OVS_HOSTCONFIGS_OPTIONS="--noovs_dpdk --debug --ovs_sriov_offload" Note: OVS offload support minimal version requirements - Linux kernel from version 4.12 OVS from version 2.8.0 ODL from version Nitrogen (please note that Nitrogen is no longer maintained) 19. For development environment, if opendaylight installation is not required for stack.sh then a parameter ODL_INSTALL should be set to False. By default it is set to True therefore it is backward compatible with gate and already existing scripts:: [[local|localrc]] ODL_INSTALL=False 20. To Enable L3 Flavors with ODL, service providers should be added to neutron.conf:: [service_providers] service_provider = L3_ROUTER_NAT:ODL:networking_odl.l3.l3_flavor.ODLL3ServiceProvider:default Note: Service Plugin 'router' should be used in neutron.conf for enabling L3 flavors completely ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/devstackgaterc0000644000175000017500000000631700000000000024424 0ustar00jamespagejamespage00000000000000#!/bin/bash # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This script is executed in the OpenStack CI job that runs DevStack + tempest. # You can find the CI job configuration here: # # https://opendev.org/openstack/project-config/src/branch/master/grafana/networking-odl.yaml # # TODO(mkolesni): Remove in case we need more timeout. # Currently timeout is too big causing the gate job to timeout entirely and # not collect almost any logs, making it hard to debug. export DEVSTACK_LOCAL_CONFIG+=$'\n'"BUILD_TIMEOUT=60" CINDER_SERVICES=c-api,c-bak,c-sch,c-vol,cinder COMMON_SERVICES=dstat,g-api,g-reg,key,mysql,n-api,n-cond,n-cpu,n-crt,n-obj,n-sch,rabbit,placement-api,n-api-meta NEUTRON_NEW=neutron-dhcp,neutron-metadata-agent,neutron-api,neutron # This variable is not being used, it will be removed once migration is complete NEUTRON_LEGACY=q-dhcp,q-meta,q-svc,quantum if [[ "$IS_GRENADE_JOB" == "True" ]]; then # TODO(rajivk): workaround, Remove this once, grenade has moved to lib/neutron # from legacy. Currently, it checks service with q-*, so it does not detect new # neutron-*. Therefore neutron does not get registered for upgrade, if we # override and register neutron from networking-odl because it will look for # required files available with neutron only. For now, lib/neutron-legacy to # lib/neutron is not done for grenade jobs. 
ALL_ENABLED_SERVICES=${COMMON_SERVICES},${NEUTRON_LEGACY} else ALL_ENABLED_SERVICES=${CINDER_SERVICES},${COMMON_SERVICES},${NEUTRON_NEW} fi export OVERRIDE_ENABLED_SERVICES=${ALL_ENABLED_SERVICES} if [ -z "${RALLY_SCENARIO}" ] ; then # Only include tempest if this is not a rally job, As running tempest in Rally is likely to cause failure export OVERRIDE_ENABLED_SERVICES=${OVERRIDE_ENABLED_SERVICES},tempest fi # NOTE(manjeets) To prevent create of public network twice if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] ; then # NOTE(manjeets) Temporarily disabling LM test due to bug 1643678 # https://bugs.launchpad.net/networking-odl/+bug/1643678 export DEVSTACK_LOCAL_CONFIG+=$'\n'"LIVE_MIGRATION_AVAILABLE=False" export DEVSTACK_LOCAL_CONFIG+=$'\n'"USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=False" # DEVSTACK_GATE_NEUTRON_DVR in devstack-gate set Q_DVR_MODE as dvr_snat export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_DVR_MODE=legacy" export DEVSTACK_SUBNODE_CONFIG+=$'\n'"Q_DVR_MODE=legacy" export DEVSTACK_SUBNODE_CONFIG+=$'\n'"disable_all_services" export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ENABLED_SERVICES=n-cpu,dstat,c-vol,c-bak,mysql,placement-client" export DEVSTACK_SUBNODE_CONFIG+=$'\n'"RABBIT_HOST=\$SERVICE_HOST" export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ODL_MODE=compute" export DEVSTACK_SUBNODE_CONFIG+=$'\n'"LIBVIRT_TYPE=qemu" fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/entry_points0000644000175000017500000004171100000000000024164 0ustar00jamespagejamespage00000000000000#!/bin/bash # cleanup_opendaylight() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_opendaylight { # Wipe out the data, journal and snapshots directories ... 
grumble grumble grumble rm -rf $ODL_DIR/$ODL_NAME/{data,journal,snapshots} # Remove existing logfiles if [[ -n "$LOGDIR" ]]; then rm -f "$LOGDIR/$ODL_KARAF_LOG_BASE*" fi rm -f "$DEST/logs/$ODL_KARAF_LOG_BASE*" move_interface_addresses "outof_bridge" unbind_opendaylight_controller } # configure_opendaylight() - Set config files, create data dirs, etc function configure_opendaylight { echo "Configuring OpenDaylight" # The logging config file in ODL local ODL_LOGGING_CONFIG=${ODL_DIR}/${ODL_NAME}/etc/org.ops4j.pax.logging.cfg # The feature config file in ODL for booting karaf features local ODL_KARAF_CONFIG=$ODL_DIR/$ODL_NAME/etc/org.apache.karaf.features.cfg # Add netvirt feature in Karaf, if it's not already there if ! (grep -w '^featuresBoot' $ODL_KARAF_CONFIG | grep $ODL_NETVIRT_KARAF_FEATURE); then # This format was introduced in oxygen sed -i "/^featuresBoot = / s/$/,$ODL_NETVIRT_KARAF_FEATURE/" \ $ODL_KARAF_CONFIG fi # Move Jetty to $ODL_PORT if ! grep $ODL_PORT $ODL_DIR/$ODL_NAME/etc/jetty.xml; then # NOTE(yamahata): https://git.opendaylight.org/gerrit/#/c/51531/ # removed 8080 port. if ! grep 808. $ODL_DIR/$ODL_NAME/etc/jetty.xml; then patch --input=$NETWORKING_ODL_DIR/devstack/jetty-legacy.patch $ODL_DIR/$ODL_NAME/etc/jetty.xml fi sed -i "/\> $ODL_DIR/$ODL_NAME/etc/custom.properties fi # Configure L3 GW MAC if it's not there if ! 
grep ^ovsdb.l3gateway.mac $ODL_DIR/$ODL_NAME/etc/custom.properties && [[ -n "$ODL_L3GW_MAC" ]]; then echo "ovsdb.l3gateway.mac=$ODL_L3GW_MAC" >> $ODL_DIR/$ODL_NAME/etc/custom.properties fi fi # create symbolic link from ODL etc, configuration dir under /etc/networking-odl # so that those config files are copied to log server local NETWORKING_ODL_ETC_DIR=/etc/networking-odl local ODL_CONF_DIR=$NETWORKING_ODL_ETC_DIR/odl sudo mkdir -p $ODL_CONF_DIR local d for d in etc configuration; do sudo ln -sf "$ODL_DIR/$ODL_NAME/$d" "$ODL_CONF_DIR/" done # NOTE(yamahata): by default LOGDIR=$DEST via ${LOG_FILE%/*} # default value of LOGFILE by devstack-vm-gate.sh: $BASE/new/devstacklog.txt # default value of LOG_DIR by devstack/stackrc: $BASE/new # however, cleanup_host() in devstack-gate/functions.sh # doesn't copy files under $BASE/new, but $BASE/{old, new}/logs # try to $BASE/logs local ODL_LOGDIR if [[ -n "$LOGDIR" ]]; then if [[ "$LOGDIR" != "$DEST" ]]; then ODL_LOGDIR=$LOGDIR else ODL_LOGDIR=${LOGDIR%/*}/logs fi else ODL_LOGDIR=${DEST%/*}/logs fi # Remove existing logfiles rm -f "$ODL_LOGDIR/$ODL_KARAF_LOG_BASE*" # Log karaf output to a file _LF=$ODL_LOGDIR/$ODL_KARAF_LOG_NAME LF=$(echo $_LF | sed 's/\//\\\//g') # Soft link for easy consumption sudo mkdir -p "$ODL_LOGDIR" sudo chown $(id -un):$(id -gn) "$ODL_LOGDIR" sudo ln -sf $_LF "$ODL_LOGDIR/screen-karaf.log" # This format was introduced in oxygen sed -i -e "/^log4j2\.appender\.rolling\.fileName/ s/.*/log4j2\.appender\.rolling\.fileName\ = $LF/" \ -e "/^log4j2\.appender\.rolling\.policies\.size\.size/ s/.*/log4j2\.appender\.rolling\.policies\.size\.size\ = 1024GB/" \ $ODL_DIR/$ODL_NAME/etc/org.ops4j.pax.logging.cfg # Configure DEBUG logs for network virtualization in odl, if the user wants it if [ "${ODL_NETVIRT_DEBUG_LOGS}" == "True" ]; then if ! grep ^log4j.logger.org.opendaylight.ovsdb $ODL_LOGGING_CONFIG; then echo 'log4j.logger.org.opendaylight.ovsdb = INFO, out' >> $ODL_LOGGING_CONFIG fi if ! 
grep ^log4j.logger.org.opendaylight.netvirt $ODL_LOGGING_CONFIG; then echo 'log4j.logger.org.opendaylight.netvirt = DEBUG, out' >> $ODL_LOGGING_CONFIG fi if ! grep ^log4j.logger.org.opendaylight.neutron $ODL_LOGGING_CONFIG; then echo 'log4j.logger.org.opendaylight.neutron = DEBUG, out' >> $ODL_LOGGING_CONFIG fi fi } # configure_neutron_opendaylight() - Set Neutron config files according to ODL settings function configure_neutron_odl { echo "Configuring ML2 for OpenDaylight" # NOTE(mpeterson): Create the state_path that will be used by neutron # since although it expects it to exist, it is not created at any time # and therefore when we first want to use it in the driver it fails. # refer to: https://github.com/openstack-dev/devstack/blob/d37119e797d3140aeb0038a1129ce5e9016c1a36/lib/neutron#L46 # and: https://github.com/openstack-dev/devstack/blob/d37119e797d3140aeb0038a1129ce5e9016c1a36/lib/neutron-legacy#L698 sudo mkdir -p $DATA_DIR/neutron safe_chown -R $STACK_USER $DATA_DIR safe_chmod 0755 $DATA_DIR/neutron # https://bugs.launchpad.net/neutron/+bug/1614766 # Allow ovsdb_interface native by avoiding port conflict. if [[ -n "$ODL_OVSDB_ALTPORT" ]]; then iniset $NEUTRON_CONF OVS ovsdb_connection tcp:127.0.0.1:$ODL_OVSDB_ALTPORT iniset $NEUTRON_DHCP_CONF OVS ovsdb_connection tcp:127.0.0.1:$ODL_OVSDB_ALTPORT fi # Addition of L3 service_plugin if ! is_neutron_legacy_enabled; then neutron_service_plugin_class_add odl-router_v2 # NOTE: workaround, mechanism driver is not being set to opendaylight_v2 # by lib/neutron. 
It seems to be hardcoded at # https://github.com/openstack-dev/devstack/blob/master/lib/neutron#L184, fix it, if # fixed in lib/neutron iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $Q_ML2_PLUGIN_MECHANISM_DRIVERS fi populate_odl_ml2_config ml2_odl url $ODL_ENDPOINT populate_odl_ml2_config ml2_odl username $ODL_USERNAME populate_odl_ml2_config ml2_odl password $ODL_PASSWORD populate_odl_ml2_config ml2_odl port_binding_controller $ODL_PORT_BINDING_CONTROLLER populate_odl_ml2_config ml2_odl enable_dhcp_service $ODL_DHCP_SERVICE if [[ -n "$ODL_TIMEOUT" ]]; then populate_odl_ml2_config ml2_odl timeout $ODL_TIMEOUT fi # When it's not set, the default value is set by networking-odl if [[ -n "$ODL_HOSTCONF_URI" ]]; then populate_odl_ml2_config ml2_odl odl_hostconf_uri $ODL_HOSTCONF_URI fi # NOTE(mgkwill): ODL layer-3 and DHCP services currently lack support # for metadata. Enabling both native services also requires enabling # config drive to provide instances with metadata. If conventional DHCP agent # is used instead, configure it to provide instances with metadata. # TODO(rajivk) Remove q-dhcp on adoption of lib/neutron if is_service_enabled neutron-dhcp; then # Conventional DHCP agent must provide all metadata when ODL # layer-3 is enabled. The conventional DHCP agent will be forced # to provide metadata for all networks. iniset $NEUTRON_DHCP_CONF DEFAULT force_metadata True elif is_service_enabled q-dhcp; then iniset $Q_DHCP_CONF_FILE DEFAULT force_metadata True fi if [[ "$ODL_L3" == "True" ]]; then if is_service_enabled n-cpu; then iniset $NOVA_CONF DEFAULT force_config_drive True fi fi } function configure_neutron_odl_lightweight_testing { echo "Configuring lightweight testing for OpenDaylight" if is_service_enabled q-dhcp neutron-dhcp; then populate_odl_ml2_config ml2_odl enable_lightweight_testing True fi } # init_opendaylight() - Initialize databases, etc. 
function init_opendaylight { # clean up from previous (possibly aborted) runs # create required data files : } # install_opendaylight() - Collect source and prepare function install_opendaylight { if [[ "$ODL_INSTALL" == "False" ]]; then return fi echo "Installing OpenDaylight and dependent packages" if [[ "$ODL_USING_EXISTING_JAVA" != "True" ]]; then if ! setup_java "${ODL_REQUIRED_JAVA_VERSION:-7}"; then exit 1 fi fi # Download OpenDaylight cd $ODL_DIR if [[ "$OFFLINE" != "True" ]]; then wget -N $ODL_URL/$ODL_PKG fi unzip -u -o $ODL_PKG } # install_networking_odl() - Install the ML2 driver and other plugins/drivers function install_networking_odl { echo "Installing the Networking-ODL driver for OpenDaylight" setup_develop $NETWORKING_ODL_DIR } # install_opendaylight_compute() - Make sure OVS is installed function install_opendaylight_compute { if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then echo "Skipping OVS installation." else # packages are the same as for Neutron OVS agent _neutron_ovs_base_install_agent_packages fi } # start_opendaylight() - Start running processes, including screen function start_opendaylight { echo "Starting OpenDaylight" # Wipe out the data and journal directories ... grumble grumble grumble rm -rf $ODL_DIR/$ODL_NAME/{data,journal} # There variables needed by the running karaf process are set in the # function setup_java_env. See the "bin/setenv" file in the OpenDaylight # distribution for their individual meaning. setup_java_env # Extra configuration variables that may be used if required. if [[ -n "$JAVA_MIN_MEM" ]]; then export JAVA_MIN_MEM=$ODL_JAVA_MIN_MEM fi if [[ -n "$JAVA_MAX_MEM" ]]; then export JAVA_MAX_MEM=$ODL_JAVA_MAX_MEM fi if [[ -n "$JAVA_MAX_PERM_MEM" ]]; then export JAVA_MAX_PERM_MEM=$ODL_JAVA_MAX_PERM_MEM fi # this is a forking process, just start it in the background $ODL_DIR/$ODL_NAME/bin/start if [ -n "$ODL_BOOT_WAIT_URL" ]; then echo "Waiting for OpenDaylight to start via $ODL_BOOT_WAIT_URL ..." 
# Probe ODL restconf for netvirt until it is operational local testcmd="curl -o /dev/null --fail --silent --head -u \ ${ODL_USERNAME}:${ODL_PASSWORD} http://${ODL_MGR_HOST}:${ODL_PORT}/${ODL_BOOT_WAIT_URL}" test_with_retry "$testcmd" "OpenDaylight did not start after $ODL_BOOT_WAIT" \ $ODL_BOOT_WAIT $ODL_RETRY_SLEEP_INTERVAL else echo "Waiting for OpenDaylight to start ..." # Sleep a bit to let OpenDaylight finish starting up sleep $ODL_BOOT_WAIT fi } # stop_opendaylight() - Stop running processes (non-screen) function stop_opendaylight { # Stop the karaf container $ODL_DIR/$ODL_NAME/bin/stop } # cleanup_opendaylight_compute() - Remove all OVS ports, bridges and disconnects # controller from switch function cleanup_opendaylight_compute { # Remove the patch ports for port in $(sudo ovs-vsctl show | grep Port | awk '{print $2}' | cut -d '"' -f 2 | grep patch); do sudo ovs-vsctl del-port ${port} done # remove all OVS ports that look like Neutron created ports for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do sudo ovs-vsctl del-port ${port} done # Remove all the vxlan ports for port in $(sudo ovs-vsctl list port | grep name | grep vxlan | awk '{print $3}' | cut -d '"' -f 2); do sudo ovs-vsctl del-port ${port} done # Disconnect controller from switch unbind_opendaylight_controller # remove all OVS bridges created by ODL for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BR} -e ${PUBLIC_BRIDGE}); do sudo ovs-vsctl del-br ${bridge} done } # bind_opendaylight_controller() - set control manager to OVS function bind_opendaylight_controller { echo_summary "Initializing OpenDaylight" ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP} ODL_MGR_PORT=${ODL_MGR_PORT:-6640} read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . 
_uuid) # NOTE(yamahata): setup ovsdb configuration first before setting # ovsdb manager not to show transitional state if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then sudo ovs-vsctl set Open_vSwitch $ovstbl \ other_config:provider_mappings=$ODL_PROVIDER_MAPPINGS fi sudo ovs-vsctl set Open_vSwitch $ovstbl other_config:local_ip=$ODL_LOCAL_IP if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",odl-netvirt-sfc," ]]; then sudo ovs-vsctl set Open_vSwitch $ovstbl external_ids:of-tunnel=true fi # for pseudo agent port binding if [ "$ODL_PORT_BINDING_CONTROLLER" == "pseudo-agentdb-binding" ]; then ODL_OVS_HOSTCONFIGS_OPTIONS=${ODL_OVS_HOSTCONFIGS_OPTIONS:---debug --noovs_dpdk} if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then ODL_OVS_HOSTCONFIGS_OPTIONS="${ODL_OVS_HOSTCONFIGS_OPTIONS} --bridge_mappings=${ODL_PROVIDER_MAPPINGS}" fi if [[ -n "$ODL_OVS_HOSTCONFIGS" ]]; then ODL_OVS_HOSTCONFIGS_OPTIONS=${ODL_OVS_HOSTCONFIGS_OPTIONS} --ovs_hostconfigs="$ODL_OVS_HOSTCONFIGS" fi if [[ ! -f $NEUTRON_CONF ]]; then sudo neutron-odl-ovs-hostconfig $ODL_OVS_HOSTCONFIGS_OPTIONS else sudo neutron-odl-ovs-hostconfig --config-file=$NEUTRON_CONF $ODL_OVS_HOSTCONFIGS_OPTIONS fi fi if [[ -n "$PUBLIC_BRIDGE" ]]; then sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE fi # Lastly setup ovsdb manager local ODL_MANAGERS_PARAM=() for manager in $(echo $ODL_OVS_MANAGERS | tr "," "\n"); do local manager_ip manager_ip=$(gethostip -d ${manager}) ODL_MANAGERS_PARAM=( "${ODL_MANAGERS_PARAM[@]}" "tcp:${manager_ip}:$ODL_MGR_PORT" ) done # don't overwrite the already existing managers local ODL_MANAGERS_OLD ODL_MANAGERS_OLD=$(sudo ovs-vsctl get-manager) local ODL_MANAGERS ODL_MANAGERS=$(echo $ODL_MANAGERS_OLD ${ODL_MANAGERS_PARAM[@]} | tr ' ' '\n' | sort | uniq | tr '\n' ' ') sudo ovs-vsctl set-manager ${ODL_MANAGERS} } # unbind_opendaylight_controller() - disconnect controller from switch and clear bridges function unbind_opendaylight_controller { sudo ovs-vsctl del-manager BRIDGES=$(sudo ovs-vsctl list-br) for 
bridge in $BRIDGES ; do sudo ovs-vsctl del-controller $bridge done } function _configure_veth { ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 || sudo ip link add $Q_PUBLIC_VETH_INT type veth \ peer name $Q_PUBLIC_VETH_EX sudo ip link set $Q_PUBLIC_VETH_INT up sudo ip link set $Q_PUBLIC_VETH_EX up sudo ip addr flush dev $Q_PUBLIC_VETH_EX if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]]; then local OVSBR_EX OVSBR_EX=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f1) sudo ovs-vsctl --may-exist add-port $OVSBR_EX $Q_PUBLIC_VETH_INT else sudo ovs-vsctl --may-exist add-port $OVS_BR $Q_PUBLIC_VETH_INT fi local cidr_len=${FLOATING_RANGE#*/} sudo ip addr replace ${PUBLIC_NETWORK_GATEWAY}/$cidr_len dev $Q_PUBLIC_VETH_EX sudo ip route replace $FLOATING_RANGE dev $Q_PUBLIC_VETH_EX if [[ -n "$IPV6_PUBLIC_RANGE" ]] && [[ -n "$IPV6_PUBLIC_NETWORK_GATEWAY" ]] && [[ -n "$FIXED_RANGE_V6" ]] && [[ -n "$IPV6_ROUTER_GW_IP" ]]; then local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} sudo ip -6 addr replace ${IPV6_PUBLIC_NETWORK_GATEWAY}/$ipv6_cidr_len dev ${Q_PUBLIC_VETH_EX} sudo ip -6 route replace $IPV6_PUBLIC_RANGE dev $Q_PUBLIC_VETH_EX fi } function _configure_opendaylight_l3_legacy_netvirt { wait_for_active_bridge $PUBLIC_BRIDGE $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then _configure_veth fi } function _configure_opendaylight_l3_new_netvirt { if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then _configure_veth fi } # configure_opendaylight_l3() - configure bridges for OpenDaylight L3 forwarding function configure_opendaylight_l3 { if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]]; then _configure_opendaylight_l3_legacy_netvirt else _configure_opendaylight_l3_new_netvirt fi } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/devstack/files/0000755000175000017500000000000000000000000022602 
5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/devstack/files/debs/0000755000175000017500000000000000000000000023517 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/files/debs/networking-odl0000644000175000017500000000011700000000000026404 0ustar00jamespagejamespage00000000000000libxml-xpath-perl # for xpath command patch # for patch command syslinux-utils ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/devstack/files/rpms/0000755000175000017500000000000000000000000023563 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/files/rpms/networking-odl0000644000175000017500000000010600000000000026446 0ustar00jamespagejamespage00000000000000perl-XML-XPath # for xpath command patch # for patch command syslinux ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/functions0000644000175000017500000002164400000000000023442 0ustar00jamespagejamespage00000000000000#!/bin/bash # # functions - OpenDaylight driver utility functions function _odl_nexus_path { local ODL_URL_PREFIX=$1 echo "${NEXUSPATH:-${ODL_URL_PREFIX}/${ODL_URL_SNAPSHOT_REPOSITORY_PATH}/${ODL_URL_DISTRIBUTION_KARAF_PATH}}" } function _wget { local MAVENMETAFILE=$1 local URL=$2 local $OFFLINE=$3 local ALLOW_MISSING=${4:-False} if [[ "$OFFLINE" == "True" ]]; then if [[ ! -r $MAVENMETAFILE && "$ALLOW_MISSING" != "True" ]]; then echo "$MAVENMETAFILE doesn't exist. 
Please try with OFFLINE=False to download $URL" exit 1 fi return fi # Remove stale MAVENMETAFILE for cases where you switch releases rm -f $MAVENMETAFILE # Acquire the timestamp information from maven-metadata.xml wget -O $MAVENMETAFILE $URL if [[ ! -r $MAVENMETAFILE && "$ALLOW_MISSING" != "True" ]]; then echo "can't download $URL. Please check intenet connection, site availability or directory permision to create file $MAVENMETAFILE" exit 1 fi } function _xpath { local XPATH=$1 local MAVENMETAFILE=$2 local result="" if is_ubuntu; then result=`xpath -e "$XPATH" $MAVENMETAFILE 2>/dev/null` elif [ "$os_VENDOR" = "Fedora" ]; then result=`xpath -e "$XPATH" $MAVENMETAFILE 2>/dev/null` else result=`xpath $MAVENMETAFILE "$XPATH" 2>/dev/null` fi if [[ -z "$result" ]]; then echo "xpath can't find matching xpath $XPATH" 1>&2 cat $MAVENMETAFILE 1>&2 exit 1 fi echo $result } function odl_artifact_id { local ODL_DIR=$1 local ODL_URL_PREFIX=$2 local BUNDLEVERSION=$3 local OFFLINE=$4 local MAVENMETAFILE=$ODL_DIR/maven-metadata-artifact-id.xml local _NEXUSPATH _NEXUSPATH=$(_odl_nexus_path $ODL_URL_PREFIX) _wget $MAVENMETAFILE ${_NEXUSPATH}/${BUNDLEVERSION}/maven-metadata.xml $OFFLINE True if [[ -f "$MAVENMETAFILE" ]]; then # In auto release case, metadata file exists only in parent directly. # e.g. https://nexus.opendaylight.org/content/repositories/autorelease-1929/org/opendaylight/integration/distribution-karaf/maven-metadata.xml _wget $MAVENMETAFILE ${_NEXUSPATH}/maven-metadata.xml $OFFLINE fi local result result=$(_xpath "//artifactId/text()" $MAVENMETAFILE) echo $result } # get snapshot version . -> .. 
function odl_snapshot_full_version { local ODL_DIR=$1 local ODL_URL_PREFIX=$2 local MAJOR_MINOR=$3 local OFFLINE=$4 local MAVENMETAFILE=$ODL_DIR/maven-metadata-snapshot.xml local _NEXUSPATH _NEXUSPATH=$(_odl_nexus_path $ODL_URL_PREFIX) _wget $MAVENMETAFILE ${_NEXUSPATH}/maven-metadata.xml $OFFLINE if [[ "$MAJOR_MINOR" == "latest" ]]; then local ODL_FULL_VERSION ODL_FULL_VERSION=$(_xpath "//latest/text()" $MAVENMETAFILE) else local ODL_FULL_VERSION ODL_FULL_VERSION=$(_xpath "//version[starts-with(text(), '$MAJOR_MINOR')][last()]/text()" $MAVENMETAFILE) fi ODL_FULL_VERSION=${ODL_FULL_VERSION/-SNAPSHOT/} echo $ODL_FULL_VERSION } function _odl_export_snapshot_url_pkg { local ODL_DIR=$1 local ODL_URL_PREFIX=$2 local BUNDLEVERSION=$3 local OFFLINE=$4 local BUNDLE_TIMESTAMP=$5 local MAVENMETAFILE=$ODL_DIR/maven-metadata.xml local _NEXUSPATH _NEXUSPATH=$(_odl_nexus_path $ODL_URL_PREFIX) if [ "$BUNDLE_TIMESTAMP" == "latest" ]; then # Get build information _wget $MAVENMETAFILE ${_NEXUSPATH}/${BUNDLEVERSION}/maven-metadata.xml $OFFLINE BUNDLE_TIMESTAMP=$(_xpath "//snapshotVersion[extension='zip'][1]/value/text()" $MAVENMETAFILE) fi export ODL_URL=${_NEXUSPATH}/${BUNDLEVERSION} export ODL_PKG=${ODL_ARTIFACT_ID}-${BUNDLE_TIMESTAMP}.zip } # get release bundle version -> ..-[-SR] function odl_release_bundleversion { local ODL_DIR=$1 local ODL_URL_PREFIX=$2 local RELEASE_NAME=$3 local OFFLINE=$4 local MAVENMETAFILE=$ODL_DIR/maven-metadata-release.xml local _NEXUSPATH local _NEXUSPATH="${NEXUSPATH:-${ODL_URL_PREFIX}/${ODL_URL_RELEASE_REPOSITORY_PATH}/${ODL_URL_DISTRIBUTION_KARAF_PATH}}" _wget $MAVENMETAFILE ${_NEXUSPATH}/maven-metadata.xml $OFFLINE local _ODL_BUNDLEVERSION if [[ "$RELEASE_NAME" == "latest" ]]; then _ODL_BUNDLEVERSION=$(_xpath "//release/text()" $MAVENMETAFILE) else # until carbon, karaf is named as ..- # from Nitrogen, it's named as .. 
local _ODL_MAJOR_MINOR=${ODL_NAME_TO_MAJOR_MINOR["$RELEASE_NAME"]} _ODL_BUNDLEVERSION=$(_xpath "//version[starts-with(text(), '$_ODL_MAJOR_MINOR')][last()]/text()" $MAVENMETAFILE) fi echo $_ODL_BUNDLEVERSION } function _odl_export_release_url_pkg { local ODL_URL_PREFIX=$1 local BUNDLEVERSION=$2 local _NEXUSPATH="${NEXUSPATH:-${ODL_URL_PREFIX}/${ODL_URL_RELEASE_REPOSITORY_PATH}/${ODL_URL_DISTRIBUTION_KARAF_PATH}}" export ODL_URL=${_NEXUSPATH}/${BUNDLEVERSION} export ODL_PKG=${ODL_ARTIFACT_ID}-${BUNDLEVERSION}.zip } function setup_opendaylight_package { if [[ -n "$ODL_SNAPSHOT_VERSION" ]]; then _odl_export_snapshot_url_pkg ${ODL_DIR} ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION} ${OFFLINE} ${ODL_SNAPSHOT_VERSION} else _odl_export_release_url_pkg ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION} fi } # Test if OpenDaylight is enabled function is_opendaylight_enabled { [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0 return 1 } # Check that the bridge is up and running function wait_for_active_bridge { local BRIDGE=$1 local SLEEP_INTERVAL=$2 local MAX_WAIT=$3 echo "Waiting for bridge $BRIDGE to be available..." local testcmd="sudo ovs-vsctl list Bridge | grep $BRIDGE" test_with_retry "$testcmd" \ "$BRIDGE did not become available in $MAX_WAIT seconds." \ $MAX_WAIT $SLEEP_INTERVAL echo "Bridge $BRIDGE is available." 
} # Move the public IP addresses to the OVS bridge on startup, # or back to the public interface on cleanup function move_interface_addresses { local direction=$1 if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then local VETH_INTERFACE VETH_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f1) local PHYSICAL_INTERFACE PHYSICAL_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f2) if [[ "$direction" == "into_bridge" ]]; then _move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" True False "inet" if _has_public_ipv6_address "$PHYSICAL_INTERFACE"; then _move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" False False "inet6" fi elif [[ "$direction" == "outof_bridge" ]]; then _move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False True "inet" if _has_public_ipv6_address "$VETH_INTERFACE"; then _move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False False "inet6" fi fi fi } # Check that the interface has an IP v6 address which # is routable on external network function _has_public_ipv6_address { local interface=$1 local interface_public_ipv6_addresses interface_public_ipv6_addresses=$(ip -f inet6 a s dev "$interface" | grep -c 'global') echo "$interface public IPv6 address count: $interface_public_ipv6_addresses" if [[ "$interface_public_ipv6_addresses" != 0 ]]; then return 0 else return 1 fi } # NOTE(manjeets) br-ex is not up when neutron initial networks are created so this workaround # is done to bring up the br-ex. # TODO delete this workaround once br-ex bringup is fixed. 
function purge_and_recreate_initial_networks { DEVSTACK_DIR=$1 if [ -z "$DEVSTACK_DIR" ]; then echo "Please specify devstack directory" exit 1 fi cat <> $DEVSTACK_DIR/local.sh #!/usr/bin/env bash sudo ifconfig br-ex 172.24.5.1/24 up source $DEVSTACK_DIR/openrc admin openstack router unset --external-gateway router1 openstack port list --router router1 -c ID -f value | xargs -I {} openstack router remove port router1 {} openstack router delete router1 openstack subnet list | grep -e public -e private | cut -f2 -d'|' | xargs openstack subnet delete openstack network list | grep -e public -e private | cut -f2 -d'|' | xargs openstack network delete openstack network create public --external --provider-network-type=flat --provider-physical-network=public openstack subnet create --network=public --subnet-range=172.24.5.0/24 --gateway 172.24.5.1 public-subnet EOF chmod 755 $DEVSTACK_DIR/local.sh } function populate_odl_ml2_config { section=$1 param=$2 value=$3 if is_neutron_legacy_enabled; then populate_ml2_config /$Q_PLUGIN_CONF_FILE $section $param=$value else iniset $NEUTRON_CORE_PLUGIN_CONF $section $param $value fi } function neutron_plugin_create_nova_conf { : } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/jetty-legacy.patch0000644000175000017500000000317500000000000025130 0ustar00jamespagejamespage00000000000000diffjetty.xml a/karaf/opendaylight-karaf-resources/src/main/resources/etc/jetty.xml index 1d954bd..df22f9d 100644 --- jetty.xml +++ jetty.xml @@ -93,6 +93,37 @@ DTD Configure//EN" "http://jetty.mortbay.org/configure.dtd"> + + + + + + + + + + + + + + + + + + + + + + + + + + q + jetty-legacy + + + + ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/local.conf.example0000644000175000017500000000605000000000000025074 
0ustar00jamespagejamespage00000000000000[[local|localrc]] # This will fetch the latest ODL snapshot ODL_RELEASE=latest-snapshot # Default is psuedo-port-binding-controller #ODL_PORT_BINDING_CONTROLLER= # Set here which ODL openstack service provider to use # These are core ODL features ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs # Set DLUX Karaf features needed for the ODL GUI at http://:8181/index.html ODL_NETVIRT_KARAF_FEATURE+=,odl-dluxapps-nodes,odl-dluxapps-topology,odl-dluxapps-yangui,odl-dluxapps-yangvisualizer # Set L2 Karaf features needed for the ODL GUI at http://:8181/index.html ODL_NETVIRT_KARAF_FEATURE+=,odl-l2switch-switch,odl-l2switch-switch-ui,odl-ovsdb-hwvtepsouthbound-ui,odl-ovsdb-southbound-impl-ui,odl-netvirt-ui # Set OpenFlow Karaf features needed for the ODL GUI at http://:8181/index.html ODL_NETVIRT_KARAF_FEATURE+=,odl-openflowplugin-flow-services-ui # odl-netvirt-openstack is used for new netvirt ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-openstack # optional feature neutron-logger to log changes of neutron yang models ODL_NETVIRT_KARAF_FEATURE+=,odl-neutron-logger # Switch to using the ODL's L3 implementation ODL_L3=True # Set Host IP here. 
It is externally reachable network, set # below param to use ip from a different network HOST_IP=$(hostname -I | awk '{print $1}') # public network connectivity Q_USE_PUBLIC_VETH=True Q_PUBLIC_VETH_EX=veth-pub-ex Q_PUBLIC_VETH_INT=veth-pub-int ODL_PROVIDER_MAPPINGS=public:${Q_PUBLIC_VETH_INT} # Enable debug logs for odl ovsdb ODL_NETVIRT_DEBUG_LOGS=True #Q_USE_DEBUG_COMMAND=True DEST=/opt/stack/ # move DATA_DIR outside of DEST to keep DEST a bit cleaner DATA_DIR=/opt/stack/data ADMIN_PASSWORD=password MYSQL_PASSWORD=${ADMIN_PASSWORD} RABBIT_PASSWORD=${ADMIN_PASSWORD} SERVICE_PASSWORD=${ADMIN_PASSWORD} SERVICE_TOKEN=supersecrettoken enable_service dstat enable_service g-api enable_service g-reg enable_service key enable_service mysql enable_service n-api enable_service n-cond enable_service n-cpu enable_service n-crt enable_service n-novnc enable_service n-sch enable_service placement-api enable_service placement-client enable_service neutron-dhcp enable_service neutron-metadata-agent enable_service neutron-api enable_service rabbit enable_service tempest # These can be enabled if storage is needed to do # any feature or testing for integration disable_service c-api disable_service c-vol disable_service c-sch SKIP_EXERCISES=boot_from_volume,bundle,client-env,euca # Screen console logs will capture service logs. SYSLOG=False LOGFILE=/opt/stack/new/devstacklog.txt VERBOSE=True FIXED_RANGE=10.1.0.0/20 FLOATING_RANGE=172.24.5.0/24 PUBLIC_NETWORK_GATEWAY=172.24.5.1 FIXED_NETWORK_SIZE=4096 VIRT_DRIVER=libvirt export OS_NO_CACHE=1 # Additional repositories need to be cloned can be added here. 
#LIBS_FROM_GIT= # Enable MySql Logging DATABASE_QUERY_LOGGING=True # set this until all testing platforms have libvirt >= 1.2.11 # see bug #1501558 EBTABLES_RACE_FIX=True enable_plugin networking-odl https://opendev.org/openstack/networking-odl ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/devstack/odl-etc/0000755000175000017500000000000000000000000023027 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/devstack/odl-etc/opendaylight/0000755000175000017500000000000000000000000025516 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/devstack/odl-etc/opendaylight/datastore/0000755000175000017500000000000000000000000027504 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/devstack/odl-etc/opendaylight/datastore/initial/0000755000175000017500000000000000000000000031135 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/devstack/odl-etc/opendaylight/datastore/initial/config/0000755000175000017500000000000000000000000032402 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=networking-odl-16.0.0.0b2.dev1/devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml 22 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-c0000644000175000017500000000024300000000000035176 0ustar00jamespagejamespage00000000000000 true ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/0000755000175000017500000000000000000000000024057 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/README.rst0000644000175000017500000000334500000000000025553 0ustar00jamespagejamespage00000000000000======================= ODL release definitions ======================= This directory contains definitions for OpenDaylight releases so that devstack can determine the URI for ODL distribution to download. Examples ======== Even when not full version is specified, it downloads maven metadata and determine latest version. Now devstack scripts is smart to guess URI based on release name and version which can be deduced from its filename. So for typical cases, empty file will do. 
Release boron-0.5.1:: export ODL_BUNDLEVERSION='0.5.1-Boron' export ODL_NAME=distribution-karaf-${BUNDLEVERSION} ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8} SR Release boron-0.5.1-SR1:: export ODL_BUNDLEVERSION='0.5.1-Boron-SR1' export ODL_NAME=distribution-karaf-${BUNDLEVERSION} ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8} snapshot beryllium-snapshot-0.4.2:: BUNDLEVERSION='0.4.2-SNAPSHOT' ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest} export ODL_NAME=distribution-karaf-${BUNDLEVERSION} ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8} latest snapshot without revision boron-snapshot-0.5:: ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest} ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8} latest snapshot latest-snapshot:: ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest} ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8} RC carbon-0.6.2-SR2-RC:: ODL_BUNDLEVERSION='0.6.2-Carbon' NEXUSPATH="${ODL_URL_PREFIX}/content/repositories/autorelease-1929/org/opendaylight/integration/distribution-karaf/" RC nitroge-0.7.0-RC3:: ODL_BUNDLEVERSION="0.7.0" NEXUSPATH="${ODL_URL_PREFIX}/content/repositories/autorelease-1963/org/opendaylight/integration/karaf/" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/common0000644000175000017500000001162100000000000025273 0ustar00jamespagejamespage00000000000000#!/bin/bash _XTRACE_ODL_RELEASE_COMMON=$(set +o | grep xtrace) set -o xtrace # code name -> version(Major.Minor) relationship declare -A ODL_NAME_TO_MAJOR_MINOR ODL_NAME_TO_MAJOR_MINOR["Helium"]="0.2" ODL_NAME_TO_MAJOR_MINOR["Lithium"]="0.3" ODL_NAME_TO_MAJOR_MINOR["Beryllium"]="0.4" ODL_NAME_TO_MAJOR_MINOR["Boron"]="0.5" ODL_NAME_TO_MAJOR_MINOR["Carbon"]="0.6" ODL_NAME_TO_MAJOR_MINOR["Nitrogen"]="0.7" ODL_NAME_TO_MAJOR_MINOR["Oxygen"]="0.8" ODL_NAME_TO_MAJOR_MINOR["Fluorine"]="0.9" 
ODL_NAME_TO_MAJOR_MINOR["Neon"]="0.10" ODL_NAME_TO_MAJOR_MINOR["Sodium"]="0.11" _odl_release=$1 if [[ "$_odl_release" =~ -snapshot ]]; then # -snapshot-.. -> ..-SNAPSHOT _odl_version=${_odl_release/[[:alpha:]]*-snapshot-/} if [[ "$_odl_release" == "latest-snapshot" ]]; then # get latest revision of snapshot _odl_version=$(odl_snapshot_full_version $ODL_DIR $ODL_URL_PREFIX "latest" $OFFLINE) # update ODL_RELEASE to prevent odl_snapshot_full_version from being called # every time networking-odl/devstack/plugin.sh is called by devstack # latest-snapshot -> latest-snapshot-.. ODL_RELEASE=${ODL_RELEASE}-${_odl_version} elif [[ "${_odl_version}" =~ ^[[:digit:]]+\.[[:digit:]]+$ ]]; then # get latest revision of given major.minor # . -> .. _odl_version=$(odl_snapshot_full_version $ODL_DIR $ODL_URL_PREFIX $_odl_version $OFFLINE) # update ODL_RELEASE to prevent odl_snapshot_full_version from being called # every time networking-odl/devstack/plugin.sh is called by devstack # -snapshot-. -> -snapshot-.. 
_odl_revision=${_odl_version/[[:digit:]]\.[[:digit:]]\./} ODL_RELEASE=${ODL_RELEASE}.${_odl_revision} fi _odl_bundleversion_default=${_odl_version}-SNAPSHOT export ODL_BUNDLEVERSION=${ODL_BUNDLEVERSION:-${_odl_bundleversion_default}} export ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest} else if [[ "$_odl_release" == "latest-release" ]]; then _odl_bundleversion_default=$(odl_release_bundleversion $ODL_DIR $ODL_URL_PREFIX "latest" $OFFLINE) elif [[ "$_odl_release" =~ "-latest" ]]; then # -latest _name=$(echo ${_odl_release} | awk -F- '{print toupper(substr($1, 1, 1))substr($1, 2)}') _odl_bundleversion_default=$(odl_release_bundleversion $ODL_DIR $ODL_URL_PREFIX $_name $OFFLINE) else # -..[-SR] -> ..-[-SR] _name=$(echo ${_odl_release} | awk -F- '{print toupper(substr($1, 1, 1))substr($1, 2)}') _version=$(echo ${_odl_release} | awk -F- '{print $2}') _sr=$(echo ${_odl_release} | awk -F- '{print $3}') _odl_bundleversion_default=${_version}-${_name} if [[ -n $_sr ]]; then _odl_bundleversion_default=${_odl_bundleversion_default}-${_sr} fi fi export ODL_BUNDLEVERSION=${ODL_BUNDLEVERSION:-${_odl_bundleversion_default}} fi # Java major version required to run OpenDaylight: 7, 8, ... 
# by default, ODL uses jdk 8 as of Boron export ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8} # artifact id _default_artifact_id=$(odl_artifact_id ${ODL_DIR} ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION} ${OFFLINE}) export ODL_ARTIFACT_ID=${ODL_ARTIFACT_ID:-${_default_artifact_id}} # karaf distribution name of ODL to download export ODL_NAME=${ODL_NAME:-${ODL_ARTIFACT_ID}-${ODL_BUNDLEVERSION}} # The network virtualization older feature name (ovsdb based) export ODL_NETVIRT_KARAF_FEATURE_OVSDB=${ODL_NETVIRT_KARAF_FEATURE_OVSDB:-odl-ovsdb-openstack} # The network virtualization newer feature name (vpnservice based) export ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE=${ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE:-odl-netvirt-openstack} ODL_NETVIRT_KARAF_FEATURE_DEFAULT=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs # new netvirt has been introduced into netvirt from Boron release # odl-neutron-logger has been introduced from Boron release case "$ODL_BUNDLEVERSION" in 0.5.?-*) # 0.5.?-* ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-logger ;; *) # 0.6.?-* or later ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-hostconfig-ovs ODL_NETVIRT_KARAF_FEATURE_DEFAULT+=,odl-neutron-logger ;; esac # The network virtualization feature used by opendaylight loaded by Karaf export ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE:-$ODL_NETVIRT_KARAF_FEATURE_DEFAULT} # The url that this version of ODL netvirt can use to know ODL is fully up export ODL_BOOT_WAIT_URL=${ODL_BOOT_WAIT_URL:-restconf/operational/network-topology:network-topology/topology/netvirt:1} $_XTRACE_ODL_RELEASE_COMMON ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/fluorine-latest0000644000175000017500000000000000000000000027105 
0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/fluorine-snapshot-0.90000644000175000017500000000000000000000000027754 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/fluorine-snapshot-0.9.00000644000175000017500000000000000000000000030112 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/latest-release0000644000175000017500000000000000000000000026702 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/latest-snapshot0000644000175000017500000000000000000000000027121 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/neon-latest0000644000175000017500000000000000000000000026221 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/neon-snapshot-0.10.20000644000175000017500000000000000000000000027300 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/odl-releases/sodium-latest0000644000175000017500000000000000000000000026562 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/override-defaults0000644000175000017500000000326000000000000025050 0ustar00jamespagejamespage00000000000000#!/bin/bash # # Override few things here as early as we can Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-"logger,opendaylight_v2"} # This triggers the provisioning of L3 resources like routers and # external network, if not overridden. Q_L3_ENABLED=${Q_L3_ENABLED:-True} # We have to disable the neutron L2 agent. OpenDaylight does not use the # L2 agent, it instead uses a combination of OpenFlow and OVSDB commands # to program OVS on each compute and network node host. # TODO(rajivk) Both are to be supported for now therefore disabled explicitly # both of them. Can be removed once moved to lib/neutron if is_service_enabled "neutron-agent"; then disable_service neutron-agent fi if is_service_enabled "q-agt"; then disable_service q-agt fi # If ODL_L3 is enabled, then we don't need the L3 agent and OpenDaylight # is going to act as the ML2's L3 service plugin. # NETVIRT_VPNSERVICE feature enables ODL L3 by default, so ODL_L3 is disregarded. if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE," ]] || [ "$ODL_L3" == "True" ]; then # TODO(rajivk) Both are to be supported for now therefore disabled explicitly # both of them. Can be removed once moved to lib/neutron if is_service_enabled "neutron-l3"; then disable_service neutron-l3 fi if is_service_enabled "q-l3"; then disable_service q-l3 fi fi # bug work around # https://bugs.launchpad.net/neutron/+bug/1614766 # ODL ovsdb listens to 6640 and # neutron agent with native uses also 6640 to connect to ovsdb-server # If ODL server and neutron agent run in same box, alternative port is needed. 
export ODL_OVSDB_ALTPORT=${ODL_OVSDB_ALTPORT:-6641} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/plugin.sh0000644000175000017500000001006200000000000023331 0ustar00jamespagejamespage00000000000000#!/bin/bash # # devstack/plugin.sh # Functions to control the configuration and operation of the opendaylight service # Save trace setting _XTRACE_NETWORKING_ODL=$(set +o | grep xtrace) set +o xtrace # OpenDaylight directories NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$DEST/networking-odl} ODL_DIR=$DEST/opendaylight # Make sure $ODL_DIR exists mkdir -p $ODL_DIR # Import utility functions source $TOP_DIR/functions source $NETWORKING_ODL_DIR/devstack/functions source $TOP_DIR/lib/neutron # Import bridge data source $TOP_DIR/lib/neutron_plugins/ovs_base # Import ODL settings source $NETWORKING_ODL_DIR/devstack/settings.odl if [ -r $NETWORKING_ODL_DIR/devstack/odl-releases/$ODL_RELEASE ]; then source $NETWORKING_ODL_DIR/devstack/odl-releases/$ODL_RELEASE fi source $NETWORKING_ODL_DIR/devstack/odl-releases/common $ODL_RELEASE # Utilities functions for setting up Java source $NETWORKING_ODL_DIR/devstack/setup_java.sh # Import Entry Points # ------------------- source $NETWORKING_ODL_DIR/devstack/entry_points # Restore xtrace $_XTRACE_NETWORKING_ODL if [[ "$ODL_USING_EXISTING_JAVA" == "True" ]]; then echo 'Using installed java.' 
java -version || exit 1 fi # main loop if is_service_enabled odl-server; then if [[ "$1" == "stack" && "$2" == "install" ]]; then install_networking_odl setup_opendaylight_package install_opendaylight configure_opendaylight init_opendaylight elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then configure_neutron_odl # This has to start before Neutron start_opendaylight elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then # no-op : fi if [[ "$1" == "unstack" && "$UNSTACK_KEEP_ODL" != "True" ]]; then stop_opendaylight cleanup_opendaylight fi if [[ "$1" == "clean" ]]; then stop_opendaylight cleanup_opendaylight fi fi if is_service_enabled odl-compute; then if [[ "$1" == "stack" && "$2" == "install" ]]; then install_networking_odl install_opendaylight_compute elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then if is_service_enabled nova; then configure_neutron_nova fi bind_opendaylight_controller if [[ -z "$ODL_DONT_WAIT_OVS_BR" ]]; then wait_for_active_bridge $OVS_BR $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT fi # L3 needs to be configured only for netvirt-ovsdb - in netvirt-vpnservice L3 is configured # by provider_mappings, and the provider mappings are added to br-int by default if [[ "${ODL_L3}" == "True" ]]; then configure_opendaylight_l3 fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # no-op : elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then # no-op : fi if [[ "$1" == "unstack" && "$UNSTACK_KEEP_ODL" != "True" ]]; then cleanup_opendaylight_compute fi if [[ "$1" == "clean" ]]; then # no-op : fi fi if is_service_enabled odl-neutron; then if [[ "$1" == "stack" && "$2" == "install" ]]; then install_networking_odl elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then configure_neutron_odl elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then # no-op : fi if [[ "$1" == "unstack" ]]; then # no-op : fi if [[ "$1" == "clean" ]]; then # no-op : fi fi if is_service_enabled odl-lightweight-testing; then if [[ "$1" == "stack" && "$2" == 
"install" ]]; then install_networking_odl elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then configure_neutron_odl configure_neutron_odl_lightweight_testing elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then # no-op : fi if [[ "$1" == "unstack" ]]; then # no-op : fi if [[ "$1" == "clean" ]]; then # no-op : fi fi # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/post_test_hook.sh0000644000175000017500000000401500000000000025100 0ustar00jamespagejamespage00000000000000#!/usr/bin/env bash set -xe GATE_DEST=$BASE/new DEVSTACK_PATH=$GATE_DEST/devstack source $DEVSTACK_PATH/functions source $DEVSTACK_PATH/openrc admin admin TEMPEST_CODE_DIR="$BASE/new/tempest" TEMPEST_DATA_DIR="$DATA_DIR/tempest" NETWORKING_ODL_DIR="${NETWORKING_ODL_DIR:-$BASE/new/networking-odl}" owner=stack sudo_env="TEMPEST_CONFIG_DIR=$TEMPEST_CODE_DIR/etc" cd $TEMPEST_CODE_DIR sudo chown -R $owner:stack $TEMPEST_CODE_DIR sudo mkdir -p "$TEMPEST_DATA_DIR" sudo chown -R $owner:stack $TEMPEST_DATA_DIR function _odl_show_info { sudo ip address sudo ip link sudo ip route sudo ovsdb-client dump sudo ovs-vsctl show for br in $(sudo ovs-vsctl list-br); do echo "--- flows on $br ---" sudo ovs-ofctl --protocols OpenFlow13 dump-ports $br sudo ovs-ofctl --protocols OpenFlow13 dump-ports-desc $br sudo ovs-ofctl --protocols OpenFlow13 dump-flows $br sudo ovs-ofctl --protocols OpenFlow13 dump-groups $br sudo ovs-ofctl --protocols OpenFlow13 dump-group-stats $br done openstack network list openstack port list openstack subnet list openstack router list # ODL_UESRNAME=admin # ODL_PASSWORD=admin # ODL_MGR_HOST=$SERVICE_HOST # ODL_PORT=8087 # There is no good way to retrieve from setting.odl at the moment local PATHES="config/neutron:neutron config/opendaylight-inventory:nodes config/elan:elan-instances 
config/elan:elan-interfaces" for path in $PATHES; do echo "path=${path}" curl --silent --user admin:admin "http://localhost:8087/restconf/${path}?prettyPrint=true" echo done echo echo } echo "Some pre-process info" _odl_show_info $BASE/new/opendaylight/*karaf-*/bin/client "feature:list -i" echo "Running networking-odl test suite" set +e sudo -H -u $owner $sudo_env tox -eall -- "$DEVSTACK_GATE_TEMPEST_REGEX" --serial retval=$? set -e echo "Some post-process info" _odl_show_info # stop ODL server for complete log $BASE/new/opendaylight/*karaf-*/bin/stop sleep 3 return $retval ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/pre_test_hook.sh0000644000175000017500000000625100000000000024705 0ustar00jamespagejamespage00000000000000#!/usr/bin/env bash set -xe # Drop a token that marks the build as coming from openstack infra GATE_DEST=$BASE/new DEVSTACK_PATH=$GATE_DEST/devstack # for localrc_set source $DEVSTACK_PATH/inc/ini-config source $GATE_DEST/networking-odl/devstack/functions case "$ODL_RELEASE_BASE" in latest-snapshot) ODL_RELEASE=latest-snapshot ;; fluorine-snapshot) ODL_RELEASE=fluorine-snapshot-0.9 ;; oxygen-snapshot) ODL_RELEASE=oxygen-latest ;; *) echo "Unknown ODL release base: $ODL_RELEASE_BASE" exit 1 ;; esac ODL_PORT_BINDING_CONTROLLER=pseudo-agentdb-binding ODL_GATE_SERVICE_PROVIDER=${ODL_GATE_SERVICE_PROVIDER%-} if [[ -z "$ODL_GATE_SERVICE_PROVIDER" ]]; then ODL_GATE_SERVICE_PROVIDER=vpnservice fi case "$ODL_GATE_SERVICE_PROVIDER" in vpnservice) ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack # $PUBLIC_PHYSICAL_NETWORK = public by default ODL_MAPPING_KEY=public ;; netvirt|*) ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-ovsdb-openstack # $PUBLIC_BRIDGE = br-ex by default ODL_MAPPING_KEY=br-ex ;; esac 
ODL_NETVIRT_KARAF_FEATURE=$ODL_NETVIRT_KARAF_FEATURE,odl-neutron-logger local localrc_file=$DEVSTACK_PATH/local.conf localrc_set $localrc_file "IS_GATE" "True" # Set here the ODL release to use for the Gate job localrc_set $localrc_file "ODL_RELEASE" "${ODL_RELEASE}" # Set timeout in seconds for http client to ODL neutron northbound localrc_set $localrc_file "ODL_TIMEOUT" "60" # Set here which port binding controller localrc_set $localrc_file "ODL_PORT_BINDING_CONTROLLER" "${ODL_PORT_BINDING_CONTROLLER}" # Set here which ODL openstack service provider to use localrc_set $localrc_file "ODL_NETVIRT_KARAF_FEATURE" "${ODL_NETVIRT_KARAF_FEATURE}" # Switch to using the ODL's L3 implementation localrc_set $localrc_file "ODL_L3" "True" # Since localrc_set adds it in reverse order, ODL_PROVIDER_MAPPINGS needs to be # before depending variables if [[ "$ODL_GATE_SERVICE_PROVIDER" == "vpnservice" ]]; then localrc_set $localrc_file "ODL_PROVIDER_MAPPINGS" "public:br-ex" localrc_set $localrc_file "PUBLIC_PHYSICAL_NETWORK" "public" localrc_set $localrc_file "PUBLIC_BRIDGE" "br-ex" localrc_set $localrc_file "Q_USE_PUBLIC_VETH" "False" else localrc_set $localrc_file "ODL_PROVIDER_MAPPINGS" "\${ODL_PROVIDER_MAPPINGS:-${ODL_MAPPING_KEY}:\${Q_PUBLIC_VETH_INT}}" localrc_set $localrc_file "Q_USE_PUBLIC_VETH" "True" localrc_set $localrc_file "Q_PUBLIC_VETH_EX" "veth-pub-ex" localrc_set $localrc_file "Q_PUBLIC_VETH_INT" "veth-pub-int" fi # Enable debug logs for odl ovsdb localrc_set $localrc_file "ODL_NETVIRT_DEBUG_LOGS" "True" localrc_set $localrc_file "RALLY_SCENARIO" "${RALLY_SCENARIO}" # delete and recreate network to workaroud netvirt bug: # https://bugs.opendaylight.org/show_bug.cgi?id=7456 # https://bugs.opendaylight.org/show_bug.cgi?id=8133 if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]]; then purge_and_recreate_initial_networks $DEVSTACK_PATH fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/devstack/settings0000644000175000017500000000776300000000000023300 0ustar00jamespagejamespage00000000000000#!/bin/bash # # Devstack settings # Each service you enable has the following meaning: # odl-neutron - Add this config flag if OpenDaylight controller and OpenStack # Controller are on different nodes. # odl-server - Add this config flag if OpenDaylight controller and OpenStack # Controller are on the same node. # odl-compute - Add this config flag for OpenStack Compute. # # odl-lightweight-testing - Add this config flag for testing neutron ODL ML2 # driver and networking-odl without a real running # OpenDaylight instance # # NOTE: odl-server includes odl-neutron. # # An example of enabling all-in-one ODL is below. #enable_service odl-compute odl-server # This can be overridden in the localrc file ODL_MODE=${ODL_MODE:-allinone} # ODL_MODE is used to configure how devstack works with OpenDaylight. You # can configure this three ways: # # ODL_MODE=allinone # Use this mode if you want to run ODL in this devstack instance. Useful # for a single node deployment or on the control node of a multi-node # devstack environment. # # ODL_MODE=compute # Use this for the compute nodes of a multi-node devstack install. # # ODL_MODE=externalodl # This installs the neutron code for ODL, but does not attempt to # manage ODL in devstack. This is used for development environments # similar to the allinone case except where you are using bleeding edge ODL # which is not yet released, and thus don't want it managed by # devstack. # # ODL_MODE=lightweight-testing # Use this for testing neutron ML2 driver plus networking-odl without # a running OpenDaylight instance. # # ODL_MODE=manual # You're on your own here, and are enabling services outside the scope of # the ODL_MODE variable. 
case $ODL_MODE in allinone) enable_service odl-server odl-compute ;; externalodl) enable_service odl-neutron odl-compute ;; compute) enable_service odl-compute ;; lightweight-testing) enable_service odl-lightweight-testing ;; manual) echo "Manual mode: Enabling services explicitly." ;; esac IS_GATE=$(trueorfalse False IS_GATE) if [[ "$IS_GATE" == "True" ]]; then NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$DEST/networking-odl} fi # in tempest.conf # [networking-feature-enabled] api-extensions # api-extensions=all means any kind of extensions is enabled irrelevant of # what plugin supports ML2 plugin with ODL driver supports only the following # extensions, not all Those list must be maintained as ML2 plugin # with ODL driver supports more extensions if [[ -z "$NETWORK_API_EXTENSIONS" ]]; then NETWORK_API_EXTENSIONS=address-scope NETWORK_API_EXTENSIONS+=,agent NETWORK_API_EXTENSIONS+=,allowed-address-pairs NETWORK_API_EXTENSIONS+=,binding NETWORK_API_EXTENSIONS+=,dhcp_agent_scheduler NETWORK_API_EXTENSIONS+=,dvr NETWORK_API_EXTENSIONS+=,ext-gw-mode NETWORK_API_EXTENSIONS+=,external-net NETWORK_API_EXTENSIONS+=,extra_dhcp_opt NETWORK_API_EXTENSIONS+=,extraroute NETWORK_API_EXTENSIONS+=,flavors NETWORK_API_EXTENSIONS+=,multi-provider NETWORK_API_EXTENSIONS+=,net-mtu NETWORK_API_EXTENSIONS+=,network-ip-availability NETWORK_API_EXTENSIONS+=,pagination NETWORK_API_EXTENSIONS+=,port-security NETWORK_API_EXTENSIONS+=,project-id NETWORK_API_EXTENSIONS+=,provider NETWORK_API_EXTENSIONS+=,qos NETWORK_API_EXTENSIONS+=,quotas NETWORK_API_EXTENSIONS+=,rbac-policies NETWORK_API_EXTENSIONS+=,router NETWORK_API_EXTENSIONS+=,router-interface-fip NETWORK_API_EXTENSIONS+=,security-group NETWORK_API_EXTENSIONS+=,service-type NETWORK_API_EXTENSIONS+=,sorting NETWORK_API_EXTENSIONS+=,standard-attr-description NETWORK_API_EXTENSIONS+=,standard-attr-revisions NETWORK_API_EXTENSIONS+=,standard-attr-timestamp NETWORK_API_EXTENSIONS+=,subnet_allocation NETWORK_API_EXTENSIONS+=,tag 
NETWORK_API_EXTENSIONS+=,timestamp_core NETWORK_API_EXTENSIONS+=,vlan-transparent fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/settings.odl0000644000175000017500000001164400000000000024046 0ustar00jamespagejamespage00000000000000#!/bin/bash # # Add here any global default values that apply for any ODL release # ----------------------------------------------------------------- # What release to use. Choices are: # https://wiki.opendaylight.org/view/Release_Plan # # latest-snapshot (master latest snapshot) # latest-release # sodium-latest (master) # neon-latest # fluorine-latest # fluorine-snapshot-0.9.3 # fluorine-snapshot-0.9.4 ODL_RELEASE=${ODL_RELEASE:-latest-snapshot} # The IP address of ODL. Set this in local.conf. #Set ODL_MGR_HOST to ODL_MGR_IP if ODL_MGR_HOST is not set ODL_MGR_HOST=${ODL_MGR_HOST:-$ODL_MGR_IP} # Set ODL_MGR_HOST to SERVICE_HOST if neither ODL_MGR_HOST nor ODL_MGR_IP is set ODL_MGR_HOST=${ODL_MGR_HOST:-$SERVICE_HOST} # The list of IP addresses used as OVS manager, separated by a comma. # In non-clustering cases, this is normally the same as ODL_MGR_HOST. However, # for HA deployments the southbound portion to ODL is expected to # use the ip addresses of the ODL instances instead of a single vip. That # enables OVS to simultaneously connect to more than one ODL instance. # Example of expected format: ODL_OVS_MANAGERS=1.1.1.1,2.2.2.2,3.3.3.3 ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS:-$ODL_MGR_HOST} # The default ODL port for Jetty to use # NOTE: We make this configurable because by default, ODL uses port 8080 for # Jetty, and this conflicts with swift which also uses port 8080. 
ODL_PORT=${ODL_PORT:-8087} # The ODL endpoint URL ODL_ENDPOINT=${ODL_ENDPOINT:-http://${ODL_MGR_HOST}:${ODL_PORT}/controller/nb/v2/neutron} # The ODL username ODL_USERNAME=${ODL_USERNAME:-admin} # The ODL password ODL_PASSWORD=${ODL_PASSWORD:-admin} # The http timeout in seconds for http client to ODL neutron northbound. # unset or empty string means default. ODL_TIMEOUT=${ODL_TIMEOUT:-""} # use opendaylight dhcp server ODL_DHCP_SERVICE=${ODL_DHCP_SERVICE:-False} # NOTE, this parameter is used for development purpose where opendaylight is # already installed and does not have to install again. It will save a lot of # time for developer ODL_INSTALL=${ODL_INSTALL:-True} # The OpenDaylight URL PREFIX ODL_URL_PREFIX=${ODL_URL_PREFIX:-https://nexus.opendaylight.org} # OpenDaylight snapshot & release repositories paths # Can be overridden in case you host proxy repositories which have a different directory structure than OpenDaylight's ODL_URL_SNAPSHOT_REPOSITORY_PATH=${ODL_URL_SNAPSHOT_REPOSITORY_PATH:-content/repositories/opendaylight.snapshot} ODL_URL_RELEASE_REPOSITORY_PATH=${ODL_URL_RELEASE_REPOSITORY_PATH:-content/repositories/opendaylight.release} ODL_URL_DISTRIBUTION_KARAF_PATH=${ODL_URL_DISTRIBUTION_KARAF_PATH:-org/opendaylight/integration/karaf} # How long (in seconds) to pause after ODL starts to let it complete booting ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-600} # Enable conntrack support for legacy netvirt ODL_LEGACY_NETVIRT_CONNTRACK=${ODL_LEGACY_NETVIRT_CONNTRACK:-False} # Enable OpenDaylight l3 forwarding ODL_L3=${ODL_L3:-False} # If you need to route the traffic out of the box, set # ODL_PROVIDER_MAPPINGS to map br-ex as shown below. Note # This used to be accomplished via PUBLIC_BRIDGE, but that # is no longer necessary. # # The physical provider network to device mapping. 
Use this # to instruct ODL to map ports into specific bridges # Examples: # ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2} # ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth1,br-ex:eth2} # MAC address for next hop gateway at external network ODL_L3GW_MAC=${ODL_L3GW_MAC:-''} # Enable debug logs for odl ovsdb ODL_NETVIRT_DEBUG_LOGS=${ODL_NETVIRT_DEBUG_LOGS:-False} # Karaf logfile information ODL_KARAF_LOG_DATE=$(date +%Y-%m-%d-%H%M%S) ODL_KARAF_LOG_BASE=${ODL_KARAF_LOG_BASE:-screen-karaf.log} ODL_KARAF_LOG_NAME=$ODL_KARAF_LOG_BASE.$ODL_KARAF_LOG_DATE # The bridge to configure OVS_BR=${OVS_BR:-br-int} # Use the existing ready java env ODL_USING_EXISTING_JAVA=${ODL_USING_EXISTING_JAVA:-False} # Allow the min/max/perm Java memory to be configurable ODL_JAVA_MIN_MEM=${ODL_JAVA_MIN_MEM:-} ODL_JAVA_MAX_MEM=${ODL_JAVA_MAX_MEM:-} ODL_JAVA_MAX_PERM_MEM=${ODL_JAVA_MAX_PERM_MEM:-} # Interval in test_with_retry calls ODL_RETRY_SLEEP_INTERVAL=${ODL_RETRY_SLEEP_INTERVAL:-5} # Skip installation of distribution provided Open vSwitch SKIP_OVS_INSTALL=$(trueorfalse False SKIP_OVS_INSTALL) # The ODL Restconf URL # URI to hostconfigs: empty for default value ODL_HOSTCONF_URI=${ODL_HOSTCONF_URI:-} # Port binding controller # pseudo-agentdb-binding, legacy-port-binding # pseudo-agentdb-binding is supported by Boron or later ODL_PORT_BINDING_CONTROLLER=${ODL_PORT_BINDING_CONTROLLER:-pseudo-agentdb-binding} # Snapshot version - allows using a specific version e.g. 
0.5.0-20160719.101233-3643 # latest: check the latest snapshot # specific version: the specific version of the snapshot # "": odl release ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-} # Set to True to keep odl running after unstack UNSTACK_KEEP_ODL=${UNSTACK_KEEP_ODL:-False} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/setup_java.sh0000644000175000017500000001441500000000000024202 0ustar00jamespagejamespage00000000000000#!/bin/bash ORACLE_JAVA_URL="http://download.oracle.com/otn-pub/java/jdk" ORACLE_JAVA7_URL="${ORACLE_JAVA7_URL:-$ORACLE_JAVA_URL/7u80-b15/jdk-7u80}" ORACLE_JAVA7_NAME="jdk1.7.0_80" ORACLE_JAVA8_URL="${ORACLE_JAVA8_URL:-$ORACLE_JAVA_URL/8u112-b15/jdk-8u112}" ORACLE_JAVA8_NAME="jdk1.8.0_112" function setup_java { # Java version 8 is the last stable one local VERSION="${1:-8}" echo "Setup Java version: $VERSION" if test_java_version "$VERSION" && setup_java_env; then echo "Current Java version is already $VERSION." elif select_java "$VERSION"; then echo "Java version $VERSION has been selected." elif install_openjdk "$VERSION" && select_java "$VERSION"; then echo "OpenJDK version $VERSION has been installed and selected." elif install_other_java "$VERSION" && select_java "$VERSION"; then echo "Some Java version $VERSION has been installed and selected." else echo "ERROR: Unable to setup Java version $VERSION." return 1 fi return 0 } function setup_java_env { local JAVA_COMMAND="${1:-${JAVA:-java}}" JAVA_LINK="$(which $JAVA_COMMAND)" if [[ "$JAVA_LINK" == "" ]]; then return 1 fi export JAVA="$(readlink -f $JAVA_LINK)" export JAVA_HOME=$(echo $JAVA | sed "s:/bin/java::" | sed "s:/jre::") if [ "$JAVA" != "$(readlink -f $(which java))" ]; then export PATH="$(dirname $JAVA):$PATH" if [ "$JAVA" != "$(readlink -f $(which java))" ]; then echo "Unable to set $JAVA as current." 
return 1 fi fi echo "JAVA is: $JAVA" echo "JAVA_HOME is: $JAVA_HOME" echo "Java version is:" $JAVA -version 2>&1 } function select_java { local VERSION="$1" local COMMAND for COMMAND in $(list_java_commands); do if test_java_version "$VERSION" "$COMMAND"; then if setup_java_env "$COMMAND"; then return 0 fi fi done echo 'Required java version not found.' return 1 } function test_java_version { local EXPECTED_VERSION="'"*' version "1.'$1'.'*'"'"'" local COMMAND="${2:-${JAVA:-java}}" local ACTUAL_VERSION="'"$($COMMAND -version 2>&1 | head -n 1)"'" if [[ $ACTUAL_VERSION == $EXPECTED_VERSION ]]; then echo "Found matching java version: $ACTUAL_VERSION" return 0 else return 1 fi } if is_ubuntu; then # --- Ubuntu ------------------------------------------------------------- function list_java_commands { update-alternatives --list java } function install_openjdk { local REQUIRED_VERSION="$1" apt_get install "openjdk-$REQUIRED_VERSION-jre-headless" } function install_other_java { local VERSION="$1" local PPA_REPOSITORY="ppa:webupd8team/java" local JAVA_INSTALLER="oracle-java${VERSION}-installer" local JAVA_SET_DEFAULT="oracle-java${VERSION}-set-default" # Accept installer license echo "$JAVA_INSTALLER" shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections # Remove all existing set-default versions apt_get remove oracle-java*-set-default if apt_get install $JAVA_INSTALLER ; then if apt_get install $JAVA_SET_DEFAULT ; then return 0 # Some PPA was already providing desired packages fi fi # Add PPA only when package is not available if apt_get install software-properties-common; then # I pipe this after echo to emulate an user key-press if echo | sudo -E add-apt-repository "$PPA_REPOSITORY"; then if apt_get update; then if apt_get install $JAVA_INSTALLER ; then if apt_get install $JAVA_SET_DEFAULT ; then return 0 fi fi fi fi fi # Something has gone wrong! 
return 1 } else # --- Red Hat ------------------------------------------------------------- function list_java_commands { alternatives --display java 2>&1 | grep -v '^[[:space:]]' | awk '/[[:space:]]- priority[[:space:]]/{print $1}' } function install_openjdk { local VERSION="$1" yum_install java-1.$VERSION.*-openjdk-headless } function install_other_java { local VERSION="$1" if [[ "$(uname -m)" == "x86_64" ]]; then local ARCH=linux-x64 else local ARCH=linux-i586 fi if [[ "$VERSION" == "7" ]]; then ORIGIN=$ORACLE_JAVA7_URL TARGET=$ORACLE_JAVA7_NAME elif [[ "$VERSION" == "8" ]]; then ORIGIN=$ORACLE_JAVA8_URL TARGET=$ORACLE_JAVA8_NAME else echo "Unsupported Java version: $VERSION." return 1 fi local NEW_JAVA="/usr/java/$TARGET/jre/bin/java" if test_java_version "$VERSION" "$NEW_JAVA"; then if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then return 0 fi fi local EXT local WGET_OPTIONS="-c --no-check-certificate --no-cookies" local HEADER="Cookie: oraclelicense=accept-securebackup-cookie" for EXT in "rpm" "tar.gz"; do local URL="$ORIGIN-$ARCH.$EXT" local PACKAGE="/tmp/$(basename $URL)" if wget $WGET_OPTIONS --header "$HEADER" "$URL" -O "$PACKAGE"; then case "$EXT" in "rpm") sudo rpm -i "$PACKAGE" ;; "tar.gz") sudo mkdir -p /usr/java && sudo tar -C /usr/java -xzf "$PACKAGE" ;; *) echo "Unsupported extension: $EXT" ;; esac if test_java_version "$VERSION" "$NEW_JAVA"; then if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then return 0 fi fi echo "Unable to register installed java." 
else echo "Unable to download java archive: $URL" fi done return 1 } fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/devstack/upgrade/0000755000175000017500000000000000000000000023127 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/upgrade/resources.sh0000755000175000017500000000000000000000000025466 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/upgrade/settings0000644000175000017500000000356100000000000024717 0ustar00jamespagejamespage00000000000000#!/bin/bash register_project_for_upgrade networking-odl NET_ODL_DIR=/opt/stack/new/networking-odl # NOTE(manjeets) Workaround for bug 1648176 to upgrade # networking-odl before neutron UPGRADE_PROJECTS="networking-odl ${UPGRADE_PROJECTS/ networking-odl/}" # Add karaf features to be enabled for ODL ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-dlux-core,odl-aaa-authn,odl-mdsal-apidocs ODL_NETVIRT_KARAF_FEATURE+=,odl-netvirt-openstack,odl-neutron-logger,odl-neutron-hostconfig-ovs # NOTE(manjeets) br-ex is not up when neutron initial networks are created so this workaround # is done to bring up the br-ex, remove the existing networks and recreate public network. # without this l3 connectivity for vm doesn't work. 
# TODO This can be removed later on once odl start bringing up br-ex properly function pre_test_grenade_hook { RELEASE_TARGET=$1 if [ -z "$RELEASE_TARGET" ]; then echo "Release_target is required" exit 1 else if [[ "$RELEASE_TARGET" == "base" ]] ; then DEVSTACK_DIR=/opt/stack/old/devstack else DEVSTACK_DIR=/opt/stack/new/devstack fi source $NET_ODL_DIR/devstack/functions purge_and_recreate_initial_networks $DEVSTACK_DIR fi } for w in base target; do devstack_localrc $w ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE} devstack_localrc $w Q_PLUGIN=ml2 devstack_localrc $w ODL_L3=True devstack_localrc $w Q_USE_PUBLIC_VETH=False devstack_localrc $w PUBLIC_BRIDGE=br-ex devstack_localrc $w ODL_PROVIDER_MAPPINGS=public:br-ex devstack_localrc $w ODL_PORT_BINDING_CONTROLLER=pseudo-agentdb-binding devstack_localrc $w enable_plugin networking-odl http://github.com/openstack/networking-odl devstack_localrc $w ODL_RELEASE=nitrogen-snapshot-0.7 devstack_localrc $w ODL_TIMEOUT=60 pre_test_grenade_hook $w done ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/devstack/upgrade/upgrade.sh0000755000175000017500000000126500000000000025121 0ustar00jamespagejamespage00000000000000echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" set -o xtrace # Set for DevStack compatibility source $GRENADE_DIR/grenaderc source $TARGET_DEVSTACK_DIR/stackrc # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/inc/python NETWORKING_ODL_DIR="$TARGET_RELEASE_DIR/networking-odl" source $NETWORKING_ODL_DIR/devstack/entry_points install_networking_odl set +x set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/doc/0000755000175000017500000000000000000000000020441 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/requirements.txt0000644000175000017500000000055600000000000023733 0ustar00jamespagejamespage00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD openstackdocstheme>=1.20.0 # Apache-2.0 reno>=2.7.0 # Apache-2.0 doc8>=0.8.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.7867138 networking-odl-16.0.0.0b2.dev1/doc/source/0000755000175000017500000000000000000000000021741 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/admin/0000755000175000017500000000000000000000000023031 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/admin/index.rst0000644000175000017500000000015500000000000024673 0ustar00jamespagejamespage00000000000000==================== Administration Guide ==================== .. 
toctree:: :maxdepth: 2 :glob: * ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/admin/reference_architecture.rst0000644000175000017500000000672600000000000030276 0ustar00jamespagejamespage00000000000000Reference Architecture ====================== This document lists the minimum reference architecture to get OpenStack installed with OpenDayLight. Wherever possible, additional resources will be stated. Cloud Composition ----------------- The basic cloud will have 3 types of nodes: * Controller Node - Runs OpenStack services and the ODL controller. * Network Node - Runs the DHCP agent, the metadata agent, and the L3 agent (for SNAT). * Compute Node - VMs live here. Usually each of the first 2 types of nodes will have a cluster of 3 nodes to support HA. It's also possible to run the ODL controller on separate hardware than the OpenStack services, but this isn't mandatory. The last type of nodes can have as many nodes as scale requirements dictate. Networking Requirements ----------------------- There are several types of networks on the cloud, the most important for the reference architecture are: * Management Network - This is the network used to communicate between the different management components, i.e. Nova controller to Nova agent, Neutron to ODL, ODL to OVS, etc. * External Network - This network provides VMs with external connectivity (i.e. internet) usually via virtual routers. * Data Network - This is the network used to connect the VMs to each other and to network resources such as virtual routers. The Control Nodes usually are only connected to the Management Network, unless they have an externally reachable IP on the External Network. 
The other node types are connected to all the networks since ODL uses a distributed routing model so that each Compute Node hosts a "virtual router" responsible for connecting the VMs from that node to other networks (including the External Network). This diagram illustrates how these nodes might be connected:: Controller Node +-----------------+ | | +-----------+192.168.0.251 | | | | | +-----------------+ | | Compute Node +----------------+ | +---------------+ | Legend | | | | +----------------+ +-----------+192.168.0.1 | | | | | | | --- Management | | +~~~~~~~~~+10.0.0.1 | | | | | | | | ~~~ Data | | | +=======+br-int | | | | | | | | | === External | | | | +---------------+ | | | | | +----------------+ | | | Network Node | | | +-----------------+ | | | | | +-----------+192.168.0.100 | | | | | +~~~~~~~~~+10.0.0.100 | | | | |=======+br-int | | | | | +-----------------+ +----+---+ | | | Router | | | +--------+ Minimal Hardware Requirements ----------------------------- The rule of thumb is the bigger the better, more RAM and more cores will translate to a better environment. For a POC environment the following is necessary: Management Node ~~~~~~~~~~~~~~~ CPU: 2 cores Memory: 8 GB Storage: 100 GB Network: 1 * 1 Gbps NIC Network Node ~~~~~~~~~~~~ CPU: 2 cores Memory: 2 GB Storage: 50 GB Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs Compute Node ~~~~~~~~~~~~ CPU: 2+ cores Memory: 8+ GB Storage: 100 GB Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/conf.py0000644000175000017500000000533600000000000023247 0ustar00jamespagejamespage00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'openstackdocstheme', 'oslo_config.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/networking-odl' bug_project = 'networking-odl' bug_tag = 'doc' # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'networking-odl' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] html_theme = 'openstackdocs' # html_static_path = ['static'] # Output file base name for HTML help builder. 
htmlhelp_basename = '%sdoc' % project html_last_updated_fmt = '%Y-%m-%d %H:%M' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None} latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/configuration/0000755000175000017500000000000000000000000024610 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/configuration/index.rst0000644000175000017500000000100300000000000026443 0ustar00jamespagejamespage00000000000000.. _configuring: ======================= Configuration Reference ======================= This section provides configuration options for networking-odl, that needs to be set in addition to neutron configuration, for all other configuration examples like neutron.conf and ml2_conf.ini, neutron repo can be referred. .. show-options:: ml2_odl Configuration Samples --------------------- This section provides sample configuration file ml2_conf_odl.ini .. 
toctree:: :maxdepth: 1 samples/ml2_odl.rst ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/configuration/samples/0000755000175000017500000000000000000000000026254 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/configuration/samples/ml2_odl.rst0000644000175000017500000000027000000000000030335 0ustar00jamespagejamespage00000000000000======================= Sample ml2_conf_odl.ini ======================= This is sample for ml2_conf_odl.ini. .. literalinclude:: ../../../../etc/neutron/plugins/ml2/ml2_conf_odl.ini ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/0000755000175000017500000000000000000000000024313 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/contributing.rst0000644000175000017500000000011600000000000027552 0ustar00jamespagejamespage00000000000000============ Contributing ============ .. include:: ../../../CONTRIBUTING.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/drivers_architecture.rst0000644000175000017500000001646500000000000031301 0ustar00jamespagejamespage00000000000000ODL Drivers Architecture ======================== This document covers architectural concepts of the ODL drivers. Although 'driver' is an ML2 term, it's used widely in ODL to refer to any implementation of APIs. Any mention of ML2 in this document is solely for reference purposes. 
V1 Driver Overview (Removed in Rocky) ------------------------------------- Note: This architecture has been deprecated in Queens and removed in Rocky. The documentation is kept as a reference to understand the necessity of a different architecture. The first driver version was a naive implementation which synchronously mirrored all calls to the ODL controller. For example, a create network request would first get written to the DB by Neutron's ML2 plugin, and then the ODL driver would send the request to POST the network to the ODL controller. Although this implementation is simple, it has a few problems: * ODL is not really synchronous, so if the REST call succeeds it doesn't mean the action really happened on ODL. * The "synchronous" call can be a bottleneck under load. * Upon failure the V1 driver would try to "full sync" the entire Neutron DB over on the next call, so the next call could take a very long time. * It doesn't really handle race conditions: - For example, create subnet and then create port could be sent in parallel by the driver in an HA Neutron environment, causing the port creation to fail. - Full-sync could possibly recreate deleted resources if the deletion happens in parallel. .. _v2_design: V2 Driver Design ---------------- The V2 driver set upon to tackle problems encountered in the V1 driver while maintaining feature parity. The major design concept of the V2 driver is *journaling* - instead of passing the calls directly to the ODL controller, they get registered in the journal table which keeps a sort of queue of the various operations that occurred on Neutron and should be mirrored to the controller. The journal is processed mainly by a journaling thread which runs periodically and checks if the journal table has any entries in need of processing. Additionally the thread is triggered in the postcommit hook of the operation (where applicable). 
If we take the example of create network again, after it gets stored in the Neutron DB by the ML2 plugin, the ODL driver stores a "journal entry" representing that operation and triggers the journaling thread to take care of the entry. The journal entry is recorded in the pre-commit phase (whenever applicable) so that in case of a commit failure the journal entry gets aborted along with the original operation, and there's nothing extra needed. The *get_resources_for_full_sync* method is defined in the ResourceBaseDriver class, it fetches all the resources needed for full sync, based on resource type. To override the default behaviour of *get_resources_for_full_sync* define it in driver class, For example L2 gateway driver needs to provide customized method for filtering of fetched gateway connection information from database. Neutron defines *l2_gateway_id* for a l2 gateway connection but ODL expects *gateway_id*, these kind of pre or post processing can be done in this method. Journal Entry Lifecycle ----------------------- The first state in which a journal entry is created is the 'pending' state. In this state, the entry is awaiting a thread to pick it up and process it. Multiple threads can try to grab the same journal entry, but only one will succeed since the "selection" is done inside a 'select for update' clause. Special care is taken for GaleraDB since it reports a deadlock if more than one thread selects the same row simultaneously. Once an entry has been selected it will be put into the 'processing' state which acts as a lock. This is done in the same transaction so that in case multiple threads try to "lock" the same entry only one of them will succeed. When the winning thread succeeds it will continue with processing the entry. The first thing the thread does is check for dependencies - if the entry depends on another one to complete. If a dependency is found, the entry is put back into the queue and the thread moves on to the next entry. 
When there are no dependencies for the entry, the thread analyzes the operation that occurred and performs the appropriate call to the ODL controller. The call is made to the correct resource or collection and the type of call (PUT, POST, DELETE) is determined by the operation type. At this point if the call was successful (i.e. got a 200 class HTTP code) the entry is marked 'completed'. In case of a failure the thread determines if this is an expected failure (e.g. network connectivity issue) or an unexpected failure. For unexpected failures a counter is raised, so that a given entry won't be retried more than a given amount of times. Expected failures don't change the counter. If the counter exceeds the configured amount of retries, the entry is marked as 'failed'. Otherwise, the entry is marked back as 'pending' so that it can later be retried. Full Sync & Recovery -------------------- .. code:: python file: networking_odl/journal/base_driver.py ALL_RESOURCES = {} class ResourceBaseDriver(object): # RESOURCES is dictionary of resource_type and resource_suffix to # be defined by the drivers class. RESOURCES = {} def __init__(self, plugin_type, *args, **kwargs): super(ResourceBaseDriver, self).__init__(*args, **kwargs) self.plugin_type = plugin_type # All the common methods to be used by full sync and recovery # specific to driver. # Only driver is enough for all the information. Driver has # plugin_type for fetching the information from db and resource # suffix is available through driver.RESOURCES. 
for resource, resource_suffix in self.RESOURCES.items(): ALL_RESOURCES[resource] = self def get_resource_for_recovery(self, resource_type, resource_id): # default definition to be used, if get_resource method is not # defined then this method gets called by recovery def get_resources_for_full_sync(self, resource_type): # default definition to be used, if get_resources method is not # defined then this method gets called by full sync @staticmethod def get_method_name_by_resource_suffix(method_suffix): # Returns method name given resource suffix @staticmethod def get_method(plugin, method_name): # Returns method for a specific plugin file: networking_odl//.py class XXXXDriver(ResourceBaseDriver, XXXXDriverBase): RESOURCES = { odl_const.XXXX: odl_const.XXXY, odl_const.XXXY: odl_const.XXYY } def __init__(self, *args, **kwargs): super(XXXXDriver, self)(plugin_type, *args, **kwargs) # driver specific things # get_resources_for_full_sync and get_resource_for_recovery methods are # optional and they have to be defined, if customized behaviour is # required. If these methods are not defined in the driver then default # methods defined in ResourceBaseDriver is used. def get_resources_for_full_sync(self, resource_type): # returns resource for full sync def get_resource_for_recovery(self, resource_type, resource_id): # returns resource for recovery ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/hostconfig.rst0000644000175000017500000001325000000000000027211 0ustar00jamespagejamespage00000000000000Host Configuration ================== Overview -------- ODL is agentless configuration. In this scenario Host Configuration is used to specify the physical host type and other configurations for the host system. This information is populated by the Cloud Operator is in OVSDB in Open_vSwitch configuration data in the external_ids field as a key value pair. 
This information is then read by ODL and made available to networking-odl through REST API. Networking-odl populates this information in agent_db in Neutron and is then used by Neutron scheduler. This information is required for features like Port binding and Router scheduling. Refer to this link for detailed design for this feature. https://docs.google.com/presentation/d/1kq0elysCDEmIWs3omTi5RoXTSBbrewn11Je2d26cI4M/edit?pref=2&pli=1#slide=id.g108988d1e3_0_6 Related ODL changes: https://git.opendaylight.org/gerrit/#/c/36767/ https://git.opendaylight.org/gerrit/#/c/40143/ Host Configuration fields ------------------------- - **host-id** This represents host identification string. This string will be stored in external_ids field with the key as odl_os_hostconfig_hostid. Refer to Neutron config definition for host field for details on this field. https://docs.openstack.org/kilo/config-reference/content/section_neutron.conf.html - **host-type** The field is for type of the node. This value corresponds to agent_type in agent_db. Example value are "ODL L2" and "ODL L3" for Compute and Network node respectively. Same host can be configured to have multiple configurations and can therefore can have both L2, L3 and other configurations at the same time. This string will be populated by ODL based on the configurations available on the host. See example in section below. - **config** This is the configuration data for the host type. Since same node can be configured to store multiple configurations different external_ids key value pair are used to store these configuration. The external_ids with keys as odl_os_hostconfig_config_odl_XXXXXXXX store different configurations. 8 characters after the suffix odl_os_hostconfig_config_odl are host type. ODL extracts these characters and store that as the host-type fields. For example odl_os_hostconfig_config_odl_l2, odl_os_hostconfig_config_odl_l3 keys are used to provide L2 and L3 configurations respectively. 
ODL will extract "ODL L2" and "ODL L3" as host-type field from these keys and populate host-type field. Config is a Json string. Some examples of config: OVS configuration example:: {"supported_vnic_types": [{ "vnic_type": "normal", "vif_type": "ovs", "vif_details": "{}" }] "allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"]", "bridge_mappings": {"physnet1":"br-ex"} }" OVS SR-IOV Hardware Offload configuration example:: {"supported_vnic_types": [{ "vnic_type": "normal", "vif_type": "ovs", "vif_details": "{}"}, {"vnic_type": "direct", "vif_type": "ovs", "vif_details": "{}"} }] "allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"]", "bridge_mappings": {"physnet1":"br-ex"} }" OVS_DPDK configuration example:: {"supported_vnic_types": [{ "vnic_type": "normal", "vif_type": "vhostuser", "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "vhu", # Assumption: /var/run mounted as tmpfs "vhostuser_socket_dir": "/var/run/openvswitch", "vhostuser_ovs_plug": True, "vhostuser_mode": "client", "vhostuser_socket": "/var/run/openvswitch/vhu$PORT_ID"} }] "allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"]", "bridge_mappings": {"physnet1":"br-ex"} }" VPP configuration example:: { {"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "vhostuser", "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "socket_", "vhostuser_socket_dir": "/tmp", "vhostuser_ovs_plug": True, "vhostuser_mode": "server", "vhostuser_socket": "/tmp/socket_$PORT_ID" }}], "allowed_network_types": ["local", "flat", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}} **Host Config URL** Url : https://ip:odlport/restconf/operational/neutron:neutron/hostconfigs/ **Commands to setup host config in OVSDB** :: export OVSUUID=$(ovs-vsctl get Open_vSwitch . 
_uuid) ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_hostid=test_host ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_config_odl_l2 = "{"supported_vnic_types": [{"vnic_type": "normal", "vif_type": "ovs", "vif_details": {} }], "allowed_network_types": ["local"], "bridge_mappings": {"physnet1":"br-ex"}}" Example for host configuration ------------------------------- :: { "hostconfigs": { "hostconfig": [ { "host-id": "test_host1", "host-type": "ODL L2", "config": "{"supported_vnic_types": [{ "vnic_type": "normal", "vif_type": "ovs", "vif_details": {} }] "allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"], "bridge_mappings": {"physnet1":"br-ex"}}" }, { "host-id": "test_host2", "host-type": "ODL L3", "config": {} }] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/index.rst0000644000175000017500000000150700000000000026157 0ustar00jamespagejamespage00000000000000Contributor Guide ================= In the Developer/Contributor Guide, you will find information on networking-odl's lower level design and implementation details. We will cover only essential details related to just networking-odl and we won't repeat neutron devref here, for details in neutron, neutron's devref can be checked: https://docs.openstack.org/neutron/latest/contributor/index.html For details regarding OpenStack Neutron's Api: https://docs.openstack.org/api-ref/network/ Contributor's Reference ----------------------- .. toctree:: :maxdepth: 2 testing drivers_architecture maintenance usage contributing specs/index Tutorial -------- .. toctree:: :maxdepth: 2 quickstart.rst Networking OpenDayLight Internals --------------------------------- .. 
toctree:: :maxdepth: 2 hostconfig ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/maintenance.rst0000644000175000017500000000303100000000000027324 0ustar00jamespagejamespage00000000000000Journal Maintenance =================== Overview -------- The V2 ODL driver is Journal based [#]_, which means that there's a journal of entries detailing the various operations done on a Neutron resource. The driver has a thread which is in charge of processing the journal of operations which entails communicating the operation forward to the ODL controller. The journal entries can wind up in several states due to various reasons: * PROCESSING - Stale lock left by a thread due to thread dying or other error * COMPLETED - After the operation is processed successfully * FAILED - If there was an unexpected error during the operation These journal entries need to be dealt with appropriately, hence a maintenance thread was introduced that takes care of journal maintenance and other related tasks. This thread runs in a configurable interval and is HA safe using a shared state kept in the DB. Currently the maintenance thread performs: * Stale lock release * Completed entries clean up * Failed entries are handled by the recovery mechanism * Full sync detect when ODL is "tabula rasa" and syncs all the resources to it Creating New Maintenance Operations ----------------------------------- Creating a new maintenance operation is as simple as writing a function that receives the database session object and registering it using a call to:: MaintenanceThread.register_operation The best place to do so would be at the _start_maintenance_thread method of the V2 OpenDaylightMechanismDriver class. .. [#] See :ref:`v2_design` for details. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/quickstart.rst0000644000175000017500000002334500000000000027246 0ustar00jamespagejamespage00000000000000.. _quickstart: ===================== Developer Quick-Start ===================== This is a quick walkthrough to get you started developing code for networking-odl. This assumes you are already familiar with submitting code reviews to an OpenStack project. .. see also:: https://docs.openstack.org/infra/manual/developers.html Setup Dev Environment ===================== Install OS-specific prerequisites:: # Ubuntu/Debian 14.04: sudo apt-get update sudo apt-get install python-dev libssl-dev libxml2-dev curl \ libmysqlclient-dev libxslt1-dev libpq-dev git \ libffi-dev gettext build-essential # CentOS/RHEL 7.2: sudo yum install python-devel openssl-devel mysql-devel curl \ libxml2-devel libxslt-devel postgresql-devel git \ libffi-devel gettext gcc # openSUSE/SLE 12: sudo zypper --non-interactive install git libffi-devel curl \ libmysqlclient-devel libopenssl-devel libxml2-devel \ libxslt-devel postgresql-devel python-devel \ gettext-runtime Install pip:: curl -s https://bootstrap.pypa.io/get-pip.py | sudo python Install common prerequisites:: sudo pip install virtualenv flake8 tox testrepository git-review You may need to explicitly upgrade virtualenv if you've installed the one from your OS distribution and it is too old (tox will complain). You can upgrade it individually, if you need to:: sudo pip install -U virtualenv Networking-odl source code should be pulled directly from git:: # from your home or source directory cd ~ git clone https://opendev.org/openstack/networking-odl cd networking-odl For installation of networking-odl refer to :doc:`/install/index`. For testing refer to :doc:`Testing ` guide. 
Verifying Successful Installation ================================== There are some checks you can run quickly to verify that networking-odl has been installed successfully. #. Neutron agents must be in running state, if you are using pseudo-agent for port binding then output of **openstack network agent list** should be something like:: ubuntu@ubuntu-14:~/devstack$ openstack network agent list +----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+ | ID | Agent Type | Host | Availability Zone | Alive | State | Binary | +----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+ | 00628905-6550-43a5-9cda- | ODL L2 | ubuntu-14 | None | True | UP | neutron-odlagent- | | 175a309ea538 | | | | | | portbinding | | 37491134-df2a- | DHCP agent | ubuntu-14 | nova | True | UP | neutron-dhcp-agent | | 45ab-8373-e186154aebee | | | | | | | | 8e0e5614-4d68-4a42-aacb- | Metadata agent | ubuntu-14 | None | True | UP | neutron-metadata-agent | | d0a10df470fb | | | | | | | +----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+ Your output of this command may vary depending on the your environment, for example hostname etc. #. You can check that opendaylight is running by executing following command:: ubuntu@ubuntu-14:~/devstack$ ps -eaf | grep opendaylight Launching Instance and floating IP ================================== #. Gather paramters required for launching instance. We need flavor Id, image Id and network id, following comand can be used for launching an instance:: openstack server create --flavor --image \ --nic net-id= --security-group \ \ For details on creating instances refer to [#third]_ and [#fourth]_. #. 
Attaching floating IPs to created server can be done by following command:: openstack server add floating ip For details on attaching floating IPs refer to [#fifth]_. Useful Commands ================ #. For verifying status try following command:: ubuntu@ubuntu-14:/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./karaf status You should receive following output:: Running ... #. You can login using available client:: ubuntu@ubuntu-14:/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./client You will receive output in following format:: Logging in as karaf 3877 [sshd-SshClient[6dbb137d]-nio2-thread-3] WARN org.apache.sshd.client.keyverifier.AcceptAllServerKeyVerifier - Server at [/0.0.0.0:8101, RSA, 56:41:48:1c:38:3b:73:a8:a5:96:8e:69:a5:4c:93:e0] presented unverified {} key: {} ________ ________ .__ .__ .__ __ \_____ \ ______ ____ ____ \______ \ _____ ___.__.| | |__| ____ | |___/ |_ / | \\____ \_/ __ \ / \ | | \\__ \< | || | | |/ ___\| | \ __\ / | \ |_> > ___/| | \| ` \/ __ \\___ || |_| / /_/ > Y \ | \_______ / __/ \___ >___| /_______ (____ / ____||____/__\___ /|___| /__| \/|__| \/ \/ \/ \/\/ /_____/ \/ Hit '' for a list of available commands and '[cmd] --help' for help on a specific command. Hit '' or type 'system:shutdown' or 'logout' to shutdown OpenDaylight. Now you can run commands as per your for example:: opendaylight-user@root>subnet-show No SubnetOpData configured. 
Following subnetId is present in both subnetMap and subnetOpDataEntry Following subnetId is present in subnetMap but not in subnetOpDataEntry Uuid [_value=2131f292-732d-4ba4-b74e-d70c07eceeb4] Uuid [_value=7a03e5d8-3adb-4b19-b1ec-a26691a08f26] Uuid [_value=7cd269ea-e06a-4aa3-bc11-697d71be4cbd] Uuid [_value=6da591bc-6bba-4c8a-a12b-671265898c4f] Usage 1: To display subnetMaps for a given subnetId subnet-show --subnetmap [] Usage 2: To display subnetOpDataEntry for a given subnetId subnet-show --subnetopdata [] To get help on some command:: opendaylight-user@root>help feature COMMANDS info Shows information about selected feature. install Installs a feature with the specified name and version. list Lists all existing features available from the defined repositories. repo-add Add a features repository. repo-list Displays a list of all defined repositories. repo-refresh Refresh a features repository. repo-remove Removes the specified repository features service. uninstall Uninstalls a feature with the specified name and version. version-list Lists all versions of a feature available from the currently available repositories. There are other helpful commands, for example, log:tail, log:set, shutdown to get tail of logs, set log levels and shutdown. 
For checking neutron bundle is installed:: opendaylight-user@root>feature:list -i | grep neutron odl-neutron-service | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API odl-neutron-northbound-api | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Northbound odl-neutron-spi | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API odl-neutron-transcriber | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Implementation odl-neutron-logger | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Logger For checking netvirt bundle is installed:: opendaylight-user@root>feature:list -i | grep netvirt odl-netvirt-api | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: api odl-netvirt-impl | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: impl odl-netvirt-openstack | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: OpenStack #. For exploration of API's following links can be used:: API explorer: http://localhost:8080/apidoc/explorer Karaf: http://localhost:8181/apidoc/explorer/index.html Detailed information can be found [#sixth]_. .. rubric:: References .. [#third] https://docs.openstack.org/mitaka/install-guide-rdo/launch-instance-selfservice.html .. [#fourth] https://docs.openstack.org/draft/install-guide-rdo/launch-instance.html .. [#fifth] https://docs.openstack.org/user-guide/cli-manage-ip-addresses.html .. 
[#sixth] https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf_API_Explorer ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/0000755000175000017500000000000000000000000025430 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/index.rst0000644000175000017500000000050200000000000027266 0ustar00jamespagejamespage00000000000000.. networking-odl specs documentation index ============== Specifications ============== Pike specs ========== .. toctree:: :glob: :maxdepth: 1 pike/* Ocata specs =========== .. toctree:: :glob: :maxdepth: 1 ocata/* Newton specs ============ .. toctree:: :glob: :maxdepth: 1 newton/* ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/newton/0000755000175000017500000000000000000000000026742 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/newton/qos-driver.rst0000644000175000017500000001137300000000000031574 0ustar00jamespagejamespage00000000000000========================================== Quality of Service Driver for OpenDaylight ========================================== This spec describes the plan to implement quality of service driver for OpenDaylight Controller. Problem Statement ================= OpenStack networking project (neutron [1]) have a extension plugin implemented and which expose api for quality of service that can be also be implemented by any backend networking service provider to support QoS. 
These APIs provide a way to integrate OpenStack Neutron QoS with any of the backend QoS providers. OpenDaylight will provide backend for existing functionalities in neutron-QoS. A notification driver is needed for integration of existing api in Openstack neutron for QoS with OpenDaylight backend. Proposed Change =============== This change will introduce a new notification driver in networking-odl that will take CRUD requests data for QoS policies from OpenStack neutron and notify the OpenDaylight controller about the respective operation. Detailed Design =============== To enable the formal end to end integration between OpenStack QoS and OpenDaylight requires an networking-odl QoS notification driver. QoS driver will act as a shim layer between OpenStack and OpenDaylight that will carry out following task: #. After getting QoS policy request data from neutron, It will log a operation request in opendaylightjournal table. #. The operation will be picked from opendaylightjournal table and a rest call for notifying OpenDaylight server will be prepared and sent. #. This request will processed by neutron northbound in OpenDaylight. The OpenDaylight neutron northbound project. These models will be based on the existing neutron qos plugin APIs. QoS providers in OpenDaylight can listen to these OpenDaylight Neutron Northbound QoS models and translate it to their specific yang models for QoS. 
The following diagram shows the high level integration between OpenStack and the OpenDaylight QoS provider:: +---------------------------------------------+ | OpenStack Network Server (neutron qos) | | | | +---------------------+ | | | networking-odl | | | | | | | | +---------------| | | | | Notification | | | | | driver QoS | | +----------------------|----------------------+ | | Rest Communication | OpenDaylight Controller | +-----------------------|------------+ | +----------V----+ | | ODL | QoS Yang Model| | | Northbound | | | | (neutron) +---------------+ | | | | | | | | ODL +----V----+ | | Southbound | QoS | | | (neutron) +---------+ | +-----------------|------------------+ | | +------------------------------------+ | Network/OVS | | | +------------------------------------+ In the above diagram, the OpenDaylight components are shown just to understand the overall architecture, but it's out of scope of this spec's work items. This spec will only track progress related to networking-odl notification QoS driver work. Dependencies ============ It has a dependency on OpenDaylight Neutron Northbound QoS yang models, but that is out of scope of this spec. Impact ====== None Assignee(s) =========== Following developers will be the initial contributor to the driver, but we will be happy to have more contributor on board. 
* Manjeet Singh Bhatia (manjeet.s.bhatia@intel.com, irc: manjeets) References ========== * [1] https://docs.openstack.org/neutron/latest/contributor/internals/quality_of_service.html * [2] https://wiki.opendaylight.org/view/NeutronNorthbound:Main ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/newton/sfc-driver.rst0000644000175000017500000001516100000000000031544 0ustar00jamespagejamespage00000000000000================================================= Service Function Chaining Driver for OpenDaylight ================================================= This spec describes the plan to implement OpenStack networking-sfc[1] driver for OpenDaylight Controller. Problem Statement =================== OpenStack SFC project (networking-sfc [1]) exposes generic APIs[2] for Service Function Chaining (SFC) that can be implemented by any backend networking service provider to support SFC. These APIs provide a way to integrate OpenStack SFC with any of the backend SFC providers. OpenDaylight SFC project provides a very mature implementation of SFC [3], but currently there is no formal integration mechanism present to consume OpenDaylight as an SFC provider for networking-sfc. Recently Tacker project [4] has been approved as an official project in OpenStack, that opens many possibilities to realize the NFV use cases (e.g SFC) using OpenStack as a platform. Providing a formal end to end integration between OpenStack and OpenDaylight for SFC use case will help NFV users leverage OpenStack, Tacker and OpenDaylight as a solution. A POC for this integration work has already been implemented [5][6] by Tim Rozet, but in this POC work, Tacker directly communicates to OpenDaylight SFC & classifier providers and not through OpenStack SFC APIs (networking-sfc). 
Proposed Change =============== Implementation of this spec will introduce a networking-sfc[1] driver for OpenDaylight Controller in networking-odl project that will pass through the networking-sfc API's call to the OpenDaylight Controller. Detailed Design =============== To enable the formal end to end integration between OpenStack SFC and OpenDaylight requires an SFC driver for OpenDaylight. ODL SFC driver will act as a shim layer between OpenStack and OpenDaylight that will carry out following two main tasks: * Translation of OpenStack SFC Classifier API to ODL SFC classifier yang models**. * Translation of OpenStack SFC API's to OpenDaylight Neutron Northbound SFC models** [8]. ** This work is not yet done, but the OpenDaylight neutron northbound project needs to come up with yang models for SFC classification/chain. These models will be based on the existing networking-sfc APIs. This work is out of scope of networking-odl work and will be collaborated in the scope of OpenDaylight Neutron Northbound project. SFC providers (E.g Net-Virt, GBP, SFC ) in OpenDaylight can listen to these OpenDaylight Neutron Northbound SFC models and translate it to their specific yang models for classification/sfc. The following diagram shows the high level integration between OpenStack and the OpenDaylight SFC provider:: +---------------------------------------------+ | OpenStack Network Server (networking-sfc) | | +-------------------+ | | | networking-odl | | | | SFC Driver | | | +-------------------+ | +----------------------|----------------------+ | REST Communication | ----------------------- OpenDaylight Controller | | +-----------------------|-----------------------|---------------+ | +----------v----+ +---v---+ | | Neutron | SFC Classifier| |SFC | Neutron | | Northbound | Models | |Models | Northbound| | Project +---------------+ +-------+ Project | | / \ | | | / \ | | | / \ | | | +-----V--+ +---V----+ +---V---+ | | |Net-Virt| ... | GBP | | SFC | ... 
| | +---------+ +--------+ +-------+ | +-----------|----------------|------------------|---------------+ | | | | | | +-----------V----------------V------------------V---------------+ | Network/OVS | | | +---------------------------------------------------------------+ In the above architecture, the opendaylight components are shown just to understand the overall architecture, but it's out of scope of this spec's work items. This spec will only track progress related to networking-odl OpenStack sfc driver work. Given that OpenStack SFC APIs are port-pair based API's and OpenDaylight SFC API's are based on IETF SFC yang models[8], there might be situations where translation might requires API enhancement from OpenStack SFC. Networking SFC team is open for these new enhancement requirements given that they are generic enough to be leveraged by other backend SFC providers[9]. This work will be leveraging the POC work done by Tim [10] to come up with the first version of SFC driver. Dependencies ============ It has a dependency on OpenDaylight Neutron Northbound SFC classifier and chain yang models, but that is out of scope of this spec. Impact ====== None Assignee(s) =========== Following developers will be the initial contributor to the driver, but we will be happy to have more contributor on board. 
* Anil Vishnoi (vishnoianil@gmail.com, irc: vishnoianil) * Tim Rozet (trozet@redhat.com, irc: trozet) References ========== [1] https://docs.openstack.org/networking-sfc/latest/ [2] https://github.com/openstack/networking-sfc/blob/master/doc/source/contributor/api.rst [3] https://wiki.opendaylight.org/view/Service_Function_Chaining:Main [4] https://wiki.openstack.org/wiki/Tacker [5] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc [6] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc_classifier [7] https://tools.ietf.org/html/draft-ietf-netmod-acl-model-05 [8] https://wiki.opendaylight.org/view/NeutronNorthbound:Main [9] http://eavesdrop.openstack.org/meetings/service_chaining/2016/service_chaining.2016-03-31-17.00.log.html [10] https://github.com/trozet/tacker/blob/SFC_brahmaputra/tacker/sfc/drivers/opendaylight.py ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/ocata/0000755000175000017500000000000000000000000026517 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/ocata/journal-recovery.rst0000644000175000017500000001237000000000000032562 0ustar00jamespagejamespage00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ================ Journal Recovery ================ https://blueprints.launchpad.net/networking-odl/+spec/journal-recovery Journal entries in the failed state need to be handled somehow. This spec will try to address the issue and propose a solution. Problem Description =================== Currently there is no handling for Journal entries that reach the failed state. 
A journal entry can reach the failed state for several reasons, some of which are: * Reached maximum failed attempts for retrying the operation. * Inconsistency between ODL and the Neutron DB. * For example: An update fails because the resource doesn't exist in ODL. * Bugs that can lead to failure to sync up. These entries will be left in the journal table forever which is a bit wasteful since they take up some space on the DB storage and also affect the performance of the journal table. Albeit each entry has a negligible effect on it's own, the impact of a large number of such entries can become quite significant. Proposed Change =============== A "journal recovery" routine will run as part of the current journal maintenance process. This routine will scan the journal table for rows in the "failed" state and will try to sync the resource for that entry. The procedure can be best described by the following flow chart: asciiflow:: +-----------------+ | For each entry | | in failed state | +-------+---------+ | +-------v--------+ | Query resource | | on ODL (REST) | +-----+-----+----+ | | +-----------+ Resource | | Determine | exists +--Resource doesn't exist--> operation | | | type | +-----v-----+ +-----+-----+ | Determine | | | operation | | | type | | +-----+-----+ | | +------------+ | +--Create------> Mark entry <--Delete--+ | | completed | | | +----------^-+ Create/ | | Update | | | | +------------+ | +-----v-----+ +--Delete--> Mark entry | | | Determine | | | pending | | | parent | | +---------^--+ | | relation | | | | +-----+-----+ +-----v------+ | | | | Compare to +--Different--+ | | | resource | | | | in DB +--Same------------+ | +------------+ | | +-------------------+ | | Create entry for <-----Has no parent------+ | resource creation | | +--------^----------+ Has a parent | | | +---------v-----+ +------Parent exists------+ Query parent | | on ODL (REST) | +---------+-----+ +------------------+ | | Create entry for <---Parent doesn't exist--+ | parent 
creation | +------------------+ For every error during the process the entry will remain in failed state but the error shouldn't stop processing of further entries. The implementation could be done in two phases where the parent handling is done in a second phase. For the first phase, if we detect an entry that is in the failed state for a create/update operation and the resource doesn't exist on ODL, we create a new "create resource" journal entry for the resource. This proposal utilises the journal mechanism for its operation while the only part that deviates from the standard mode of operation is when it queries ODL directly. This direct query has to be done to get ODL's representation of the resource. Performance Impact ------------------ The maintenance thread will have another task to handle. This can lead to longer processing time and even cause the thread to skip an iteration. This is not an issue since the maintenance thread runs in parallel and doesn't directly impact the responsiveness of the system. Since most operations here involve I/O, CPU probably won't be impacted. Network traffic would be impacted slightly since we will attempt to fetch the resource each time from ODL and we might attempt to fetch its parent. This is however negligible as we do this only for failed entries, which are expected to appear rarely. Alternatives ------------ The partial sync process could make this process obsolete (along with full sync), but it's a far more complicated and problematic process. It's better to start with this process which is more lightweight and doable and consider partial sync in the future. 
Assignee(s) =========== Primary assignee: mkolesni Other contributors: None References ========== https://goo.gl/IOMpzJ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/pike/0000755000175000017500000000000000000000000026360 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/pike/dep-validations-on-create.rst0000644000175000017500000001171700000000000034057 0ustar00jamespagejamespage00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ================================ Dependency Validations on Create ================================ https://blueprints.launchpad.net/networking-odl/+spec/dep-validations-on-create Right now V2 driver entry dependency validations happen when a journal entry is picked for processing. This spec proposes that this be moved to entry creation time, in order to have a clear understanding of the entry dependencies and conserve journal resources. Problem Description =================== Dependency validations are necessary in the V2 driver because each operation gets recorded in a journal entry and sent to ODL asynchronously. Thus, a consecutive operation might be sent to ODL before the first one finishes, while relying on the first operation. For example, when a subnet gets created it references a network, but if the network was created right before the subnet was then the subnet create shouldn't be sent over until the network create was sent. Currently these checks are performed each time an entry is selected for processing - if the entry passes the dependency checks then it gets processed and if the dependency check fails (i.e. 
finds a previous unhandled entry that needs to execute before this one) then the entry gets sent back to the queue. Generally this is not optimal for several reasons: * No clear indication of relations between the entries. * The logic is hidden in the code and there's no good way to know why an entry fails a dependency check. * Difficult to debug in case of problems. * Difficult to spot phenomena such as a cyclic dependency. * Wasted CPU effort. * An entry can be checked multiple times for dependencies. * Lots of redundant DB queries to determine dependencies each time. Proposed Change =============== The proposed solution is to move the dependency calculation to entry creation time. When a journal entry is created the dependency management system will calculate the dependencies on other entries (similarly to how it does now) and if there are journal entries the new entry should depend on, their IDs will be inserted into a link table. Thus, when the journal looks for an entry to pick up it will only look for entries that do not depend on any other entry, by making sure the entry has no rows in the dependency table. When a journal entry is done processing (either successfully or by reaching the failed state), the dependency links will be removed from the dependency table so that dependent rows can be processed. The proposed table:: +------------------------+ | odl_journal_dependency | +------------------------+ | parent_id | | dependent_id | +------------------------+ The table columns will be foreign keys to the seqnum column in the journal table. The constraints will be defined as "ON DELETE CASCADE" so that when a journal entry is removed any possible rows will be removed as well. The primary key will be made from both columns of the table as this is a link table and not an actual entity. 
If we face DB performance issues (highly unlikely, since this table should normally have a very small amount of rows if any at all) then an index can be constructed on the dependent_id column. The dependency management mechanism will locate parent entries for the given entry and will populate the table so that the parent entry's seqnum will be set as the parent_id, and the dependent entry id will be set as dependent_id. When the journal picks up an entry for processing it will condition it on not having any rows with the parent_id in the dependency table. This will ensure that dependent rows get handled after the parent rows have finished processing. Performance Considerations ========================== Generally the performance shouldn't be impacted as we're moving the part of code that does dependency calculations from the entry selection time to entry creation time. This will assure that dependency calculations happen only once per journal entry. However, some simple benchmarks should be performed before & after the change: * Average Tempest run time. * Average CPU consumption on Tempest. * Full sync run time (Start to finish of all entries). If performance suffers a severe degradation then we should consider alternative solutions. Questions ========= Q: Should entries in "failed" state block other entries? A: Currently "failed" rows are not considered as blocking for dependency validations, but we might want to change this as it makes little sense to process a dependent entry that failed processing. Q: How will this help debug-ability? A: It will be easy to query the table contents at any time to figure out which entries depend on which other entries. Q: How will we be able to spot cyclic dependencies? A: Currently this isn't planned as part of the spec, but a DB query (or a series of them) can help determine if this problem exists. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/specs/pike/neutron-port-dhcp.rst0000644000175000017500000002414600000000000032511 0ustar00jamespagejamespage00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode ====================================================================== Neutron Port Allocation per Subnet for OpenDaylight DHCP Proxy Service ====================================================================== This spec describes the proposal to allocate a Neutron DHCP Port just for use by OpenDaylight Controller on Subnets that are created or updated with enable-dhcp to True. When in OpenDaylight controller, the "controller-dhcp-enabled" configuration flag is set to true, these Neutron DHCP Ports will be used by the OpenDaylight Controller to provide DHCP Service instead of using the subnet-gateway-ip as the DHCP Server IP as it stands today. The networking-odl driver is not aware about the above OpenDaylight controller parameter configuration. When controller-dhcp-enabled configuration flag is set to false the DHCP port will be created and destroyed without causing any harm to either OpenDaylight controller or networking-odl driver. Problem Statement ================= The DHCP service within OpenDaylight currently assumes availability of the subnet gateway IP address. The subnet gateway ip is not a mandatory parameter for an OpenStack subnet, and so it might not be available from OpenStack orchestration. This renders the DHCP service in OpenDaylight to not be able to serve DHCP offers to virtual endpoints requesting for IP addresses, thereby resulting in service unavailability. Even if subnet-gateway-ip is available in the subnet, it is not a good design in OpenDaylight to hijack that ip address and use that as the DHCP Server IP Address. 
Problem - 1: L2 Deployment with 3PP gateway ------------------------------------------- There can be a deployment scenario in which an L2 network is created with no distributed Router/VPN functionality. This deployment can have a separate gateway for the network such as a 3PP LB VM, which acts as a TCP termination point and this LB VM is configured with a default gateway IP. It means all inter-subnet traffic is terminated on this VM which takes the responsibility of forwarding the traffic. But the current DHCP service in the OpenDaylight controller hijacks the gateway IP address for serving DHCP discover/request messages. If the LB is up, this can continue to work, DHCP broadcasts will get hijacked by OpenDaylight, and responses sent as PKT_OUTs with SIP = GW IP. However, if the LB is down, and the VM ARPs for the same IP as part of a DHCP renew workflow, the ARP resolution can fail, due to which the renew request will not be generated. This can cause the DHCP lease to lapse. Problem - 2: Designated DHCP for SR-IOV VMs via HWVTEP ------------------------------------------------------ In this deployment scenario, an L2 network is created with no distributed Router/ VPN functionality, and HWVTEP for SR-IOV VMs. DHCP flood requests from SR-IOV VMs (DHCP discover, request during bootup), are flooded by the HWVTEP on the L2 Broadcast domain, and punted to the controller by the designated vswitch. DHCP offers are sent as unicast responses from the Controller, which are forwarded by the HWVTEP to the VM. DHCP renews can be unicast requests, which the HWVTEP may forward to an external Gateway VM (3PP LB VM) as unicast packets. The designated vswitch will never receive these packets, and thus not be able to punt them to the controller, so renews will fail. 
Proposed Change =============== In general as part of implementation of this spec, we are introducing a new configuration parameter 'create_opendaylight_dhcp_port' whose truth value determines whether the dhcp-proxy-service within the openstack-odl framework need to be made functional. This service will be responsible for managing the create/update/delete lifecycle for a new set of Neutron DHCP Ports which will be provisioned specifically for use by the OpenDaylight Controller's existing DHCP Service Module. Detailed Design =============== Introduce a driver config parameter(create_opendaylight_dhcp_port) to determine if OpenDaylight based DHCP service is being used. Default setting for the parameter is false. When 'create_opendaylight_dhcp_port' is set to True, it triggers the networking -odl ml2 driver to hook on to OpenStack subnet resource lifecycle and use that to manage a special DHCP port per subnet for OpenDaylight Controller use. These special DHCP ports will be shipped to OpenDaylight controller, so that DHCP Service within the OpenDaylight controller can make use of these as DHCP Server ports themselves. The port will be used to service DHCP requests for virtual end points belonging to that subnet. These special DHCP Ports (one per subnet), will carry unique device-id and device-owner values. * device-owner(network:dhcp) * device-id(OpenDaylight-) OpenDaylight DHCP service will also introduce a new config parameter controller -dhcp-mode to indicate if the above DHCP port should be used for servicing DHCP requests. When the parameter is set to use-odl-dhcp-neutron-port, it is recommended to enable the create_opendaylight_dhcp_port flag for the networking -odl driver. Alternative 1 -------------- The creation of Neutron OpenDaylight DHCP port will be invoked within the OpenDaylight mechanism Driver subnet-postcommit execution. 
Any failures during the neutron dhcp port creation or allocation for the subnet should trigger failure of the subnet create operation with an appropriate failure message in logs. On success the subnet and port information will be persisted to the Journal DB and will subsequently be synced with the OpenDaylight controller. The plugin should initiate the removal of the allocated dhcp neutron port at the time of subnet delete. The port removal will be handled in a subnet-delete- post-commit execution and any failure during this process should rollback the subnet delete operation. The subnet delete operation will be allowed only when all other VMs launched on this subnet are already removed as per existing Neutron behavior. A subnet update operation configuring the DHCP state as enabled should allocate such a port if not previously allocated for the subnet. Similarly a subnet update operation configuring DHCP state to disabled should remove any previously allocated OpenDaylight DHCP neutron ports. Since the invocation of create/delete port will be synchronous within subnet post-commit, a failure to create/delete port will result in an exception being thrown which makes the ML2 Plugin fail the subnet operation and not alter the OpenStack DB. Alternative 2 ------------- The OpenDaylight Neutron DHCP Port creation/deletion is invoked asynchronously, driven by a journal entry callback for any Subnet resource state changes as part of create/update/delete. A generic journal callback mechanism will be implemented. The initial consumer of this callback would be the OpenDaylight DHCP proxy service but this could be used by other services in the future. The Neutron DHCP Port (for OpenDaylight use) creation is triggered when the subnet journal-entry is moved from PENDING to PROCESSING. On a failure of port-creation, the journal will be retained in PENDING state and the subnet itself won't be synced to the OpenDaylight controller. 
The journal-entry state is marked as COMPLETED only on successful port creation and successful synchronization of that subnet resource to OpenDaylight controller. The same behavior is applicable for subnet update and delete operations too. The subnet create/update operation that allocates an OpenDaylight DHCP port to always check if a port exists and allocate new port only if none exists for the subnet. Since the invocation of create/delete port will be within the journal callback and asynchronous to subnet-postcommit, the failure to create/delete port will result in the created (or updated) subnet to remain in PENDING state. Next journal sync of this pending subnet will again retry creation/deletion of port and this cycle will happen until either create/delete port succeeds or the subnet is itself deleted by the orchestrating tenant. This could result in piling up of journal PENDING entries for these subnets when there is an unexpected failure in create/delete DHCP port operation. It is recommended to not keep retrying the port operation and instead failures would be indicated in OpenDaylight as DHCP offers/renews will not be honored by the dhcp service within the OpenDaylight controller, for that subnet. Recommended Alternative ----------------------- All of the following cases will need to be addressed by the design. * Neutron server can crash after submitting information to DB but before invoking post-commit during a subnet create/update/delete operation. The dhcp-proxy-service should handle the DHCP port creation/deletion during such failures when the service is enabled. * A subnet update operation to disable-dhcp can be immediately followed by a subnet update operation to enable-dhcp, and such a situation should end up in creating the neutron-dhcp-port for consumption by OpenDaylight. 
* A subnet update operation to enable-dhcp can be immediately followed by a subnet update operation to disable-dhcp, and such a situation should end up in deleting the neutron-dhcp-port that was created for use by OpenDaylight. * A subnet update operation to enable-dhcp can be immediately followed by a subnet delete operation,and such a situation should end up deleting the neutron-dhcp-port that was about to be provided for use by OpenDaylight. * A subnet create operation (with dhcp enabled) can be immediately followed by a subnet update operation to disable-dhcp, and such a situation should end up in deleting the neutron-dhcp-port that was created for use by OpenDaylight. Design as per Alternative 2 meets the above cases better and is what we propose to take as the approach that we will pursue for this spec. Dependencies ============ Feature is dependent on enhancement in OpenDaylight DHCP Service as per the Spec in [1] Impact ====== None Assignee(s) =========== * Achuth Maniyedath (achuth.m@altencalsoftlabs.com) * Karthik Prasad(karthik.p@altencalsoftlabs.com) References ========== * [1] OpenDaylight spec to cover this feature https://git.opendaylight.org/gerrit/#/c/52298/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/testing.rst0000644000175000017500000000004200000000000026516 0ustar00jamespagejamespage00000000000000.. 
include:: ../../../TESTING.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/contributor/usage.rst0000644000175000017500000000013100000000000026144 0ustar00jamespagejamespage00000000000000======== Usage ======== To use networking-odl in a project:: import networking_odl ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/index.rst0000644000175000017500000000323000000000000023600 0ustar00jamespagejamespage00000000000000========================== Welcome to networking-odl! ========================== .. Team and repository tags .. only:: html .. image:: http://governance.openstack.org/badges/networking-odl.svg :target: http://governance.openstack.org/reference/tags/index.html Summary ------- OpenStack networking-odl is a library of drivers and plugins that integrates OpenStack Neutron API with OpenDaylight Backend. For example it has ML2 driver and L3 plugin to enable communication of OpenStack Neutron L2 and L3 resources API to OpenDayLight Backend. To report and discover bugs in networking-odl the following link can be used: https://bugs.launchpad.net/networking-odl Any new code submission or proposal must follow the development guidelines detailed in HACKING.rst and for further details this link can be checked: https://docs.openstack.org/networking-odl/latest/ The OpenDaylight homepage: https://www.opendaylight.org/ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/networking-odl/ The project source code repository is located at: https://opendev.org/openstack/networking-odl Installation ------------ .. toctree:: :maxdepth: 2 install/index Configuration options --------------------- .. toctree:: :maxdepth: 2 configuration/index Administration Guide -------------------- .. 
toctree:: :maxdepth: 2 admin/index Contributor Guide ----------------- .. toctree:: :maxdepth: 2 contributor/index Reference Deployment Guide -------------------------- .. toctree:: :maxdepth: 2 reference/index .. only:: html Indices and tables ------------------ * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/install/0000755000175000017500000000000000000000000023407 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/install/devstack.rst0000644000175000017500000000005200000000000025742 0ustar00jamespagejamespage00000000000000.. include:: ../../../devstack/README.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/install/index.rst0000644000175000017500000000016300000000000025250 0ustar00jamespagejamespage00000000000000Installation Guide ================== .. toctree:: :maxdepth: 2 installation DevStack plugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/install/installation.rst0000644000175000017500000001734700000000000026656 0ustar00jamespagejamespage00000000000000.. _installation: Installation ============ The ``networking-odl`` repository includes integration with DevStack that enables creation of a simple OpenDaylight (ODL) development and test environment. This document discusses what is required for manual installation and integration into a production OpenStack deployment tool of conventional architectures that include the following types of nodes: * Controller - Runs OpenStack control plane services such as REST APIs and databases. 
* Network - Provides connectivity between provider (public) and project (private) networks. Services provided include layer-3 (routing), DHCP, and metadata agents. Layer-3 agent is optional. When using netvirt (vpnservice) DHCP/metadata are optional. * Compute - Runs the hypervisor and layer-2 agent for the Networking service. ODL Installation ---------------- http://docs.opendaylight.org provides manual and general documentation for ODL Review the following documentation regardless of install scenario: * `ODL installation `_. * `OpenDaylight with OpenStack `_. Choose and review one of the following installation scenarios: * `GBP with OpenStack `_. OpenDaylight Group Based Policy allows users to express network configuration in a declarative rather than imperative way. Often described as asking for "what you want", rather than "how you can do it", Group Based Policy achieves this by implementing an Intent System. The Intent System is a process around an intent driven data model and contains no domain specifics but is capable of addressing multiple semantic definitions of intent. * `OVSDB with OpenStack `_. OpenDaylight OVSDB allows users to take advantage of Network Virtualization using OpenDaylight SDN capabilities whilst utilizing OpenvSwitch. The stack includes a Neutron Northbound, a Network Virtualization layer, an OVSDB southbound plugin, and an OpenFlow southbound plugin. * `VTN with OpenStack `_. OpenDaylight Virtual Tenant Network (VTN) is an application that provides multi-tenant virtual network on an SDN controller. VTN Manager is implemented as one plugin to the OpenDaylight controller and provides a REST interface to create/update/delete VTN components. It provides an implementation of Openstack L2 Network Functions API. Networking-odl Installation --------------------------- .. code-block:: console # sudo pip install networking-odl .. note:: pip need to be installed before running above command. 
Networking-odl Configuration ---------------------------- All related neutron services need to be restarted after configuration change. #. Configure Openstack neutron server. The neutron server implements ODL as an ML2 driver. Edit the ``/etc/neutron/neutron.conf`` file: * Enable the ML2 core plug-in. .. code-block:: ini [DEFAULT] ... core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin * (Optional) Enable ODL L3 router, if QoS feature is desired, then qos should be appended to service_plugins .. code-block:: ini [DEFAULT] ... service_plugins = odl-router_v2 #. Configure the ML2 plug-in. Edit the ``/etc/neutron/plugins/ml2/ml2_conf.ini`` file: * Configure the ODL mechanism driver, network type drivers, self-service (tenant) network types, and enable extension drivers(optional). .. code-block:: ini [ml2] ... mechanism_drivers = opendaylight_v2 type_drivers = local,flat,vlan,vxlan tenant_network_types = vxlan extension_drivers = port_security, qos .. note:: The enabling of extension_driver qos is optional, it should be enabled if service_plugins for qos is also enabled. * Configure the vxlan range. .. code-block:: ini [ml2_type_vxlan] ... vni_ranges = 1:1000 * Optionally, enable support for VLAN provider and self-service networks on one or more physical networks. If you specify only the physical network, only administrative (privileged) users can manage VLAN networks. Additionally specifying a VLAN ID range for a physical network enables regular (non-privileged) users to manage VLAN networks. The Networking service allocates the VLAN ID for each self-service network using the VLAN ID range for the physical network. .. code-block:: ini [ml2_type_vlan] ... network_vlan_ranges = PHYSICAL_NETWORK:MIN_VLAN_ID:MAX_VLAN_ID Replace ``PHYSICAL_NETWORK`` with the physical network name and optionally define the minimum and maximum VLAN IDs. Use a comma to separate each physical network. 
For example, to enable support for administrative VLAN networks on the ``physnet1`` network and self-service VLAN networks on the ``physnet2`` network using VLAN IDs 1001 to 2000: .. code-block:: ini network_vlan_ranges = physnet1,physnet2:1001:2000 * Enable security groups. .. code-block:: ini [securitygroup] ... enable_security_group = true * Configure ML2 ODL .. code-block:: ini [ml2_odl] ... username = password = url = http://:/controller/nb/v2/neutron port_binding_controller = pseudo-agentdb-binding * Optionally, To enable ODL DHCP service in an OpenDaylight enabled cloud, set `enable_dhcp_service=True` under the `[ml2_odl]` section. It will load the openstack-odl-v2-dhcp-driver which will create special DHCP ports in neutron for use by the OpenDaylight Controller's DHCP Service. Please make sure to set `controller-dhcp-enabled = True` within the OpenDaylight Controller configuration file ``netvirt-dhcpservice-config.xml`` along with the above configuration. `OpenDaylight Spec Documentation Link: `_. .. code-block:: ini [ml2_odl] ... enable_dhcp_service = True Compute/network nodes --------------------- Each compute/network node runs the OVS services. If compute/network nodes are already configured to run with Neutron ML2 OVS driver, more steps are necessary. `OVSDB with OpenStack `_ can be referred to. #. Install the ``openvswitch`` packages. #. Start the OVS service. Using the *systemd* unit: .. code-block:: console # systemctl start openvswitch Using the ``ovs-ctl`` script: .. code-block:: console # /usr/share/openvswitch/scripts/ovs-ctl start #. Configure OVS to use ODL as a manager. .. code-block:: console # ovs-vsctl set-manager tcp:${ODL_IP_ADDRESS}:6640 Replace ``ODL_IP_ADDRESS`` with the IP address of ODL controller node #. Set host OVS configurations if port_binding_controller is pseudo-agent .. code-block:: console # sudo neutron-odl-ovs-hostconfig #. Verify the OVS service. .. code-block:: console # ovs-vsctl show .. 
note:: After setting config files, you have to restart the neutron server if you are using screen then it can be directly started from neutron-api window or you can use service neutron-server restart, latter may or may not work depending on OS you are using. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/doc/source/reference/0000755000175000017500000000000000000000000023677 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/reference/index.rst0000644000175000017500000000051400000000000025540 0ustar00jamespagejamespage00000000000000==================== Reference Deployment ==================== This document is intended to guide for versions of OpenStack and OpenDaylight components to use when OpenStack is deployed with OpenDaylight. OpenStack Version Reference --------------------------- .. toctree:: :maxdepth: 2 pike.rst ocata.rst newton.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/reference/newton.rst0000644000175000017500000000135100000000000025743 0ustar00jamespagejamespage00000000000000Newton ODL Reference ==================== .. 
contents:: OpenDaylight Components ----------------------- +-------------------------------------------------------+ | OpenDaylight Components | +===============================+=======================+ | Boron Snapshot | Yes | +-------------------------------+-----------------------+ | Carbon Snapshot | No | +-------------------------------+-----------------------+ | Nitrogen Snapshot | No | +-------------------------------+-----------------------+ | Netvirt | odl-ovsdb-openstack | +-------------------------------+-----------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/reference/ocata.rst0000644000175000017500000000157000000000000025523 0ustar00jamespagejamespage00000000000000Ocata ODL Reference =================== .. contents:: OpenDaylight Components ----------------------- With ocata legacy netvirt is recommended to use with boron snapshot. However legacy netvirt may not work properly with carbon snapshot onwards. +-------------------------------------------------------+ | OpenDaylight Componetns | +===============================+=======================+ | Boron Snapshot | Yes | +-------------------------------+-----------------------+ | Carbon Snapshot | Yes | +-------------------------------+-----------------------+ | Nitrogen Snapshot | No | +-------------------------------+-----------------------+ | Netvirt | odl-openstack-netvirt | +-------------------------------+-----------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/doc/source/reference/pike.rst0000644000175000017500000000134500000000000025364 0ustar00jamespagejamespage00000000000000Pike ODL Reference ================== .. 
contents:: OpenDaylight Components ----------------------- +-------------------------------------------------------+ | OpenDaylight Componetns | +===============================+=======================+ | Boron Snapshot | No | +-------------------------------+-----------------------+ | Carbon Snapshot | Yes | +-------------------------------+-----------------------+ | Nitrogen Snapshot | Yes | +-------------------------------+-----------------------+ | Netvirt | odl-openstack-netvirt | +-------------------------------+-----------------------+ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/etc/0000755000175000017500000000000000000000000020447 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/etc/neutron/0000755000175000017500000000000000000000000022141 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/etc/neutron/plugins/0000755000175000017500000000000000000000000023622 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/etc/neutron/plugins/ml2/0000755000175000017500000000000000000000000024314 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/etc/neutron/plugins/ml2/ml2_conf_odl.ini0000644000175000017500000000401600000000000027353 0ustar00jamespagejamespage00000000000000[DEFAULT] [ml2_odl] # # From ml2_odl # # HTTP URL of OpenDaylight REST interface. (string value) #url = # HTTP username for authentication. 
(string value) #username = # HTTP password for authentication. (string value) #password = # HTTP timeout in seconds. (integer value) #timeout = 10 # Tomcat session timeout in minutes. (integer value) #session_timeout = 30 # Sync thread timeout in seconds. (integer value) #sync_timeout = 10 # Number of times to retry a row before failing. (integer value) #retry_count = 5 # Journal maintenance operations interval in seconds. (integer value) #maintenance_interval = 300 # Time to keep completed rows (in seconds). # For performance reasons it's not recommended to change this from the default # value (0) which indicates completed rows aren't kept. # This value will be checked every maintenance_interval by the cleanup # thread. To keep completed rows indefinitely, set the value to -1 # (integer value) #completed_rows_retention = 0 # Test without real ODL. (boolean value) #enable_lightweight_testing = false # Name of the controller to be used for port binding. (string value) #port_binding_controller = pseudo-agentdb-binding # Time in seconds to wait before a processing row is # marked back to pending. (integer value) #processing_timeout = 100 # Path for ODL host configuration REST interface (string value) #odl_hostconf_uri = /restconf/operational/neutron:neutron/hostconfigs # Poll interval in seconds for getting ODL hostconfig (integer value) #restconf_poll_interval = 30 # Enable websocket for pseudo-agent-port-binding. (boolean value) #enable_websocket_pseudo_agentdb = false # Wait this many seconds before retrying the odl features fetch # (integer value) #odl_features_retry_interval = 5 # A list of features supported by ODL (list value) #odl_features = # Enables the networking-odl driver to supply special neutron ports of # "dhcp" type to OpenDaylight Controller for its use in providing DHCP # Service. 
(boolean value) #enable_dhcp_service = false ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/etc/policy.json0000644000175000017500000001465200000000000022651 0ustar00jamespagejamespage00000000000000{ "context_is_admin": "role:admin", "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", "admin_only": "rule:context_is_admin", "regular_user": "", "shared": "field:networks:shared=True", "shared_firewalls": "field:firewalls:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", "create_subnet": "rule:admin_or_network_owner", "get_subnet": "rule:admin_or_owner or rule:shared", "update_subnet": "rule:admin_or_network_owner", "delete_subnet": "rule:admin_or_network_owner", "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", "get_network:segments": "rule:admin_only", "get_network:provider:network_type": "rule:admin_only", "get_network:provider:physical_network": "rule:admin_only", "get_network:provider:segmentation_id": "rule:admin_only", "get_network:queue_id": "rule:admin_only", "create_network:shared": "rule:admin_only", "create_network:router:external": "rule:admin_only", "create_network:segments": "rule:admin_only", "create_network:provider:network_type": "rule:admin_only", "create_network:provider:physical_network": "rule:admin_only", "create_network:provider:segmentation_id": "rule:admin_only", "update_network": "rule:admin_or_owner", "update_network:segments": "rule:admin_only", "update_network:shared": "rule:admin_only", "update_network:provider:network_type": "rule:admin_only", "update_network:provider:physical_network": "rule:admin_only", 
"update_network:provider:segmentation_id": "rule:admin_only", "update_network:router:external": "rule:admin_only", "delete_network": "rule:admin_or_owner", "create_port": "", "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:binding:host_id": "rule:admin_only", "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "get_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", "get_port:binding:host_id": "rule:admin_only", "get_port:binding:profile": "rule:admin_only", "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:binding:host_id": "rule:admin_only", "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", "create_router:external_gateway_info:enable_snat": "rule:admin_only", "create_router:distributed": "rule:admin_only", "create_router:ha": "rule:admin_only", "get_router": "rule:admin_or_owner", "get_router:distributed": "rule:admin_only", "update_router:external_gateway_info:enable_snat": "rule:admin_only", "update_router:distributed": "rule:admin_only", "update_router:ha": "rule:admin_only", "delete_router": "rule:admin_or_owner", "add_router_interface": "rule:admin_or_owner", 
"remove_router_interface": "rule:admin_or_owner", "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "create_firewall": "", "get_firewall": "rule:admin_or_owner", "create_firewall:shared": "rule:admin_only", "get_firewall:shared": "rule:admin_only", "update_firewall": "rule:admin_or_owner", "update_firewall:shared": "rule:admin_only", "delete_firewall": "rule:admin_or_owner", "create_firewall_policy": "", "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", "create_firewall_policy:shared": "rule:admin_or_owner", "update_firewall_policy": "rule:admin_or_owner", "delete_firewall_policy": "rule:admin_or_owner", "create_firewall_rule": "", "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", "update_firewall_rule": "rule:admin_or_owner", "delete_firewall_rule": "rule:admin_or_owner", "create_qos_queue": "rule:admin_only", "get_qos_queue": "rule:admin_only", "update_agent": "rule:admin_only", "delete_agent": "rule:admin_only", "get_agent": "rule:admin_only", "create_dhcp-network": "rule:admin_only", "delete_dhcp-network": "rule:admin_only", "get_dhcp-networks": "rule:admin_only", "create_l3-router": "rule:admin_only", "delete_l3-router": "rule:admin_only", "get_l3-routers": "rule:admin_only", "get_dhcp-agents": "rule:admin_only", "get_l3-agents": "rule:admin_only", "get_loadbalancer-agent": "rule:admin_only", "get_loadbalancer-pools": "rule:admin_only", "create_floatingip": "rule:regular_user", "create_floatingip:floating_ip_address": "rule:admin_only", "update_floatingip": "rule:admin_or_owner", "delete_floatingip": "rule:admin_or_owner", "get_floatingip": "rule:admin_or_owner", "create_network_profile": "rule:admin_only", "update_network_profile": "rule:admin_only", "delete_network_profile": "rule:admin_only", "get_network_profiles": "", "get_network_profile": "", "update_policy_profiles": "rule:admin_only", "get_policy_profiles": 
"", "get_policy_profile": "", "create_metering_label": "rule:admin_only", "delete_metering_label": "rule:admin_only", "get_metering_label": "rule:admin_only", "create_metering_label_rule": "rule:admin_only", "delete_metering_label_rule": "rule:admin_only", "get_metering_label_rule": "rule:admin_only", "get_service_provider": "rule:regular_user", "get_lsn": "rule:admin_only", "create_lsn": "rule:admin_only" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/lower-constraints.txt0000644000175000017500000001063200000000000024134 0ustar00jamespagejamespage00000000000000alabaster==0.7.10 alembic==0.9.8 amqp==2.2.2 appdirs==1.4.3 asn1crypto==0.24.0 astroid==1.6.2 Babel==2.5.3 bandit==1.4.0 bashate==0.5.1 beautifulsoup4==4.6.0 blockdiag==1.5.3 cachetools==2.0.1 certifi==2018.1.18 ceilometer==11.0.0 cffi==1.11.5 chardet==3.0.4 cliff==2.11.0 cmd2==0.8.1 contextlib2==0.5.5 cotyledon==1.6.8 coverage==4.5.1 cryptography==2.1.4 debtcollector==1.19.0 decorator==4.2.1 deprecation==2.0 Django==2.2 django-appconf==1.0.2 django-babel==0.6.2 django-compressor==2.2 django-pyscss==2.0.2 doc8==0.8.0 docutils==0.14 dogpile.cache==0.6.5 dulwich==0.19.0 enum-compat==0.0.2 eventlet==0.20.0 exabgp==4.0.5 extras==1.0.0 fasteners==0.14.1 fixtures==3.0.0 flake8==2.6.2 flake8-import-order==0.17.1 funcparserlib==0.3.6 future==0.16.0 futurist==1.6.0 gitdb2==2.0.3 GitPython==2.1.8 greenlet==0.4.13 hacking==1.1.0 horizon==17.1.0 httplib2==0.10.3 idna==2.6 imagesize==1.0.0 iso8601==0.1.12 isort==4.3.4 Jinja2==2.10 jmespath==0.9.3 jsonpatch==1.21 jsonpath-rw==1.4.0 jsonpath-rw-ext==1.1.3 jsonpointer==2.0 jsonschema==2.6.0 kazoo==2.4.0 keystoneauth1==3.14.0 keystonemiddleware==4.21.0 kombu==4.1.0 lazy-object-proxy==1.3.1 linecache2==1.0.0 logutils==0.3.5 lxml==4.1.1 Mako==1.0.7 MarkupSafe==1.0 mccabe==0.2.1 mock==2.0.0 monotonic==1.4 mox3==0.25.0 msgpack==0.5.6 msgpack-python==0.5.6 munch==2.2.0 netaddr==0.7.19 
netifaces==0.10.6 networking-bagpipe==8.0.0 networking-l2gw==12.0.0 networking-sfc==10.0.0.0b1 networking-bgpvpn==12.0.0b1 neutron==16.0.0.0b1 neutron-lib==2.0.0 openstackdocstheme==1.30.0 openstacksdk==0.31.2 os-client-config==1.29.0 os-service-types==1.7.0 os-xenapi==0.3.1 osc-lib==1.10.0 oslo.cache==1.29.0 oslo.concurrency==3.26.0 oslo.config==5.2.0 oslo.context==2.20.0 oslo.db==4.37.0 oslo.i18n==3.20.0 oslo.log==3.37.0 oslo.messaging==5.36.0 oslo.middleware==3.35.0 oslo.policy==1.34.0 oslo.privsep==1.32.0 oslo.reports==1.27.0 oslo.rootwrap==5.13.0 oslo.serialization==2.25.0 oslo.service==1.30.0 oslo.utils==3.36.0 oslo.versionedobjects==1.35.1 oslotest==3.3.0 osprofiler==2.3.0 ovs==2.8.1 ovsdbapp==1.0.0 packaging==17.1 Paste==2.0.3 PasteDeploy==1.5.2 pbr==4.0.0 pecan==1.3.2 pep8==1.5.7 pika==0.10.0 pika-pool==0.1.3 Pillow==5.0.0 Pint==0.8.1 ply==3.11 prettytable==0.7.2 psutil==5.4.3 pyasn1==0.4.2 pyasn1-modules==0.2.1 pycadf==2.7.0 pycodestyle==2.4.0 pycparser==2.18 pycryptodomex==3.5.1 pyflakes==0.8.1 Pygments==2.2.0 pyinotify==0.9.6 pylint==1.8.3 pymongo==3.6.1 pyOpenSSL==17.5.0 pyparsing==2.2.0 pyperclip==1.6.0 pyroute2==0.5.7 pyScss==1.3.4 pysmi==0.2.2 pysnmp==4.4.4 python-barbicanclient==4.6.0 python-cinderclient==5.0.0 python-dateutil==2.7.0 python-designateclient==2.9.0 python-editor==1.0.3 python-glanceclient==2.9.1 python-keystoneclient==3.22.0 python-mimeparse==1.6.0 python-neutronclient==6.7.0 python-novaclient==10.1.0 python-subunit==1.2.0 python-swiftclient==3.5.0 pytz==2018.3 PyYAML==3.12 rcssmin==1.0.6 reno==2.7.0 repoze.lru==0.7 requests==2.18.4 requestsexceptions==1.4.0 restructuredtext-lint==1.1.3 rfc3986==1.1.0 rjsmin==1.0.12 Routes==2.4.1 semantic-version==2.6.0 seqdiag==0.9.5 setproctitle==1.1.10 simplejson==3.13.2 six==1.11.0 smmap2==2.0.3 snowballstemmer==1.2.1 Sphinx==1.6.5 sphinxcontrib-blockdiag==1.5.5 sphinxcontrib-seqdiag==0.8.5 sphinxcontrib-websupport==1.0.1 SQLAlchemy==1.2.5 sqlalchemy-migrate==0.11.0 sqlparse==0.2.4 statsd==3.2.2 
stestr==2.0.0 stevedore==1.28.0 Tempita==0.5.2 tenacity==4.9.0 testrepository==0.0.20 testresources==2.0.1 testscenarios==0.5.0 testtools==2.3.0 tinyrpc==0.8 tooz==1.61.0 traceback2==1.4.0 urllib3==1.22 vine==1.1.4 voluptuous==0.11.1 waitress==1.1.0 warlock==1.3.0 webcolors==1.8.1 WebOb==1.8.2 websocket-client==0.47.0 WebTest==2.0.29 wrapt==1.10.11 XStatic==1.0.1 XStatic-Angular==1.5.8.0 XStatic-Angular-Bootstrap==2.2.0.0 XStatic-Angular-FileUpload==12.0.4.0 XStatic-Angular-Gettext==2.3.8.0 XStatic-Angular-lrdragndrop==1.0.2.2 XStatic-Angular-Schema-Form==0.8.13.0 XStatic-Bootstrap-Datepicker==1.3.1.0 XStatic-Bootstrap-SCSS==3.3.7.1 XStatic-bootswatch==3.3.7.0 XStatic-D3==3.5.17.0 XStatic-Font-Awesome==4.7.0.0 XStatic-Hogan==2.0.0.2 XStatic-Jasmine==2.4.1.1 XStatic-jQuery==1.10.2.1 XStatic-JQuery-Migrate==1.2.1.1 XStatic-jquery-ui==1.12.0.1 XStatic-JQuery.quicksearch==2.0.3.1 XStatic-JQuery.TableSorter==2.14.5.1 XStatic-JSEncrypt==2.3.1.1 XStatic-mdi==1.4.57.0 XStatic-objectpath==1.2.1.0 XStatic-Rickshaw==1.5.0.0 XStatic-roboto-fontface==0.5.0.0 XStatic-smart-table==1.4.13.2 XStatic-Spin==1.2.5.2 XStatic-term.js==0.0.7.0 XStatic-tv4==1.2.7.0 zake==0.2.2 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.790714 networking-odl-16.0.0.0b2.dev1/networking_odl/0000755000175000017500000000000000000000000022721 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/__init__.py0000644000175000017500000000125600000000000025036 0ustar00jamespagejamespage00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext gettext.install('networking_odl') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/_i18n.py0000644000175000017500000000226400000000000024215 0ustar00jamespagejamespage00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html . 
""" import oslo_i18n DOMAIN = "networking_odl" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" # requires oslo.i18n >=2.1.0 _C = _translators.contextual_form # The plural translation function using the name "_P" # requires oslo.i18n >=2.1.0 _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl/bgpvpn/0000755000175000017500000000000000000000000024215 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/bgpvpn/__init__.py0000644000175000017500000000000000000000000026314 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/bgpvpn/odl_v2.py0000644000175000017500000001417200000000000025761 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.api.definitions import bgpvpn as bgpvpn_const from neutron_lib.api.definitions import bgpvpn_vni as bgpvpn_vni_def from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_bgpvpn.neutron.extensions import bgpvpn as bgpvpn_ext from networking_bgpvpn.neutron.services.service_drivers import driver_api from networking_odl.common import constants as odl_const from networking_odl.common import odl_features from networking_odl.common import postcommit from networking_odl.journal import full_sync from networking_odl.journal import journal cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') LOG = logging.getLogger(__name__) BGPVPN_VNI = 'bgpvpn-vni' BGPVPN_RESOURCES = { odl_const.ODL_BGPVPN: odl_const.ODL_BGPVPNS, odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION: odl_const.ODL_BGPVPN_NETWORK_ASSOCIATIONS, odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION: odl_const.ODL_BGPVPN_ROUTER_ASSOCIATIONS } @postcommit.add_postcommit('bgpvpn', 'net_assoc', 'router_assoc') class OpenDaylightBgpvpnDriver(driver_api.BGPVPNDriver): """OpenDaylight BGPVPN Driver This code is the backend implementation for the OpenDaylight BGPVPN driver for Openstack Neutron. 
""" @log_helpers.log_method_call def __init__(self, service_plugin): LOG.info("Initializing OpenDaylight BGPVPN v2 driver") super(OpenDaylightBgpvpnDriver, self).__init__(service_plugin) self.journal = journal.OpenDaylightJournalThread() full_sync.register(bgpvpn_const.ALIAS, BGPVPN_RESOURCES, self.get_resources) if odl_features.has(BGPVPN_VNI): self.more_supported_extension_aliases = [bgpvpn_vni_def.ALIAS] @staticmethod def get_resources(context, resource_type): plugin = directory.get_plugin(bgpvpn_const.ALIAS) if resource_type == odl_const.ODL_BGPVPN: obj_getter = getattr(plugin, 'get_%s' % BGPVPN_RESOURCES[resource_type]) return obj_getter(context) method_name = 'get_%s' % BGPVPN_RESOURCES[resource_type] return full_sync.get_resources_require_id(plugin, context, plugin.get_bgpvpns, method_name) @log_helpers.log_method_call def create_bgpvpn_precommit(self, context, bgpvpn): journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'], odl_const.ODL_CREATE, bgpvpn) @log_helpers.log_method_call def update_bgpvpn_precommit(self, context, bgpvpn): journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) @log_helpers.log_method_call def delete_bgpvpn_precommit(self, context, bgpvpn): journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'], odl_const.ODL_DELETE, []) @log_helpers.log_method_call def create_net_assoc_precommit(self, context, net_assoc): our_bgpvpn = None bgpvpns = self.get_bgpvpns(context) for bgpvpn in bgpvpns: # ODL only allows a network to be associated with one BGPVPN if bgpvpn['id'] == net_assoc['bgpvpn_id']: our_bgpvpn = bgpvpn else: if bgpvpn['networks'] and (net_assoc['network_id'] in bgpvpn['networks']): raise bgpvpn_ext.BGPVPNNetworkAssocExistsAnotherBgpvpn( driver="OpenDaylight V2", network=net_assoc['network_id'], bgpvpn=bgpvpn['id']) journal.record(context, odl_const.ODL_BGPVPN, our_bgpvpn['id'], odl_const.ODL_UPDATE, our_bgpvpn) @log_helpers.log_method_call def delete_net_assoc_precommit(self, context, 
net_assoc): bgpvpn = self.get_bgpvpn(context, net_assoc['bgpvpn_id']) # NOTE(yamahata): precommit is called within db transaction. # so removing network_id is still associated. # it needs to be removed explicitly from dict. bgpvpn['networks'].remove(net_assoc['network_id']) journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) @log_helpers.log_method_call def create_router_assoc_precommit(self, context, router_assoc): associated_routers = self.get_router_assocs(context, router_assoc['bgpvpn_id']) for assoc_router in associated_routers: if(router_assoc["router_id"] != assoc_router["router_id"]): raise bgpvpn_ext.BGPVPNMultipleRouterAssocNotSupported( driver="OpenDaylight V2") bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id']) journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) @log_helpers.log_method_call def delete_router_assoc_precommit(self, context, router_assoc): bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id']) # NOTE(yamahata): precommit is called within db transaction. # so removing router_id is still associated. # it needs to be removed explicitly from dict. 
bgpvpn['routers'].remove(router_assoc['router_id']) journal.record(context, odl_const.ODL_BGPVPN, bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/0000755000175000017500000000000000000000000025051 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/__init__.py0000644000175000017500000000000000000000000027150 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/0000755000175000017500000000000000000000000026542 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/__init__.py0000644000175000017500000000000000000000000030641 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/0000755000175000017500000000000000000000000030734 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/__init__.py0000644000175000017500000000000000000000000033033 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 
networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/opendaylight_v2/0000755000175000017500000000000000000000000034032 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__0000644000175000017500000000000000000000000035502 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/opendaylight_v2/client.p0000644000175000017500000001036100000000000035472 0ustar00jamespagejamespage00000000000000# # Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from debtcollector import removals from oslo_log import log import requests from requests import auth import six from ceilometer.i18n import _ LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class _Base(object): """Base class of OpenDaylight REST APIs Clients.""" @abc.abstractproperty def base_url(self): """Returns base url for each REST API.""" def __init__(self, client): self.client = client def get_statistics(self): return self.client.request(self.base_url) class OpenDaylightRESTAPIFailed(Exception): pass @removals.removed_class( 'SwitchStatisticsAPIClient', version='Stein', removal_version='release T', message="Ceilometer driver is deprecated and will get removed from n-odl") class SwitchStatisticsAPIClient(_Base): """OpenDaylight Switch Statistics REST API Client Base URL: {endpoint}/flow-capable-switches """ base_url = '/flow-capable-switches' @removals.removed_class( 'Client', version='Stein', removal_version='release T', message="Ceilometer driver is deprecated and will get removed from n-odl") class Client(object): def __init__(self, conf, endpoint, params): self.switch_statistics = SwitchStatisticsAPIClient(self) self._endpoint = endpoint self.conf = conf self._req_params = self._get_req_params(params) self.session = requests.Session() def _get_req_params(self, params): req_params = { 'headers': { 'Accept': 'application/json' }, 'timeout': self.conf.http_timeout, } auth_way = params.get('auth') if auth_way in ['basic', 'digest']: user = params.get('user') password = params.get('password') if auth_way == 'basic': auth_class = auth.HTTPBasicAuth else: auth_class = auth.HTTPDigestAuth req_params['auth'] = auth_class(user, password) return req_params def _log_req(self, url): curl_command = ['REQ: curl -i -X GET', '"%s"' % (url)] if 'auth' in self._req_params: auth_class = self._req_params['auth'] if isinstance(auth_class, auth.HTTPBasicAuth): curl_command.append('--basic') else: curl_command.append('--digest') 
curl_command.append('--user "%s":"***"' % auth_class.username) for name, value in six.iteritems(self._req_params['headers']): curl_command.append('-H "%s: %s"' % (name, value)) LOG.debug(' '.join(curl_command)) @staticmethod def _log_res(resp): dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, resp.status_code, resp.reason)] dump.extend('%s: %s\n' % (k, v) for k, v in six.iteritems(resp.headers)) dump.append('\n') if resp.content: dump.extend([resp.content, '\n']) LOG.debug(''.join(dump)) def _http_request(self, url): if self.conf.debug: self._log_req(url) resp = self.session.get(url, **self._req_params) if self.conf.debug: self._log_res(resp) if resp.status_code // 100 != 2: raise OpenDaylightRESTAPIFailed( _('OpenDaylight API returned %(status)s %(reason)s') % {'status': resp.status_code, 'reason': resp.reason}) return resp.json() def request(self, path): url = self._endpoint + path return self._http_request(url) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.p0000644000175000017500000003001100000000000035501 0ustar00jamespagejamespage00000000000000# # Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from debtcollector import removals from oslo_log import log from six.moves import urllib_parse as urlparse from ceilometer import keystone_client from ceilometer.network.statistics import driver from networking_odl.ceilometer.network.statistics.opendaylight_v2 import client LOG = log.getLogger(__name__) INT64_MAX_VALUE = (2 ** 64 / 2 - 1) @removals.removed_class( 'OpenDaylightDriver', version='Stein', removal_version='release T', message="Ceilometer driver is deprecated and will get removed from n-odl") class OpenDaylightDriver(driver.Driver): """Driver of network info collector from OpenDaylight. This driver uses resources in "/etc/ceilometer/polling.yaml". Resource requires below conditions: * resource is url * scheme is "opendaylight.v2" This driver can be configured via query parameters. Supported parameters: * scheme: The scheme of request url to OpenDaylight REST API endpoint. (default http) * auth: Auth strategy of http. This parameter can be set basic or digest.(default None) * user: This is username that is used by auth.(default None) * password: This is password that is used by auth.(default None) e.g.:: opendaylight.v2://127.0.0.1:8080/controller/statistics ?auth=basic&user=admin&password=admin&scheme=http In this case, the driver send request to below URLs: http://127.0.0.1:8080/controller/statistics/flow-capable-switches Example JSON response from OpenDaylight { flow_capable_switches: [{ packet_in_messages_received: 501, packet_out_messages_sent: 300, ports: 1, flow_datapath_id: 55120148545607, tenant_id: ADMIN_ID, switch_port_counters: [{ bytes_received: 1000, bytes_sent: 1000, duration: 600, packets_internal_received: 100, packets_internal_sent: 200, packets_received: 100, packets_received_drop: 0, packets_received_error: 0, packets_sent: 100, port_id: 4, tenant_id: PORT_1_TENANT_ID, uuid: PORT_1_ID }], table_counters: [{ flow_count: 90, table_id: 0 }] }] } """ admin_project_id = None @staticmethod def _get_int_sample(key, statistic, resource_id, 
resource_meta, tenant_id): if key not in statistic: return None value = int(statistic[key]) if not (0 <= value <= INT64_MAX_VALUE): value = 0 return value, resource_id, resource_meta, tenant_id def _prepare_cache(self, endpoint, params, cache): if 'network.statistics.opendaylight_v2' in cache: return cache['network.statistics.opendaylight_v2'] data = {} odl_params = {} if 'auth' in params: odl_params['auth'] = params['auth'][0] if 'user' in params: odl_params['user'] = params['user'][0] if 'password' in params: odl_params['password'] = params['password'][0] cs = client.Client(self.conf, endpoint, odl_params) if not self.admin_project_id: try: ks_client = keystone_client.get_client(self.conf) project = ks_client.projects.find(name='admin') if project: self.admin_project_id = project.id except Exception: LOG.exception('Unable to fetch admin tenant id') cache['network.statistics.opendaylight_v2'] = data return data try: # get switch statistics data['switch'] = cs.switch_statistics.get_statistics() data['admin_tenant_id'] = self.admin_project_id except client.OpenDaylightRESTAPIFailed: LOG.exception('OpenDaylight REST API Failed. ') except Exception: LOG.exception('Failed to connect to OpenDaylight' ' REST API') cache['network.statistics.opendaylight_v2'] = data return data def get_sample_data(self, meter_name, parse_url, params, cache): extractor = self._get_extractor(meter_name) if extractor is None: # The way to getting meter is not implemented in this driver or # OpenDaylight REST API has not api to getting meter. return None iter = self._get_iter(meter_name) if iter is None: # The way to getting meter is not implemented in this driver or # OpenDaylight REST API has not api to getting meter. 
return None parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], parse_url.netloc, parse_url.path, None, None, None) endpoint = urlparse.urlunparse(parts) data = self._prepare_cache(endpoint, params, cache) samples = [] if data: for sample in iter(extractor, data): if sample is not None: # set controller name to resource_metadata sample[2]['controller'] = 'OpenDaylight_V2' samples.append(sample) return samples def _get_iter(self, meter_name): if meter_name == 'switch' or meter_name == 'switch.ports': return self._iter_switch elif meter_name.startswith('switch.table'): return self._iter_table elif meter_name.startswith('switch.port'): return self._iter_switch_port elif meter_name.startswith('port'): return self._iter_port return None def _get_extractor(self, meter_name): if (meter_name == 'switch.port' or meter_name.startswith('switch.port.')): meter_name = meter_name.split('.', 1)[1] method_name = '_' + meter_name.replace('.', '_') return getattr(self, method_name, None) @staticmethod def _iter_switch(extractor, data): for switch in data['switch']['flow_capable_switches']: yield (extractor(switch, str(switch['flow_datapath_id']), {}, (switch.get('tenant_id') or data['admin_tenant_id']))) @staticmethod def _switch(statistic, resource_id, resource_meta, tenant_id): return 1, resource_id, resource_meta, tenant_id @staticmethod def _switch_ports(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'ports', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _iter_switch_port(extractor, data): for switch in data['switch']['flow_capable_switches']: if 'switch_port_counters' in switch: switch_id = str(switch['flow_datapath_id']) tenant_id = (switch.get('tenant_id') or data['admin_tenant_id']) for port_statistic in switch['switch_port_counters']: port_id = port_statistic['port_id'] resource_id = '%s:%d' % (switch_id, port_id) resource_meta = {'switch': switch_id, 'port_number_on_switch': port_id} if 'uuid' 
in port_statistic: neutron_port_id = port_statistic['uuid'] resource_meta['neutron_port_id'] = neutron_port_id yield extractor(port_statistic, resource_id, resource_meta, tenant_id) @staticmethod def _iter_port(extractor, data): resource_meta = {} for switch in data['switch']['flow_capable_switches']: if 'switch_port_counters' in switch: for port_statistic in switch['switch_port_counters']: if 'uuid' in port_statistic: resource_id = port_statistic['uuid'] tenant_id = port_statistic.get('tenant_id') yield extractor( port_statistic, resource_id, resource_meta, tenant_id or data['admin_tenant_id']) @staticmethod def _port(statistic, resource_id, resource_meta, tenant_id): return 1, resource_id, resource_meta, tenant_id @staticmethod def _port_uptime(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'duration', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _port_receive_packets(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'packets_received', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _port_transmit_packets(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'packets_sent', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _port_receive_bytes(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'bytes_received', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _port_transmit_bytes(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'bytes_sent', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _port_receive_drops(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'packets_received_drop', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _port_receive_errors(statistic, resource_id, resource_meta, 
tenant_id): return OpenDaylightDriver._get_int_sample( 'packets_received_error', statistic, resource_id, resource_meta, tenant_id) @staticmethod def _iter_table(extractor, data): for switch_statistic in data['switch']['flow_capable_switches']: if 'table_counters' in switch_statistic: switch_id = str(switch_statistic['flow_datapath_id']) tenant_id = (switch_statistic.get('tenant_id') or data['admin_tenant_id']) for table_statistic in switch_statistic['table_counters']: resource_meta = {'switch': switch_id} resource_id = ("%s:table:%d" % (switch_id, table_statistic['table_id'])) yield extractor(table_statistic, resource_id, resource_meta, tenant_id) @staticmethod def _switch_table_active_entries(statistic, resource_id, resource_meta, tenant_id): return OpenDaylightDriver._get_int_sample( 'flow_count', statistic, resource_id, resource_meta, tenant_id) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl/cmd/0000755000175000017500000000000000000000000023464 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/cmd/__init__.py0000644000175000017500000000000000000000000025563 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/cmd/analyze_journal.py0000755000175000017500000001106000000000000027234 0ustar00jamespagejamespage00000000000000#!/usr/bin/env python # Copyright (c) 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Command line script to analyze journal entry processing time based on logs. By default the input is read through pipe, unless a log file is specified. Examples: Analyzing devstack's Neutron log: journalctl -u devstack@neutron-api | python analyze_journal.py Analyzing an arbitrary log file: python analyze_journal.py --file /path/to/file.log """ import collections import re import sys import six from oslo_config import cfg from networking_odl._i18n import _ from networking_odl.journal import journal COMMAND_LINE_OPTIONS = [ cfg.StrOpt('file', default=None, help=_("Log file to analyze.")), cfg.IntOpt('slowest', min=1, default=10, help=_("Prints the N slowest entries (10 by default).")), ] # This regex will match any replacement key in the log message and extract # the key name. 
KEY_MATCHER = re.compile(r'\%\((\S+)\)s') LOG_KEYS = KEY_MATCHER.findall(journal.LOG_ENTRY_TEMPLATE) KEY_TEMP_PATTERN = 'KEYPATTERN' LOG_MATCHER = re.compile( re.sub(KEY_TEMP_PATTERN, r'(\\S+)', re.escape( KEY_MATCHER.sub(KEY_TEMP_PATTERN, journal.LOG_ENTRY_TEMPLATE)))) ENTRY_LOG_TEMPLATE = ' * Entry id: %s, processing time: %.3fs; %s %s %s' EntryStats = collections.namedtuple( 'EntryStats', 'entry_id time op obj_type obj_id') def setup_conf(output, args): """setup cmdline options.""" if any(flag in args for flag in ('-h', '--help')): six.print_(__doc__, file=output) conf = cfg.ConfigOpts() conf.register_cli_opts(COMMAND_LINE_OPTIONS) conf(args=args) return conf def parse_log(content): entries = {} for line in content: matched = LOG_MATCHER.search(line) if matched is None: continue entry_log = dict(zip(LOG_KEYS, matched.groups())) entry_id = entry_log['entry_id'] entry = entries.get(entry_id, entry_log) log_type = entry_log['log_type'] entry[log_type] = float(entry_log['timestamp']) entries[entry_id] = entry return entries def analyze_entries(entries): entries_stats = [] for entry_id, entry in entries.items(): recorded_time = entry.get(journal.LOG_RECORDED, None) completed_time = entry.get(journal.LOG_COMPLETED, None) if recorded_time is None or completed_time is None: continue delta = completed_time - recorded_time entries_stats.append(EntryStats( entry_id=entry_id, time=delta, op=entry['op'], obj_type=entry['obj_type'], obj_id=entry['obj_id'])) return entries_stats def _percentile(timings, percent): location = int(len(timings) * (percent / 100.0)) return int(timings[location]) def print_stats(output, slowest, entries_stats): entries_stats = sorted( entries_stats, key=lambda entry_stats: entry_stats.time) timings = [entry_stats.time for entry_stats in entries_stats] avg = sum(timings) / len(timings) six.print_('Average processing time: %ss' % avg, file=output) six.print_('90th percentile: %ss' % _percentile(timings, 90), file=output) six.print_('99th percentile: 
%ss' % _percentile(timings, 99), file=output) six.print_('99.9th percentile: %ss' % _percentile(timings, 99.9), file=output) six.print_('%s slowest entries:' % slowest, file=output) slowest = entries_stats[:-(slowest + 1):-1] for entry_stats in slowest: six.print_(ENTRY_LOG_TEMPLATE % entry_stats, file=output) def get_content(file_name): return open(file_name) if file_name else sys.stdin def main(output=sys.stdout): conf = setup_conf(output, sys.argv[1:]) with get_content(conf.file) as content: entries = parse_log(content) entries_stats = analyze_entries(entries) if not entries_stats: six.print_('No entry statistics found.', file=output) return 1 print_stats(output, conf.slowest, entries_stats) return 0 if __name__ == '__main__': exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/cmd/set_ovs_hostconfigs.py0000755000175000017500000003767000000000000030146 0ustar00jamespagejamespage00000000000000#!/usr/bin/env python # Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Command line script to set host OVS configurations (it requires ovsctl) Examples: NOTE: bash accepts new line characters between quotes To give a full custom json python set_ovs_hostconfigs.py --ovs_hostconfigs='{ "ODL L2": { "allowed_network_types": ["local","vlan", "vxlan","gre"], "bridge_mappings": {"physnet1":"br-ex"} "supported_vnic_types": [ { "vnic_type":"normal", "vif_type":"ovs", "vif_details":{} } ], }, "ODL L3": {} }' To make sure to use system data path (Kernel) python set_ovs_hostconfigs.py --noovs_dpdk To make sure to use user space data path (vhostuser) python set_ovs_hostconfigs.py --ovs_dpdk To give bridge mappings python --bridge_mapping=physnet1:br-ex,physnet2:br-eth0 """ import os import socket import subprocess # nosec import sys from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import six from networking_odl._i18n import _ LOG = logging.getLogger(__name__) USERSPACE_DATAPATH_TYPES = ['netdev', 'dpdkvhostuser'] COMMAND_LINE_OPTIONS = [ cfg.ListOpt( 'allowed_network_types', default=['local', 'flat', 'vlan', 'vxlan', 'gre'], help=_(""" Specifies allowed network types given as a Comma-separated list of types. Default: --allowed_network_types=local,vlan,vxlan,gre """)), cfg.DictOpt( 'bridge_mappings', default={}, help=_(""" Comma-separated list of : tuples mapping physical network names to the agent's node-specific Open vSwitch bridge names to be used for flat and VLAN networks. The length of bridge names should be no more than 11. Each bridge must exist, and should have a physical network interface configured as a port. All physical networks configured on the server should have mappings to appropriate bridges on each agent. Note: If you remove a bridge from this mapping, make sure to disconnect it from the integration bridge as it won't be managed by the agent anymore. 
Default: --bridge_mappings= """)), cfg.StrOpt( 'datapath_type', choices=['system', 'netdev', 'dpdkvhostuser'], default=None, help=_(""" It specifies the OVS data path to use. If this value is given then --ovs_dpdk will be ignored. If neither this option or --ovs_dpdk are given then it will use a valid value for current host. Choices: --datapath_type= --datapath_type=system # kernel data path --datapath_type=netdev # userspace data path --datapath_type=dpdkvhostuser # userspace data path Default: --datapath_type=netdev # if support is detected --datapath_type=system # in all other cases """)), cfg.StrOpt( 'host', default=socket.gethostname(), # pylint: disable=no-member help=_(""" It specifies the host name of the target machine. Default: --host=$HOSTNAME # running machine host name """)), cfg.IPOpt( 'local_ip', help=_(""" IP address of local overlay (tunnel) network end-point. It accepts either an IPv4 or IPv6 address that resides on one of the host network interfaces. The IP version of this value must match the value of the 'overlay_ip_version' option in the ML2 plug-in configuration file on the Neutron server node(s). Default: local_ip= """)), cfg.BoolOpt( 'ovs_dpdk', default=None, help=_(""" It uses user-space type of virtual interface (vhostuser) instead of the system based one (ovs). If this option is not specified it tries to detect vhostuser support on running host and in case of positive match it uses it. NOTE: if --datapath_type is given then this option is ignored. Default: """)), cfg.BoolOpt( 'ovs_sriov_offload', default=None, help=_(""" It adds SR-IOV virtual interface support to allow ovs hardware offload. NOTE: This feature should be used with ovs>=2.8.0 and SR-IOV NIC which support switchdev mode and tc offload. Default: """)), cfg.StrOpt( 'ovs_hostconfigs', help=_(""" Fives pre-made host configuration for OpenDaylight as a JSON string. NOTE: when specified all other options are ignored! 
An entry should look like: --ovs_hostconfigs='{ "ODL L2": { "allowed_network_types": ["local","vlan", "vxlan","gre"], "bridge_mappings": {"physnet1":"br-ex"} "supported_vnic_types": [ { "vnic_type":"normal", "vif_type":"ovs", "vif_details":{} } ], }, "ODL L3": {} }' Default: --ovs_hostconfigs= """)), cfg.StrOpt( 'vhostuser_mode', choices=['client', 'server'], default='client', help=_(""" It specifies the OVS VHostUser mode. Choices: --vhostuser_mode=client --vhostuser_mode=server Default: --vhostuser_mode=client """)), cfg.BoolOpt( 'vhostuser_ovs_plug', default=True, help=_(""" Enable VHostUser OVS Plug. Default: --vhostuser_ovs_plug """)), cfg.StrOpt( 'vhostuser_port_prefix', choices=['vhu', 'socket'], default='vhu', help=_(""" VHostUser socket port prefix. Choices: --vhostuser_socket_dir=vhu --vhostuser_socket_dir=socket Default: --vhostuser_socket_dir=vhu """)), cfg.StrOpt( 'vhostuser_socket_dir', default='/var/run/openvswitch', help=_(""" OVS VHostUser socket directory. Default: --vhostuser_socket_dir=/var/run/openvswitch """)), ] def set_ovs_extid_hostconfigs(conf, ovs_vsctl): if conf.ovs_hostconfigs: json_str = conf.ovs_hostconfigs.replace("\'", "\"") LOG.debug("SET-HOSTCONFIGS: JSON String %s", json_str) hostconfigs = jsonutils.loads(json_str) else: uuid = ovs_vsctl.uuid() userspace_datapath_types = ovs_vsctl.userspace_datapath_types() hostconfigs = _hostconfigs_from_conf( conf=conf, uuid=uuid, userspace_datapath_types=userspace_datapath_types) ovs_vsctl.set_host_name(conf.host) for name in sorted(hostconfigs): ovs_vsctl.set_host_config(name, hostconfigs[name]) # for new netvirt if conf.local_ip: ovs_vsctl.set_local_ip(conf.local_ip) if conf.bridge_mappings: provider_mappings = ",".join( "{}:{}".format(k, v) for k, v in conf.bridge_mappings.items()) ovs_vsctl.set_provider_mappings(provider_mappings) def _hostconfigs_from_conf(conf, uuid, userspace_datapath_types): vif_type = _vif_type_from_conf( conf=conf, userspace_datapath_types=userspace_datapath_types) 
datapath_type = conf.datapath_type or ( 'system' if vif_type == 'ovs' else userspace_datapath_types[0]) vif_details = _vif_details_from_conf( conf=conf, uuid=uuid, vif_type=vif_type) host_config = { "ODL L2": { "allowed_network_types": conf.allowed_network_types, "bridge_mappings": conf.bridge_mappings, "datapath_type": datapath_type, "supported_vnic_types": [ { "vif_details": vif_details, "vif_type": vif_type, "vnic_type": "normal", }, ] } } if vif_type == 'ovs' and conf.ovs_sriov_offload: direct_vnic = { "vif_details": vif_details, "vif_type": vif_type, "vnic_type": "direct", } host_config["ODL L2"]["supported_vnic_types"].append(direct_vnic) return host_config def _vif_type_from_conf(conf, userspace_datapath_types): # take vif_type from datapath_type ------------------------------------ if conf.datapath_type: # take it from datapath_type if conf.datapath_type in USERSPACE_DATAPATH_TYPES: if conf.datapath_type not in userspace_datapath_types: LOG.warning( "Using user space data path type '%s' even if no " "support was detected.", conf.datapath_type) return 'vhostuser' else: return 'ovs' # take vif_type from ovs_dpdk ----------------------------------------- if conf.ovs_dpdk is True: if userspace_datapath_types: return 'vhostuser' raise ValueError(_( "--ovs_dpdk option was specified but the 'netdev' datapath_type " "was not enabled. 
" "To override use option --datapath_type=netdev")) elif conf.ovs_dpdk is False: return 'ovs' # take detected dtype ------------------------------------------------- if userspace_datapath_types: return 'vhostuser' return 'ovs' def _vif_details_from_conf(conf, uuid, vif_type): host_addresses = [conf.local_ip or conf.host] if vif_type == 'ovs': # OVS legacy mode return {"uuid": uuid, "host_addresses": host_addresses, "has_datapath_type_netdev": False, "support_vhost_user": False} elif vif_type == 'vhostuser': # enable VHOSTUSER return {"uuid": uuid, "host_addresses": host_addresses, "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": conf.vhostuser_port_prefix, "vhostuser_socket_dir": conf.vhostuser_socket_dir, "vhostuser_ovs_plug": conf.vhostuser_ovs_plug, "vhostuser_mode": conf.vhostuser_mode, "vhostuser_socket": os.path.join( conf.vhostuser_socket_dir, conf.vhostuser_port_prefix + '$PORT_ID')} raise ValueError(_("vif type: '%s' not supported") % vif_type) def setup_conf(args): """setup cmdline options.""" conf = cfg.ConfigOpts() # NOTE, Logging options must be registered before parsing cli # options, refer:- # https://docs.openstack.org/oslo.log/latest/user/usage.html#oslo-logging-setup-methods logging.register_options(conf) if '-h' in args or '--help' in args: # Prints out script documentation." 
print(__doc__) conf.register_cli_opts(COMMAND_LINE_OPTIONS) conf(args=args) return conf class OvsVsctl(object): """Wrapper class for ovs-vsctl command tool """ COMMAND = 'ovs-vsctl' TABLE = 'Open_vSwitch' _uuid = None def uuid(self): uuid = self._uuid if uuid is None: self._uuid = uuid = self._get('.', '_uuid') return uuid _datapath_types = None def datapath_types(self): datapath_types = self._datapath_types if datapath_types is None: try: datapath_types = self._get('.', 'datapath_types') except subprocess.CalledProcessError: datapath_types = 'system' self._datapath_types = datapath_types return datapath_types _userspace_datapath_types = None def userspace_datapath_types(self): userspace_datapath_types = self._userspace_datapath_types if userspace_datapath_types is None: datapath_types = self.datapath_types() userspace_datapath_types = tuple( datapath_type for datapath_type in USERSPACE_DATAPATH_TYPES if datapath_type in datapath_types) self._userspace_datapath_types = userspace_datapath_types return userspace_datapath_types def set_host_name(self, host_name): self._set_external_ids('odl_os_hostconfig_hostid', host_name) def set_host_config(self, name, value): self._set_external_ids( name='odl_os_hostconfig_config_' + name.lower().replace(' ', '_'), value=jsonutils.dumps(value)) def set_local_ip(self, local_ip): self._set_other_config("local_ip", local_ip) def set_provider_mappings(self, provider_mappings): self._set_other_config("provider_mappings", provider_mappings) # --- implementation details ---------------------------------------------- def _set_external_ids(self, name, value): # Refer below for ovs ext-id strings # https://review.opendev.org/#/c/309630/ value = 'external_ids:{}={}'.format(name, value) self._set(record=self.uuid(), value=value) def _set_other_config(self, name, value): value = 'other_config:{}={}'.format(name, value) self._set(record=self.uuid(), value=value) def _get(self, record, name): return self._execute('get', self.TABLE, record, name) 
def _set(self, record, value): self._execute('set', self.TABLE, record, value) def _execute(self, *args): command_line = (self.COMMAND,) + args LOG.info( "SET-HOSTCONFIGS: Executing cmd: %s", ' '.join(command_line)) res = subprocess.check_output(command_line).strip() # nosec # Note(lajoskatona): on py3 subprocess.check_output returns back binary # to make that consumable we have to decode that. if isinstance(res, six.binary_type): return res.decode() return res def setup_logging(conf): # NOTE, Hacky way to enable logging. oslo log needs other parameters also # to register, but neutron.conf is not present on compute node therefore # we can not dependant on it. However, nova-compute is present on compute # node, so code can depend on oslo log # if script does not have write permission in the directory then # stack trace appear on console. logging.setup(conf, 'networking-odl') LOG.info("Logging enabled!") def main(args=None): """Main.""" if args is None: args = sys.argv[1:] conf = setup_conf(args) setup_logging(conf) if os.geteuid() != 0: LOG.error('Root permissions are required to configure ovsdb.') return 1 try: set_ovs_extid_hostconfigs(conf=conf, ovs_vsctl=OvsVsctl()) except Exception as ex: # pylint: disable=broad-except LOG.error("Fatal error: %s", ex, exc_info=conf.debug) return 1 else: return 0 if __name__ == '__main__': exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/cmd/test_setup_hostconfigs.sh0000755000175000017500000000046600000000000030636 0ustar00jamespagejamespage00000000000000#!/bin/sh python set_ovs_hostconfigs.py --debug --ovs_hostconfigs='{"ODL L2": {"supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs", "vif_details":{}}], "allowed_network_types":["local","vlan", "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}}, "ODL L3": {"some_details": "dummy_details"}}' 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl/common/0000755000175000017500000000000000000000000024211 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/__init__.py0000644000175000017500000000000000000000000026310 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/callback.py0000644000175000017500000001226400000000000026324 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.db import api as db_api from oslo_log import log as logging from oslo_utils import excutils from networking_odl.common import constants as odl_const LOG = logging.getLogger(__name__) ODLResource = collections.namedtuple('ODLResource', ('singular', 'plural')) _RESOURCE_MAPPING = { resources.SECURITY_GROUP: ODLResource(odl_const.ODL_SG, odl_const.ODL_SGS), resources.SECURITY_GROUP_RULE: ODLResource(odl_const.ODL_SG_RULE, odl_const.ODL_SG_RULES), } _OPERATION_MAPPING = { events.PRECOMMIT_CREATE: odl_const.ODL_CREATE, events.PRECOMMIT_UPDATE: odl_const.ODL_UPDATE, events.PRECOMMIT_DELETE: odl_const.ODL_DELETE, events.AFTER_CREATE: odl_const.ODL_CREATE, events.AFTER_UPDATE: odl_const.ODL_UPDATE, events.AFTER_DELETE: odl_const.ODL_DELETE, } LOG_TEMPLATE = ("(%(msg)s) with ODL_OPS (%(op)s) ODL_RES_TYPE (%(res_type)s) " "ODL_RES_ID (%(res_id)s)) ODL_RES_DICT (%(res_dict)s) " "DATA (%(data)s)") def _log_on_callback(lvl, msg, op, res_type, res_id, res_dict, data): LOG.log(lvl, LOG_TEMPLATE, {'msg': msg, 'op': op, 'res_type': res_type, 'res_id': res_id, 'res_dict': res_dict, 'data': data, 'exc_info': lvl >= logging.ERROR}) class OdlSecurityGroupsHandler(object): def __init__(self, precommit, postcommit): assert postcommit is not None self._precommit = precommit self._postcommit = postcommit self._subscribe() def _subscribe(self): if self._precommit is not None: for event in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE): registry.subscribe(self.sg_callback_precommit, resources.SECURITY_GROUP, event) registry.subscribe(self.sg_callback_precommit, resources.SECURITY_GROUP_RULE, event) registry.subscribe( self.sg_callback_precommit, resources.SECURITY_GROUP, events.PRECOMMIT_UPDATE) for event in (events.AFTER_CREATE, events.AFTER_DELETE): registry.subscribe(self.sg_callback_postcommit, resources.SECURITY_GROUP, 
event) registry.subscribe(self.sg_callback_postcommit, resources.SECURITY_GROUP_RULE, event) registry.subscribe(self.sg_callback_postcommit, resources.SECURITY_GROUP, events.AFTER_UPDATE) def _sg_callback(self, callback, resource, event, trigger, **kwargs): if 'payload' in kwargs: # TODO(boden): remove shim once all callbacks use payloads context = kwargs['payload'].context res = kwargs['payload'].desired_state res_id = kwargs['payload'].resource_id copy_kwargs = kwargs else: context = kwargs['context'] res = kwargs.get(resource) res_id = kwargs.get("%s_id" % resource) copy_kwargs = kwargs.copy() copy_kwargs.pop('context') if res_id is None: res_id = res.get('id') odl_res_type = _RESOURCE_MAPPING[resource] odl_ops = _OPERATION_MAPPING[event] odl_res_dict = None if res is None else {odl_res_type.singular: res} _log_on_callback(logging.DEBUG, "Calling callback", odl_ops, odl_res_type, res_id, odl_res_dict, copy_kwargs) try: callback(context, odl_ops, odl_res_type, res_id, odl_res_dict, **copy_kwargs) except Exception as e: # In case of precommit, neutron registry notification caller # doesn't log its exception. In networking-odl case, we don't # normally throw exception. So log it here for debug with excutils.save_and_reraise_exception(): if not db_api.is_retriable(e): _log_on_callback(logging.ERROR, "Exception from callback", odl_ops, odl_res_type, res_id, odl_res_dict, copy_kwargs) def sg_callback_precommit(self, resource, event, trigger, **kwargs): self._sg_callback(self._precommit, resource, event, trigger, **kwargs) def sg_callback_postcommit(self, resource, event, trigger, **kwargs): self._sg_callback(self._postcommit, resource, event, trigger, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/client.py0000644000175000017500000001131100000000000026036 0ustar00jamespagejamespage00000000000000# Copyright (c) 2014 Red Hat Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils import requests from requests import sessions from networking_odl.common import utils LOG = log.getLogger(__name__) cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OpenDaylightRestClient(object): @staticmethod def _check_opt(url): if not url: raise cfg.RequiredOptError('url', cfg.OptGroup('ml2_odl')) required_opts = ('url', 'username', 'password') for opt in required_opts: if not getattr(cfg.CONF.ml2_odl, opt): raise cfg.RequiredOptError(opt, cfg.OptGroup('ml2_odl')) @classmethod def create_client(cls, url=None): if cfg.CONF.ml2_odl.enable_lightweight_testing: LOG.debug("ODL lightweight testing is enabled, " "returning a OpenDaylightLwtClient instance") # Have to import at here, otherwise we create a dependency loop from networking_odl.common import lightweight_testing as lwt cls = lwt.OpenDaylightLwtClient url = url or cfg.CONF.ml2_odl.url cls._check_opt(url) return cls( url, cfg.CONF.ml2_odl.username, cfg.CONF.ml2_odl.password, cfg.CONF.ml2_odl.timeout) def __init__(self, url, username, password, timeout): super(OpenDaylightRestClient, self).__init__() self.url = url self.timeout = timeout self.session = sessions.Session() self.session.auth = (username, password) def get_resource(self, resource_type, resource_id): response = 
self.get(utils.make_url_object(resource_type) + '/' + resource_id) if response.status_code == requests.codes.not_found: return None return self._check_response(response).json() def get(self, urlpath='', data=None): return self.request('get', urlpath, data) def request(self, method, urlpath='', data=None): headers = {'Content-Type': 'application/json'} url = '/'.join([self.url, urlpath]) LOG.debug( "Sending METHOD (%(method)s) URL (%(url)s) JSON (%(data)s)", {'method': method, 'url': url, 'data': data}) return self.session.request( method, url=url, headers=headers, data=data, timeout=self.timeout) def sendjson(self, method, urlpath, obj): """Send json to the OpenDaylight controller.""" data = jsonutils.dumps(obj, indent=2) if obj else None try: return self._check_response( self.request(method, urlpath, data)) except Exception: with excutils.save_and_reraise_exception(): LOG.error("REST request ( %(method)s ) to " "url ( %(urlpath)s ) is failed. " "Request body : [%(body)s] service", {'method': method, 'urlpath': urlpath, 'body': obj}) def _check_response(self, response): try: response.raise_for_status() except requests.HTTPError as error: with excutils.save_and_reraise_exception(): LOG.debug("Exception from ODL: %(e)s %(text)s", {'e': error, 'text': response.text}, exc_info=1) else: LOG.debug("Got response:\n" "(%(response)s)", {'response': response.text}) return response class OpenDaylightRestClientGlobal(object): """ODL Rest client as global variable The creation of OpenDaylightRestClient needs to be delayed until configuration values need to be configured at first. 
""" def __init__(self): super(OpenDaylightRestClientGlobal, self).__init__() self._lock = threading.Lock() self._client = None def get_client(self): with self._lock: if self._client is None: self._client = OpenDaylightRestClient.create_client() return self._client ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/config.py0000644000175000017500000000746100000000000026040 0ustar00jamespagejamespage00000000000000# Copyright (c) 2014 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from networking_odl._i18n import _ odl_opts = [ cfg.StrOpt('url', help=_("HTTP URL of OpenDaylight REST interface.")), cfg.StrOpt('username', help=_("HTTP username for authentication.")), cfg.StrOpt('password', secret=True, help=_("HTTP password for authentication.")), cfg.IntOpt('timeout', default=10, help=_("HTTP timeout in seconds.")), cfg.IntOpt('session_timeout', default=30, help=_("Tomcat session timeout in minutes.")), cfg.FloatOpt('sync_timeout', default=10, help=_("Sync thread timeout in seconds or fraction.")), cfg.IntOpt('retry_count', default=5, help=_("Number of times to retry a row before failing.")), cfg.IntOpt('maintenance_interval', default=300, help=_("Journal maintenance operations interval in seconds.")), cfg.IntOpt('completed_rows_retention', default=0, help=_("Time to keep completed rows (in seconds)." 
"For performance reasons it's not recommended to " "change this from the default value (0) which " "indicates completed rows aren't kept." "This value will be checked every maintenance_interval " "by the cleanup thread. To keep completed rows " "indefinitely, set the value to -1")), cfg.BoolOpt('enable_lightweight_testing', default=False, help=_('Test without real ODL.')), cfg.StrOpt('port_binding_controller', default='pseudo-agentdb-binding', help=_('Name of the controller to be used for port binding.')), cfg.IntOpt('processing_timeout', default='100', help=_("Time in seconds to wait before a " "processing row is marked back to pending.")), cfg.StrOpt('odl_hostconf_uri', help=_("Path for ODL host configuration REST interface"), default="/restconf/operational/neutron:neutron/hostconfigs"), cfg.IntOpt('restconf_poll_interval', default=30, help=_("Poll interval in seconds for getting ODL hostconfig")), cfg.BoolOpt('enable_websocket_pseudo_agentdb', default=False, help=_('Enable websocket for pseudo-agent-port-binding.')), cfg.IntOpt('odl_features_retry_interval', default=5, help=_("Wait this many seconds before retrying the odl features" " fetch")), cfg.ListOpt('odl_features', help='A list of features supported by ODL.'), cfg.StrOpt('odl_features_json', help='Features supported by ODL, in the json format returned' 'by ODL. 
Note: This config option takes precedence over' 'odl_features.'), cfg.BoolOpt('enable_dhcp_service', default=False, help=_('Enables the networking-odl driver to supply special' ' neutron ports of "dhcp" type to OpenDaylight' ' Controller for its use in providing DHCP Service.')), ] cfg.CONF.register_opts(odl_opts, "ml2_odl") def list_opts(): return [('ml2_odl', odl_opts)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/constants.py0000644000175000017500000000564700000000000026613 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
ODL_NETWORK = 'network' ODL_NETWORKS = 'networks' ODL_SUBNET = 'subnet' ODL_SUBNETS = 'subnets' ODL_PORT = 'port' ODL_PORTS = 'ports' ODL_SG = 'security_group' ODL_SGS = 'security_groups' ODL_SG_RULE = 'security_group_rule' ODL_SG_RULES = 'security_group_rules' ODL_ROUTER = 'router' ODL_ROUTERS = 'routers' ODL_FLOATINGIP = 'floatingip' ODL_FLOATINGIPS = 'floatingips' ODL_QOS = 'qos' ODL_QOS_POLICY = 'policy' ODL_QOS_POLICIES = 'policies' ODL_SFC = 'sfc' ODL_SFC_FLOW_CLASSIFIER = 'flowclassifier' ODL_SFC_FLOW_CLASSIFIERS = 'flowclassifiers' ODL_SFC_PORT_PAIR = 'portpair' ODL_SFC_PORT_PAIRS = 'portpairs' ODL_SFC_PORT_PAIR_GROUP = 'portpairgroup' ODL_SFC_PORT_PAIR_GROUPS = 'portpairgroups' ODL_SFC_PORT_CHAIN = 'portchain' ODL_SFC_PORT_CHAINS = 'portchains' NETWORKING_SFC_FLOW_CLASSIFIER = 'flow_classifier' NETWORKING_SFC_FLOW_CLASSIFIERS = 'flow_classifiers' NETWORKING_SFC_PORT_PAIR = 'port_pair' NETWORKING_SFC_PORT_PAIRS = 'port_pairs' NETWORKING_SFC_PORT_PAIR_GROUP = 'port_pair_group' NETWORKING_SFC_PORT_PAIR_GROUPS = 'port_pair_groups' NETWORKING_SFC_PORT_CHAIN = 'port_chain' NETWORKING_SFC_PORT_CHAINS = 'port_chains' ODL_TRUNK = 'trunk' ODL_TRUNKS = 'trunks' ODL_L2GATEWAY = 'l2_gateway' ODL_L2GATEWAYS = 'l2_gateways' ODL_L2GATEWAY_CONNECTION = 'l2gateway_connection' ODL_L2GATEWAY_CONNECTIONS = 'l2_gateway_connections' ODL_BGPVPN = 'bgpvpn' ODL_BGPVPNS = 'bgpvpns' ODL_BGPVPN_NETWORK_ASSOCIATION = 'bgpvpn_network_association' ODL_BGPVPN_NETWORK_ASSOCIATIONS = 'bgpvpn_network_associations' ODL_BGPVPN_ROUTER_ASSOCIATION = 'bgpvpn_router_association' ODL_BGPVPN_ROUTER_ASSOCIATIONS = 'bgpvpn_router_associations' ODL_ML2_MECH_DRIVER_V2 = "opendaylight_v2" ODL_CREATE = 'create' ODL_UPDATE = 'update' ODL_DELETE = 'delete' # Constants for journal operation states PENDING = 'pending' PROCESSING = 'processing' FAILED = 'failed' COMPLETED = 'completed' # Journal Callback events BEFORE_COMPLETE = 'before_complete' # dict to store url mappings RESOURCE_URL_MAPPINGS = { 
ODL_QOS_POLICY: "%s/%s" % (ODL_QOS, ODL_QOS_POLICIES), ODL_SFC_FLOW_CLASSIFIER: "%s/%s" % (ODL_SFC, ODL_SFC_FLOW_CLASSIFIERS), ODL_SFC_PORT_CHAIN: "%s/%s" % (ODL_SFC, ODL_SFC_PORT_CHAINS), ODL_SFC_PORT_PAIR: "%s/%s" % (ODL_SFC, ODL_SFC_PORT_PAIRS), ODL_SFC_PORT_PAIR_GROUP: "%s/%s" % (ODL_SFC, ODL_SFC_PORT_PAIR_GROUPS) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/exceptions.py0000644000175000017500000000365400000000000026754 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 NEC Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions from neutron._i18n import _ class NetworkingODLException(exceptions.NeutronException): """Base Networking-ODL exception.""" pass class FullSyncError(NetworkingODLException): """Base exception for Full Sync""" pass class UnsupportedResourceType(NetworkingODLException): """An exception for unsupported resource for full sync and recovery""" message = _("unsupported resource type: %(resource)s") class PluginMethodNotFound(NetworkingODLException, AttributeError): """An exception indicating plugin method was not found. Specialization of AttributeError and NetworkingODLException indicating requested plugin method could not be found. :param method: Name of the method being accessed. :param plugin: Plugin name expected to have required method. 
""" message = _("%(method)s not found in %(plugin)s") class ResourceNotRegistered(FullSyncError): """An exception indicating resource is not registered for maintenance task. Specialization of FullSync error indicating resource is not registered for maintenance tasks full sync and recovery. :param resource_type: Resource type not registered for maintenance task. """ message = _("%(resource_type)s resource is not registered for maintenance") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/filters.py0000644000175000017500000001610300000000000026234 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from oslo_log import log from oslo_serialization import jsonutils from networking_odl.common import constants as odl_const from networking_odl.common import utils as odl_utils LOG = log.getLogger(__name__) # NOTE(yamahata): As neutron keystone v3 support, tenant_id would be renamed to # project_id. 
In order to keep compatibility, populate both # 'project_id' and 'tenant_id' # for details refer to # https://specs.openstack.org/openstack/neutron-specs/specs/newton/moving-to-keystone-v3.html def _populate_project_id_and_tenant_id(resource_dict): # NOTE(yamahata): l3 plugin passes data as dependency_list as python list # delete_router, delete_floatingip if not isinstance(resource_dict, dict): return project_id = resource_dict.get('project_id', resource_dict.get('tenant_id')) if project_id is not None: # NOTE(yamahata): project_id can be ""(empty string) resource_dict.setdefault('project_id', project_id) resource_dict.setdefault('tenant_id', project_id) def _filter_unmapped_null(resource_dict, unmapped_keys): # NOTE(yamahata): bug work around # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475 # Null-value for an unmapped element causes next mapped # collection to contain a null value # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] } # # Java Object: # class Root { # Collection mappedCollection = new ArrayList; # } # # Result: # Field B contains one element; null # # TODO(yamahata): update along side with neutron and ODL # add when neutron adds more extensions # delete when ODL neutron northbound supports it # TODO(yamahata): do same thing for other resources keys_to_del = [key for key in unmapped_keys if resource_dict.get(key) is None] if keys_to_del: odl_utils.try_del(resource_dict, keys_to_del) _NETWORK_UNMAPPED_KEYS = ['qos_policy_id'] _SUBNET_UNMAPPED_KEYS = ['segment_id', 'subnetpool_id'] _PORT_UNMAPPED_KEYS = ['binding:profile', 'dns_name', 'port_security_enabled', 'qos_policy_id'] _FIP_UNMAPPED_KEYS = ['port_id', 'fixed_ip_address', 'router_id'] def _filter_network_create(network): odl_utils.try_del(network, ['status', 'subnets']) _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS) def _filter_network_update(network): odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id', 'project_id']) _filter_unmapped_null(network, 
_NETWORK_UNMAPPED_KEYS) def _filter_floatingip(fip): _filter_unmapped_null(fip, _FIP_UNMAPPED_KEYS) def _filter_subnet_create(subnet): _filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS) def _filter_subnet_update(subnet): odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr', 'tenant_id', 'project_id']) _filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS) def _convert_value_to_str(dictionary, key): try: # use jsonutils to convert unicode & ascii dictionary[key] = jsonutils.dumps(dictionary[key]) except KeyError: LOG.warning("key %s is not present in dict %s", key, dictionary) def _filter_port(port, attributes): odl_utils.try_del(port, attributes) _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS) # ODL excpects binding:profile to be a string, not a dict _convert_value_to_str(port, key='binding:profile') def _filter_port_create(port): """Filter out port attributes not required for a create.""" _filter_port(port, ['status']) def _filter_port_update(port): """Filter out port attributes for an update operation.""" _filter_port(port, ['network_id', 'id', 'status', 'tenant_id', 'project_id']) def _filter_router_update(router): """Filter out attributes for an update operation.""" odl_utils.try_del(router, ['id', 'tenant_id', 'project_id', 'status']) # neutron has multiple ICMPv6 names # https://bugs.launchpad.net/tempest/+bug/1671366 # REVISIT(yamahata): once neutron upstream is fixed to store unified form, # this can be removed. _ICMPv6_NAMES = ( n_const.PROTO_NAME_ICMP, n_const.PROTO_NAME_IPV6_ICMP, n_const.PROTO_NAME_IPV6_ICMP_LEGACY, ) def _sgrule_scrub_icmpv6_name(sgrule): if (sgrule.get('ethertype') == n_const.IPv6 and sgrule.get('protocol') in _ICMPv6_NAMES): sgrule['protocol'] = n_const.PROTO_NAME_IPV6_ICMP_LEGACY # ODL neturon northbound knows the following protocol names. 
# It's safe to pass those names _ODL_KNOWN_PROTOCOL_NAMES = ( n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP, n_const.PROTO_NAME_ICMP, n_const.PROTO_NAME_IPV6_ICMP_LEGACY, ) def _sgrule_scrub_unknown_protocol_name(protocol): """Convert unknown protocol name to actual interger. OpenDaylight does't want to keep catching up list of protocol names. So networking-odl converts unknown protcol name into integer """ if protocol in _ODL_KNOWN_PROTOCOL_NAMES: return protocol if protocol in n_const.IP_PROTOCOL_MAP: return n_const.IP_PROTOCOL_MAP[protocol] return protocol def _filter_security_group_rule(sg_rule): _sgrule_scrub_icmpv6_name(sg_rule) if sg_rule.get('protocol'): sg_rule['protocol'] = _sgrule_scrub_unknown_protocol_name( sg_rule['protocol']) _FILTER_MAP = { (odl_const.ODL_NETWORK, odl_const.ODL_CREATE): _filter_network_create, (odl_const.ODL_NETWORK, odl_const.ODL_UPDATE): _filter_network_update, (odl_const.ODL_SUBNET, odl_const.ODL_CREATE): _filter_subnet_create, (odl_const.ODL_SUBNET, odl_const.ODL_UPDATE): _filter_subnet_update, (odl_const.ODL_PORT, odl_const.ODL_CREATE): _filter_port_create, (odl_const.ODL_PORT, odl_const.ODL_UPDATE): _filter_port_update, (odl_const.ODL_ROUTER, odl_const.ODL_UPDATE): _filter_router_update, (odl_const.ODL_SG_RULE, odl_const.ODL_CREATE): _filter_security_group_rule, (odl_const.ODL_SG_RULE, odl_const.ODL_UPDATE): _filter_security_group_rule, (odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE): _filter_floatingip, } def filter_for_odl(object_type, operation, data): """Filter out the attributed before sending the data to ODL""" filter_key = (object_type, operation) if filter_key in _FILTER_MAP: _FILTER_MAP[filter_key](data) _populate_project_id_and_tenant_id(data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/lightweight_testing.py0000644000175000017500000001403600000000000030643 0ustar00jamespagejamespage00000000000000# 
Copyright (c) 2015 Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import requests import six from oslo_log import log as logging from oslo_serialization import jsonutils from networking_odl._i18n import _ from networking_odl.common import client from networking_odl.common import constants as odl_const LOG = logging.getLogger(__name__) OK = requests.codes.ok NO_CONTENT = requests.codes.no_content NOT_ALLOWED = requests.codes.not_allowed NOT_FOUND = requests.codes.not_found BAD_REQUEST = requests.codes.bad_request class OpenDaylightLwtClient(client.OpenDaylightRestClient): """Lightweight testing client""" lwt_dict = {odl_const.ODL_NETWORKS: {}, odl_const.ODL_SUBNETS: {}, odl_const.ODL_PORTS: {}, odl_const.ODL_SGS: {}, odl_const.ODL_SG_RULES: {}} @classmethod def _make_response(cls, status_code=OK, content=None): """Only supports 'content-type': 'application/json'""" response = requests.models.Response() response.status_code = status_code if content: response.raw = six.BytesIO( jsonutils.dumps(content).encode('utf-8')) return response @classmethod def _get_resource_id(cls, urlpath): # resource ID is the last element of urlpath return str(urlpath).rsplit('/', 1)[-1] @classmethod def post(cls, resource_type, resource_dict, urlpath, resource_list): """No ID in URL, elements in resource_list must have ID""" if resource_list is None: raise ValueError(_("resource_list can not be None")) for resource in resource_list: if 
resource['id'] in resource_dict: LOG.debug("%s %s already exists", resource_type, resource['id']) response = cls._make_response(NOT_ALLOWED) raise requests.exceptions.HTTPError(response=response) resource_dict[resource['id']] = deepcopy(resource) return cls._make_response(NO_CONTENT) @classmethod def put(cls, resource_type, resource_dict, urlpath, resource_list): resource_id = cls._get_resource_id(urlpath) if resource_list is None: raise ValueError(_("resource_list can not be None")) if resource_id and len(resource_list) != 1: LOG.debug("Updating %s with multiple resources", urlpath) response = cls._make_response(BAD_REQUEST) raise requests.exceptions.HTTPError(response=response) for resource in resource_list: res_id = resource_id or resource['id'] if res_id in resource_dict: resource_dict[res_id].update(deepcopy(resource)) else: LOG.debug("%s %s does not exist", resource_type, res_id) response = cls._make_response(NOT_FOUND) raise requests.exceptions.HTTPError(response=response) return cls._make_response(NO_CONTENT) @classmethod def delete(cls, resource_type, resource_dict, urlpath, resource_list): if resource_list is None: resource_id = cls._get_resource_id(urlpath) id_list = [resource_id] else: id_list = [res['id'] for res in resource_list] for res_id in id_list: removed = resource_dict.pop(res_id, None) if removed is None: LOG.debug("%s %s does not exist", resource_type, res_id) response = cls._make_response(NOT_FOUND) raise requests.exceptions.HTTPError(response=response) return cls._make_response(NO_CONTENT) @classmethod def get(cls, resource_type, resource_dict, urlpath, resource_list=None): resource_id = cls._get_resource_id(urlpath) if resource_id: resource = resource_dict.get(resource_id) if resource is None: LOG.debug("%s %s does not exist", resource_type, resource_id) response = cls._make_response(NOT_FOUND) raise requests.exceptions.HTTPError(response=response) else: # When getting single resource, return value is a dict r_list = {resource_type[:-1]: 
deepcopy(resource)} return cls._make_response(OK, r_list) r_list = [{resource_type[:-1]: deepcopy(res)} for res in resource_dict.values()] return cls._make_response(OK, r_list) def sendjson(self, method, urlpath, obj=None): """Lightweight testing without ODL""" if '/' not in urlpath: urlpath += '/' resource_type = str(urlpath).split('/', 1)[0] resource_type = resource_type.replace('-', '_') resource_dict = self.lwt_dict.get(resource_type) if resource_dict is None: LOG.debug("Resource type %s is not supported", resource_type) response = self._make_response(NOT_FOUND) raise requests.exceptions.HTTPError(response=response) func = getattr(self, str(method).lower()) resource_list = None if obj: # If obj is not None, it can only have one entry assert len(obj) == 1, "Obj can only have one entry" key, resource_list = list(obj.items())[0] if not isinstance(resource_list, list): # Need to transform resource_list to a real list, i.e. [res] resource_list = [resource_list] return func(resource_type, resource_dict, urlpath, resource_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/odl_features.py0000644000175000017500000000745400000000000027251 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import itertools import time from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from requests import exceptions from networking_odl.common import client as odl_client from networking_odl.common import utils cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') LOG = log.getLogger(__name__) OPERATIONAL_PORT_STATUS = 'operational-port-status' EMPTY_FEATURES = {} feature_configs = copy.copy(EMPTY_FEATURES) def init(): '''initialize odl_features. Initialize odl_features. Try first from configuration and then try pulling via rest call from ODL. ''' global feature_configs feature_configs = None if cfg.CONF.ml2_odl.odl_features_json is not None: json = jsonutils.loads(cfg.CONF.ml2_odl.odl_features_json) feature_configs = _load_features(json) return if cfg.CONF.ml2_odl.odl_features is not None: feature_configs = {feature: '' for feature in cfg.CONF.ml2_odl.odl_features} return wait_interval = cfg.CONF.ml2_odl.odl_features_retry_interval for times_tried in itertools.count(): feature_configs = _fetch_features() if feature_configs is not None: break LOG.warning('Failed to retrieve ODL features, attempt %i', times_tried) time.sleep(wait_interval) def has(feature): return feature in feature_configs def get_config(feature): return feature_configs[feature] def deinit(): '''Set odl_features back to it's pre-initlialized ''' global feature_configs feature_configs = copy.copy(EMPTY_FEATURES) def _load_features(json): """parse and save features from json""" features = json['features'] if 'feature' not in features: return copy.copy(EMPTY_FEATURES) # documentation on the JSON received can be found at: # https://github.com/opendaylight/neutron/blob/master/model/src/main/yang/neutron-extensions.yang LOG.info('Retrieved ODL features %s', features) response = {} for feature in features['feature']: cfg = feature.get('configuration', '') response[feature['service-provider-feature'].split(':')[1]] = cfg return response def 
_fetch_features(): '''Fetch the list of features declared by ODL. This function should be called once during initialization ''' path = 'restconf/operational/neutron:neutron/neutron:features' features_url = utils.get_odl_url(path) client = odl_client.OpenDaylightRestClient.create_client(features_url) try: response = client.request('get') except exceptions.ConnectionError: LOG.error("Error connecting to ODL to retrieve features", exc_info=True) return None if response.status_code == 400: LOG.debug('ODL does not support feature negotiation') return copy.copy(EMPTY_FEATURES) if response.status_code == 404: LOG.debug('No features configured') return copy.copy(EMPTY_FEATURES) if response.status_code != 200: LOG.warning('error fetching features: %i', response.status_code) return None return _load_features(response.json()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/postcommit.py0000644000175000017500000000400300000000000026756 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import types from oslo_log import helpers as log_helpers import six def _build_func(client_method): @log_helpers.log_method_call def f(self, *args, **kwargs): self.journal.set_sync_event() f.__name__ = client_method return f def _unboundmethod(func, cls): if six.PY3: # python 3.x doesn't have unbound methods func.__qualname__ = cls.__qualname__ + '.' + func.__name__ # PEP 3155 return func # python 2.x return types.MethodType(func, None, cls) def _get_method_name(op, resource): return op + '_' + resource + '_postcommit' def _build_method(cls, resource): # add methods like the following: # # @log_helpers.log_method_call # def __postcommit(self, *args, **kwargs): # self.journal.set_sync_event() operations = ['create', 'update', 'delete'] for op in operations: client_method = _get_method_name(op, resource) if hasattr(cls, client_method) and client_method not in cls.__dict__: f = _build_func(client_method) unbound = _unboundmethod(f, cls) setattr(cls, client_method, unbound) def _build_methods(cls, *resources): for resource in resources: _build_method(cls, resource) def add_postcommit(*args): def postcommit(cls): _build_methods(cls, *args) return cls return postcommit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/utils.py0000644000175000017500000000314400000000000025725 0ustar00jamespagejamespage00000000000000# Copyright (c) 2014 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from six.moves import urllib_parse as urlparse from networking_odl.common import constants as odl_const cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') def try_del(d, keys): """Ignore key errors when deleting from a dictionary.""" for key in keys: try: del d[key] except KeyError: pass def make_url_object(object_type): obj_pl = odl_const.RESOURCE_URL_MAPPINGS.get(object_type, None) if obj_pl is None: obj_pl = neutronify(object_type + 's') return obj_pl # TODO(manjeets) consolidate this method with make_url_object def neutronify(name): """Adjust the resource name for use with Neutron's API""" return name.replace('_', '-') def get_odl_url(path=''): '''Make a URL for some ODL resource (path)''' purl = urlparse.urlsplit(cfg.CONF.ml2_odl.url) features_url = urlparse.urlunparse(( purl.scheme, purl.netloc, path, '', '', '')) return features_url ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/common/websocket_client.py0000644000175000017500000003307600000000000030120 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import ssl import threading import time from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import excutils from requests import codes from requests import exceptions import websocket from networking_odl._i18n import _ from networking_odl.common import client as odl_client cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') LOG = log.getLogger(__name__) ODL_OPERATIONAL_DATASTORE = "OPERATIONAL" ODL_CONFIGURATION_DATASTORE = "CONFIGURATION" ODL_NOTIFICATION_SCOPE_BASE = "BASE" ODL_NOTIFICATION_SCOPE_ONE = "ONE" ODL_NOTIFICATION_SCOPE_SUBTREE = "SUBTREE" ODL_WEBSOCKET_DISCONNECTED = "ODL_WEBSOCKET_DISCONNECTED" ODL_WEBSOCKET_CONNECTING = "ODL_WEBSOCKET_CONNECTING" ODL_WEBSOCKET_CONNECTED = "ODL_WEBSOCKET_CONNECTED" class OpenDaylightWebsocketClient(object): """Thread for the OpenDaylight Websocket """ def __init__(self, odl_rest_client, path, datastore, scope, leaf_node_only, packet_handler, timeout, status_cb=None): self.odl_rest_client = odl_rest_client self.path = path self.datastore = datastore self.scope = scope self.leaf_node_only = leaf_node_only self.packet_handler = packet_handler self.timeout = timeout self.exit_websocket_thread = False self.status_cb = status_cb self.current_status = ODL_WEBSOCKET_DISCONNECTED self._odl_sync_thread = self.start_odl_websocket_thread() @classmethod def odl_create_websocket(cls, odl_url, path, datastore, scope, packet_handler, status_cb=None, leaf_node_only=False): """Create a websocket connection with ODL. This method will create a websocket client based on path, datastore and scope params. On data recv from websocket packet_handler callback is called. 
status_cb callback can be provided if notifications are requried for socket status changes """ if odl_url is None: LOG.error("invalid odl url", exc_info=True) raise ValueError(_("Invalid ODL URL")) odl_rest_client = odl_client.OpenDaylightRestClient.create_client( odl_url) return cls( odl_rest_client, path, datastore, scope, leaf_node_only, packet_handler, cfg.CONF.ml2_odl.timeout, status_cb ) def start_odl_websocket_thread(self): # Start the websocket thread LOG.debug("starting a new websocket thread") odl_websocket_thread = threading.Thread( name='websocket', target=self.run_websocket_thread) odl_websocket_thread.start() return odl_websocket_thread def set_exit_flag(self, value=True): # set flag to exit self.exit_websocket_thread = value def run_websocket_thread(self, exit_after_run=False): # TBD connections are persistent so there is really no way to know # when it is a "first connection". We need to wait for the # dis/reconnect logic to be able to know this first_connection = True ws = None while not self.exit_websocket_thread: if exit_after_run: # Permanently waiting thread model breaks unit tests # Adding this arg to exit after one run for unit tests self.set_exit_flag() # connect if necessary if ws is None: try: ws = self._connect_ws() except ValueError: LOG.error("websocket irrecoverable error ") return if ws is None: time.sleep(cfg.CONF.ml2_odl.restconf_poll_interval) continue # read off the websocket try: data = ws.recv() if not data: LOG.warning("websocket received 0 bytes") continue except websocket.WebSocketTimeoutException: continue except ssl.SSLError as e: message = e.args[0] if e.args else None # TODO(trozet): Workaround due to SSL Timeout not being caught # in websocket-client lib (issue 387). Remove when fixed in # websocket-client lib. 
if message and 'timed out' in message: continue else: LOG.error("SSL websocket unexpected exception, " "closing and restarting...", exc_info=True) # TODO(rsood): Websocket reconnect can cause race # conditions self._close_ws(ws) ws = None continue except websocket.WebSocketConnectionClosedException: # per websocket-client, "If remote host closed the connection # or some network error happened" LOG.warning("websocket connection closed or IO error", exc_info=True) self._close_ws(ws) ws = None continue except Exception: # Connection closed trigger reconnection LOG.error("websocket unexpected exception, " "closing and restarting...", exc_info=True) # TODO(rsood): Websocket reconnect can cause race conditions self._close_ws(ws) ws = None continue # Call handler for data received try: self.packet_handler(data, first_connection) first_connection = False except Exception: LOG.error("Error in packet_handler callback", exc_info=True) self._close_ws(ws) def _set_websocket_status(self, status): LOG.info("websocket transition to status %s", status) try: if self.status_cb: self.status_cb(status) except Exception: LOG.error("Error in status_cb", exc_info=True) def _subscribe_websocket(self): """ODL Websocket change notification subscription""" # Check ODL URL for details on this process # https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf:Change_event_notification_subscription#rpc_create-data-change-event-subscription # noqa: E501 # pylint: disable=line-too-long # Invoke rpc create-data-change-event-subscription ws_create_dce_subs_url = ("restconf/operations/sal-remote:" "create-data-change-event-subscription") odl_subscription_data = {'input': { 'path': self.path, 'sal-remote-augment:datastore': self.datastore, 'sal-remote-augment:scope': self.scope, 'sal-remote-augment:notification-output-type': 'JSON' }} try: response = self.odl_rest_client.sendjson('post', ws_create_dce_subs_url, odl_subscription_data) response.raise_for_status() except 
exceptions.ConnectionError: LOG.error("cannot connect to the opendaylight controller") return None except exceptions.HTTPError as e: # restconf returns 400 on operation when path is not available if e.response.status_code == codes.bad_request: LOG.debug("response code bad_request (400)" "check path for websocket connection") raise ValueError(_("bad_request (http400),check path.")) else: LOG.warning("websocket connection failed", exc_info=True) return None except Exception: LOG.error("websocket subscription failed", exc_info=True) return None # Subscribing to stream. Returns websocket URL to listen to ws_dce_subs_url = """restconf/streams/stream/""" try: stream_name = response.json() stream_name = stream_name['output']['stream-name'] url = ws_dce_subs_url + stream_name if self.leaf_node_only: url += "?odl-leaf-nodes-only=true" response = self.odl_rest_client.get(url) response.raise_for_status() stream_url = response.headers['location'] LOG.debug("websocket stream URL: %s", stream_url) return stream_url except exceptions.ConnectionError: LOG.error("cannot connect to the opendaylight controller") return None except exceptions.HTTPError as e: # restconf returns 404 on operation when there is no entry if e.response.status_code == codes.not_found: LOG.debug("response code not_found (404)" "unable to websocket connection url") raise ValueError(_("bad_request (http400),check path")) else: LOG.warning("websocket connection failed") return None except ValueError: with excutils.save_and_reraise_exception(): LOG.error("websocket subscribe got invalid stream name") except KeyError: LOG.error("websocket subscribe got bad stream data") raise ValueError(_("websocket subscribe bad stream data")) except Exception: LOG.error("websocket subscription failed", exc_info=True) return None def _socket_create_connection(self, stream_url): ws = None try: ws = websocket.create_connection(stream_url, timeout=self.timeout) except ValueError: with excutils.save_and_reraise_exception(): 
LOG.error("websocket create connection invalid URL") except Exception: # Although a number of exceptions can occur here # we handle them all the same way, return None. # As such, enough to just "except Exception." LOG.exception("websocket create connection failed", exc_info=True) return None if ws is None or not ws.connected: LOG.error("websocket create connection unsuccessful") return None LOG.debug("websocket connection established") return ws def _connect_ws(self): self._set_websocket_status(ODL_WEBSOCKET_CONNECTING) stream_url = self._subscribe_websocket() if stream_url is None: return None if 'https:' in self.odl_rest_client.url and 'wss:' not in stream_url: LOG.warning('TLS ODL URL detected, but websocket URL is not. ' 'Forcing websocket URL to TLS') stream_url = stream_url.replace('ws:', 'wss:') # Delay here causes websocket notification lose (ODL Bug 8299) ws = self._socket_create_connection(stream_url) if ws is not None: self._set_websocket_status(ODL_WEBSOCKET_CONNECTED) return ws def _close_ws(self, ws): LOG.debug("closing websocket") try: if ws is not None: ws.close() except Exception: LOG.error("Error while closing websocket", exc_info=True) self._set_websocket_status(ODL_WEBSOCKET_DISCONNECTED) class EventDataParser(object): """Helper class to parse websocket notification data""" NOTIFICATION_TAG = 'notification' DC_NOTIFICATION_TAG = 'data-changed-notification' DC_EVENT_TAG = 'data-change-event' OPERATION_DELETE = 'deleted' OPERATION_CREATE = 'created' OPERATION_UPDATE = 'updated' def __init__(self, item): self.item = item @classmethod def get_item(cls, payload): try: data = jsonutils.loads(payload) except ValueError: LOG.warning("invalid websocket notification") return try: dn_events = (data[cls.NOTIFICATION_TAG] [cls.DC_NOTIFICATION_TAG] [cls.DC_EVENT_TAG]) if not isinstance(dn_events, list): dn_events = [dn_events] for e in dn_events: yield cls(e) except KeyError: LOG.warning("invalid JSON for websocket notification") def get_fields(self): return 
(self.get_operation(), self.get_path(), self.get_data()) def get_path(self): return self.item.get('path') def get_data(self): return self.item.get('data') def get_operation(self): return self.item.get('operation') @staticmethod def extract_field(text, key): pattern = r'\[' + key + r'=(.*?)\]' match = re.search(pattern, text) if match: return match.group(1) return None ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/0000755000175000017500000000000000000000000023306 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/__init__.py0000644000175000017500000000000000000000000025405 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/db.py0000644000175000017500000002220300000000000024244 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from neutron_lib.db import api as db_api from oslo_log import log as logging from sqlalchemy import asc from sqlalchemy import bindparam from sqlalchemy.ext import baked from sqlalchemy import func from sqlalchemy import or_ from sqlalchemy.orm import aliased from networking_odl.common import constants as odl_const from networking_odl.db import models LOG = logging.getLogger(__name__) bakery = baked.bakery() def get_pending_or_processing_ops(context, object_uuid, operation=None): # NOTE (sai): For performance reasons, we expect this method to use baked # query (http://docs.sqlalchemy.org/en/latest/orm/extensions/baked.html) baked_query = bakery(lambda s: s.query( models.OpenDaylightJournal)) baked_query += lambda q: q.filter( or_(models.OpenDaylightJournal.state == odl_const.PENDING, models.OpenDaylightJournal.state == odl_const.PROCESSING), models.OpenDaylightJournal.object_uuid == bindparam('uuid')) if operation: if isinstance(operation, (list, tuple)): baked_query += lambda q: q.filter( models.OpenDaylightJournal.operation.in_(bindparam('op', expanding=True))) else: baked_query += lambda q: q.filter( models.OpenDaylightJournal.operation == bindparam('op')) return baked_query(context.session).params( uuid=object_uuid, op=operation).all() def get_pending_delete_ops_with_parent(context, object_type, parent_id): rows = context.session.query(models.OpenDaylightJournal).filter( or_(models.OpenDaylightJournal.state == odl_const.PENDING, models.OpenDaylightJournal.state == odl_const.PROCESSING), models.OpenDaylightJournal.object_type == object_type, models.OpenDaylightJournal.operation == odl_const.ODL_DELETE ).all() return (row for row in rows if parent_id in row.data) def get_all_db_rows(context): return context.session.query(models.OpenDaylightJournal).all() def get_all_db_rows_by_state(context, state): return context.session.query(models.OpenDaylightJournal).filter_by( state=state).all() # Retry deadlock exception for Galera DB. 
# If two (or more) different threads call this method at the same time, they # might both succeed in changing the same row to pending, but at least one # of them will get a deadlock from Galera and will have to retry the operation. @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def get_oldest_pending_db_row_with_lock(context): # NOTE (sai): For performance reasons, we expect this method to use baked # query (http://docs.sqlalchemy.org/en/latest/orm/extensions/baked.html) journal_dep = aliased(models.OpenDaylightJournal) dep_query = bakery(lambda s1: s1.query(journal_dep)) dep_query += lambda q: q.filter( models.OpenDaylightJournal.seqnum == journal_dep.seqnum) dep_query += lambda q: q.outerjoin( journal_dep.depending_on, aliased=True) dep_query += lambda q: q.filter( or_(models.OpenDaylightJournal.state == odl_const.PENDING, models.OpenDaylightJournal.state == odl_const.PROCESSING)) row = bakery(lambda s2: s2.query(models.OpenDaylightJournal)) row += lambda q: q.filter( models.OpenDaylightJournal.state == odl_const.PENDING, ~ (dep_query._as_query(q.session)).exists()) row += lambda q: q.order_by( asc(models.OpenDaylightJournal.last_retried)) row = row(context.session).first() if row: update_db_row_state(context, row, odl_const.PROCESSING) return row def delete_dependency(context, entry): """Delete dependency upon the given ID""" conn = context.session.connection() stmt = models.journal_dependencies.delete( models.journal_dependencies.c.depends_on == entry.seqnum) conn.execute(stmt) context.session.expire_all() def update_db_row_state(context, row, state, flush=True): row.state = state context.session.merge(row) if flush: context.session.flush() def update_pending_db_row_retry(context, row, retry_count): if row.retry_count >= retry_count: update_db_row_state(context, row, odl_const.FAILED) else: row.retry_count += 1 update_db_row_state(context, row, odl_const.PENDING) def delete_row(context, row=None, row_id=None, flush=True): if row_id: row = 
context.session.query(models.OpenDaylightJournal).filter_by( seqnum=row_id).one() if row: context.session.delete(row) if flush: context.session.flush() def create_pending_row(context, object_type, object_uuid, operation, data, depending_on=None): if depending_on is None: depending_on = [] row = models.OpenDaylightJournal(object_type=object_type, object_uuid=object_uuid, operation=operation, data=data, state=odl_const.PENDING, depending_on=depending_on) context.session.add(row) # Keep session flush for unit tests. NOOP for L2/L3 events since calls are # made inside database session transaction with subtransactions=True. context.session.flush() return row @db_api.CONTEXT_WRITER.savepoint def delete_pending_rows(context, operations_to_delete): context.session.query(models.OpenDaylightJournal).filter( models.OpenDaylightJournal.operation.in_(operations_to_delete), models.OpenDaylightJournal.state == odl_const.PENDING).delete( synchronize_session=False) context.session.expire_all() def _update_periodic_task_state(context, expected_state, state, task): row = context.session.query(models.OpenDaylightPeriodicTask).filter_by( state=expected_state, task=task).with_for_update().one_or_none() if row is None: return False row.state = state return True def was_periodic_task_executed_recently(context, task, interval): now = context.session.execute(func.now()).scalar() delta = datetime.timedelta(seconds=interval) row = context.session.query(models.OpenDaylightPeriodicTask).filter( models.OpenDaylightPeriodicTask.task == task, (now - delta >= (models.OpenDaylightPeriodicTask.lock_updated)) ).one_or_none() return bool(row is None) def lock_periodic_task(context, task): return _update_periodic_task_state(context, odl_const.PENDING, odl_const.PROCESSING, task) def unlock_periodic_task(context, task): return _update_periodic_task_state(context, odl_const.PROCESSING, odl_const.PENDING, task) def update_periodic_task(context, task, operation=None): """Update the current periodic task 
details. The function assumes the lock is held, so it mustn't be run outside of a locked context. """ op_text = None if operation: op_text = operation.__name__ row = context.session.query(models.OpenDaylightPeriodicTask).filter_by( task=task).one() row.processing_operation = op_text @db_api.CONTEXT_WRITER.savepoint def delete_rows_by_state_and_time(context, state, time_delta): # NOTE(mpeterson): The reason behind deleting one-by-one is that InnoDB # ignores the WHERE clause to issue a LOCK when executing a DELETE. By # executing each operation indepently, we minimize exposures to DEADLOCKS. now = context.session.execute(func.now()).scalar() rows = context.session.query(models.OpenDaylightJournal).filter( models.OpenDaylightJournal.state == state, models.OpenDaylightJournal.last_retried < now - time_delta).all() for row in rows: delete_row(context, row, flush=False) context.session.expire_all() @db_api.CONTEXT_WRITER.savepoint def reset_processing_rows(context, max_timedelta): # NOTE(mpeterson): The reason behind updating one-by-one is that InnoDB # ignores the WHERE clause to issue a LOCK when executing an UPDATE. By # executing each operation indepently, we minimize exposures to DEADLOCKS. now = context.session.execute(func.now()).scalar() max_timedelta = datetime.timedelta(seconds=max_timedelta) rows = context.session.query(models.OpenDaylightJournal).filter( models.OpenDaylightJournal.last_retried < now - max_timedelta, models.OpenDaylightJournal.state == odl_const.PROCESSING).all() for row in rows: update_db_row_state(context, row, odl_const.PENDING, flush=False) return len(rows) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/head.py0000644000175000017500000000162200000000000024562 0ustar00jamespagejamespage00000000000000# Copyright 2016 Intel Corporation. # Copyright 2016 Isaku Yamahata # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db.migration.models import head from networking_odl.db import models # noqa def get_metadata(): return head.model_base.BASEV2.metadata ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/0000755000175000017500000000000000000000000025277 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/__init__.py0000644000175000017500000000000000000000000027376 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/0000755000175000017500000000000000000000000031127 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/README0000644000175000017500000000011600000000000032005 0ustar00jamespagejamespage00000000000000This directory contains the migration scripts for the networking_odl project. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/__init__.py0000644000175000017500000000000000000000000033226 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/env.py0000644000175000017500000000542000000000000032272 0ustar00jamespagejamespage00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from logging import config as logging_config from alembic import context from neutron_lib.db import model_base from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event from neutron.db.migration.alembic_migrations import external from neutron.db.migration.models import head # noqa MYSQL_ENGINE = None ODL_VERSION_TABLE = 'odl_alembic_version' config = context.config neutron_config = config.neutron_config logging_config.fileConfig(config.config_file_name) target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object, name, type_, reflected, compare_to): if type_ == 'table' and name in external.TABLES: return False return True def run_migrations_offline(): set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object kwargs['version_table'] = ODL_VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, include_object=include_object, version_table=ODL_VERSION_TABLE ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/script.py.mako0000644000175000017500000000200600000000000033731 0ustar00jamespagejamespage00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. 
revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} %endif from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/0000755000175000017500000000000000000000000032777 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000644000175000017500000000001500000000000034714 0ustar00jamespagejamespage000000000000007cbef5a56298 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD0000644000175000017500000000001500000000000034456 0ustar00jamespagejamespage000000000000006f7dfb241354 ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_0000644000175000017500000000144700000000000034654 0ustar00jamespagejamespage00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial odl db, branchpoint Revision ID: b89a299e19f9 Revises: None Create Date: 2015-09-03 22:22:22.222222 """ # revision identifiers, used by Alembic. revision = 'b89a299e19f9' down_revision = None def upgrade(): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/0000755000175000017500000000000000000000000034245 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000113 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/ 27 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/contra0000755000175000017500000000000000000000000035454 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000024700000000000011460 xustar0000000000000000145 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/contra0000644000175000017500000000200300000000000035451 0ustar00jamespagejamespage00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Start of odl contract branch Revision ID: 383acb0d38a0 Revises: b89a299e19f9 Create Date: 2015-09-03 22:27:49.306394 """ from neutron.db import migration from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = '383acb0d38a0' down_revision = 'b89a299e19f9' branch_labels = (cli.CONTRACT_BRANCH,) # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.MITAKA] def upgrade(): pass ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000111 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/ 27 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand0000755000175000017500000000000000000000000035445 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand0000644000175000017500000000160400000000000035450 0ustar00jamespagejamespage00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Start of odl expand branch Revision ID: 247501328046 Revises: b89a299e19f9 Create Date: 2015-09-03 22:27:49.292238 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = '247501328046' down_revision = 'b89a299e19f9' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): pass ././@PaxHeader0000000000000000000000000000027300000000000011457 xustar0000000000000000165 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand0000644000175000017500000000350300000000000035450 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """OpenDaylight Neutron mechanism driver refactor Revision ID: 37e242787ae5 Revises: 247501328046 Create Date: 2015-10-30 22:09:27.221767 """ from neutron.db import migration from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '37e242787ae5' down_revision = '247501328046' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.MITAKA] def upgrade(): op.create_table( 'opendaylightjournal', sa.Column('id', sa.String(36), primary_key=True), sa.Column('object_type', sa.String(36), nullable=False), sa.Column('object_uuid', sa.String(36), nullable=False), sa.Column('operation', sa.String(36), nullable=False), sa.Column('data', sa.PickleType, nullable=True), sa.Column('state', sa.Enum('pending', 'processing', 'failed', 'completed', name='state'), nullable=False, default='pending'), sa.Column('retry_count', sa.Integer, default=0), sa.Column('created_at', sa.DateTime, default=sa.func.now()), sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(), onupdate=sa.func.now()) ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/0000755000175000017500000000000000000000000034311 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000113 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/contract/ 27 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/contra0000755000175000017500000000000000000000000035520 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000026200000000000011455 xustar0000000000000000156 
path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/contra0000644000175000017500000000316100000000000035523 0ustar00jamespagejamespage00000000000000# Copyright 2016 Isaku Yamahata # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """update opendayligut journal Revision ID: fa0c536252a5 Revises: 383acb0d38a0 Create Date: 2016-08-05 23:03:46.470595 """ from alembic import op # revision identifiers, used by Alembic. revision = 'fa0c536252a5' down_revision = '383acb0d38a0' depends_on = ('3d560427d776', ) def upgrade(): # Since a new primary key is introduced and alembic doesn't allow to # add new primary key, create a new table with new primary key and # rename it. 
op.execute("INSERT INTO opendaylightjournal_new " "(object_type, object_uuid, operation, data, " "state, retry_count, created_at, last_retried) " "SELECT object_type, object_uuid, operation, data, " "state, retry_count, created_at, last_retried " "FROM opendaylightjournal " "WHERE state != 'completed' " "ORDER BY created_at ASC") op.drop_table('opendaylightjournal') op.rename_table('opendaylightjournal_new', 'opendaylightjournal') ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000111 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/expand/ 27 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/expand0000755000175000017500000000000000000000000035511 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000026300000000000011456 xustar0000000000000000157 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/expand0000644000175000017500000000337400000000000035522 0ustar00jamespagejamespage00000000000000# Copyright 2016 Isaku Yamahata # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """add sequence number to journal Revision ID: 3d560427d776 Revises: 703dbf02afde Create Date: 2016-08-05 15:50:22.151078 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '3d560427d776' down_revision = '703dbf02afde' def upgrade(): op.create_table( 'opendaylightjournal_new', sa.Column('seqnum', sa.BigInteger(), primary_key=True, autoincrement=True), sa.Column('object_type', sa.String(36), nullable=False), sa.Column('object_uuid', sa.String(36), nullable=False), sa.Column('operation', sa.String(36), nullable=False), sa.Column('data', sa.PickleType, nullable=True), sa.Column('state', sa.Enum('pending', 'processing', 'failed', 'completed', name='state'), nullable=False, default='pending'), sa.Column('retry_count', sa.Integer, default=0), sa.Column('created_at', sa.DateTime, default=sa.func.now()), sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(), onupdate=sa.func.now()), ) ././@PaxHeader0000000000000000000000000000026200000000000011455 xustar0000000000000000156 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/newton/expand0000644000175000017500000000332200000000000035513 0ustar00jamespagejamespage00000000000000# Copyright 2016 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Add journal maintenance table Revision ID: 703dbf02afde Revises: 37e242787ae5 Create Date: 2016-04-12 10:49:31.802663 """ from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from networking_odl.common import constants as odl_const # revision identifiers, used by Alembic. revision = '703dbf02afde' down_revision = '37e242787ae5' def upgrade(): maint_table = op.create_table( 'opendaylight_maintenance', sa.Column('id', sa.String(36), primary_key=True), sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING, name='state'), nullable=False), sa.Column('processing_operation', sa.String(70)), sa.Column('lock_updated', sa.TIMESTAMP, nullable=False, server_default=sa.func.now(), onupdate=sa.func.now()) ) # Insert the only row here that is used to synchronize the lock between # different Neutron processes. op.bulk_insert(maint_table, [{'id': uuidutils.generate_uuid(), 'state': odl_const.PENDING}]) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/0000755000175000017500000000000000000000000033727 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000111 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/contract/ 27 mtime=1585130284.798714 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/contract0000755000175000017500000000000000000000000035465 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000025300000000000011455 xustar0000000000000000149 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/contract/7cbef5a56298_drop_created_at_column.py 22 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/contract0000644000175000017500000000163100000000000035470 0ustar00jamespagejamespage00000000000000# Copyright 2017, NEC Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Drop created_at column Revision ID: 7cbef5a56298 Revises: eccd865b7d3a Create Date: 2017-08-16 05:49:53.964988 """ from alembic import op # revision identifiers, used by Alembic. revision = '7cbef5a56298' down_revision = 'eccd865b7d3a' def upgrade(): op.drop_column('opendaylightjournal', 'created_at') ././@PaxHeader0000000000000000000000000000027000000000000011454 xustar0000000000000000162 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/contract0000644000175000017500000000163200000000000035471 0ustar00jamespagejamespage00000000000000# Copyright 2017 NEC Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """drop opendaylight_maintenance table Revision ID: eccd865b7d3a Revises: fa0c536252a5 Create Date: 2017-05-24 03:00:40.194278 """ from alembic import op # revision identifiers, used by Alembic. revision = 'eccd865b7d3a' down_revision = 'fa0c536252a5' def upgrade(): op.drop_table('opendaylight_maintenance') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8027139 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0000755000175000017500000000000000000000000035206 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000026100000000000011454 xustar0000000000000000155 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/expand/00000644000175000017500000000251600000000000035274 0ustar00jamespagejamespage00000000000000# Copyright 2017 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Add journal dependencies table Revision ID: 0472f56ff2fb Revises: 43af357fd638 Create Date: 2017-04-02 11:02:01.622548 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '0472f56ff2fb' down_revision = '43af357fd638' def upgrade(): op.create_table( 'opendaylight_journal_deps', sa.Column('depends_on', sa.BigInteger(), sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'), primary_key=True), sa.Column('dependent', sa.BigInteger(), sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'), primary_key=True)) ././@PaxHeader0000000000000000000000000000027200000000000011456 xustar0000000000000000164 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/expand/40000644000175000017500000000211700000000000035275 0ustar00jamespagejamespage00000000000000# Copyright (C) 2017 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Added version_id for optimistic locking Revision ID: 43af357fd638 Revises: 3d560427d776 Create Date: 2016-03-24 10:14:56.408413 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '43af357fd638' down_revision = '3d560427d776' depends_on = ('fa0c536252a5',) def upgrade(): op.add_column('opendaylightjournal', sa.Column('version_id', sa.Integer, server_default='0', nullable=False)) ././@PaxHeader0000000000000000000000000000027200000000000011456 xustar0000000000000000164 path=networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/migration/alembic_migrations/versions/pike/expand/60000644000175000017500000000324400000000000035301 0ustar00jamespagejamespage00000000000000# Copyright 2017 NEC Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """create opendaylight_preiodic_task table Revision ID: 6f7dfb241354 Revises: 0472f56ff2fb Create Date: 2017-05-24 03:01:00.755796 """ from alembic import op import sqlalchemy as sa from networking_odl.common import constants as odl_const # revision identifiers, used by Alembic. 
revision = '6f7dfb241354' down_revision = '0472f56ff2fb' def upgrade(): periodic_table = op.create_table( 'opendaylight_periodic_task', sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING, name='state'), nullable=False), sa.Column('processing_operation', sa.String(70)), sa.Column('task', sa.String(70), primary_key=True), sa.Column('lock_updated', sa.TIMESTAMP, nullable=False, server_default=sa.func.now(), onupdate=sa.func.now()) ) op.bulk_insert(periodic_table, [{'task': 'maintenance', 'state': odl_const.PENDING}, {'task': 'hostconfig', 'state': odl_const.PENDING}]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/db/models.py0000644000175000017500000000551000000000000025144 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sqlalchemy as sa from neutron_lib.db import model_base from networking_odl.common import constants as odl_const IdType = sa.BigInteger().with_variant(sa.Integer(), 'sqlite') journal_dependencies = sa.Table( 'opendaylight_journal_deps', model_base.BASEV2.metadata, sa.Column('depends_on', IdType, sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'), primary_key=True), sa.Column('dependent', IdType, sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'), primary_key=True)) class OpenDaylightJournal(model_base.BASEV2): __tablename__ = 'opendaylightjournal' seqnum = sa.Column(IdType, primary_key=True, autoincrement=True) object_type = sa.Column(sa.String(36), nullable=False) object_uuid = sa.Column(sa.String(36), nullable=False) operation = sa.Column(sa.String(36), nullable=False) data = sa.Column(sa.PickleType, nullable=True) state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.FAILED, odl_const.PROCESSING, odl_const.COMPLETED), nullable=False, default=odl_const.PENDING) retry_count = sa.Column(sa.Integer, default=0) last_retried = sa.Column(sa.TIMESTAMP, server_default=sa.func.now(), onupdate=sa.func.now()) version_id = sa.Column(sa.Integer, server_default='0', nullable=False) dependencies = sa.orm.relationship( "OpenDaylightJournal", secondary=journal_dependencies, primaryjoin=seqnum == journal_dependencies.c.depends_on, secondaryjoin=seqnum == journal_dependencies.c.dependent, backref="depending_on" ) __mapper_args__ = { 'version_id_col': version_id } class OpenDaylightPeriodicTask(model_base.BASEV2): __tablename__ = 'opendaylight_periodic_task' state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.PROCESSING), nullable=False) processing_operation = sa.Column(sa.String(70)) task = sa.Column(sa.String(70), primary_key=True) lock_updated = sa.Column(sa.TIMESTAMP, nullable=False, server_default=sa.func.now(), onupdate=sa.func.now()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1585130284.8027139 networking-odl-16.0.0.0b2.dev1/networking_odl/dhcp/0000755000175000017500000000000000000000000023637 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/dhcp/__init__.py0000644000175000017500000000000000000000000025736 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/dhcp/odl_dhcp_driver.py0000644000175000017500000000651000000000000027342 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.callbacks import registry from neutron_lib import constants as n_const from neutron_lib.plugins import directory from oslo_log import log as logging from neutron.plugins.ml2 import driver_context from networking_odl.common import constants from networking_odl.dhcp import odl_dhcp_driver_base as driver_base LOG = logging.getLogger(__name__) @registry.has_registry_receivers class OdlDhcpDriver(driver_base.OdlDhcpDriverBase): @registry.receives(constants.ODL_SUBNET, [constants.BEFORE_COMPLETE]) def handle_subnet_event(self, resource, event, trigger, context=None, operation=None, row=None, **kwargs): if (operation == constants.ODL_CREATE or operation == constants.ODL_UPDATE): try: subnet_ctxt = self._get_subnet_context(context, row.data['network_id'], row.data['id']) self.create_or_delete_dhcp_port(subnet_ctxt) except Exception as e: LOG.error("Error while processing %s subnet %s: %s", operation, row.data['id'], e) @registry.receives(constants.ODL_PORT, [constants.BEFORE_COMPLETE]) def handle_port_update_event(self, resource, event, trigger, context=None, operation=None, row=None, **kwargs): if operation == constants.ODL_UPDATE: try: self._delete_if_dhcp_port(context, row) except Exception as e: device_id = row.data['device_id'] subnet_id = device_id[13:] if device_id else '' LOG.error("Error while processing %s port %s of subnet %s: %s", operation, row.data['id'], subnet_id, e) def _get_subnet_context(self, context, network_id, subnet_id): plugin = directory.get_plugin() network = plugin.get_network(context, network_id) subnet = plugin.get_subnet(context, subnet_id) return driver_context.SubnetContext(plugin, context, subnet, network) def _delete_if_dhcp_port(self, context, row): device_owner = row.data['device_owner'] device_id = row.data['device_id'] fixed_ips = row.data['fixed_ips'] device_id_type = driver_base.OPENDAYLIGHT_DEVICE_ID if (device_owner and device_owner == n_const.DEVICE_OWNER_DHCP and device_id and 
device_id.startswith(device_id_type) and not fixed_ips): plugin = directory.get_plugin() self._delete_port(plugin, context, row.data['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/dhcp/odl_dhcp_driver_base.py0000644000175000017500000000747600000000000030350 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_const from neutron_lib.plugins import utils as p_utils from oslo_log import log as logging LOG = logging.getLogger(__name__) OPENDAYLIGHT_DEVICE_ID = 'OpenDaylight' class OdlDhcpDriverBase(object): # NOTE:(Karthik Prasad/karthik.prasad) Not validating based on value change # of enable_dhcp in case of subnet update event, instead validating on # port_id presence in DB by locking the session, this will enable user to # reissue the same command in case of failure. def create_or_delete_dhcp_port(self, subnet_context): # NOTE:(Achuth) Fixes bug 1746715 # DHCP port to be created for IPv4 subnets only, since ODL doesn't # support IPv6 neutron port ARP responses. This prevents validations # in ODL and avoids processing these ports incorrectly. 
if subnet_context.current['ip_version'] != 4: LOG.warning("ODL DHCP port is supported only for IPv4 subnet %s", subnet_context.current['id']) return port_id = self.get_dhcp_port_if_exists(subnet_context) plugin = subnet_context._plugin if not port_id and subnet_context.current['enable_dhcp']: LOG.debug("Creating ODL DHCP port for subnet %s of network %s", subnet_context.current['id'], subnet_context.current['network_id']) port = self._make_dhcp_port_dict(subnet_context) # TODO(boden): rehome and consume from neutron-lib p_utils.create_port(plugin, subnet_context._plugin_context, port) if port_id and not subnet_context.current['enable_dhcp']: self._delete_port(plugin, subnet_context._plugin_context, port_id) def _delete_port(self, plugin, context, port_id): LOG.debug("Deleting ODL DHCP port with id %s", port_id) plugin.delete_port(context, port_id) def _make_dhcp_port_dict(self, subnet_context): subnet_id = subnet_context.current['id'] port_dict = dict( name='', admin_state_up=True, device_id=OPENDAYLIGHT_DEVICE_ID + '-' + subnet_id, device_owner=n_const.DEVICE_OWNER_DHCP, network_id=subnet_context.current['network_id'], fixed_ips=[dict(subnet_id=subnet_id)], tenant_id=subnet_context.network.current['tenant_id']) return {'port': port_dict} def get_dhcp_port_if_exists(self, subnet_context): plugin = subnet_context._plugin plugin_context = subnet_context._plugin_context network_id = subnet_context._subnet['network_id'] subnet_id = subnet_context.current['id'] device_id = OPENDAYLIGHT_DEVICE_ID + '-' + subnet_id LOG.debug("Retrieving ODL DHCP port for subnet %s", subnet_id) filters = { 'network_id': [network_id], 'device_id': [device_id], 'device_owner': [n_const.DEVICE_OWNER_DHCP] } ports = plugin.get_ports(plugin_context, filters=filters) if ports: port = ports[0] LOG.debug("Retrieved ODL owned port %s for subnet %s", port['id'], subnet_id) return port['id'] return None ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1585130284.8027139 networking-odl-16.0.0.0b2.dev1/networking_odl/hacking/0000755000175000017500000000000000000000000024325 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/hacking/__init__.py0000644000175000017500000000000000000000000026424 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/hacking/checks.py0000644000175000017500000001103100000000000026133 0ustar00jamespagejamespage00000000000000# Copyright 2017 Intel Corporation. # Copyright 2017 Isaku Yamahata # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import tokenize from hacking.checks import docstrings # TODO(yamahata): enable neutron checking # from neutron.hacking import checks from neutron_lib.hacking import checks _ND01_MSG = ( "ND01: use OpenDaylight (capital D) instead of Opendaylight") # noqa _ND01_OPENDAYLIGHT = 'Opendaylight' # noqa _ND02_MSG = ( "ND02: use the config fixture provided by oslo_config and use config()" " instead of %s") # noqa _ND02_REGEXP_DIRECT = re.compile(r'cfg\.CONF\..* =') _ND03_MSG = ( "ND03: The import of %s has a redundant alias." 
) _ND03_REGEXP_REDUNDANT_IMPORT_ALIAS = re.compile(r'.*import (.+) as \1$') def check_opendaylight_lowercase(logical_line, filename, noqa): """ND01 - Enforce using OpenDaylight.""" if noqa: return if _ND01_OPENDAYLIGHT in logical_line: pos = logical_line.find(_ND01_OPENDAYLIGHT) yield (pos, _ND01_MSG) def _check_opendaylight_lowercase(logical_line, tokens, noqa, token_type): """ND01 - Enforce using OpenDaylight in given token.""" if noqa: return for _token_type, text, start_index, _, _ in tokens: if _token_type == token_type: pos = text.find(_ND01_OPENDAYLIGHT) if pos >= 0: msg = "{} in {}".format( _ND01_MSG, tokenize.tok_name[token_type].lower()) yield (start_index[1] + pos, msg) def check_opendaylight_lowercase_comment(logical_line, tokens, noqa): """ND01 - Enforce using OpenDaylight in comment.""" for res in _check_opendaylight_lowercase( logical_line, tokens, noqa, tokenize.COMMENT): yield res def check_opendaylight_lowercase_string(logical_line, tokens, noqa): """ND01 - Enforce using OpenDaylight in string.""" for res in _check_opendaylight_lowercase( logical_line, tokens, noqa, tokenize.STRING): yield res def check_opendaylight_lowercase_docstring( physical_line, previous_logical, tokens): """ND01 - Enforce using OpenDaylight in docstring.""" docstring = docstrings.is_docstring(tokens, previous_logical) if docstring and _ND01_OPENDAYLIGHT in docstring: pos = physical_line.find(_ND01_OPENDAYLIGHT) return (pos, _ND01_MSG + " in docstring") return None def check_config_over_set_override(logical_line, filename, noqa): """ND02 - Enforcement of config fixture Enforce agreement of not use set_override() but use instead the fixture's config() helper for tests. 
""" if noqa: return if 'networking_odl/tests/' not in filename: return if 'cfg.CONF.set_override' in logical_line: yield (0, _ND02_MSG % "using cfg.CONF.set_override()") def check_config_over_direct_override(logical_line, filename, noqa): """ND02 - Enforcement of config fixture Enforce usage of the fixture's config() helper instead of overriding a setting directly """ if noqa: return if 'networking_odl/tests/' not in filename: return if _ND02_REGEXP_DIRECT.match(logical_line): yield (0, _ND02_MSG % "overriding it directly.") def check_redundant_import_alias(logical_line): """ND03 - Checking no redundant import alias. ND03: from neutron.plugins.ml2 import driver_context as driver_context OK: from neutron.plugins.ml2 import driver_context """ match = re.match(_ND03_REGEXP_REDUNDANT_IMPORT_ALIAS, logical_line) if match: yield (0, _ND03_MSG % match.group(1)) def factory(register): checks.factory(register) register(check_opendaylight_lowercase) register(check_opendaylight_lowercase_comment) register(check_opendaylight_lowercase_string) register(check_opendaylight_lowercase_docstring) register(check_config_over_set_override) register(check_config_over_direct_override) register(check_redundant_import_alias) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8027139 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/0000755000175000017500000000000000000000000024373 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/__init__.py0000644000175000017500000000000000000000000026472 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/base_driver.py0000644000175000017500000000554200000000000027240 
0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 NEC Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import directory from oslo_log import log as logging from networking_odl.common import exceptions LOG = logging.getLogger(__name__) ALL_RESOURCES = {} def get_driver(resource_type): try: return ALL_RESOURCES[resource_type] except KeyError: raise exceptions.ResourceNotRegistered(resource_type=resource_type) class ResourceBaseDriver(object): """Base class for all the drivers to support full sync ResourceBaseDriver class acts as base class for all the drivers and provides default behaviour for full sync functionality. A driver has to provide class or object attribute RESOURCES, specifying resources it manages. RESOURCES must be a dictionary, keys of the dictionary should be resource type and value should be method suffix or plural used for the resources. A driver has to provide plugin type for itself, as class or object attribute. Its value should be the same, as used by neutron to register plugin for the resources it manages. 
""" RESOURCES = {} plugin_type = None def __init__(self, *args, **kwargs): super(ResourceBaseDriver, self).__init__(*args, **kwargs) for resource in self.RESOURCES: ALL_RESOURCES[resource] = self def _get_resource_getter(self, method_suffix): method_name = "get_%s" % method_suffix try: return getattr(self.plugin, method_name) except AttributeError: raise exceptions.PluginMethodNotFound(plugin=self.plugin_type, method=method_name) def get_resources_for_full_sync(self, context, resource_type): """Provide all resources of type resource_type """ if resource_type not in self.RESOURCES: raise exceptions.UnsupportedResourceType resource_getter = self._get_resource_getter( self.RESOURCES[resource_type]) return resource_getter(context) @property def plugin(self): return directory.get_plugin(self.plugin_type) def get_resource_for_recovery(self, context, obj): resource_getter = self._get_resource_getter(obj.object_type) return resource_getter(context, obj.object_uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/cleanup.py0000644000175000017500000000313200000000000026373 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from datetime import timedelta from neutron_lib.db import api as db_api from oslo_config import cfg from oslo_log import log as logging from networking_odl.common import constants as odl_const from networking_odl.db import db LOG = logging.getLogger(__name__) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def delete_completed_rows(context): """Journal maintenance operation for deleting completed rows.""" rows_retention = cfg.CONF.ml2_odl.completed_rows_retention if rows_retention <= 0: return LOG.debug("Deleting completed rows") db.delete_rows_by_state_and_time( context, odl_const.COMPLETED, timedelta(seconds=rows_retention)) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def cleanup_processing_rows(context): row_count = db.reset_processing_rows( context, cfg.CONF.ml2_odl.processing_timeout) if row_count: LOG.info("Reset %(num)s orphaned rows back to pending", {"num": row_count}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/dependency_validations.py0000644000175000017500000001622700000000000031470 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from networking_odl._i18n import _ from networking_odl.common import constants as odl_const from networking_odl.db import db def _get_delete_dependencies(context, object_type, object_uuid): """Get dependent operations for a delete operation. Return any operations that pertain to the delete: Either create or update operations on the same object, or delete operations on other objects that depend on the deleted object. """ # Get any pending or processing create or update ops on the row itself deps = db.get_pending_or_processing_ops( context, object_uuid, operation=(odl_const.ODL_UPDATE, odl_const.ODL_CREATE)) # Get dependent operations of other dependent types dependent_resource_types = _DELETE_DEPENDENCIES.get(object_type) if dependent_resource_types is not None: for resource_type in dependent_resource_types: deps.extend(db.get_pending_delete_ops_with_parent( context, resource_type, object_uuid)) return deps def _get_older_operations(context, object_ids): """Get any older operations. Return any operations still in the queue for the given ID(s). 
""" if not isinstance(object_ids, (list, tuple)): object_ids = (object_ids,) deps = [] for object_id in object_ids: deps.extend( db.get_pending_or_processing_ops(context, object_id)) return deps def _generate_subnet_deps(data): return data['network_id'] def _generate_port_deps(data): object_ids = set(fixed_ip['subnet_id'] for fixed_ip in data['fixed_ips']) object_ids = list(object_ids) object_ids.append(data['network_id']) qos_policy_id = data.get('qos_policy_id') if qos_policy_id is not None: object_ids.append(qos_policy_id) return object_ids def _generate_network_deps(data): return data.get('qos_policy_id') def _generate_sg_rule_deps(data): return data['security_group_id'] def _generate_router_deps(data): return data['gw_port_id'] def _generate_floatingip_deps(data): object_ids = [] network_id = data.get('floating_network_id') if network_id is not None: object_ids.append(network_id) port_id = data.get('port_id') if port_id is not None: object_ids.append(port_id) router_id = data.get('router_id') if router_id is not None: object_ids.append(router_id) return object_ids def _generate_trunk_deps(data): portids = [subport['port_id'] for subport in data['sub_ports']] portids.append(data['port_id']) return portids def _generate_l2gateway_connection_deps(data): object_ids = [] network_id = data.get('network_id') if network_id is not None: object_ids.append(network_id) gateway_id = data.get('gateway_id') if gateway_id is not None: object_ids.append(gateway_id) return object_ids def _generate_sfc_port_pair_deps(data): object_ids = [] ingress_port = data.get('ingress') if ingress_port is not None: object_ids.append(ingress_port) egress_port = data.get('egress') if egress_port is not None: object_ids.append(egress_port) return object_ids def _generate_sfc_port_pair_group_deps(data): return data['port_pairs'] def _generate_sfc_port_chain_deps(data): object_ids = data['port_pair_groups'][:] flow_classifiers = data['flow_classifiers'][:] object_ids.extend(flow_classifiers) 
return object_ids def _generate_bgpvpn_deps(data): object_ids = [] network_ids = data.get('networks') if network_ids is not None: object_ids.extend(network_ids) router_ids = data.get('routers') if router_ids is not None: object_ids.extend(router_ids) return object_ids _CREATE_OR_UPDATE_DEP_GENERATOR = { odl_const.ODL_NETWORK: _generate_network_deps, odl_const.ODL_SUBNET: _generate_subnet_deps, odl_const.ODL_PORT: _generate_port_deps, # TODO(yamahata): dependency between SG and PORT odl_const.ODL_SG_RULE: _generate_sg_rule_deps, odl_const.ODL_ROUTER: _generate_router_deps, odl_const.ODL_FLOATINGIP: _generate_floatingip_deps, odl_const.ODL_TRUNK: _generate_trunk_deps, odl_const.ODL_L2GATEWAY_CONNECTION: _generate_l2gateway_connection_deps, odl_const.ODL_SFC_PORT_PAIR: _generate_sfc_port_pair_deps, odl_const.ODL_SFC_PORT_PAIR_GROUP: _generate_sfc_port_pair_group_deps, odl_const.ODL_SFC_PORT_CHAIN: _generate_sfc_port_chain_deps, odl_const.ODL_BGPVPN: _generate_bgpvpn_deps, } _DELETE_DEPENDENCIES = { odl_const.ODL_NETWORK: (odl_const.ODL_SUBNET, odl_const.ODL_PORT, odl_const.ODL_ROUTER, odl_const.ODL_L2GATEWAY_CONNECTION, odl_const.ODL_BGPVPN), odl_const.ODL_SUBNET: (odl_const.ODL_PORT,), odl_const.ODL_ROUTER: (odl_const.ODL_PORT, odl_const.ODL_FLOATINGIP, odl_const.ODL_BGPVPN), odl_const.ODL_PORT: (odl_const.ODL_TRUNK,), # TODO(yamahata): dependency between SG and PORT odl_const.ODL_SG: (odl_const.ODL_SG_RULE,), odl_const.ODL_L2GATEWAY: (odl_const.ODL_L2GATEWAY_CONNECTION,), odl_const.ODL_SFC_FLOW_CLASSIFIER: (odl_const.ODL_SFC_PORT_CHAIN,), odl_const.ODL_SFC_PORT_PAIR: (odl_const.ODL_SFC_PORT_PAIR_GROUP,), odl_const.ODL_SFC_PORT_PAIR_GROUP: (odl_const.ODL_SFC_PORT_CHAIN,), odl_const.ODL_QOS_POLICY: (odl_const.ODL_PORT, odl_const.ODL_NETWORK), } def calculate(context, operation, object_type, object_uuid, data): """Calculate resource deps in journaled operations. 
As a rule of thumb validation takes into consideration only operations in pending or processing state, other states are irrelevant. :param context: enginefacade context :param row: entry in journal entry to be validated """ deps = [] if operation == odl_const.ODL_DELETE: return _get_delete_dependencies(context, object_type, object_uuid) elif operation == odl_const.ODL_UPDATE: deps.extend( db.get_pending_or_processing_ops( context, object_uuid, operation=(odl_const.ODL_CREATE, odl_const.ODL_UPDATE))) elif operation != odl_const.ODL_CREATE: raise ValueError(_("unsupported operation {}").format(operation)) # Validate deps if there are any to validate. dep_generator = _CREATE_OR_UPDATE_DEP_GENERATOR.get(object_type) if dep_generator is not None: object_ids = dep_generator(data) if object_ids is not None: deps.extend(_get_older_operations(context, object_ids)) return deps ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/full_sync.py0000644000175000017500000001230700000000000026746 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.db import api as db_api from neutron_lib.plugins import directory import requests from networking_odl.common import client from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.journal import base_driver from networking_odl.journal import journal # Define which pending operation types should be deleted _CANARY_NETWORK_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142" _CANARY_TENANT_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142" _CANARY_NETWORK_DATA = {'id': _CANARY_NETWORK_ID, 'tenant_id': _CANARY_TENANT_ID, 'name': 'Sync Canary Network', 'admin_state_up': False} _OPS_TO_DELETE_ON_SYNC = (odl_const.ODL_CREATE, odl_const.ODL_UPDATE) _CLIENT = client.OpenDaylightRestClientGlobal() _ORDERED_ODL_RESOURCES = ( odl_const.ODL_SG, odl_const.ODL_SG_RULE, odl_const.ODL_NETWORK, odl_const.ODL_SUBNET, odl_const.ODL_ROUTER, odl_const.ODL_PORT, odl_const.ODL_FLOATINGIP, odl_const.ODL_QOS_POLICY, odl_const.ODL_TRUNK, odl_const.ODL_BGPVPN, odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION, odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_L2GATEWAY, odl_const.ODL_L2GATEWAY_CONNECTION, ) # TODO(rajivk): Remove this variable, while fixing recovery ALL_RESOURCES = {} FULL_SYNC_RESOURCES = {} def register(driver, resources, handler=None): def default_handler(context, resource_type): return get_resources(context, driver, resources[resource_type]) ALL_RESOURCES[driver] = resources handler = handler or default_handler for resource in resources: FULL_SYNC_RESOURCES[resource] = handler @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def full_sync(context): if not _full_sync_needed(context): return db.delete_pending_rows(context, _OPS_TO_DELETE_ON_SYNC) for resource_type in _ORDERED_ODL_RESOURCES: handler = FULL_SYNC_RESOURCES.get(resource_type) if handler: _sync_resources(context, 
resource_type, handler) journal.record(context, odl_const.ODL_NETWORK, _CANARY_NETWORK_ID, odl_const.ODL_CREATE, _CANARY_NETWORK_DATA) def _full_sync_needed(context): return (_canary_network_missing_on_odl() and _canary_network_not_in_journal(context)) def _canary_network_missing_on_odl(): # Try to reach the ODL server, sometimes it might be up & responding to # HTTP calls but inoperative.. client = _CLIENT.get_client() response = client.get(odl_const.ODL_NETWORKS) response.raise_for_status() response = client.get(odl_const.ODL_NETWORKS + "/" + _CANARY_NETWORK_ID) if response.status_code == requests.codes.not_found: return True # In case there was an error raise it up because we don't know how to deal # with it.. response.raise_for_status() return False def _canary_network_not_in_journal(context): return not db.get_pending_or_processing_ops( context, _CANARY_NETWORK_ID, operation=odl_const.ODL_CREATE) def get_resources_require_id(plugin, context, get_resources_for_id, method_name_for_resource): dep_id_resources = get_resources_for_id(context) resources = [] for d_resource in dep_id_resources: obj_getter = getattr(plugin, method_name_for_resource) resource = obj_getter(context, d_resource['id']) if resource: resources.extend(resource) return resources def get_resources(context, plugin_type, resource_type): plugin = directory.get_plugin(plugin_type) obj_getter = getattr(plugin, 'get_%s' % resource_type) return obj_getter(context) def _sync_resources(context, object_type, handler): resources = handler(context, object_type) for resource in resources: journal.record(context, object_type, resource['id'], odl_const.ODL_CREATE, resource) @db_api.retry_if_session_inactive() # TODO(rajivk): Change name from sync_resource to _sync_resources # once, we are completely moved to new sync mechanism to plug new syncing # mechanism. 
def sync_resources(context, resource_type): driver = base_driver.get_driver(resource_type) resources = driver.get_resources_for_full_sync(context, resource_type) with db_api.CONTEXT_WRITER.savepoint.using(context): for resource in resources: journal.record(context, resource_type, resource['id'], odl_const.ODL_CREATE, resource) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/journal.py0000644000175000017500000002534300000000000026426 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from datetime import datetime import threading import time from neutron_lib.callbacks import registry from neutron_lib import context as nl_context from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from oslo_config import cfg from oslo_db import exception from oslo_log import log as logging from requests import exceptions from networking_odl.common import client from networking_odl.common import constants as odl_const from networking_odl.common import filters from networking_odl.common import utils from networking_odl.db import db from networking_odl.journal import dependency_validations LOG = logging.getLogger(__name__) MAKE_URL = {} LOG_ENTRY_TEMPLATE = ("%(log_type)s (Entry ID: %(entry_id)s) - %(op)s " "%(obj_type)s %(obj_id)s (Time stamp: %(timestamp)s)") LOG_RECORDED = 'Recorded' LOG_PROCESSING = 'Processing' LOG_COMPLETED = 'Completed' LOG_ERROR_PROCESSING = 'Error while processing' def call_thread_on_end(func): def new_func(obj, *args, **kwargs): return_value = func(obj, *args, **kwargs) obj.journal.set_sync_event() return return_value return new_func def _enrich_port(plugin_context, ml2_context, object_type, operation, data): """Enrich the port with additional information needed by ODL""" # NOTE(yamahata): work around of ODL neutron northbound # It passes security groups in port as list of dict for historical reasons. # keep its format for compatibility. # TODO(yamahata): drop this format conversion. if data[odl_const.ODL_SGS]: groups = [{'id': id_} for id_ in data['security_groups']] else: groups = [] new_data = copy.deepcopy(data) new_data[odl_const.ODL_SGS] = groups # NOTE(yamahata): work around for port creation for router # tenant_id=''(empty string) is passed when port is created # by l3 plugin internally for router. # On the other hand, ODL doesn't accept empty string for tenant_id. # In that case, deduce tenant_id from network_id for now. 
# Right fix: modify Neutron so that don't allow empty string # for tenant_id even for port for internal use. # TODO(yamahata): eliminate this work around when neutron side # is fixed # assert port['tenant_id'] != '' if ('tenant_id' not in new_data or new_data['tenant_id'] == ''): if ml2_context: network = ml2_context._network_context._network else: plugin = directory.get_plugin() network = plugin.get_network(plugin_context, new_data['network_id']) new_data['tenant_id'] = network['tenant_id'] return new_data def _log_entry(log_type, entry, log_level=logging.INFO, **kwargs): delta = datetime.now() - datetime.min timestamp = delta.total_seconds() log_dict = {'log_type': log_type, 'op': entry.operation, 'obj_type': entry.object_type, 'obj_id': entry.object_uuid, 'entry_id': entry.seqnum, 'timestamp': timestamp} LOG.log(log_level, LOG_ENTRY_TEMPLATE, log_dict, **kwargs) def record(plugin_context, object_type, object_uuid, operation, data, ml2_context=None): if (object_type == odl_const.ODL_PORT and operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)): data = _enrich_port( plugin_context, ml2_context, object_type, operation, data) # Calculate depending_on on other journal entries depending_on = dependency_validations.calculate( plugin_context, operation, object_type, object_uuid, data) # NOTE(mpeterson): Between the moment that a dependency is calculated and # the new entry is recorded in the journal, an operation can ocurr that # would make the dependency irrelevant. In that case we request a retry. # For more details, read the commit message that introduced this comment. 
try: entry = db.create_pending_row( plugin_context, object_type, object_uuid, operation, data, depending_on=depending_on) except exception.DBReferenceError as e: raise exception.RetryRequest(e) _log_entry(LOG_RECORDED, entry) LOG.debug('Entry with ID %(entry_id)s depends on these entries: ' '%(depending_on)s', {'entry_id': entry.seqnum, 'depending_on': [d.seqnum for d in depending_on]}) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def entry_complete(context, entry): if cfg.CONF.ml2_odl.completed_rows_retention == 0: db.delete_row(context, entry) else: db.update_db_row_state(context, entry, odl_const.COMPLETED) db.delete_dependency(context, entry) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def entry_reset(context, entry): db.update_db_row_state(context, entry, odl_const.PENDING) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def entry_update_state_by_retry_count(context, entry, retry_count): db.update_pending_db_row_retry(context, entry, retry_count) def _make_url(row): url_object = utils.make_url_object(row.object_type) urlpath = '' if row.operation == odl_const.ODL_CREATE: urlpath = url_object else: urlpath = url_object + '/' + row.object_uuid return urlpath def register_url_builder(object_type, method): MAKE_URL[object_type] = method def _build_url(row): return MAKE_URL.get(row.object_type, _make_url)(row) class OpenDaylightJournalThread(object): """Thread worker for the OpenDaylight Journal Database.""" # make those parameter configurable? 
_RETRY_SLEEP_MIN = 0.1 _RETRY_SLEEP_MAX = 60 def __init__(self, start_thread=True): self.client = client.OpenDaylightRestClient.create_client() self._max_retry_count = cfg.CONF.ml2_odl.retry_count self._sleep_time = self._RETRY_SLEEP_MIN self.event = threading.Event() self._odl_sync_thread = self._create_odl_sync_thread() self._odl_sync_thread_stop = threading.Event() if start_thread: self.start() def _create_odl_sync_thread(self): return threading.Thread(name='sync', target=self.run_sync_thread) def start(self): # Start the sync thread LOG.debug("Starting a new sync thread") if self._odl_sync_thread_stop.is_set(): self._odl_sync_thread_stop.clear() self._odl_sync_thread = self._create_odl_sync_thread() if not self._odl_sync_thread.is_alive(): self._odl_sync_thread.start() def stop(self, timeout=None): """Allows to stop the sync thread. Args: timeout (float): Time in seconds to wait for joining or None for no timeout. """ # Stop the sync thread LOG.debug("Stopping the sync thread") if self._odl_sync_thread.is_alive(): self._odl_sync_thread_stop.set() # Process the journal one last time before stopping. 
self.set_sync_event() self._odl_sync_thread.join(timeout) def set_sync_event(self): self.event.set() @staticmethod def _json_data(row): data = copy.deepcopy(row.data) filters.filter_for_odl(row.object_type, row.operation, data) if row.operation == odl_const.ODL_CREATE: method = 'post' to_send = {row.object_type: data} elif row.operation == odl_const.ODL_UPDATE: method = 'put' to_send = {row.object_type: data} elif row.operation == odl_const.ODL_DELETE: method = 'delete' to_send = None return method, _build_url(row), to_send def run_sync_thread(self): while not self._odl_sync_thread_stop.is_set(): try: self.event.wait() self.event.clear() self.sync_pending_entries() except Exception: # Catch exceptions to protect the thread while running LOG.exception("Error on run_sync_thread") def sync_pending_entries(self): LOG.debug("Start processing journal entries") context = nl_context.get_admin_context() entry = db.get_oldest_pending_db_row_with_lock(context) if entry is None: LOG.debug("No journal entries to process") return while entry is not None: stop_processing = self._sync_entry(context, entry) if stop_processing: break entry = db.get_oldest_pending_db_row_with_lock(context) LOG.debug("Finished processing journal entries") def _retry_sleep(self): # When something happened in the connection to ODL, don't busy loop # because it's likely to hit same issue. # Wait for a while for recovery time.sleep(self._sleep_time) self._sleep_time = min(self._sleep_time * 2, self._RETRY_SLEEP_MAX) def _retry_reset(self): self._sleep_time = self._RETRY_SLEEP_MIN def _sync_entry(self, context, entry): _log_entry(LOG_PROCESSING, entry) method, urlpath, to_send = self._json_data(entry) # TODO(mkolesni): This logic is weirdly written, need to refactor it. 
try: self.client.sendjson(method, urlpath, to_send) registry.notify(entry.object_type, odl_const.BEFORE_COMPLETE, self, context=context, operation=entry.operation, row=entry) entry_complete(context, entry) self._retry_reset() _log_entry(LOG_COMPLETED, entry) except exceptions.ConnectionError: # Don't raise the retry count, just log an error & break entry_reset(context, entry) LOG.error("Cannot connect to the OpenDaylight Controller," " will not process additional entries") self._retry_sleep() return True except Exception: _log_entry(LOG_ERROR_PROCESSING, entry, log_level=logging.ERROR, exc_info=True) entry_update_state_by_retry_count( context, entry, self._max_retry_count) return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/periodic_task.py0000644000175000017500000001010100000000000027556 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib import context as neutron_context from neutron_lib.db import api as db_api from oslo_log import log as logging from oslo_service import loopingcall from networking_odl.db import db LOG = logging.getLogger(__name__) class PeriodicTask(object): def __init__(self, task, interval): self.task = task self.phases = [] self.timer = loopingcall.FixedIntervalLoopingCall(self.execute_ops) self.interval = interval def start(self): self.timer.start(self.interval, stop_on_exception=False) def cleanup(self): # this method is used for unit test to tear down self.timer.stop() try: self.timer.wait() except AttributeError: # NOTE(yamahata): workaround # some tests call this cleanup without calling start pass @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def _set_operation(self, context, operation): db.update_periodic_task(context, task=self.task, operation=operation) def _execute_op(self, operation, context): op_details = operation.__name__ if operation.__doc__: op_details += " (%s)" % operation.__doc__ try: LOG.info("Starting %s phase of periodic task %s.", op_details, self.task) self._set_operation(context, operation) operation(context) LOG.info("Finished %s phase of %s task.", op_details, self.task) except Exception: LOG.exception("Failed during periodic task operation %s.", op_details) def task_already_executed_recently(self, context): return db.was_periodic_task_executed_recently( context, self.task, self.interval) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def _clear_and_unlock_task(self, context): db.update_periodic_task(context, task=self.task, operation=None) db.unlock_periodic_task(context, self.task) @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def _lock_task(self, context): return db.lock_periodic_task(context, self.task) def execute_ops(self, forced=False): LOG.info("Starting %s periodic task.", self.task) context = neutron_context.get_admin_context() # Lock make sure that 
periodic task is executed only after # specified interval. It makes sure that maintenance tasks # are not executed back to back. if not forced and self.task_already_executed_recently(context): LOG.info("Periodic %s task executed after periodic interval " "Skipping execution.", self.task) return if not self._lock_task(context): LOG.info("Periodic %s task already running task", self.task) return try: for phase in self.phases: self._execute_op(phase, context) finally: self._clear_and_unlock_task(context) LOG.info("%s task has been finished", self.task) def register_operation(self, phase): """Register a function to be run by the periodic task. :param phase: Function to call when the thread runs. The function will receive a DB session to use for DB operations. """ self.phases.append(phase) LOG.info("%s phase has been registered in %s task", phase, self.task) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/recovery.py0000644000175000017500000000752000000000000026607 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.db import api as db_api from neutron_lib import exceptions as nexc from neutron_lib.plugins import directory from oslo_log import log as logging from networking_odl.common import client from networking_odl.common import constants as odl_const from networking_odl.common import exceptions from networking_odl.db import db from networking_odl.journal import base_driver from networking_odl.journal import full_sync from networking_odl.journal import journal _CLIENT = client.OpenDaylightRestClientGlobal() LOG = logging.getLogger(__name__) @db_api.retry_if_session_inactive() def journal_recovery(context): for row in db.get_all_db_rows_by_state(context, odl_const.FAILED): LOG.debug("Attempting recovery of journal entry %s.", row) try: odl_resource = _CLIENT.get_client().get_resource( row.object_type, row.object_uuid) except exceptions.UnsupportedResourceType: LOG.warning('Unsupported resource %s', row.object_type) except Exception: LOG.exception("Failure while recovering journal entry %s.", row) else: with db_api.CONTEXT_WRITER.savepoint.using(context): if odl_resource is not None: _handle_existing_resource(context, row) else: _handle_non_existing_resource(context, row) def get_latest_resource(context, row): try: driver = base_driver.get_driver(row.object_type) except exceptions.ResourceNotRegistered: raise exceptions.UnsupportedResourceType(resource=row.object_type) return driver.get_resource_for_recovery(context, row) # TODO(rajivk): Remove this method once recovery is fully supported def _get_latest_resource(context, row): object_type = row.object_type for plugin_alias, resources in full_sync.ALL_RESOURCES.items(): if object_type in resources: plugin = directory.get_plugin(plugin_alias) break else: raise exceptions.UnsupportedResourceType(resource=object_type) obj_getter = getattr(plugin, 'get_{}'.format(object_type)) return obj_getter(context, row.object_uuid) def _sync_resource_to_odl(context, row, operation_type, exists_on_odl): resource = None try: 
resource = _get_latest_resource(context, row) except nexc.NotFound: if exists_on_odl: journal.record(context, row.object_type, row.object_uuid, odl_const.ODL_DELETE, []) else: journal.record(context, row.object_type, row.object_uuid, operation_type, resource) journal.entry_complete(context, row) def _handle_existing_resource(context, row): if row.operation == odl_const.ODL_CREATE: journal.entry_complete(context, row) elif row.operation == odl_const.ODL_DELETE: db.update_db_row_state(context, row, odl_const.PENDING) else: _sync_resource_to_odl(context, row, odl_const.ODL_UPDATE, True) def _handle_non_existing_resource(context, row): if row.operation == odl_const.ODL_DELETE: journal.entry_complete(context, row) else: _sync_resource_to_odl(context, row, odl_const.ODL_CREATE, False) # TODO(mkolesni): Handle missing parent resources somehow. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/journal/worker.py0000644000175000017500000001063700000000000026265 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import os from neutron_lib import worker from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall # TODO(mpeterson): this should go back to the previous block once the problems # with flake8-import-order are fixed. 
from neutron.agent.linux import daemon from networking_odl._i18n import _ from networking_odl.journal import cleanup from networking_odl.journal import full_sync from networking_odl.journal import journal from networking_odl.journal import periodic_task from networking_odl.journal import recovery LOG = logging.getLogger(__name__) class JournalPeriodicProcessor(worker.BaseWorker): """Responsible for running the periodic processing of the journal. This is a separate worker as the regular journal thread is called when an operation finishes and that run will take care of any and all entries that might be present in the journal, including the one relating to that operation. A periodic run over the journal is thus necessary for cases when journal entries in the aforementioned run didn't process correctly due to some error (usually a connection problem) and need to be retried. """ def __init__(self): super(JournalPeriodicProcessor, self).__init__() self._journal = journal.OpenDaylightJournalThread(start_thread=False) self._interval = cfg.CONF.ml2_odl.sync_timeout self._timer = None self._maintenance_task = None self._running = None self.pidfile = None def _create_pidfile(self): pidfile = os.path.join(cfg.CONF.state_path, type(self).__name__.lower() + '.pid') self.pidfile = daemon.Pidfile(pidfile, 'python') # NOTE(mpeterson): We want self._running to be None before the first # run so atexit is only registered once and not several times. 
if self._running is None: atexit.unregister(self._delete_pidfile) atexit.register(self._delete_pidfile) self.pidfile.write(os.getpid()) def _delete_pidfile(self): if self.pidfile is not None: self.pidfile.unlock() os.remove(str(self.pidfile)) self.pidfile = None def start(self): if self._running: raise RuntimeError( _("Thread has to be stopped before started again") ) super(JournalPeriodicProcessor, self).start() LOG.debug('JournalPeriodicProcessor starting') self._journal.start() self._timer = loopingcall.FixedIntervalLoopingCall(self._call_journal) self._timer.start(self._interval) self._start_maintenance_task() self._create_pidfile() self._running = True def stop(self): if not self._running: return LOG.debug('JournalPeriodicProcessor stopping') self._journal.stop() self._timer.stop() self._maintenance_task.cleanup() self._delete_pidfile() super(JournalPeriodicProcessor, self).stop() self._running = False def wait(self): pass def reset(self): if self._maintenance_task is not None: self._maintenance_task.execute_ops(forced=True) def _call_journal(self): self._journal.set_sync_event() def _start_maintenance_task(self): self._maintenance_task = periodic_task.PeriodicTask( 'maintenance', cfg.CONF.ml2_odl.maintenance_interval) for phase in ( cleanup.delete_completed_rows, cleanup.cleanup_processing_rows, full_sync.full_sync, recovery.journal_recovery, ): self._maintenance_task.register_operation(phase) self._maintenance_task.execute_ops(forced=True) self._maintenance_task.start() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8027139 networking-odl-16.0.0.0b2.dev1/networking_odl/l2gateway/0000755000175000017500000000000000000000000024620 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/l2gateway/__init__.py0000644000175000017500000000000000000000000026717 
0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/l2gateway/driver_v2.py0000644000175000017500000000735300000000000027104 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_l2gw.services.l2gateway.common import constants from networking_l2gw.services.l2gateway import service_drivers from networking_odl.common import constants as odl_const from networking_odl.common import postcommit from networking_odl.journal import full_sync from networking_odl.journal import journal cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') LOG = logging.getLogger(__name__) L2GW_RESOURCES = { odl_const.ODL_L2GATEWAY: odl_const.ODL_L2GATEWAYS, odl_const.ODL_L2GATEWAY_CONNECTION: odl_const.ODL_L2GATEWAY_CONNECTIONS } @postcommit.add_postcommit('l2_gateway', 'l2_gateway_connection') class OpenDaylightL2gwDriver(service_drivers.L2gwDriver): """OpenDaylight L2Gateway Service Driver This code is the openstack driver for exciting the OpenDaylight L2GW facility. 
""" def __init__(self, service_plugin, validator=None): super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator) self.service_plugin = service_plugin self.journal = journal.OpenDaylightJournalThread() full_sync.register(constants.L2GW, L2GW_RESOURCES) LOG.info("ODL: Started OpenDaylight L2Gateway V2 driver") @property def service_type(self): return constants.L2GW @log_helpers.log_method_call def create_l2_gateway_precommit(self, context, l2_gateway): journal.record(context, odl_const.ODL_L2GATEWAY, l2_gateway['id'], odl_const.ODL_CREATE, l2_gateway) @log_helpers.log_method_call def update_l2_gateway_precommit(self, context, l2_gateway): journal.record(context, odl_const.ODL_L2GATEWAY, l2_gateway['id'], odl_const.ODL_UPDATE, l2_gateway) @log_helpers.log_method_call def delete_l2_gateway_precommit(self, context, l2_gateway_id): journal.record(context, odl_const.ODL_L2GATEWAY, l2_gateway_id, odl_const.ODL_DELETE, l2_gateway_id) @log_helpers.log_method_call def create_l2_gateway_connection_precommit(self, context, l2_gateway_connection): odl_l2_gateway_connection = copy.deepcopy(l2_gateway_connection) odl_l2_gateway_connection['gateway_id'] = ( l2_gateway_connection['l2_gateway_id']) odl_l2_gateway_connection.pop('l2_gateway_id') journal.record(context, odl_const.ODL_L2GATEWAY_CONNECTION, odl_l2_gateway_connection['id'], odl_const.ODL_CREATE, odl_l2_gateway_connection) @log_helpers.log_method_call def delete_l2_gateway_connection_precommit(self, context, l2_gateway_connection_id): journal.record(context, odl_const.ODL_L2GATEWAY_CONNECTION, l2_gateway_connection_id, odl_const.ODL_DELETE, l2_gateway_connection_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8067138 networking-odl-16.0.0.0b2.dev1/networking_odl/l3/0000755000175000017500000000000000000000000023237 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/l3/__init__.py0000644000175000017500000000000000000000000025336 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/l3/l3_flavor.py0000644000175000017500000002015700000000000025505 0ustar00jamespagejamespage00000000000000# Copyright 2018 Intel Corporation. # Copyright 2018 Isaku Yamahata # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from neutron.objects import router as l3_obj from neutron.services.l3_router.service_providers import base from neutron_lib.callbacks import events from neutron_lib.callbacks import priority_group from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as q_const from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_odl.common import constants as odl_const from networking_odl.journal import full_sync from networking_odl.journal import journal LOG = logging.getLogger(__name__) L3_RESOURCES = { odl_const.ODL_ROUTER: odl_const.ODL_ROUTERS, odl_const.ODL_FLOATINGIP: odl_const.ODL_FLOATINGIPS } @registry.has_registry_receivers class ODLL3ServiceProvider(base.L3ServiceProvider): @log_helpers.log_method_call def __init__(self, l3_plugin): super(ODLL3ServiceProvider, self).__init__(l3_plugin) self.journal = journal.OpenDaylightJournalThread() # TODO(yamahata): add method for fullsync to retrieve # all the router with odl service provider. # other router with other service provider should be filtered. full_sync.register(plugin_constants.L3, L3_RESOURCES) self.odl_provider = __name__ + "." 
+ self.__class__.__name__ @property def _flavor_plugin(self): try: return self._flavor_plugin_ref except AttributeError: self._flavor_plugin_ref = directory.get_plugin( plugin_constants.FLAVORS) return self._flavor_plugin_ref def _validate_l3_flavor(self, context, router_id): if router_id is None: return False router = l3_obj.Router.get_object(context, id=router_id) flavor = self._flavor_plugin.get_flavor(context, router.flavor_id) provider = self._flavor_plugin.get_flavor_next_provider( context, flavor['id'])[0] return str(provider['driver']) == self.odl_provider def _update_floatingip_status(self, context, fip_dict): port_id = fip_dict.get('port_id') status = q_const.ACTIVE if port_id else q_const.DOWN l3_obj.FloatingIP.update_object(context, {'status': status}, id=fip_dict['id']) @registry.receives(resources.ROUTER_CONTROLLER, [events.PRECOMMIT_ADD_ASSOCIATION]) @log_helpers.log_method_call def _router_add_association(self, resource, event, trigger, payload=None): context = payload.context router_dict = payload.request_body router_dict['gw_port_id'] = payload.latest_state.gw_port_id router_id = payload.resource_id if not self._validate_l3_flavor(context, router_id): return journal.record(context, odl_const.ODL_ROUTER, router_dict['id'], odl_const.ODL_CREATE, router_dict) @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE], priority_group.PRIORITY_ROUTER_DRIVER) @log_helpers.log_method_call def _router_update_precommit(self, resource, event, trigger, **kwargs): # NOTE(manjeets) router update bypasses the driver controller # and argument type is different. 
payload = kwargs.get('payload', None) if payload: context = payload.context router_id = payload.states[0]['id'] router_dict = payload.request_body gw_port_id = payload.states[0]['gw_port_id'] else: # TODO(manjeets) Remove this shim once payload is fully adapted # https://bugs.launchpad.net/neutron/+bug/1747747 context = kwargs['context'] router_id = kwargs['router_db'].id router_dict = kwargs['router'] gw_port_id = kwargs['router_db'].gw_port_id if not self._validate_l3_flavor(context, router_id): return if 'gw_port_id' not in router_dict: router_dict['gw_port_id'] = gw_port_id journal.record(context, odl_const.ODL_ROUTER, router_id, odl_const.ODL_UPDATE, router_dict) @registry.receives(resources.ROUTER_CONTROLLER, [events.PRECOMMIT_DELETE_ASSOCIATIONS]) @log_helpers.log_method_call def _router_del_association(self, resource, event, trigger, payload=None): router_id = payload.latest_state.id context = payload.context if not self._validate_l3_flavor(context, router_id): return # TODO(yamahata): process floating ip etc. or just raise error? 
dependency_list = [payload.latest_state.gw_port_id] journal.record(context, odl_const.ODL_ROUTER, router_id, odl_const.ODL_DELETE, dependency_list) @registry.receives(resources.FLOATING_IP, [events.PRECOMMIT_CREATE]) @log_helpers.log_method_call def _floatingip_create_precommit(self, resource, event, trigger, **kwargs): context = kwargs['context'] fip_dict = copy.deepcopy(kwargs['floatingip']) router_id = kwargs['floatingip_db'].router_id if not self._validate_l3_flavor(context, router_id): return fip_dict['id'] = kwargs['floatingip_id'] self._update_floatingip_status(context, fip_dict) if fip_dict['floating_ip_address'] is None: fip_dict['floating_ip_address'] = \ kwargs['floatingip_db'].floating_ip_address journal.record(context, odl_const.ODL_FLOATINGIP, fip_dict['id'], odl_const.ODL_CREATE, fip_dict) @registry.receives(resources.FLOATING_IP, [events.PRECOMMIT_UPDATE]) @log_helpers.log_method_call def _floatingip_update_precommit(self, resource, event, trigger, **kwargs): context = kwargs['context'] fip_dict = kwargs['floatingip'] router_id = kwargs['floatingip_db'].router_id fip_dict['id'] = kwargs['floatingip_db'].id if not self._validate_l3_flavor(context, router_id): return self._update_floatingip_status(context, fip_dict) journal.record(context, odl_const.ODL_FLOATINGIP, fip_dict['id'], odl_const.ODL_UPDATE, fip_dict) @registry.receives(resources.FLOATING_IP, [events.PRECOMMIT_DELETE]) @log_helpers.log_method_call def _floatingip_delete_precommit(self, resource, event, trigger, **kwargs): context = kwargs['context'] fip_data = l3_obj.FloatingIP.get_objects( context, floating_port_id=kwargs['port']['id'])[0] if not self._validate_l3_flavor(context, fip_data.router_id): return dependency_list = [fip_data.router_id, fip_data.floating_network_id] journal.record(context, odl_const.ODL_FLOATINGIP, fip_data.id, odl_const.ODL_DELETE, dependency_list) @registry.receives(resources.FLOATING_IP, [events.AFTER_CREATE, events.AFTER_UPDATE, events.AFTER_DELETE]) 
@registry.receives(resources.ROUTER, [events.AFTER_CREATE, events.AFTER_UPDATE, events.AFTER_DELETE]) @log_helpers.log_method_call def _l3_postcommit(self, resource, event, trigger, **kwargs): self.journal.set_sync_event() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/l3/l3_odl_v2.py0000644000175000017500000001673500000000000025410 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron.db import extraroute_db from neutron.db import l3_agentschedulers_db from neutron.db import l3_dvr_db from neutron.db import l3_gwmode_db from neutron_lib import constants as q_const from neutron_lib.db import api as db_api from neutron_lib.plugins import constants as plugin_constants from oslo_log import log as logging from networking_odl.common import config # noqa from networking_odl.common import constants as odl_const from networking_odl.journal import full_sync from networking_odl.journal import journal LOG = logging.getLogger(__name__) L3_RESOURCES = { odl_const.ODL_ROUTER: odl_const.ODL_ROUTERS, odl_const.ODL_FLOATINGIP: odl_const.ODL_FLOATINGIPS } @db_api.retry_if_session_inactive() @db_api.CONTEXT_WRITER.savepoint def _record_in_journal(context, object_type, operation, object_id, data): journal.record(context, object_type, object_id, operation, data) class OpenDaylightL3RouterPlugin( extraroute_db.ExtraRoute_db_mixin, l3_dvr_db.L3_NAT_with_dvr_db_mixin, l3_gwmode_db.L3_NAT_db_mixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin): """Implementation of the OpenDaylight L3 Router Service Plugin. This class implements a L3 service plugin that provides router and floatingip resources and manages associated request/response. 
""" supported_extension_aliases = ["dvr", "router", "ext-gw-mode", "extraroute"] def __init__(self): super(OpenDaylightL3RouterPlugin, self).__init__() self.journal = journal.OpenDaylightJournalThread() full_sync.register(plugin_constants.L3, L3_RESOURCES) def get_plugin_type(self): return plugin_constants.L3 def get_plugin_description(self): """Returns string description of the plugin.""" return ("L3 Router Service Plugin for basic L3 forwarding " "using OpenDaylight.") @journal.call_thread_on_end def create_router(self, context, router): router_dict = super( OpenDaylightL3RouterPlugin, self).create_router(context, router) _record_in_journal( context, odl_const.ODL_ROUTER, odl_const.ODL_CREATE, router_dict['id'], router_dict) return router_dict @journal.call_thread_on_end def update_router(self, context, router_id, router): router_dict = super( OpenDaylightL3RouterPlugin, self).update_router( context, router_id, router) _record_in_journal( context, odl_const.ODL_ROUTER, odl_const.ODL_UPDATE, router_id, router_dict) return router_dict @journal.call_thread_on_end def delete_router(self, context, router_id): router_dict = self.get_router(context, router_id) dependency_list = [router_dict['gw_port_id']] super(OpenDaylightL3RouterPlugin, self).delete_router(context, router_id) _record_in_journal( context, odl_const.ODL_ROUTER, odl_const.ODL_DELETE, router_id, dependency_list) @journal.call_thread_on_end def create_floatingip(self, context, floatingip, initial_status=q_const.FLOATINGIP_STATUS_ACTIVE): fip = floatingip['floatingip'] if fip.get('port_id') is None: initial_status = q_const.FLOATINGIP_STATUS_DOWN fip_dict = super( OpenDaylightL3RouterPlugin, self).create_floatingip( context, floatingip, initial_status) _record_in_journal( context, odl_const.ODL_FLOATINGIP, odl_const.ODL_CREATE, fip_dict['id'], fip_dict) return fip_dict @journal.call_thread_on_end def update_floatingip(self, context, floatingip_id, floatingip): fip_dict = super( OpenDaylightL3RouterPlugin, 
self).update_floatingip( context, floatingip_id, floatingip) # Update status based on association if fip_dict.get('port_id') is None: fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN else: fip_dict['status'] = q_const.FLOATINGIP_STATUS_ACTIVE self.update_floatingip_status(context, floatingip_id, fip_dict['status']) _record_in_journal( context, odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE, floatingip_id, fip_dict) return fip_dict @journal.call_thread_on_end def delete_floatingip(self, context, floatingip_id): floatingip_dict = self.get_floatingip(context, floatingip_id) dependency_list = [floatingip_dict['router_id'], floatingip_dict['floating_network_id']] super(OpenDaylightL3RouterPlugin, self).delete_floatingip( context, floatingip_id) _record_in_journal( context, odl_const.ODL_FLOATINGIP, odl_const.ODL_DELETE, floatingip_id, dependency_list) def disassociate_floatingips(self, context, port_id, do_notify=True): fip_dicts = self.get_floatingips(context, filters={'port_id': [port_id]}) router_ids = super( OpenDaylightL3RouterPlugin, self).disassociate_floatingips( context, port_id, do_notify) for fip_dict in fip_dicts: fip_dict = self.get_floatingip(context, fip_dict['id']) fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN self.update_floatingip_status(context, fip_dict['id'], fip_dict['status']) _record_in_journal( context, odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE, fip_dict['id'], fip_dict) return router_ids @journal.call_thread_on_end def add_router_interface(self, context, router_id, interface_info): new_router = super( OpenDaylightL3RouterPlugin, self).add_router_interface( context, router_id, interface_info) return new_router @journal.call_thread_on_end def remove_router_interface(self, context, router_id, interface_info): new_router = super( OpenDaylightL3RouterPlugin, self).remove_router_interface( context, router_id, interface_info) return new_router dvr_deletens_if_no_port_warned = False def dvr_deletens_if_no_port(self, context, port_id): # 
TODO(yamahata): implement this method or delete this logging # For now, this is defined to avoid attribute exception # Since ODL L3 does not create namespaces, this is always going to # be a noop. When it is confirmed, delete this comment and logging if not self.dvr_deletens_if_no_port_warned: LOG.debug('dvr is not suported yet. ' 'this method needs to be implemented') self.dvr_deletens_if_no_port_warned = True return [] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/networking_odl/locale/0000755000175000017500000000000000000000000024160 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.778714 networking-odl-16.0.0.0b2.dev1/networking_odl/locale/en_GB/0000755000175000017500000000000000000000000025132 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8067138 networking-odl-16.0.0.0b2.dev1/networking_odl/locale/en_GB/LC_MESSAGES/0000755000175000017500000000000000000000000026717 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/locale/en_GB/LC_MESSAGES/networking_odl.po0000644000175000017500000003321200000000000032305 0ustar00jamespagejamespage00000000000000# Andi Chandler , 2017. 
#zanata msgid "" msgstr "" "Project-Id-Version: networking-odl VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-07 20:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-12-12 09:16+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "" "\n" " Comma-separated list of : tuples " "mapping\n" " physical network names to the agent's node-specific Open " "vSwitch\n" " bridge names to be used for flat and VLAN networks. The length " "of\n" " bridge names should be no more than 11. Each bridge must exist, " "and\n" " should have a physical network interface configured as a port. " "All\n" " physical networks configured on the server should have mappings " "to\n" " appropriate bridges on each agent.\n" "\n" " Note: If you remove a bridge from this mapping, make sure to\n" " disconnect it from the integration bridge as it won't be managed " "by\n" " the agent anymore.\n" "\n" " Default: --bridge_mappings=\n" " " msgstr "" "\n" " Comma-separated list of : tuples " "mapping\n" " physical network names to the agent's node-specific Open " "vSwitch\n" " bridge names to be used for flat and VLAN networks. The length " "of\n" " bridge names should be no more than 11. Each bridge must exist, " "and\n" " should have a physical network interface configured as a port. 
" "All\n" " physical networks configured on the server should have mappings " "to\n" " appropriate bridges on each agent.\n" "\n" " Note: If you remove a bridge from this mapping, make sure to\n" " disconnect it from the integration bridge as it won't be managed " "by\n" " the agent any more.\n" "\n" " Default: --bridge_mappings=\n" " " msgid "" "\n" " Enable VHostUser OVS Plug.\n" "\n" " Default: --vhostuser_ovs_plug\n" " " msgstr "" "\n" " Enable VHostUser OVS Plug.\n" "\n" " Default: --vhostuser_ovs_plug\n" " " msgid "" "\n" " Fives pre-made host configuration for OpenDaylight as a JSON\n" " string.\n" "\n" " NOTE: when specified all other options are ignored!\n" "\n" " An entry should look like:\n" " --ovs_hostconfigs='{\n" " \"ODL L2\": {\n" " \"allowed_network_types\":\n" " [\"local\",\"vlan\", \"vxlan\",\"gre\"],\n" " \"bridge_mappings\": {\"physnet1\":\"br-ex\"}\n" " \"supported_vnic_types\": [\n" " {\n" " \"vnic_type\":\"normal\",\n" " \"vif_type\":\"ovs\",\n" " \"vif_details\":{}\n" " }\n" " ],\n" " },\n" " \"ODL L3\": {}\n" " }'\n" "\n" " Default: --ovs_hostconfigs=\n" " " msgstr "" "\n" " Fives pre-made host configuration for OpenDaylight as a JSON\n" " string.\n" "\n" " NOTE: when specified all other options are ignored!\n" "\n" " An entry should look like:\n" " --ovs_hostconfigs='{\n" " \"ODL L2\": {\n" " \"allowed_network_types\":\n" " [\"local\",\"vlan\", \"vxlan\",\"gre\"],\n" " \"bridge_mappings\": {\"physnet1\":\"br-ex\"}\n" " \"supported_vnic_types\": [\n" " {\n" " \"vnic_type\":\"normal\",\n" " \"vif_type\":\"ovs\",\n" " \"vif_details\":{}\n" " }\n" " ],\n" " },\n" " \"ODL L3\": {}\n" " }'\n" "\n" " Default: --ovs_hostconfigs=\n" " " msgid "" "\n" " IP address of local overlay (tunnel) network end-point.\n" " It accepts either an IPv4 or IPv6 address that resides on one\n" " of the host network interfaces. 
The IP version of this\n" " value must match the value of the 'overlay_ip_version'\n" " option in the ML2 plug-in configuration file on the Neutron\n" " server node(s).\n" "\n" " Default: local_ip=\n" " " msgstr "" "\n" " IP address of local overlay (tunnel) network end-point.\n" " It accepts either an IPv4 or IPv6 address that resides on one\n" " of the host network interfaces. The IP version of this\n" " value must match the value of the 'overlay_ip_version'\n" " option in the ML2 plug-in configuration file on the Neutron\n" " server node(s).\n" "\n" " Default: local_ip=\n" " " msgid "" "\n" " It adds SR-IOV virtual interface support to allow ovs hardware\n" " offload.\n" "\n" " NOTE: This feature should be used with ovs>=2.8.0 and SR-IOV " "NIC\n" " which support switchdev mode and tc offload.\n" "\n" " Default:\n" " " msgstr "" "\n" " It adds SR-IOV virtual interface support to allow OVS hardware\n" " offload.\n" "\n" " NOTE: This feature should be used with ovs>=2.8.0 and SR-IOV " "NIC\n" " which support switchdev mode and tc offload.\n" "\n" " Default:\n" " " msgid "" "\n" " It specifies the OVS VHostUser mode.\n" "\n" " Choices: --vhostuser_mode=client\n" " --vhostuser_mode=server\n" "\n" " Default: --vhostuser_mode=client\n" " " msgstr "" "\n" " It specifies the OVS VHostUser mode.\n" "\n" " Choices: --vhostuser_mode=client\n" " --vhostuser_mode=server\n" "\n" " Default: --vhostuser_mode=client\n" " " msgid "" "\n" " It specifies the OVS data path to use.\n" "\n" " If this value is given then --ovs_dpdk will be ignored.\n" " If neither this option or --ovs_dpdk are given then it will use " "a\n" " valid value for current host.\n" "\n" " Choices: --datapath_type=\n" " --datapath_type=system # kernel data path\n" " --datapath_type=netdev # userspace data path\n" " --datapath_type=dpdkvhostuser # userspace data path\n" "\n" " Default: --datapath_type=netdev # if support is " "detected\n" " --datapath_type=system # in all other cases\n" " " msgstr "" "\n" " It 
specifies the OVS data path to use.\n" "\n" " If this value is given then --ovs_dpdk will be ignored.\n" " If neither this option or --ovs_dpdk are given then it will use " "a\n" " valid value for current host.\n" "\n" " Choices: --datapath_type=\n" " --datapath_type=system # kernel data path\n" " --datapath_type=netdev # userspace data path\n" " --datapath_type=dpdkvhostuser # userspace data path\n" "\n" " Default: --datapath_type=netdev # if support is " "detected\n" " --datapath_type=system # in all other cases\n" " " msgid "" "\n" " It specifies the host name of the target machine.\n" "\n" " Default: --host=$HOSTNAME # running machine host name\n" " " msgstr "" "\n" " It specifies the host name of the target machine.\n" "\n" " Default: --host=$HOSTNAME # running machine host name\n" " " msgid "" "\n" " It uses user-space type of virtual interface (vhostuser) instead " "of\n" " the system based one (ovs).\n" "\n" " If this option is not specified it tries to detect vhostuser\n" " support on running host and in case of positive match it uses " "it.\n" "\n" " NOTE: if --datapath_type is given then this option is ignored.\n" "\n" " Default:\n" " " msgstr "" "\n" " It uses user-space type of virtual interface (vhostuser) instead " "of\n" " the system based one (ovs).\n" "\n" " If this option is not specified it tries to detect vhostuser\n" " support on running host and in case of positive match it uses " "it.\n" "\n" " NOTE: if --datapath_type is given then this option is ignored.\n" "\n" " Default:\n" " " msgid "" "\n" " OVS VHostUser socket directory.\n" "\n" " Default: --vhostuser_socket_dir=/var/run/openvswitch\n" " " msgstr "" "\n" " OVS VHostUser socket directory.\n" "\n" " Default: --vhostuser_socket_dir=/var/run/openvswitch\n" " " msgid "" "\n" " Specifies allowed network types given as a Comma-separated list " "of\n" " types.\n" "\n" " Default: --allowed_network_types=local,vlan,vxlan,gre\n" " " msgstr "" "\n" " Specifies allowed network types given as a 
Comma-separated list " "of\n" " types.\n" "\n" " Default: --allowed_network_types=local,vlan,vxlan,gre\n" " " msgid "" "\n" " VHostUser socket port prefix.\n" "\n" " Choices: --vhostuser_socket_dir=vhu\n" " --vhostuser_socket_dir=socket\n" "\n" " Default: --vhostuser_socket_dir=vhu\n" " " msgstr "" "\n" " VHostUser socket port prefix.\n" "\n" " Choices: --vhostuser_socket_dir=vhu\n" " --vhostuser_socket_dir=socket\n" "\n" " Default: --vhostuser_socket_dir=vhu\n" " " msgid "(V2 driver) Journal maintenance operations interval in seconds." msgstr "(V2 driver) Journal maintenance operations interval in seconds." msgid "(V2 driver) Number of times to retry a row before failing." msgstr "(V2 driver) Number of times to retry a row before failing." msgid "" "(V2 driver) Time in seconds to wait before a processing row is marked back " "to pending." msgstr "" "(V2 driver) Time in seconds to wait before a processing row is marked back " "to pending." msgid "" "(V2 driver) Time to keep completed rows (in seconds).For performance reasons " "it's not recommended to change this from the default value (0) which " "indicates completed rows aren't kept.This value will be checked every " "maintenance_interval by the cleanup thread. To keep completed rows " "indefinitely, set the value to -1" msgstr "" "(V2 driver) Time to keep completed rows (in seconds). For performance " "reasons it's not recommended to change this from the default value (0) which " "indicates completed rows aren't kept. This value will be checked every " "maintenance_interval by the cleanup thread. To keep completed rows " "indefinitely, set the value to -1" msgid "" "--ovs_dpdk option was specified but the 'netdev' datapath_type was not " "enabled. To override use option --datapath_type=netdev" msgstr "" "--ovs_dpdk option was specified but the 'netdev' datapath_type was not " "enabled. To override use option --datapath_type=netdev" msgid "Enable websocket for pseudo-agent-port-binding." 
msgstr "Enable websocket for pseudo-agent-port-binding." msgid "" "Enables the networking-odl driver to supply special neutron ports of \"dhcp" "\" type to OpenDaylight Controller for its use in providing DHCP Service." msgstr "" "Enables the networking-odl driver to supply special neutron ports of \"dhcp" "\" type to OpenDaylight Controller for its use in providing DHCP Service." msgid "HTTP URL of OpenDaylight REST interface." msgstr "HTTP URL of OpenDaylight REST interface." msgid "HTTP password for authentication." msgstr "HTTP password for authentication." msgid "HTTP timeout in seconds." msgstr "HTTP timeout in seconds." msgid "HTTP username for authentication." msgstr "HTTP username for authentication." msgid "Invalid ODL URL" msgstr "Invalid ODL URL" msgid "Name of the controller to be used for port binding." msgstr "Name of the controller to be used for port binding." #, python-format msgid "OpenDaylight API returned %(status)s %(reason)s" msgstr "OpenDaylight API returned %(status)s %(reason)s" msgid "Path for ODL host configuration REST interface" msgstr "Path for ODL host configuration REST interface" msgid "Poll interval in seconds for getting ODL hostconfig" msgstr "Poll interval in seconds for getting ODL hostconfig" msgid "Test without real ODL." msgstr "Test without real ODL." msgid "Tomcat session timeout in minutes." msgstr "Tomcat session timeout in minutes." msgid "Wait this many seconds before retrying the odl features fetch" msgstr "Wait this many seconds before retrying the ODL features fetch" msgid "bad_request (http400),check path" msgstr "bad_request (http400),check path" msgid "bad_request (http400),check path." msgstr "bad_request (http400),check path." 
msgid "resource_list can not be None" msgstr "resource_list can not be None" msgid "unsupported operation {}" msgstr "unsupported operation {}" msgid "websocket subscribe bad stream data" msgstr "websocket subscribe bad stream data" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8067138 networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/0000755000175000017500000000000000000000000023413 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/README.odl0000644000175000017500000000343300000000000025053 0ustar00jamespagejamespage00000000000000OpenDaylight ML2 MechanismDriver ================================ OpenDaylight is an Open Source SDN Controller developed by a plethora of companies and hosted by the Linux Foundation. The OpenDaylight website contains more information on the capabilities OpenDaylight provides: http://www.opendaylight.org Theory of operation =================== The OpenStack Neutron integration with OpenDaylight consists of the ML2 MechanismDriver which acts as a REST proxy and passess all Neutron API calls into OpenDaylight. OpenDaylight contains a NB REST service (called the NeutronAPIService) which caches data from these proxied API calls and makes it available to other services inside of OpenDaylight. One current user of the SB side of the NeutronAPIService is the OVSDB code in OpenDaylight. OVSDB uses the neutron information to isolate tenant networks using GRE or VXLAN tunnels. How to use the OpenDaylight ML2 MechanismDriver =============================================== To use the ML2 MechanismDriver, you need to ensure you have it configured as one of the "mechanism_drivers" in ML2: mechanism_drivers=opendaylight The next step is to setup the "[ml2_odl]" section in either the ml2_conf.ini file or in a separate ml2_conf_odl.ini file. 
An example is shown below: [ml2_odl] password = admin username = admin url = http://192.168.100.1:8080/controller/nb/v2/neutron When starting OpenDaylight, ensure you have the SimpleForwarding application disabled or remove the .jar file from the plugins directory. Also ensure you start OpenDaylight before you start OpenStack Neutron. There is devstack support for this which will automatically pull down OpenDaylight and start it as part of devstack as well. The patch for this will likely merge around the same time as this patch merges. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/__init__.py0000644000175000017500000000000000000000000025512 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/legacy_port_binding.py0000644000175000017500000000602400000000000027771 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_const from neutron_lib.plugins.ml2 import api from oslo_log import log from networking_odl.ml2 import port_binding LOG = log.getLogger(__name__) class LegacyPortBindingManager(port_binding.PortBindingController): def __init__(self): self.vif_details = {portbindings.CAP_PORT_FILTER: True} self.supported_vnic_types = [portbindings.VNIC_NORMAL] def bind_port(self, port_context): """Set binding for all valid segments """ vnic_type = port_context.current.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) if vnic_type not in self.supported_vnic_types: LOG.debug("Refusing to bind due to unsupported vnic_type: %s", vnic_type) return valid_segment = None for segment in port_context.segments_to_bind: if self._check_segment(segment): valid_segment = segment break if valid_segment: vif_type = self._get_vif_type(port_context) LOG.debug("Bind port %(port)s on network %(network)s with valid " "segment %(segment)s and VIF type %(vif_type)r.", {'port': port_context.current['id'], 'network': port_context.network.current['id'], 'segment': valid_segment, 'vif_type': vif_type}) port_context.set_binding( valid_segment[api.ID], vif_type, self.vif_details, status=n_const.PORT_STATUS_ACTIVE) def _check_segment(self, segment): """Verify a segment is valid for the OpenDaylight MechanismDriver. Verify the requested segment is supported by ODL and return True or False to indicate this to callers. """ network_type = segment[api.NETWORK_TYPE] return network_type in [n_const.TYPE_FLAT, n_const.TYPE_LOCAL, n_const.TYPE_GRE, n_const.TYPE_VXLAN, n_const.TYPE_VLAN] def _get_vif_type(self, port_context): """Get VIF type string for given PortContext Dummy implementation: it always returns following constant. 
neutron_lib.api.definitions.portbindings.VIF_TYPE_OVS """ return portbindings.VIF_TYPE_OVS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/mech_driver_v2.py0000644000175000017500000002334600000000000026673 0ustar00jamespagejamespage00000000000000# Copyright (c) 2013-2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as providernet from neutron_lib import constants as p_const from neutron_lib.plugins import constants as nlib_const from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_odl.common import callback from networking_odl.common import config as odl_conf from networking_odl.common import constants as odl_const from networking_odl.common import odl_features from networking_odl.common import postcommit from networking_odl.dhcp import odl_dhcp_driver as dhcp_driver from networking_odl.journal import base_driver from networking_odl.journal import full_sync from networking_odl.journal import journal from networking_odl.journal import worker from networking_odl.ml2 import port_binding from networking_odl.ml2 import port_status_update from networking_odl.qos import qos_driver_v2 as qos_driver 
from networking_odl.trunk import trunk_driver_v2 as trunk_driver LOG = logging.getLogger(__name__) @postcommit.add_postcommit('network', 'subnet', 'port') class OpenDaylightMechanismDriver(api.MechanismDriver, base_driver.ResourceBaseDriver): """OpenDaylight Python Driver for Neutron. This code is the backend implementation for the OpenDaylight ML2 MechanismDriver for OpenStack Neutron. """ RESOURCES = { odl_const.ODL_SG: odl_const.ODL_SGS, odl_const.ODL_SG_RULE: odl_const.ODL_SG_RULES, odl_const.ODL_NETWORK: odl_const.ODL_NETWORKS, odl_const.ODL_SUBNET: odl_const.ODL_SUBNETS, odl_const.ODL_PORT: odl_const.ODL_PORTS } plugin_type = nlib_const.CORE def initialize(self): LOG.debug("Initializing OpenDaylight ML2 driver") cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl") self.sg_handler = callback.OdlSecurityGroupsHandler( self.sync_from_callback_precommit, self.sync_from_callback_postcommit) self.journal = journal.OpenDaylightJournalThread() self.port_binding_controller = port_binding.PortBindingManager.create() self.trunk_driver = trunk_driver.OpenDaylightTrunkDriverV2.create() if cfg.CONF.ml2_odl.enable_dhcp_service: self.dhcp_driver = dhcp_driver.OdlDhcpDriver() full_sync.register(nlib_const.CORE, self.RESOURCES) odl_features.init() if odl_const.ODL_QOS in cfg.CONF.ml2.extension_drivers: qos_driver.OpenDaylightQosDriver.create() def get_workers(self): workers = [port_status_update.OdlPortStatusUpdate(), worker.JournalPeriodicProcessor()] workers += self.port_binding_controller.get_workers() return workers @staticmethod def _record_in_journal(context, object_type, operation, data=None): if data is None: data = context.current journal.record(context._plugin_context, object_type, context.current['id'], operation, data, ml2_context=context) @log_helpers.log_method_call def create_network_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE) @log_helpers.log_method_call def 
create_subnet_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) @log_helpers.log_method_call def create_port_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_PORT, odl_const.ODL_CREATE) @log_helpers.log_method_call def update_network_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE) @log_helpers.log_method_call def update_subnet_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE) @log_helpers.log_method_call def update_port_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_PORT, odl_const.ODL_UPDATE) @log_helpers.log_method_call def delete_network_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[]) @log_helpers.log_method_call def delete_subnet_precommit(self, context): # Use the journal row's data field to store parent object # uuids. This information is required for validation checking # when deleting parent objects. new_context = [context.current['network_id']] OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE, data=new_context) @log_helpers.log_method_call def delete_port_precommit(self, context): # Use the journal row's data field to store parent object # uuids. This information is required for validation checking # when deleting parent objects. 
new_context = [context.current['network_id']] for subnet in context.current['fixed_ips']: new_context.append(subnet['subnet_id']) OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_PORT, odl_const.ODL_DELETE, data=new_context) def _sync_security_group_create_precommit( self, context, operation, object_type, res_id, sg_dict): journal.record(context, object_type, sg_dict['id'], operation, sg_dict) # NOTE(yamahata): when security group is created, default rules # are also created. for rule in sg_dict['security_group_rules']: journal.record(context, odl_const.ODL_SG_RULE, rule['id'], odl_const.ODL_CREATE, rule) @log_helpers.log_method_call def sync_from_callback_precommit(self, context, operation, res_type, res_id, resource_dict, **kwargs): object_type = res_type.singular if resource_dict is not None: resource_dict = resource_dict[object_type] if (operation == odl_const.ODL_CREATE and object_type == odl_const.ODL_SG): self._sync_security_group_create_precommit( context, operation, object_type, res_id, resource_dict) return object_uuid = (resource_dict.get('id') if operation == 'create' else res_id) data = resource_dict if (operation == odl_const.ODL_DELETE): # NOTE(yamahata): DB auto deletion # Security Group Rule under this Security Group needs to # be deleted. At NeutronDB layer rules are auto deleted with # cascade='all,delete'. if (object_type == odl_const.ODL_SG): for rule_id in kwargs['security_group_rule_ids']: journal.record(context, odl_const.ODL_SG_RULE, rule_id, odl_const.ODL_DELETE, [object_uuid]) elif (object_type == odl_const.ODL_SG_RULE): # Set the parent security group id so that dependencies # to this security rule deletion can be properly found # in the journal. 
data = [kwargs['security_group_id']] assert object_uuid is not None journal.record(context, object_type, object_uuid, operation, data) def sync_from_callback_postcommit(self, context, operation, res_type, res_id, resource_dict, **kwargs): self._postcommit(context) def _postcommit(self, context): self.journal.set_sync_event() @log_helpers.log_method_call def bind_port(self, port_context): """Set binding for a valid segments """ return self.port_binding_controller.bind_port(port_context) def check_vlan_transparency(self, context): """Check VLAN transparency """ # TODO(yamahata): This should be odl service provider dependent # introduce ODL yang model for ODL to report which network types # are vlan-transparent. # VLAN and FLAT cases, we don't know if the underlying network # supports QinQ or VLAN. # For now, netvirt supports only vxlan tunneling. VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN] network = context.current # see TypeManager._extend_network_dict_provider() # single providernet if providernet.NETWORK_TYPE in network: return (network[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES) # multi providernet segments = network.get(mpnet_apidef.SEGMENTS) if segments is None: return True return all(segment[providernet.NETWORK_TYPE] in VLAN_TRANSPARENT_NETWORK_TYPES for segment in segments) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/port_binding.py0000644000175000017500000001220500000000000026443 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six import stevedore from oslo_config import cfg from oslo_log import log from oslo_utils import excutils LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class PortBindingController(object): def get_workers(self): return [] @abc.abstractmethod def bind_port(self, port_context): """Attempt to bind a port. :param context: PortContext instance describing the port This method is called outside any transaction to attempt to establish a port binding using calling mechanism driver. Bindings may be created at each of multiple levels of a hierarchical network, and are established from the top level downward. At each level, the mechanism driver determines whether it can bind to any of the network segments in the context.segments_to_bind property, based on the value of the context.host property, any relevant port or network attributes, and its own knowledge of the network topology. At the top level, context.segments_to_bind contains the static segments of the port's network. At each lower level of binding, it contains static or dynamic segments supplied by the driver that bound at the level above. If the driver is able to complete the binding of the port to any segment in context.segments_to_bind, it must call context.set_binding with the binding details. If it can partially bind the port, it must call context.continue_binding with the network segments to be used to bind at the next lower level. 
If the binding results are committed after bind_port returns, they will be seen by all mechanism drivers as update_port_precommit and update_port_postcommit calls. But if some other thread or process concurrently binds or updates the port, these binding results will not be committed, and update_port_precommit and update_port_postcommit will not be called on the mechanism drivers with these results. Because binding results can be discarded rather than committed, drivers should avoid making persistent state changes in bind_port, or else must ensure that such state changes are eventually cleaned up. Implementing this method explicitly declares the mechanism driver as having the intention to bind ports. This is inspected by the QoS service to identify the available QoS rules you can use with ports. """ class PortBindingManager(PortBindingController): # At this point, there is no requirement to have multiple # port binding controllers at the same time. # Stay with single controller until there is a real requirement def __init__(self, name, controller): self.name = name self.controller = controller @classmethod def create( cls, namespace='networking_odl.ml2.port_binding_controllers', name=None): name = name or cfg.CONF.ml2_odl.port_binding_controller ext_mgr = stevedore.named.NamedExtensionManager( namespace, [name], invoke_on_load=True) assert len(ext_mgr.extensions) == 1, ( "Wrong port binding controller is specified") extension = ext_mgr.extensions[0] if isinstance(extension.obj, PortBindingController): return cls(extension.name, extension.obj) else: raise ValueError(_( "Port binding controller '%(name)s (%(controller)r)' " "doesn't implement PortBindingController interface."), {'name': extension.name, 'controller': extension.obj}) def get_workers(self): return self.controller.get_workers() def bind_port(self, port_context): controller_details = {'name': self.name, 'controller': self.controller} try: self.controller.bind_port(port_context) except Exception: with 
excutils.save_and_reraise_exception(): LOG.exception( "Controller '%(name)s (%(controller)r)' had an error " "when binding port.", controller_details) else: if port_context._new_bound_segment: LOG.info( "Controller '%(name)s (%(controller)r)' has bound port.", controller_details) else: LOG.debug( "Controller %(name)s (%(controller)r) hasn't bound " "port.", controller_details) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/port_status_update.py0000644000175000017500000001265200000000000027724 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import threading from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib import context from neutron_lib.plugins import directory from neutron_lib import worker from oslo_log import log from neutron.db import provisioning_blocks from networking_odl.common import client as odl_client from networking_odl.common import odl_features from networking_odl.common import utils from networking_odl.common import websocket_client as odl_ws_client LOG = log.getLogger(__name__) class OdlPortStatusUpdate(worker.BaseWorker): """Class to register and handle port status update""" PORT_PATH = "restconf/operational/neutron:neutron/ports/port" def __init__(self): super(OdlPortStatusUpdate, self).__init__() self.odl_websocket_client = None def start(self): super(OdlPortStatusUpdate, self).start() LOG.debug('OdlPortStatusUpdate worker running') if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS): self.run_websocket() def stop(self): if self.odl_websocket_client: self.odl_websocket_client.set_exit_flag() def wait(self): """Wait for service to complete.""" @staticmethod def reset(): pass def run_websocket(self): # OpenDaylight path to recieve websocket notifications on neutron_ports_path = "/neutron:neutron/neutron:ports" self.path_uri = utils.get_odl_url() self.odl_websocket_client = ( odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket( self.path_uri, neutron_ports_path, odl_ws_client.ODL_OPERATIONAL_DATASTORE, odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE, self._process_websocket_recv, self._process_websocket_reconnect, True )) def _process_websocket_recv(self, payload, reconnect): # Callback for websocket notification LOG.debug("Websocket notification for port status update") for event in odl_ws_client.EventDataParser.get_item(payload): operation, path, data = event.get_fields() if ((operation in [event.OPERATION_UPDATE, event.OPERATION_CREATE])): port_id = event.extract_field(path, "neutron:uuid") port_id = 
str(port_id).strip("'") status_field = data.get('status') if status_field is not None: status = status_field.get('content') LOG.debug("Update port for port id %s %s", port_id, status) # for now we only support transition from DOWN->ACTIVE # https://bugs.launchpad.net/networking-odl/+bug/1686023 if status == n_const.PORT_STATUS_ACTIVE: provisioning_blocks.provisioning_complete( context.get_admin_context(), port_id, resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) if operation == event.OPERATION_DELETE: LOG.debug("PortStatus: Ignoring delete operation") def _process_websocket_reconnect(self, status): if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED: # Get port data using restconf LOG.debug("Websocket notification on reconnection") reconn_thread = threading.Thread( name='websocket', target=self._pull_missed_statuses) reconn_thread.start() def _pull_missed_statuses(self): LOG.debug("starting to pull pending statuses...") plugin = directory.get_plugin() filter = {"status": [n_const.PORT_STATUS_DOWN], "vif_type": ["unbound"]} ports = plugin.get_ports(context.get_admin_context(), filter) if not ports: LOG.debug("no down ports found, done") return port_fetch_url = utils.get_odl_url(self.PORT_PATH) client = odl_client.OpenDaylightRestClient.create_client( url=port_fetch_url) for port in ports: port_id = port["id"] response = client.get(port_id) if response.status_code != 200: LOG.warning("Non-200 response code %s", str(response)) continue odl_status = response.json()['port'][0]['status'] if odl_status == n_const.PORT_STATUS_ACTIVE: # for now we only support transition from DOWN->ACTIVE # See https://bugs.launchpad.net/networking-odl/+bug/1686023 provisioning_blocks.provisioning_complete( context.get_admin_context(), port_id, resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) LOG.debug("done pulling pending statuses") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/networking_odl/ml2/pseudo_agentdb_binding.py0000644000175000017500000004504400000000000030451 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from string import Template from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as nl_const from neutron_lib import context from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib import worker from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from requests import codes from requests import exceptions from neutron.db import provisioning_blocks from networking_odl.common import client as odl_client from networking_odl.common import odl_features from networking_odl.common import utils from networking_odl.common import websocket_client as odl_ws_client from networking_odl.journal import periodic_task from networking_odl.ml2 import port_binding cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') LOG = log.getLogger(__name__) class PseudoAgentDBBindingTaskBase(object): def __init__(self, worker): super(PseudoAgentDBBindingTaskBase, self).__init__() self._worker = worker # extract host/port from ODL URL and append hostconf_uri path 
hostconf_uri = utils.get_odl_url(cfg.CONF.ml2_odl.odl_hostconf_uri) LOG.debug("ODLPORTBINDING hostconfigs URI: %s", hostconf_uri) # TODO(mzmalick): disable port-binding for ODL lightweight testing self.odl_rest_client = odl_client.OpenDaylightRestClient.create_client( url=hostconf_uri) def _rest_get_hostconfigs(self): try: response = self.odl_rest_client.get() response.raise_for_status() hostconfigs = response.json()['hostconfigs']['hostconfig'] except exceptions.ConnectionError: LOG.error("Cannot connect to the OpenDaylight Controller", exc_info=True) return None except exceptions.HTTPError as e: # restconf returns 404 on operation when there is no entry if e.response.status_code == codes.not_found: LOG.debug("Response code not_found (404)" " treated as an empty list") return [] LOG.warning("REST/GET odl hostconfig failed, ", exc_info=True) return None except KeyError: LOG.error("got invalid hostconfigs", exc_info=True) return None except Exception: LOG.warning("REST/GET odl hostconfig failed, ", exc_info=True) return None else: if LOG.isEnabledFor(logging.DEBUG): _hconfig_str = jsonutils.dumps( response, sort_keys=True, indent=4, separators=(',', ': ')) LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str) return hostconfigs def _get_and_update_hostconfigs(self, context=None): LOG.info("REST/GET hostconfigs from ODL") hostconfigs = self._rest_get_hostconfigs() if not hostconfigs: LOG.warning("ODL hostconfigs REST/GET failed, " "will retry on next poll") return # retry on next poll self._worker.update_agents_db(hostconfigs=hostconfigs) @registry.has_registry_receivers class PseudoAgentDBBindingPrePopulate(PseudoAgentDBBindingTaskBase): @registry.receives(resources.PORT, [events.BEFORE_CREATE, events.BEFORE_UPDATE]) def before_port_binding(self, resource, event, trigger, **kwargs): LOG.debug("before_port resource %s event %s %s", resource, event, kwargs) assert resource == resources.PORT assert event in [events.BEFORE_CREATE, events.BEFORE_UPDATE] ml2_plugin = 
trigger context = kwargs['context'] port = kwargs['port'] host = nl_const.ATTR_NOT_SPECIFIED if port and portbindings.HOST_ID in port: host = port.get(portbindings.HOST_ID) if host == nl_const.ATTR_NOT_SPECIFIED or not host: return agent_type = PseudoAgentDBBindingWorker.L2_TYPE if self._worker.known_agent(host, agent_type): return agents = ml2_plugin.get_agents( context, filters={'agent_type': [agent_type], 'host': [host]}) if agents and all(agent['alive'] for agent in agents): self._worker.add_known_agents(agents) LOG.debug("agents %s", agents) return # This host may not be created/updated by worker. # try to populate it. urlpath = "hostconfig/{0}/{1}".format( host, PseudoAgentDBBindingWorker.L2_TYPE) try: response = self.odl_rest_client.get(urlpath) response.raise_for_status() except Exception: LOG.warning("REST/GET odl hostconfig/%s failed.", host, exc_info=True) return LOG.debug("response %s", response.json()) hostconfig = response.json().get('hostconfig', []) if hostconfig: self._worker.update_agents_db_row(hostconfig[0]) class PseudoAgentDBBindingPeriodicTask(PseudoAgentDBBindingTaskBase): def __init__(self, worker): super(PseudoAgentDBBindingPeriodicTask, self).__init__(worker) # Start polling ODL restconf using maintenance thread. 
# default: 30s (should be <= agent keep-alive poll interval) self._periodic = periodic_task.PeriodicTask( 'hostconfig', cfg.CONF.ml2_odl.restconf_poll_interval) self._periodic.register_operation(self._get_and_update_hostconfigs) self._periodic.start() class PseudoAgentDBBindingWebSocket(PseudoAgentDBBindingTaskBase): def __init__(self, worker): super(PseudoAgentDBBindingWebSocket, self).__init__(worker) # Update hostconfig once for the configurations already present self._get_and_update_hostconfigs() odl_url = utils.get_odl_url() self._start_websocket(odl_url) def _start_websocket(self, odl_url): # OpenDaylight path to recieve websocket notifications on neutron_hostconfigs_path = """/neutron:neutron/neutron:hostconfigs""" self.odl_websocket_client = ( odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket( odl_url, neutron_hostconfigs_path, odl_ws_client.ODL_OPERATIONAL_DATASTORE, odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE, self._process_websocket_recv, self._process_websocket_reconnect )) if self.odl_websocket_client is None: LOG.error("Error starting websocket thread") def _process_websocket_recv(self, payload, reconnect): # Callback for websocket notification LOG.debug("Websocket notification for hostconfig update") for event in odl_ws_client.EventDataParser.get_item(payload): try: operation, path, data = event.get_fields() if operation == event.OPERATION_DELETE: host_id = event.extract_field(path, "neutron:host-id") host_type = event.extract_field(path, "neutron:host-type") if not host_id or not host_type: LOG.warning("Invalid delete notification") continue self._worker.delete_agents_db_row( host_id.strip("'"), host_type.strip("'")) elif operation == event.OPERATION_CREATE: if 'hostconfig' in data: hostconfig = data['hostconfig'] self.update_agents_db_row(hostconfig) except KeyError: LOG.warning("Invalid JSON for websocket notification", exc_info=True) continue # TODO(rsood): Mixing restconf and websocket can cause race conditions def 
_process_websocket_reconnect(self, status): if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED: # Get hostconfig data using restconf LOG.debug("Websocket notification on reconnection") self._get_and_update_hostconfigs() class PseudoAgentDBBindingWorker(worker.BaseWorker): """Neutron Worker to update agentdb based on ODL hostconfig.""" AGENTDB_BINARY = 'neutron-odlagent-portbinding' L2_TYPE = "ODL L2" # TODO(mzmalick): binary, topic and resource_versions to be provided # by ODL, Pending ODL NB patches. _AGENTDB_ROW = { 'binary': AGENTDB_BINARY, 'host': '', 'topic': nl_const.L2_AGENT_TOPIC, 'configurations': {}, 'resource_versions': '', 'agent_type': L2_TYPE, 'start_flag': True} def __init__(self): LOG.info("PseudoAgentDBBindingWorker init") self._old_agents = set() self._known_agents = set() self.agents_db = None super(PseudoAgentDBBindingWorker, self).__init__() def start(self): LOG.info("PseudoAgentDBBindingWorker starting") super(PseudoAgentDBBindingWorker, self).start() self._start() def stop(self): pass def wait(self): pass def reset(self): pass def _start(self): """Initialization.""" LOG.debug("Initializing ODL Port Binding Worker") if cfg.CONF.ml2_odl.enable_websocket_pseudo_agentdb: self._websocket = PseudoAgentDBBindingWebSocket(self) else: self._periodic_task = (PseudoAgentDBBindingPeriodicTask(self)) def known_agent(self, host_id, agent_type): agent = (host_id, agent_type) return agent in self._known_agents or agent in self._old_agents def add_known_agents(self, agents): for agent in agents: self._known_agents.add((agent['host'], agent['agent_type'])) def update_agents_db(self, hostconfigs): LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs") self._old_agents = self._known_agents self._known_agents = set() for host_config in hostconfigs: self._update_agents_db_row(host_config) def update_agents_db_row(self, host_config): self._old_agents = self._known_agents self._update_agents_db_row(host_config) def _update_agents_db_row(self, 
host_config): if self.agents_db is None: self.agents_db = directory.get_plugin() # Update one row in agent db host_id = host_config['host-id'] host_type = host_config['host-type'] config = host_config['config'] try: agentdb_row = self._AGENTDB_ROW.copy() agentdb_row['host'] = host_id agentdb_row['agent_type'] = host_type agentdb_row['configurations'] = jsonutils.loads(config) if (host_id, host_type) in self._old_agents: agentdb_row.pop('start_flag', None) self.agents_db.create_or_update_agent( context.get_admin_context(), agentdb_row) self._known_agents.add((host_id, host_type)) except Exception: LOG.exception("Unable to update agentdb.") def delete_agents_db_row(self, host_id, host_type): """Delete agent row.""" try: filters = {'agent_type': [host_type], 'host': [host_id]} # TODO(rsood): get_agent can be used here agent = self.agents_db.get_agents_db( context.get_admin_context(), filters=filters) if not agent: return LOG.debug("Deleting Agent with Agent id: %s", agent[0]['id']) self.agents_db.delete_agent( context.get_admin_context(), agent[0]['id']) self._known_agents.remove((host_id, host_type)) except Exception: LOG.exception("Unable to delete from agentdb.") @registry.has_registry_receivers class PseudoAgentDBBindingController(port_binding.PortBindingController): """Switch agnostic Port binding controller for OpenDayLight.""" def __init__(self): """Initialization.""" LOG.debug("Initializing ODL Port Binding Controller") super(PseudoAgentDBBindingController, self).__init__() self._worker = PseudoAgentDBBindingWorker() @registry.receives(resources.PROCESS, [events.BEFORE_SPAWN]) def _before_spawn(self, resource, event, trigger, payload=None): self._prepopulate = PseudoAgentDBBindingPrePopulate(self._worker) def get_workers(self): return [self._worker] def _substitute_hconfig_tmpl(self, port_context, hconfig): # TODO(mzmalick): Explore options for inlines string splicing of # port-id to 14 bytes as required by vhostuser types port_id = port_context.current['id'] 
conf = hconfig.get('configurations') vnics = conf.get('supported_vnic_types') if vnics is None: return hconfig for vnic in vnics: if vnic.get('vif_type') == portbindings.VIF_TYPE_VHOST_USER: details = vnic.get('vif_details') if details is None: continue port_prefix = details.get('port_prefix') port_prefix = port_prefix[:14] subs_ids = { # $IDENTIFER string substitution in hostconfigs JSON string 'PORT_ID': port_id[:(14 - len(port_prefix))], } # Substitute identifiers and Convert JSON string to dict hconfig_conf_json = Template(jsonutils.dumps(details)) substituted_str = hconfig_conf_json.safe_substitute(subs_ids) vnic['vif_details'] = jsonutils.loads(substituted_str) return hconfig def bind_port(self, port_context): """bind port using ODL host configuration.""" # Get all ODL hostconfigs for this host and type agentdb = port_context.host_agents(PseudoAgentDBBindingWorker.L2_TYPE) if not agentdb: LOG.warning("No valid hostconfigs in agentsdb for host %s", port_context.host) return for raw_hconfig in agentdb: # do any $identifier substitution hconfig = self._substitute_hconfig_tmpl(port_context, raw_hconfig) # Found ODL hostconfig for this host in agentdb LOG.debug("ODLPORTBINDING bind port with hostconfig: %s", hconfig) if self._hconfig_bind_port(port_context, hconfig): break # Port binding suceeded! else: # Port binding failed! LOG.warning( "Failed to bind Port %(pid)s devid %(device_id)s " "owner %(owner)s for host %(host)s " "on network %(network)s.", { 'pid': port_context.current['id'], 'device_id': port_context.current['device_id'], 'owner': port_context.current['device_owner'], 'host': port_context.host, 'network': port_context.network.current['id']}) else: # No hostconfig found for host in agentdb. 
LOG.warning("No ODL hostconfigs for host %s found in agentdb", port_context.host) def _hconfig_bind_port(self, port_context, hconfig): """bind port after validating odl host configuration.""" valid_segment = None for segment in port_context.segments_to_bind: if self._is_valid_segment(segment, hconfig['configurations']): valid_segment = segment break else: LOG.debug("No valid segments found!") return False confs = hconfig['configurations']['supported_vnic_types'] # nova provides vnic_type in port_context to neutron. # neutron provides supported vif_type for binding based on vnic_type # in this case ODL hostconfigs has the vif_type to bind for vnic_type vnic_type = port_context.current.get(portbindings.VNIC_TYPE) vif_details = None for conf in confs: if conf["vnic_type"] == vnic_type: vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS) LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type) vif_details = conf.get('vif_details', {}) break else: LOG.error( "Binding failed: unsupported VNIC %(vnic_type)s on %(host)s", {'vnic_type': vnic_type, 'host': port_context.host}) return False if not vif_details: # empty vif_details could be trouble, warn. 
LOG.warning("hostconfig:vif_details was empty!") LOG.debug("Bind port %(port)s on network %(network)s with valid " "segment %(segment)s and VIF type %(vif_type)r " "VIF details %(vif_details)r.", {'port': port_context.current['id'], 'network': port_context.network.current['id'], 'segment': valid_segment, 'vif_type': vif_type, 'vif_details': vif_details}) port_status = self._prepare_initial_port_status(port_context) port_context.set_binding(valid_segment[api.ID], vif_type, vif_details, status=port_status) return True def _prepare_initial_port_status(self, port_context): port_status = nl_const.PORT_STATUS_ACTIVE if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS): port_status = nl_const.PORT_STATUS_DOWN provisioning_blocks.add_provisioning_component( port_context._plugin_context, port_context.current['id'], resources.PORT, provisioning_blocks.L2_AGENT_ENTITY) return port_status def _is_valid_segment(self, segment, conf): """Verify a segment is supported by ODL.""" network_type = segment[api.NETWORK_TYPE] return network_type in conf['allowed_network_types'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8067138 networking-odl-16.0.0.0b2.dev1/networking_odl/qos/0000755000175000017500000000000000000000000023523 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/qos/__init__.py0000644000175000017500000000000000000000000025622 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/qos/qos_driver_v2.py0000644000175000017500000000752300000000000026670 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except 
in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import portbindings from neutron_lib import constants from neutron_lib.db import constants as db_const from neutron_lib.plugins import constants as nlib_const from neutron_lib.services.qos import base from neutron_lib.services.qos import constants as qos_consts from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_odl.common import constants as odl_const from networking_odl.common import odl_features from networking_odl.journal import full_sync from networking_odl.journal import journal from networking_odl.qos import qos_utils LOG = logging.getLogger(__name__) VIF_TYPES = [portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER] VNIC_TYPES = [portbindings.VNIC_NORMAL] QOS_RESOURCES = { odl_const.ODL_QOS_POLICY: odl_const.ODL_QOS_POLICIES } DEFAULT_QOS_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_const.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_const.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': [constants.EGRESS_DIRECTION]} } } QOS_RULES = 'qos-rules' class OpenDaylightQosDriver(base.DriverBase): """OpenDaylight QOS Driver This code is backend implementation for OpenDaylight Qos driver for Openstack Neutron. 
""" @staticmethod def create(): try: supported_rules = odl_features.get_config(QOS_RULES) except KeyError: supported_rules = DEFAULT_QOS_RULES return OpenDaylightQosDriver(supported_rules=supported_rules) def __init__(self, supported_rules, name='OpenDaylightQosDriver', vif_types=VIF_TYPES, vnic_types=VNIC_TYPES, requires_rpc_notifications=False): super(OpenDaylightQosDriver, self).__init__( name, vif_types, vnic_types, supported_rules, requires_rpc_notifications) LOG.debug("Initializing OpenDaylight Qos driver") self.journal = journal.OpenDaylightJournalThread() full_sync.register(nlib_const.QOS, QOS_RESOURCES) def _record_in_journal(self, context, op_const, qos_policy): data = qos_utils.convert_rules_format(qos_policy.to_dict()) journal.record(context, odl_const.ODL_QOS_POLICY, data['id'], op_const, data) @log_helpers.log_method_call def create_policy_precommit(self, context, qos_policy): self._record_in_journal(context, odl_const.ODL_CREATE, qos_policy) @log_helpers.log_method_call def update_policy_precommit(self, context, qos_policy): self._record_in_journal(context, odl_const.ODL_UPDATE, qos_policy) @log_helpers.log_method_call def delete_policy_precommit(self, context, qos_policy): self._record_in_journal(context, odl_const.ODL_DELETE, qos_policy) @log_helpers.log_method_call def create_policy(self, context, policy): self.journal.set_sync_event() @log_helpers.log_method_call def update_policy(self, context, policy): self.journal.set_sync_event() @log_helpers.log_method_call def delete_policy(self, context, policy): self.journal.set_sync_event() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/qos/qos_utils.py0000644000175000017500000000256500000000000026127 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 Intel Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy def enforce_policy_format(policy): if 'bandwidth_limit_rules' not in policy.keys(): policy['bandwidth_limit_rules'] = [] if 'dscp_marking_rules' not in policy.keys(): policy['dscp_marking_rules'] = [] return policy # NOTE(manjeets) keeping common methods for formatting # qos data in qos_utils for code reuse. def convert_rules_format(data): policy = copy.deepcopy(data) policy.pop('tenant_id', None) policy.pop('rules', None) for rule in data.get('rules', []): rule_type = rule['type'] + '_rules' rule.pop('type', None) rule.pop('qos_policy_id', None) rule['tenant_id'] = data['tenant_id'] policy[rule_type] = [rule] return enforce_policy_format(policy) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8067138 networking-odl-16.0.0.0b2.dev1/networking_odl/sfc/0000755000175000017500000000000000000000000023474 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/sfc/__init__.py0000644000175000017500000000000000000000000025573 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8067138 networking-odl-16.0.0.0b2.dev1/networking_odl/sfc/flowclassifier/0000755000175000017500000000000000000000000026510 
5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/sfc/flowclassifier/__init__.py0000644000175000017500000000000000000000000030607 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py0000644000175000017500000000677000000000000033352 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 Brocade Communication Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_sfc.extensions import flowclassifier as fc_const from networking_sfc.services.flowclassifier.drivers import base as fc_driver from networking_odl.common import constants as odl_const from networking_odl.common import postcommit from networking_odl.journal import full_sync from networking_odl.journal import journal LOG = logging.getLogger(__name__) SFC_FC_RESOURCES = { odl_const.ODL_SFC_FLOW_CLASSIFIER: odl_const.NETWORKING_SFC_FLOW_CLASSIFIERS, } @postcommit.add_postcommit('flow_classifier') class OpenDaylightSFCFlowClassifierDriverV2( fc_driver.FlowClassifierDriverBase): """OpenDaylight SFC Flow Classifier Driver (Version 2) for networking-sfc. 
This Driver pass through SFC Flow Classifier API calls to OpenDaylight Neutron Northbound Project by using the REST API's exposed by the project. """ def initialize(self): LOG.debug("Initializing OpenDaylight Networking " "SFC Flow Classifier driver Version 2") self.journal = journal.OpenDaylightJournalThread() full_sync.register(fc_const.FLOW_CLASSIFIER_EXT, SFC_FC_RESOURCES) @staticmethod def _record_in_journal(context, object_type, operation, data=None): if data is None: data = context.current journal.record(context._plugin_context, object_type, context.current['id'], operation, data) @log_helpers.log_method_call def create_flow_classifier_precommit(self, context): OpenDaylightSFCFlowClassifierDriverV2._record_in_journal( context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_CREATE) @log_helpers.log_method_call def update_flow_classifier_precommit(self, context): OpenDaylightSFCFlowClassifierDriverV2._record_in_journal( context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_UPDATE) @log_helpers.log_method_call def delete_flow_classifier_precommit(self, context): OpenDaylightSFCFlowClassifierDriverV2._record_in_journal( context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_DELETE, data=[]) # Need to implement these methods, else driver loading fails with error # complaining about no abstract method implementation present. 
@log_helpers.log_method_call def create_flow_classifier(self, context): super(OpenDaylightSFCFlowClassifierDriverV2, self).create_flow_classifier(context) @log_helpers.log_method_call def update_flow_classifier(self, context): super(OpenDaylightSFCFlowClassifierDriverV2, self).update_flow_classifier(context) @log_helpers.log_method_call def delete_flow_classifier(self, context): super(OpenDaylightSFCFlowClassifierDriverV2, self).delete_flow_classifier(context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/sfc/sfc_driver_v2.py0000644000175000017500000001321600000000000026606 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Brocade Communication Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_sfc.extensions import sfc as sfc_const from networking_sfc.services.sfc.drivers import base as sfc_driver from networking_odl.common import constants as odl_const from networking_odl.common import postcommit from networking_odl.journal import full_sync from networking_odl.journal import journal LOG = logging.getLogger(__name__) SFC_RESOURCES = { odl_const.ODL_SFC_PORT_PAIR: odl_const.NETWORKING_SFC_PORT_PAIRS, odl_const.ODL_SFC_PORT_PAIR_GROUP: odl_const.NETWORKING_SFC_PORT_PAIR_GROUPS, odl_const.ODL_SFC_PORT_CHAIN: odl_const.NETWORKING_SFC_PORT_CHAINS } @postcommit.add_postcommit('port_pair', 'port_pair_group', 'port_chain') class OpenDaylightSFCDriverV2(sfc_driver.SfcDriverBase): """OpenDaylight SFC Driver (Version 2) for networking-sfc. Driver sends REST request for Networking SFC Resources (Port Pair, Port Pair Group & Port Chain) to OpenDaylight Neutron Northbound. OpenDaylight Neutron Northbound has API's defined for these resources based on the Networking SFC APIs. 
""" def initialize(self): LOG.debug("Initializing OpenDaylight Networking SFC driver(Version 2)") self.journal = journal.OpenDaylightJournalThread() full_sync.register(sfc_const.SFC_EXT, SFC_RESOURCES) @staticmethod def _record_in_journal(context, object_type, operation, data=None): if data is None: data = context.current journal.record(context._plugin_context, object_type, context.current['id'], operation, data) @log_helpers.log_method_call def create_port_pair_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_CREATE) @log_helpers.log_method_call def create_port_pair_group_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_CREATE) @log_helpers.log_method_call def create_port_chain_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_CREATE) @log_helpers.log_method_call def update_port_pair_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_UPDATE) @log_helpers.log_method_call def update_port_pair_group_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_UPDATE) @log_helpers.log_method_call def update_port_chain_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_UPDATE) @log_helpers.log_method_call def delete_port_pair_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_DELETE, data=[]) @log_helpers.log_method_call def delete_port_pair_group_precommit(self, context): OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_DELETE, data=[]) @log_helpers.log_method_call def delete_port_chain_precommit(self, context): 
OpenDaylightSFCDriverV2._record_in_journal( context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_DELETE, data=[]) # Need to implement these methods, else driver loading fails with error # complaining about no abstract method implementation present. @log_helpers.log_method_call def create_port_pair(self, context): super(OpenDaylightSFCDriverV2, self).create_port_pair(context) @log_helpers.log_method_call def create_port_pair_group(self, context): super(OpenDaylightSFCDriverV2, self).create_port_pair_group(context) @log_helpers.log_method_call def create_port_chain(self, context): super(OpenDaylightSFCDriverV2, self).create_port_chain(context) @log_helpers.log_method_call def update_port_pair(self, context): super(OpenDaylightSFCDriverV2, self).update_port_pair(context) @log_helpers.log_method_call def update_port_pair_group(self, context): super(OpenDaylightSFCDriverV2, self).update_port_pair_group(context) @log_helpers.log_method_call def update_port_chain(self, context): super(OpenDaylightSFCDriverV2, self).update_port_chain(context) @log_helpers.log_method_call def delete_port_pair(self, context): super(OpenDaylightSFCDriverV2, self).delete_port_pair(context) @log_helpers.log_method_call def delete_port_pair_group(self, context): super(OpenDaylightSFCDriverV2, self).delete_port_pair_group(context) @log_helpers.log_method_call def delete_port_chain(self, context): super(OpenDaylightSFCDriverV2, self).delete_port_chain(context) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8067138 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/0000755000175000017500000000000000000000000024063 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/__init__.py0000644000175000017500000000000000000000000026162 
0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/base.py0000644000175000017500000001331100000000000025346 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015-2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures import mock from neutron.tests import base from neutron_lib.callbacks import registry from neutron_lib import fixture as nl_fixture from oslo_config import cfg from oslo_config import fixture as config_fixture from networking_odl.common import odl_features from networking_odl.journal import full_sync from networking_odl.journal import journal from networking_odl.journal import periodic_task from networking_odl.ml2 import pseudo_agentdb_binding class DietTestCase(base.DietTestCase): def patch(self, target, name, *args, **kwargs): context = mock.patch.object(target, name, *args, **kwargs) patch = context.start() self.addCleanup(context.stop) return patch class OpenDaylightRestClientFixture(fixtures.Fixture): # Set URL/user/pass so init doesn't throw a cfg required error. # They are not used in these tests since requests.request is overwritten. 
def _setUp(self): super(OpenDaylightRestClientFixture, self)._setUp() self.cfg = self.useFixture(config_fixture.Config()) mock.patch('requests.sessions.Session.request').start() self.cfg.config(url='http://localhost:8080/controller/nb/v2/neutron', group='ml2_odl') self.cfg.config(username='someuser', group='ml2_odl') self.cfg.config(password='somepass', group='ml2_odl') self.cfg.config(port_binding_controller='legacy-port-binding', group='ml2_odl') class OpenDaylightRestClientGlobalFixture(fixtures.Fixture): def __init__(self, global_client): super(OpenDaylightRestClientGlobalFixture, self).__init__() self._global_client = global_client def _setUp(self): super(OpenDaylightRestClientGlobalFixture, self)._setUp() mock.patch.object(self._global_client, 'get_client').start() class OpenDaylightFeaturesFixture(fixtures.Fixture): def _setUp(self): super(OpenDaylightFeaturesFixture, self)._setUp() self.cfg = self.useFixture(config_fixture.Config()) if cfg.CONF.ml2_odl.url is None: self.cfg.config(url='http://127.0.0.1:9999', group='ml2_odl') if cfg.CONF.ml2_odl.username is None: self.cfg.config(username='someuser', group='ml2_odl') if cfg.CONF.ml2_odl.password is None: self.cfg.config(password='somepass', group='ml2_odl') # make sure _fetch_features is not called, it'll block the main thread self.cfg.config(odl_features_json='{"features": {"feature": []}}', group='ml2_odl') odl_features.init() self.addCleanup(odl_features.deinit) class OpenDaylightJournalThreadFixture(fixtures.Fixture): def _setUp(self): super(OpenDaylightJournalThreadFixture, self)._setUp() self.journal_thread_mock = mock.patch.object( journal.OpenDaylightJournalThread, 'start') self.journal_thread_mock.start() self.pidfile_fixture = self.useFixture(JournalWorkerPidFileFixture()) def remock_atexit(self): self.pidfile_fixture.atexit_mock.stop() return self.pidfile_fixture.atexit_mock.start() class JournalWorkerPidFileFixture(fixtures.Fixture): def _setUp(self): super(JournalWorkerPidFileFixture, 
self)._setUp() # Every pidfile that is created for the JournalPeriodicProcessor # worker registers an operation to clean it when the interpreter # is about to exit. Tests each have a temporary directory where # they work, this directory is deleted after each test. That means # that by the time atexit is called the pidfile does not exist anymore # and therefore fails with an error. This avoids this problem. self.atexit_mock = mock.patch( 'networking_odl.journal.worker.atexit.register' ) self.atexit_mock.start() class OpenDaylightPeriodicTaskFixture(fixtures.Fixture): def _setUp(self): super(OpenDaylightPeriodicTaskFixture, self)._setUp() self.task_start_mock = mock.patch.object( periodic_task.PeriodicTask, 'start') self.task_start_mock.start() class OpenDaylightPseudoAgentPrePopulateFixture( nl_fixture.CallbackRegistryFixture): def _setUp(self): super(OpenDaylightPseudoAgentPrePopulateFixture, self)._setUp() mock.patch.object( pseudo_agentdb_binding.PseudoAgentDBBindingPrePopulate, 'before_port_binding').start() # NOTE(yamahata): work around # CallbackRegistryFixture._restore causes stopping unstarted patcher # bacause some of base classes neutron test cases issue stop_all() # with tearDown method def _restore(self): registry._CALLBACK_MANAGER = self._orig_manager if mock.mock._is_started(self.patcher): # this may cause RuntimeError('stop called on unstarted patcher') # due to stop_all called by base test cases self.patcher.stop() class OpenDaylightFullSyncFixture(fixtures.Fixture): def _setUp(self): super(OpenDaylightFullSyncFixture, self)._setUp() self.addCleanup(full_sync.FULL_SYNC_RESOURCES.clear) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.810714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/0000755000175000017500000000000000000000000026225 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/__init__.py0000644000175000017500000000000000000000000030324 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/base.py0000644000175000017500000001143400000000000027514 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools import os from neutron.common import utils from neutron.tests import base from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import test_plugin from oslo_config import cfg from oslo_config import fixture as config_fixture from networking_odl.common import client from networking_odl.common import constants as odl_const from networking_odl.common import utils as odl_utils from networking_odl.db import db from networking_odl.tests import base as test_base from networking_odl.tests.unit import test_base_db class OdlTestsBase(object): # this is stolen from neutron.tests.functional.base # This is the directory from which infra fetches log files # for functional tests. 
DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(), 'functional-logs') def setUp(self): self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config( url='http://127.0.0.1:8181/controller/nb/v2/neutron', group='ml2_odl') self.cfg.config(username='admin', group='ml2_odl') self.cfg.config(password='admin', group='ml2_odl') self.cfg.config(mechanism_drivers=self._mechanism_drivers, group='ml2') self.cfg.config(extension_drivers=[ 'qos', 'port_security'], group='ml2') self.client = client.OpenDaylightRestClient.create_client() super(OdlTestsBase, self).setUp() base.setup_test_logging( cfg.CONF, self.DEFAULT_LOG_DIR, "%s.txt" % self.id()) def setup_parent(self): """Perform parent setup with the common plugin configuration class.""" # Ensure that the parent setup can be called without arguments # by the common configuration setUp. service_plugins = {'l3_plugin_name': self.l3_plugin} service_plugins.update(self.get_additional_service_plugins()) parent_setup = functools.partial( super(test_plugin.Ml2PluginV2TestCase, self).setUp, plugin=self.get_plugins(), ext_mgr=self.get_ext_managers(), service_plugins=service_plugins ) self.useFixture(test_plugin.Ml2ConfFixture(parent_setup)) def get_plugins(self): return test_plugin.PLUGIN_NAME def get_ext_managers(self): return None def get_odl_resource(self, resource_type, resource): return self.client.get_resource( resource_type, resource[resource_type]['id']) def assert_resource_created(self, resource_type, resource): odl_resource = self.get_odl_resource(resource_type, resource) self.assertIsNotNone(odl_resource) def resource_update_test(self, resource_type, resource): update_field = 'name' update_value = 'bubu' resource = self.get_odl_resource(resource_type, resource) self.assertNotEqual(update_value, resource[resource_type][update_field]) self._update(odl_utils.make_url_object(resource_type), resource[resource_type]['id'], {resource_type: {update_field: update_value}}) resource = 
self.get_odl_resource(resource_type, resource) self.assertEqual(update_value, resource[resource_type][update_field]) def resource_delete_test(self, resource_type, resource): self._delete(odl_utils.make_url_object(resource_type), resource[resource_type]['id']) self.assertIsNone(self.get_odl_resource(resource_type, resource)) class V2DriverAdjustment(test_base_db.ODLBaseDbTestCase): def setUp(self): super(V2DriverAdjustment, self).setUp() self.useFixture(test_base.JournalWorkerPidFileFixture()) def get_odl_resource(self, resource_type, resource): def no_journal_rows(): pending_rows = db.get_all_db_rows_by_state( self.db_context, odl_const.PENDING) processing_rows = db.get_all_db_rows_by_state( self.db_context, odl_const.PROCESSING) return len(pending_rows) == 0 and len(processing_rows) == 0 utils.wait_until_true(no_journal_rows, 5, 0.5) return super(V2DriverAdjustment, self).get_odl_resource( resource_type, resource) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/db/0000755000175000017500000000000000000000000026612 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/db/__init__.py0000644000175000017500000000000000000000000030711 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/db/test_migrations.py0000644000175000017500000001272000000000000032401 0ustar00jamespagejamespage00000000000000# Copyright 2016 Intel Corporation. # Copyright 2016 Isaku Yamahata # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from sqlalchemy import sql from sqlalchemy.sql import schema from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.functional.db import test_migrations from neutron.tests.unit import testlib_api from networking_odl.db import head FWAAS_TABLES = ( 'cisco_firewall_associations', 'firewall_group_port_associations_v2', 'firewall_groups_v2', 'firewall_policies_v2', 'firewall_policy_rule_associations_v2', 'firewall_router_associations', 'firewall_rules_v2', ) L2GW_TABLES = ( 'l2gatewayconnections', 'l2gatewaydevices', 'l2gatewayinterfaces', 'l2gateways', 'l2gw_alembic_version', 'logical_switches', 'pending_ucast_macs_remotes', 'physical_locators', 'physical_ports', 'physical_switches', 'ucast_macs_locals', 'ucast_macs_remotes', 'vlan_bindings', ) BGPVPN_TABLES = ( 'bgpvpns', 'bgpvpn_network_associations', 'bgpvpn_router_associations', 'ml2_route_target_allocations', 'sfc_bagpipe_ppg_rtnn_associations', 'sfc_bagpipe_chain_hops', ) # Tables from other repos that we depend on but do not manage. IGNORED_TABLES_MATCH = set( FWAAS_TABLES + L2GW_TABLES + BGPVPN_TABLES ) # EXTERNAL_TABLES should contain all names of tables that are not related to # current repo. 
EXTERNAL_TABLES = set(external.TABLES) VERSION_TABLE = 'odl_alembic_version' class _TestModelsMigrationsODL(test_migrations._TestModelsMigrations): def db_sync(self, engine): self.cfg.config(connection=engine.url, group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name.startswith('alembic') or name == VERSION_TABLE or name in EXTERNAL_TABLES or name in IGNORED_TABLES_MATCH): return False if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): return False return True def _filter_mysql_server_func_now(self, diff_elem): # TODO(yamahata): remove this bug work around once it's fixed # example: # when the column has server_default=sa.func.now(), the diff # includes the followings diff # [ ('modify_default', # None, # 'opendaylightjournal', # 'created_at', # {'existing_nullable': True, # 'existing_type': DATETIME()}, # DefaultClause(, for_update=False), # DefaultClause(, # for_update=False))] # another example # [ ('modify_default', # None, # 'opendaylightjournal', # 'created_at', # {'existing_nullable': True, # 'existing_type': DATETIME()}, # None, # DefaultClause(, # for_update=False))] meta_def = diff_elem[0][5] rendered_meta_def = diff_elem[0][6] if (diff_elem[0][0] == 'modify_default' and diff_elem[0][2] in ('opendaylightjournal', 'opendaylight_periodic_task') and isinstance(meta_def, schema.DefaultClause) and isinstance(meta_def.arg, sql.elements.TextClause) and meta_def.reflected and meta_def.arg.text == u'CURRENT_TIMESTAMP' and isinstance(rendered_meta_def, schema.DefaultClause) and isinstance(rendered_meta_def.arg, sql.functions.now) and not rendered_meta_def.reflected and meta_def.for_update == rendered_meta_def.for_update): return False return True def 
filter_metadata_diff(self, diff): return list(filter(self._filter_mysql_server_func_now, diff)) class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestModelsMigrationsODL, testlib_api.SqlTestCaseLight): pass class TestModelsMigrationsPostgresql(testlib_api.PostgreSQLTestCaseMixin, _TestModelsMigrationsODL, testlib_api.SqlTestCaseLight): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/requirements.txt0000644000175000017500000000063700000000000031517 0ustar00jamespagejamespage00000000000000# Additional requirements for functional tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. oslotest>=3.3.0 # Apache-2.0 psutil>=1.1.1,<2.0.0 psycopg2 python-subunit>=1.2.0 # Apache-2.0/BSD PyMySQL>=0.6.2 # MIT License stestr>=2.0.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/test_bgpvpn.py0000644000175000017500000001736000000000000031141 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import webob.exc from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit import testlib_api # BGPVPN Table metadata should be imported before # sqlalchemy metadata.create_all call else tables # will not be created. from networking_bgpvpn.neutron.db import bgpvpn_db # noqa from networking_bgpvpn.tests.unit.services import test_plugin as bgpvpn_plugin from networking_odl.common import constants as odl_const from networking_odl.tests.functional import base class _TestBGPVPNBase(base.OdlTestsBase): rds = ['100:1'] def setUp(self, plugin=None, service_plugins=None, ext_mgr=None): provider = { 'service_type': 'BGPVPN', 'name': 'OpenDaylight', 'driver': 'networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver', 'default': True } self.service_providers.return_value = [provider] self.plugin_arg = plugin self.service_plugin_arg = service_plugins self.ext_mgr_arg = ext_mgr super(_TestBGPVPNBase, self).setUp() def get_ext_managers(self): return self.ext_mgr_arg def get_plugins(self): return self.plugin_arg def get_additional_service_plugins(self): return self.service_plugin_arg def _assert_networks_associated(self, net_ids, bgpvpn): response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) self.assertItemsEqual(net_ids, response[odl_const.ODL_BGPVPN]['networks']) def _assert_routers_associated(self, router_ids, bgpvpn): response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) self.assertItemsEqual(router_ids, response[odl_const.ODL_BGPVPN]['routers']) def test_bgpvpn_create(self): with self.bgpvpn() as bgpvpn: self.assert_resource_created(odl_const.ODL_BGPVPN, bgpvpn) def test_bgpvpn_create_with_rds(self): with self.bgpvpn(route_distinguishers=self.rds) as bgpvpn: response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) self.assertItemsEqual(self.rds, response[odl_const.ODL_BGPVPN] ['route_distinguishers']) def test_bgpvpn_delete(self): with self.bgpvpn(do_delete=False) as bgpvpn: self._delete('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id']) 
self.assertIsNone( self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)) def test_associate_dissociate_net(self): with (self.network()) as net1, ( self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: net_id = net1['network']['id'] bgpvpn_id = bgpvpn['bgpvpn']['id'] with self.assoc_net(bgpvpn_id, net_id): self._assert_networks_associated([net_id], bgpvpn) self._assert_networks_associated([], bgpvpn) def test_associate_multiple_networks(self): with (self.network()) as net1, (self.network()) as net2, ( self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: net_id1 = net1['network']['id'] net_id2 = net2['network']['id'] bgpvpn_id = bgpvpn['bgpvpn']['id'] with self.assoc_net(bgpvpn_id, net_id1), \ self.assoc_net(bgpvpn_id, net_id2): self._assert_networks_associated([net_id1, net_id2], bgpvpn) def test_assoc_multiple_networks_dissoc_one(self): with (self.network()) as net1, (self.network()) as net2, ( self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: net_id1 = net1['network']['id'] net_id2 = net2['network']['id'] bgpvpn_id = bgpvpn['bgpvpn']['id'] with self.assoc_net(bgpvpn_id, net_id1): with self.assoc_net(bgpvpn_id, net_id2): self._assert_networks_associated([net_id1, net_id2], bgpvpn) self._assert_networks_associated([net_id1], bgpvpn) def test_associate_dissociate_router(self): with (self.router(tenant_id=self._tenant_id)) as router, ( self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: router_id = router['router']['id'] bgpvpn_id = bgpvpn['bgpvpn']['id'] with self.assoc_router(bgpvpn_id, router_id): self._assert_routers_associated([router_id], bgpvpn) self._assert_routers_associated([], bgpvpn) def test_associate_multiple_routers(self): with (self.router(tenant_id=self._tenant_id, name='r1')) as r1, ( self.router(tenant_id=self._tenant_id, name='r2')) as r2, ( self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: router_id1 = r1['router']['id'] router_id2 = r2['router']['id'] bgpvpn_id = bgpvpn['bgpvpn']['id'] with self.assoc_router(bgpvpn_id, router_id1): 
self._assert_routers_associated([router_id1], bgpvpn) with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.assoc_router(bgpvpn_id, router_id2): pass self.assertEqual(webob.exc.HTTPBadRequest.code, ctx_manager.exception.code) self._assert_routers_associated([router_id1], bgpvpn) def test_assoc_router_multiple_bgpvpns(self): with (self.router(tenant_id=self._tenant_id, name='r1')) as router, ( self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn1, ( self.bgpvpn()) as bgpvpn2: router_id = router['router']['id'] bgpvpn_id_1 = bgpvpn1['bgpvpn']['id'] bgpvpn_id_2 = bgpvpn2['bgpvpn']['id'] with (self.assoc_router(bgpvpn_id_1, router_id)), ( self.assoc_router(bgpvpn_id_2, router_id)): self._assert_routers_associated([router_id], bgpvpn1) self._assert_routers_associated([router_id], bgpvpn2) def test_associate_router_network(self): with (self.router(tenant_id=self._tenant_id)) as router, ( self.network()) as net1, ( self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn: router_id = router['router']['id'] net_id = net1['network']['id'] bgpvpn_id = bgpvpn['bgpvpn']['id'] with self.assoc_router(bgpvpn_id, router_id), \ self.assoc_net(bgpvpn_id, net_id): response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn) self.assertItemsEqual([router_id], response[odl_const.ODL_BGPVPN] ['routers']) self.assertItemsEqual([net_id], response[odl_const.ODL_BGPVPN] ['networks']) class TestBGPVPNV2Driver(base.V2DriverAdjustment, bgpvpn_plugin.BgpvpnTestCaseMixin, _TestBGPVPNBase, test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/test_l2gateway.py0000644000175000017500000001542700000000000031546 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import contextlib import copy import mock import webob.exc from neutron.api import extensions as api_extensions from neutron.db import servicetype_db as sdb from neutron.tests.unit.plugins.ml2 import test_plugin from oslo_utils import uuidutils from networking_l2gw import extensions as l2gw_extensions from networking_l2gw.services.l2gateway.common import constants as l2gw_const from networking_l2gw.services.l2gateway.plugin import L2GatewayPlugin from networking_odl.common import constants as odl_const from networking_odl.tests.functional import base _uuid = uuidutils.generate_uuid class L2GatewayTestCaseMixin(object): devices = [{'device_name': 's1', 'interfaces': [{'name': 'int1'}] }, {'device_name': 's2', 'interfaces': [{'name': 'int2', 'segmentation_id': [10, 20]}] }] l2gw_data = {l2gw_const.GATEWAY_RESOURCE_NAME: {'tenant_id': _uuid(), 'name': 'l2gw', 'devices': devices}} def setUp(self): """Perform parent setup with the common plugin configuration class.""" # Ensure that the parent setup can be called without arguments # by the common configuration setUp. 
bits = self.service_provider.split(':') provider = { 'service_type': bits[0], 'name': bits[1], 'driver': bits[2], 'default': True } # override the default service provider self.service_providers = ( mock.patch.object(sdb.ServiceTypeManager, 'get_service_providers').start()) self.service_providers.return_value = [provider] super(L2GatewayTestCaseMixin, self).setUp() @contextlib.contextmanager def l2gateway(self, do_delete=True, **kwargs): req_data = copy.deepcopy(self.l2gw_data) fmt = 'json' if kwargs.get('data'): req_data = kwargs.get('data') else: req_data[l2gw_const.GATEWAY_RESOURCE_NAME].update(kwargs) l2gw_req = self.new_create_request(l2gw_const.L2_GATEWAYS, req_data, fmt=fmt) res = l2gw_req.get_response(self.ext_api) if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) l2gw = self.deserialize('json', res) yield l2gw if do_delete: self._delete(l2gw_const.L2_GATEWAYS, l2gw[l2gw_const.GATEWAY_RESOURCE_NAME]['id']) @contextlib.contextmanager def l2gateway_connection(self, nw_id, l2gw_id, do_delete=True, **kwargs): req_data = { l2gw_const.CONNECTION_RESOURCE_NAME: {'tenant_id': _uuid(), 'network_id': nw_id, 'l2_gateway_id': l2gw_id} } fmt = 'json' if kwargs.get('data'): req_data = kwargs.get('data') else: req_data[l2gw_const.CONNECTION_RESOURCE_NAME].update(kwargs) l2gw_connection_req = self.new_create_request( l2gw_const.L2_GATEWAYS_CONNECTION, req_data, fmt=fmt) res = l2gw_connection_req.get_response(self.ext_api) if res.status_int >= 400: raise webob.exc.HTTPClientError(code=res.status_int) l2gw_connection = self.deserialize('json', res) yield l2gw_connection if do_delete: self._delete(l2gw_const.L2_GATEWAYS_CONNECTION, l2gw_connection [l2gw_const.CONNECTION_RESOURCE_NAME]['id']) @staticmethod def convert_to_odl_l2gw_connection(l2gw_connection_in): odl_l2_gw_conn_data = copy.deepcopy( l2gw_connection_in[l2gw_const.CONNECTION_RESOURCE_NAME]) odl_l2_gw_conn_data['gateway_id'] = ( odl_l2_gw_conn_data['l2_gateway_id']) 
odl_l2_gw_conn_data.pop('l2_gateway_id') return {odl_const.ODL_L2GATEWAY_CONNECTION: odl_l2_gw_conn_data} class _TestL2GatewayBase(base.OdlTestsBase, L2GatewayTestCaseMixin): def get_ext_managers(self): extensions_path = ':'.join(l2gw_extensions.__path__) return api_extensions.PluginAwareExtensionManager( extensions_path, {'l2gw_plugin': L2GatewayPlugin()}) def get_additional_service_plugins(self): l2gw_plugin_str = ('networking_l2gw.services.l2gateway.plugin.' 'L2GatewayPlugin') service_plugin = {'l2gw_plugin': l2gw_plugin_str} return service_plugin def test_l2gateway_create(self): with self.l2gateway(name='mygateway') as l2gateway: self.assert_resource_created(odl_const.ODL_L2GATEWAY, l2gateway) def test_l2gateway_update(self): with self.l2gateway(name='gateway1') as l2gateway: self.resource_update_test(odl_const.ODL_L2GATEWAY, l2gateway) def test_l2gateway_delete(self): with self.l2gateway(do_delete=False) as l2gateway: self.resource_delete_test(odl_const.ODL_L2GATEWAY, l2gateway) def test_l2gateway_connection_create_delete(self): odl_l2gw_connection = {} with self.network() as network: with self.l2gateway() as l2gateway: net_id = network['network']['id'] l2gw_id = l2gateway[odl_const.ODL_L2GATEWAY]['id'] with (self.l2gateway_connection(net_id, l2gw_id) ) as l2gw_connection: odl_l2gw_connection = ( self.convert_to_odl_l2gw_connection(l2gw_connection)) self.assert_resource_created( odl_const.ODL_L2GATEWAY_CONNECTION, odl_l2gw_connection) self.assertIsNone(self.get_odl_resource( odl_const.ODL_L2GATEWAY_CONNECTION, odl_l2gw_connection)) class TestL2gatewayV2Driver(base.V2DriverAdjustment, _TestL2GatewayBase, test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] service_provider = ('L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.' 
'OpenDaylightL2gwDriver:default') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/test_l3.py0000644000175000017500000000711300000000000030156 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools from neutron.tests.unit.extensions import test_l3 from neutron.tests.unit.plugins.ml2 import test_plugin from neutron_lib import constants as q_const from networking_odl.common import constants as odl_const from networking_odl.tests.functional import base class _TestL3Base(test_l3.L3NatTestCaseMixin, base.OdlTestsBase): # Override default behavior so that extension manager is used, otherwise # we can't test security groups. def setup_parent(self): """Perform parent setup with the common plugin configuration class.""" ext_mgr = test_l3.L3TestExtensionManager() # Ensure that the parent setup can be called without arguments # by the common configuration setUp. 
parent_setup = functools.partial( super(test_plugin.Ml2PluginV2TestCase, self).setUp, plugin=test_plugin.PLUGIN_NAME, ext_mgr=ext_mgr, service_plugins={'l3_plugin_name': self.l3_plugin}, ) self.useFixture(test_plugin.Ml2ConfFixture(parent_setup)) def test_router_create(self): with self.router() as router: self.assert_resource_created(odl_const.ODL_ROUTER, router) def test_router_update(self): with self.router() as router: self.resource_update_test(odl_const.ODL_ROUTER, router) def test_router_delete(self): with self.router() as router: self.resource_delete_test(odl_const.ODL_ROUTER, router) def test_floatingip_create(self): with self.floatingip_with_assoc() as fip: self.assert_resource_created(odl_const.ODL_FLOATINGIP, fip) # Test FIP was deleted since the code creating the FIP deletes it # once the context block exists. odl_fip = self.get_odl_resource(odl_const.ODL_FLOATINGIP, fip) self.assertIsNone(odl_fip) def test_floatingip_status_with_port(self): with self.floatingip_with_assoc() as fip: self.assertEqual( q_const.FLOATINGIP_STATUS_ACTIVE, fip['floatingip']['status']) def test_floatingip_status_without_port(self): with self.subnet() as subnet: with self.floatingip_no_assoc(subnet) as fip: # status should be down when floating ip # is not associated to any port self.assertEqual( q_const.FLOATINGIP_STATUS_DOWN, fip['floatingip']['status']) def test_floatingip_dissociate_port(self): with self.floatingip_with_assoc() as fip: portid = fip['floatingip']['port_id'] self.assertIsNotNone(portid) self._delete(odl_const.ODL_PORTS, portid) updated_fip = self.get_odl_resource(odl_const.ODL_FLOATINGIP, fip) self.assertNotIn('port_id', updated_fip['floatingip'].keys()) class TestL3PluginV2(base.V2DriverAdjustment, _TestL3Base, test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] l3_plugin = 'odl-router_v2' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/test_ml2_drivers.py0000644000175000017500000001141000000000000032063 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import functools from neutron.tests.unit.extensions import test_securitygroup from neutron.tests.unit.plugins.ml2 import test_plugin from networking_odl.common import constants as odl_const from networking_odl.tests.functional import base class _DriverTest(base.OdlTestsBase): def test_network_create(self): with self.network() as network: self.assert_resource_created(odl_const.ODL_NETWORK, network) def test_network_update(self): with self.network() as network: self.resource_update_test(odl_const.ODL_NETWORK, network) def test_network_delete(self): with self.network() as network: self.resource_delete_test(odl_const.ODL_NETWORK, network) def test_subnet_create(self): with self.network() as network: with self.subnet(network=network) as subnet: self.assert_resource_created(odl_const.ODL_SUBNET, subnet) def test_subnet_update(self): with self.network() as network: with self.subnet(network=network) as subnet: self.resource_update_test(odl_const.ODL_SUBNET, subnet) def test_subnet_delete(self): with self.network() as network: with self.subnet(network=network) as subnet: self.resource_delete_test(odl_const.ODL_SUBNET, subnet) def test_port_create(self): with self.network() as network: with self.subnet(network=network) as subnet: with 
self.port(subnet=subnet) as port: self.assert_resource_created(odl_const.ODL_PORT, port) def test_port_update(self): with self.network() as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet) as port: self.resource_update_test(odl_const.ODL_PORT, port) def test_port_delete(self): with self.network() as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet) as port: self.resource_delete_test(odl_const.ODL_PORT, port) class _DriverSecGroupsTests(base.OdlTestsBase): # Override default behavior so that extension manager is used, otherwise # we can't test security groups. def setup_parent(self): """Perform parent setup with the common plugin configuration class.""" ext_mgr = ( test_securitygroup.SecurityGroupTestExtensionManager()) # Ensure that the parent setup can be called without arguments # by the common configuration setUp. parent_setup = functools.partial( super(test_plugin.Ml2PluginV2TestCase, self).setUp, plugin=test_plugin.PLUGIN_NAME, ext_mgr=ext_mgr, ) self.useFixture(test_plugin.Ml2ConfFixture(parent_setup)) def test_security_group_create(self): with self.security_group() as sg: self.assert_resource_created(odl_const.ODL_SG, sg) def test_security_group_update(self): with self.security_group() as sg: self.resource_update_test(odl_const.ODL_SG, sg) def test_security_group_delete(self): with self.security_group() as sg: self.resource_delete_test(odl_const.ODL_SG, sg) def test_security_group_rule_create(self): with self.security_group() as sg: sg_id = sg[odl_const.ODL_SG]['id'] with self.security_group_rule(security_group_id=sg_id) as sg_rule: self.assert_resource_created(odl_const.ODL_SG_RULE, sg_rule) def test_security_group_rule_delete(self): with self.security_group() as sg: sg_id = sg[odl_const.ODL_SG]['id'] with self.security_group_rule(security_group_id=sg_id) as sg_rule: self.resource_delete_test(odl_const.ODL_SG_RULE, sg_rule) class TestV2Driver(base.V2DriverAdjustment, _DriverTest, 
test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] class TestV2DriverSecGroups(base.V2DriverAdjustment, _DriverSecGroupsTests, test_securitygroup.SecurityGroupsTestCase, test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/test_odl_dhcp_driver.py0000644000175000017500000001044600000000000032772 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.tests.unit.plugins.ml2 import test_plugin from neutron_lib import constants as n_const from neutron_lib.plugins import directory from oslo_config import fixture as config_fixture from networking_odl.common import constants as odl_const from networking_odl.dhcp import odl_dhcp_driver_base as driver_base from networking_odl.tests.functional import base class TestOdlDhcpDriver(base.V2DriverAdjustment, base.OdlTestsBase, test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] def setUp(self): self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(enable_dhcp_service=True, group='ml2_odl') super(TestOdlDhcpDriver, self).setUp() def get_port_data(self, network, subnet): plugin = self.get_plugin() device_id = driver_base.OPENDAYLIGHT_DEVICE_ID + \ '-' + subnet[odl_const.ODL_SUBNET]['id'] filters = { 'network_id': [network[odl_const.ODL_NETWORK]['id']], 'device_id': [device_id], 'device_owner': [n_const.DEVICE_OWNER_DHCP] } ports = plugin.get_ports(self.context, filters=filters) if ports: port = ports[0] return {odl_const.ODL_PORT: {'id': port['id']}} def get_plugin(self): return directory.get_plugin() def test_subnet_create(self): with self.network() as network: with self.subnet(network=network) as subnet: self.get_odl_resource(odl_const.ODL_SUBNET, subnet) port = self.get_port_data(network, subnet) self.assert_resource_created(odl_const.ODL_PORT, port) def test_subnet_update_from_disable_to_enable(self): with self.network() as network: with self.subnet(network=network, enable_dhcp=False) as subnet: self.get_odl_resource(odl_const.ODL_SUBNET, subnet) plugin = self.get_plugin() port = self.get_port_data(network, subnet) self.assertIsNone(port) subnet[odl_const.ODL_SUBNET]['enable_dhcp'] = True plugin.update_subnet( self.context, subnet[odl_const.ODL_SUBNET]['id'], subnet) self.get_odl_resource(odl_const.ODL_SUBNET, subnet) port = self.get_port_data(network, subnet) self.assert_resource_created(odl_const.ODL_PORT, port) def 
test_subnet_update_from_enable_to_disable(self): with self.network() as network: with self.subnet(network=network) as subnet: self.get_odl_resource(odl_const.ODL_SUBNET, subnet) plugin = self.get_plugin() port = self.get_port_data(network, subnet) self.assert_resource_created(odl_const.ODL_PORT, port) subnet[odl_const.ODL_SUBNET]['enable_dhcp'] = False plugin.update_subnet( self.context, subnet[odl_const.ODL_SUBNET]['id'], subnet) resource = self.get_odl_resource(odl_const.ODL_PORT, port) self.assertIsNone(resource) def test_subnet_delete(self): with self.network() as network: with self.subnet(network=network) as subnet: self.get_odl_resource(odl_const.ODL_SUBNET, subnet) plugin = self.get_plugin() port = self.get_port_data(network, subnet) self.assert_resource_created(odl_const.ODL_PORT, port) plugin.delete_subnet( self.context, subnet[odl_const.ODL_SUBNET]['id']) resource = self.get_odl_resource(odl_const.ODL_PORT, port) self.assertIsNone(resource) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/test_qos.py0000644000175000017500000000537500000000000030452 0ustar00jamespagejamespage00000000000000# Copyright (C) 2017 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import contextlib from oslo_utils import uuidutils from neutron.extensions import qos as qos_ext from neutron.services.qos import qos_plugin from neutron.tests.unit.api import test_extensions from neutron.tests.unit.plugins.ml2 import test_plugin from neutron_lib import fixture as nlib_fixture from neutron_lib.plugins import directory from networking_odl.common import constants as odl_const from networking_odl.tests.functional import base class QoSTestExtensionManager(object): def get_resources(self): return qos_ext.Qos.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class _QoSDriverTestCase(base.OdlTestsBase): def test_policy_create(self): with self.qos_policy() as policy: self.assert_resource_created( odl_const.ODL_QOS_POLICY, policy) def test_policy_update(self): with self.qos_policy() as policy: self.resource_update_test( odl_const.ODL_QOS_POLICY, policy) def test_policy_delete(self): with self.qos_policy() as policy: self.resource_delete_test( odl_const.ODL_QOS_POLICY, policy) class QoSDriverTests(base.V2DriverAdjustment, _QoSDriverTestCase, test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] def setUp(self): self.useFixture(nlib_fixture.PluginDirectoryFixture()) super(QoSDriverTests, self).setUp() self.qos_plug = qos_plugin.QoSPlugin() directory.add_plugin('QOS', self.qos_plug) ext_mgr = QoSTestExtensionManager() self.resource_prefix_map = {'policies': '/qos'} self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) tenant_id = uuidutils.generate_uuid() self.policy_data = { 'policy': {'name': 'test-policy', 'tenant_id': tenant_id}} @contextlib.contextmanager def qos_policy(self, fmt='json'): po_res = self.new_create_request('policies', self.policy_data, fmt) po_rep = po_res.get_response(self.ext_api) policy = self.deserialize(fmt, po_rep) yield policy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/networking_odl/tests/functional/test_trunk_drivers.py0000644000175000017500000001075600000000000032550 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib from networking_odl.common import constants as odl_const from networking_odl.tests.functional import base from neutron.services.trunk import plugin as trunk_plugin from neutron.tests.unit.plugins.ml2 import test_plugin from neutron_lib.plugins import utils from neutron_lib.services.trunk import constants from oslo_utils import uuidutils class _TrunkDriverTest(base.OdlTestsBase): def test_trunk_create(self): with self.trunk() as trunk: self.assert_resource_created(odl_const.ODL_TRUNK, trunk) def test_trunk_update(self): with self.trunk() as trunk: trunk['trunk'].update(admin_state_up=False) self.trunk_plugin.update_trunk(self.context, trunk['trunk']['id'], trunk) response = self.get_odl_resource(odl_const.ODL_TRUNK, trunk) self.assertFalse(response['trunk']['admin_state_up']) def test_subport_create(self): with self.trunk() as trunk: with self.subport() as subport: trunk_obj = self.trunk_plugin.add_subports( self.context, trunk['trunk']['id'], {'sub_ports': [subport]}) response = self.get_odl_resource(odl_const.ODL_TRUNK, {'trunk': trunk_obj}) self.assertEqual(response['trunk']['sub_ports'][0]['port_id'], subport['port_id']) def test_subport_delete(self): with 
self.subport() as subport: with self.trunk([subport]) as trunk: response = self.get_odl_resource(odl_const.ODL_TRUNK, trunk) self.assertEqual(response['trunk']['sub_ports'][0]['port_id'], subport['port_id']) trunk_obj = self.trunk_plugin.remove_subports( self.context, trunk['trunk']['id'], {'sub_ports': [subport]}) response = self.get_odl_resource(odl_const.ODL_TRUNK, {'trunk': trunk_obj}) self.assertEqual(response['trunk']['sub_ports'], []) def test_trunk_delete(self): with self.trunk() as trunk: self.trunk_plugin.delete_trunk(self.context, trunk['trunk']['id']) self.assertIsNone(self.get_odl_resource(odl_const.ODL_TRUNK, trunk)) @contextlib.contextmanager def trunk(self, subports=None): subports = subports if subports else [] with self.network() as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet) as trunk_parent: tenant_id = uuidutils.generate_uuid() trunk = {'port_id': trunk_parent['port']['id'], 'tenant_id': tenant_id, 'project_id': tenant_id, 'admin_state_up': True, 'name': 'test_trunk', 'sub_ports': subports} trunk_obj = self.trunk_plugin.create_trunk( self.context, {'trunk': trunk}) yield {'trunk': trunk_obj} @contextlib.contextmanager def subport(self): with self.port() as child_port: subport = {'segmentation_type': 'vlan', 'segmentation_id': 123, 'port_id': child_port['port']['id']} yield subport class TestTrunkV2Driver(base.V2DriverAdjustment, _TrunkDriverTest, test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['opendaylight_v2'] def setUp(self): super(TestTrunkV2Driver, self).setUp() self.trunk_plugin = trunk_plugin.TrunkPlugin() self.trunk_plugin.add_segmentation_type( constants.SEGMENTATION_TYPE_VLAN, utils.is_valid_vlan_tag) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/match.py0000644000175000017500000000251300000000000025532 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack 
Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fnmatch import re from oslo_serialization import jsonutils def json(obj): return MatchJson(obj) class MatchJson(object): def __init__(self, obj): self._obj = obj def __eq__(self, json_text): return self._obj == jsonutils.loads(json_text) def __repr__(self): return "MatchJson({})".format(repr(self._obj)) def wildcard(text): return MatchWildcard(text) class MatchWildcard(object): def __init__(self, obj): self._text = text = str(obj) self._reg = re.compile(fnmatch.translate(text)) def __eq__(self, obj): return self._reg.match(str(obj)) def __repr__(self): return "MatchWildcard({})".format(self._text) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/0000755000175000017500000000000000000000000025042 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/__init__.py0000644000175000017500000000000000000000000027141 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/base_v2.py0000644000175000017500000000424600000000000026743 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 
OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests.unit.plugins.ml2 import test_plugin from networking_odl.common import client from networking_odl.journal import base_driver from networking_odl.journal import journal from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests import base from networking_odl.tests.unit import test_base_db class OpenDaylightConfigBase(test_plugin.Ml2PluginV2TestCase, test_base_db.ODLBaseDbTestCase): def setUp(self): self.journal_thread_fixture = self.useFixture( base.OpenDaylightJournalThreadFixture()) self.useFixture(base.OpenDaylightRestClientFixture()) self.useFixture(base.OpenDaylightFullSyncFixture()) super(OpenDaylightConfigBase, self).setUp() self.thread = journal.OpenDaylightJournalThread() self.addCleanup(base_driver.ALL_RESOURCES.clear) def run_journal_processing(self): """Cause the journal to process the first pending entry""" self.thread.sync_pending_entries() class OpenDaylightTestCase(OpenDaylightConfigBase): def setUp(self): self.mock_sendjson = mock.patch.object(client.OpenDaylightRestClient, 'sendjson').start() super(OpenDaylightTestCase, self).setUp() self.port_create_status = 'DOWN' self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mock_sendjson.side_effect = self.check_sendjson def check_sendjson(self, method, urlpath, obj): self.assertFalse(urlpath.startswith("http://")) ././@PaxHeader0000000000000000000000000000003300000000000011451 
xustar000000000000000027 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/bgpvpn/0000755000175000017500000000000000000000000026336 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/bgpvpn/__init__.py0000644000175000017500000000000000000000000030435 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/bgpvpn/test_odl_v2.py0000644000175000017500000002047600000000000031145 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import mock from networking_odl.bgpvpn import odl_v2 as driverv2 from networking_odl.common import constants as odl_const from networking_odl.common import odl_features from networking_odl.db import db from networking_odl.tests.unit import base_v2 from neutron_lib.api.definitions import bgpvpn_vni as bgpvpn_vni_def class OpenDaylightBgpvpnDriverTestCase(base_v2.OpenDaylightConfigBase): def setUp(self): super(OpenDaylightBgpvpnDriverTestCase, self).setUp() self.driver = driverv2.OpenDaylightBgpvpnDriver(service_plugin=None) def _get_fake_bgpvpn(self, net=False, router=False): net_id = [] router_id = [] if router: router_id = ['ROUTER_ID'] if net: net_id = ['NET_ID'] fake_bgpvpn = {'route_targets': '100:1', 'route_distinguishers': ['100:1'], 'id': 'BGPVPN_ID', 'networks': net_id, 'routers': router_id} return fake_bgpvpn def _get_fake_router_assoc(self): fake_router_assoc = {'id': 'ROUTER_ASSOC_ID', 'bgpvpn_id': 'BGPVPN_ID', 'router_id': 'ROUTER_ID'} return fake_router_assoc def _get_fake_net_assoc(self): fake_net_assoc = {'id': 'NET_ASSOC_ID', 'bgpvpn_id': 'BGPVPN_ID', 'network_id': 'NET_ID'} return fake_net_assoc def _assert_op(self, operation, object_type, data, precommit=True): rows = sorted(db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING), key=lambda x: x.seqnum) if precommit: self.db_context.session.flush() self.assertEqual(operation, rows[0]['operation']) self.assertEqual(object_type, rows[0]['object_type']) self.assertEqual(data['id'], rows[0]['object_uuid']) else: self.assertEqual([], rows) def test_create_bgpvpn(self): fake_data = self._get_fake_bgpvpn() self.driver.create_bgpvpn_precommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN, fake_data) self.run_journal_processing() self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN, fake_data, False) def test_update_bgpvpn(self): fake_data = self._get_fake_bgpvpn() self.driver.update_bgpvpn_precommit(self.db_context, fake_data) 
self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_data) self.run_journal_processing() self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_data, False) def test_delete_bgpvpn(self): fake_data = self._get_fake_bgpvpn() self.driver.delete_bgpvpn_precommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_BGPVPN, fake_data) self.run_journal_processing() self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_BGPVPN, fake_data, False) def test_create_router_assoc(self): fake_rtr_assoc_data = self._get_fake_router_assoc() fake_rtr_upd_bgpvpn_data = self._get_fake_bgpvpn(router=True) with mock.patch.object(self.driver, 'get_router_assocs', return_value=[]), \ mock.patch.object(self.driver, 'get_bgpvpn', return_value=fake_rtr_upd_bgpvpn_data): self.driver.create_router_assoc_precommit(self.db_context, fake_rtr_assoc_data) self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_rtr_upd_bgpvpn_data) self.run_journal_processing() self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_rtr_upd_bgpvpn_data, False) def test_delete_router_assoc(self): fake_rtr_assoc_data = self._get_fake_router_assoc() fake_bgpvpn_data = self._get_fake_bgpvpn(router=True) with mock.patch.object(self.driver, 'get_bgpvpn', return_value=fake_bgpvpn_data): self.driver.delete_router_assoc_precommit(self.db_context, fake_rtr_assoc_data) self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_bgpvpn_data) self.run_journal_processing() self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_bgpvpn_data, False) def test_create_net_assoc(self): fake_net_assoc_data = self._get_fake_net_assoc() fake_net_upd_bgpvpn_data = self._get_fake_bgpvpn(net=True) # todo(vivekanandan) add check for case when assoc already exists with mock.patch.object(self.driver, 'get_bgpvpns', return_value=[fake_net_upd_bgpvpn_data]): self.driver.create_net_assoc_precommit(self.db_context, fake_net_assoc_data) 
self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_net_upd_bgpvpn_data) self.run_journal_processing() self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_net_upd_bgpvpn_data, False) def test_delete_net_assoc(self): fake_net_assoc_data = self._get_fake_net_assoc() fake_bgpvpn_data = self._get_fake_bgpvpn(net=True) with mock.patch.object(self.driver, 'get_bgpvpn', return_value=fake_bgpvpn_data): self.driver.delete_net_assoc_precommit(self.db_context, fake_net_assoc_data) self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_bgpvpn_data) self.run_journal_processing() self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN, fake_bgpvpn_data, False) def _get_bgpvpn_driver_with_vni(self): feature_json = """{"features": {"feature": [{"service-provider-feature": "neutron-extensions:operational-port-status"}, {"service-provider-feature": "neutron-extensions:bgpvpn-vni"}]}}""" self.cfg.config(odl_features_json=feature_json, group='ml2_odl') odl_features.init() bgpvpn_driver = driverv2.OpenDaylightBgpvpnDriver(service_plugin=None) return bgpvpn_driver def test_bgpvpn_vni_feature(self): bgpvpn_driver = self._get_bgpvpn_driver_with_vni() self.assertIn(bgpvpn_vni_def.ALIAS, bgpvpn_driver.more_supported_extension_aliases) def test_bgpvpn_vni_create_with_vni(self): bgpvpn_driver = self._get_bgpvpn_driver_with_vni() fake_data = self._get_fake_bgpvpn() fake_data['vni'] = 100 bgpvpn_driver.create_bgpvpn_precommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN, fake_data) self.run_journal_processing() self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN, fake_data, False) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/0000755000175000017500000000000000000000000027172 
5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/__init__.py0000644000175000017500000000000000000000000031271 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/0000755000175000017500000000000000000000000030663 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/__init__.py0000644000175000017500000000000000000000000032762 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/0000755000175000017500000000000000000000000033055 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/__init__.py0000644000175000017500000000000000000000000035154 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000113 path=networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/ 27 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_0000755000175000017500000000000000000000000035624 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 
path=networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init__.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_0000644000175000017500000000000000000000000035614 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_0000644000175000017500000001074500000000000035635 0ustar00jamespagejamespage00000000000000# # Copyright 2017 Ericsson India Global Services Pvt Ltd.. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture as config_fixture from oslotest import base from requests import auth as req_auth import six from six.moves.urllib import parse as urlparse from ceilometer.i18n import _ from ceilometer import service as ceilometer_service from networking_odl.ceilometer.network.statistics.opendaylight_v2 import client class TestClientHTTPBasicAuth(base.BaseTestCase): auth_way = 'basic' scheme = 'http' def setUp(self): super(TestClientHTTPBasicAuth, self).setUp() conf = ceilometer_service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(config_fixture.Config(conf)).conf self.parsed_url = urlparse.urlparse( 'http://127.0.0.1:8080/controller/statistics?' 'auth=%s&user=admin&password=admin_pass&' 'scheme=%s' % (self.auth_way, self.scheme)) self.params = urlparse.parse_qs(self.parsed_url.query) self.endpoint = urlparse.urlunparse( urlparse.ParseResult(self.scheme, self.parsed_url.netloc, self.parsed_url.path, None, None, None)) odl_params = {'auth': self.params.get('auth')[0], 'user': self.params.get('user')[0], 'password': self.params.get('password')[0]} self.client = client.Client(self.CONF, self.endpoint, odl_params) self.resp = mock.MagicMock() self.get = mock.patch('requests.Session.get', return_value=self.resp).start() self.resp.raw.version = 1.1 self.resp.status_code = 200 self.resp.reason = 'OK' self.resp.headers = {} self.resp.content = 'dummy' def _test_request(self, method, url): data = method() call_args = self.get.call_args_list[0][0] call_kwargs = self.get.call_args_list[0][1] # check url real_url = url % {'scheme': self.scheme} self.assertEqual(real_url, call_args[0]) # check auth parameters auth = call_kwargs.get('auth') if self.auth_way == 'digest': self.assertIsInstance(auth, req_auth.HTTPDigestAuth) else: self.assertIsInstance(auth, req_auth.HTTPBasicAuth) self.assertEqual('admin', auth.username) self.assertEqual('admin_pass', auth.password) # check header self.assertEqual( {'Accept': 
'application/json'}, call_kwargs['headers']) # check return value self.assertEqual(self.get().json(), data) def test_switch_statistics(self): self._test_request( self.client.switch_statistics.get_statistics, '%(scheme)s://127.0.0.1:8080/controller' '/statistics/flow-capable-switches') def test_http_error(self): self.resp.status_code = 404 self.resp.reason = 'Not Found' try: self.client.switch_statistics.get_statistics() self.fail('') except client.OpenDaylightRESTAPIFailed as e: self.assertEqual( _('OpenDaylight API returned %(status)s %(reason)s') % {'status': self.resp.status_code, 'reason': self.resp.reason}, six.text_type(e)) def test_other_error(self): class _Exception(Exception): pass self.get = mock.patch('requests.Session.get', side_effect=_Exception).start() self.assertRaises(_Exception, lambda: self.client.switch_statistics.get_statistics()) class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): auth_way = 'digest' class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): scheme = 'https' class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): scheme = 'https' ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_0000644000175000017500000006264500000000000035643 0ustar00jamespagejamespage00000000000000# # Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import mock from oslotest import base import six from six.moves.urllib import parse as url_parse from ceilometer import service from networking_odl.ceilometer.network.statistics.opendaylight_v2 import driver from oslo_utils import uuidutils ADMIN_ID = uuidutils.generate_uuid() PORT_1_TENANT_ID = uuidutils.generate_uuid() PORT_2_TENANT_ID = uuidutils.generate_uuid() PORT_1_ID = uuidutils.generate_uuid() PORT_2_ID = uuidutils.generate_uuid() @six.add_metaclass(abc.ABCMeta) class _Base(base.BaseTestCase): @abc.abstractproperty def switch_data(self): pass fake_odl_url = url_parse.ParseResult('opendaylight.v2', 'localhost:8080', 'controller/statistics', None, None, None) fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' 'auth=basic') def setUp(self): super(_Base, self).setUp() self.addCleanup(mock.patch.stopall) conf = service.prepare_service([], []) self.driver = driver.OpenDaylightDriver(conf) ks_client = mock.Mock(auth_token='fake_token') ks_client.projects.find.return_value = mock.Mock(name='admin', id=ADMIN_ID) self.ks_client = mock.patch('ceilometer.keystone_client.get_client', return_value=ks_client).start() self.get_statistics = mock.patch( 'networking_odl.ceilometer.network.statistics.opendaylight_v2.' 
'client.SwitchStatisticsAPIClient.get_statistics', return_value=self.switch_data).start() def _test_for_meter(self, meter_name, expected_data): sample_data = self.driver.get_sample_data(meter_name, self.fake_odl_url, self.fake_params, {}) self.assertEqual(expected_data, list(sample_data)) class TestOpenDayLightDriverInvalid(_Base): switch_data = {"flow_capable_switches": []} def test_not_implemented_meter(self): sample_data = self.driver.get_sample_data('egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) sample_data = self.driver.get_sample_data('switch.table.egg', self.fake_odl_url, self.fake_params, {}) self.assertIsNone(sample_data) def test_cache(self): cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(1, self.get_statistics.call_count) cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertEqual(2, self.get_statistics.call_count) def test_http_error(self): mock.patch( 'networking_odl.ceilometer.network.statistics.opendaylight_v2.' 'client.SwitchStatisticsAPIClient.get_statistics', side_effect=Exception()).start() sample_data = self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, {}) self.assertEqual(0, len(sample_data)) mock.patch( 'networking_odl.ceilometer.network.statistics.opendaylight_v2.' 
'client.SwitchStatisticsAPIClient.get_statistics', side_effect=[Exception(), self.switch_data]).start() cache = {} self.driver.get_sample_data('switch', self.fake_odl_url, self.fake_params, cache) self.assertIn('network.statistics.opendaylight_v2', cache) class TestOpenDayLightDriverSimple(_Base): switch_data = { "flow_capable_switches": [{ "packet_in_messages_received": 501, "packet_out_messages_sent": 300, "ports": 1, "flow_datapath_id": 55120148545607, "switch_port_counters": [{ "bytes_received": 0, "bytes_sent": 0, "duration": 600, "packets_internal_received": 444, "packets_internal_sent": 0, "packets_received": 0, "packets_received_drop": 0, "packets_received_error": 0, "packets_sent": 0, "port_id": 4, "tenant_id": PORT_1_TENANT_ID, "uuid": PORT_1_ID }], "table_counters": [{ "flow_count": 90, "table_id": 0 }] }] } def test_meter_switch(self): expected_data = [ (1, "55120148545607", {'controller': 'OpenDaylight_V2'}, ADMIN_ID), ] self._test_for_meter('switch', expected_data) def test_meter_switch_ports(self): expected_data = [ (1, "55120148545607", {'controller': 'OpenDaylight_V2'}, ADMIN_ID) ] self._test_for_meter('switch.ports', expected_data) def test_meter_switch_port(self): expected_data = [ (1, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_uptime(self): expected_data = [ (600, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.uptime', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (0, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.packets', 
expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.bytes', expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (0, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, '55120148545607:4', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 4, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_port(self): expected_data = [ (1, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port', expected_data) def test_meter_port_uptime(self): expected_data = [ (600, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.uptime', expected_data) def test_meter_port_receive_packets(self): expected_data = [ (0, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] 
self._test_for_meter('port.receive.packets', expected_data) def test_meter_port_transmit_packets(self): expected_data = [ (0, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.transmit.packets', expected_data) def test_meter_port_receive_bytes(self): expected_data = [ (0, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.receive.bytes', expected_data) def test_meter_port_transmit_bytes(self): expected_data = [ (0, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.transmit.bytes', expected_data) def test_meter_port_receive_drops(self): expected_data = [ (0, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.receive.drops', expected_data) def test_meter_port_receive_errors(self): expected_data = [ (0, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.receive.errors', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (90, "55120148545607:table:0", { 'switch': '55120148545607', 'controller': 'OpenDaylight_V2' }, ADMIN_ID), ] self._test_for_meter('switch.table.active.entries', expected_data) class TestOpenDayLightDriverComplex(_Base): switch_data = { "flow_capable_switches": [{ "packet_in_messages_received": 501, "packet_out_messages_sent": 300, "ports": 3, "flow_datapath_id": 55120148545607, "switch_port_counters": [{ "bytes_received": 0, "bytes_sent": 512, "duration": 200, "packets_internal_received": 444, "packets_internal_sent": 0, "packets_received": 10, "packets_received_drop": 0, "packets_received_error": 0, "packets_sent": 0, "port_id": 3, }, { "bytes_received": 9800, "bytes_sent": 6540, "duration": 150, "packets_internal_received": 0, "packets_internal_sent": 7650, "packets_received": 20, "packets_received_drop": 0, "packets_received_error": 0, "packets_sent": 0, "port_id": 2, "tenant_id": 
PORT_2_TENANT_ID, "uuid": PORT_2_ID }, { "bytes_received": 100, "bytes_sent": 840, "duration": 100, "packets_internal_received": 984, "packets_internal_sent": 7950, "packets_received": 9900, "packets_received_drop": 1500, "packets_received_error": 1000, "packets_sent": 7890, "port_id": 1, "tenant_id": PORT_1_TENANT_ID, "uuid": PORT_1_ID }], "table_counters": [{ "flow_count": 90, "table_id": 10 }, { "flow_count": 80, "table_id": 20 }], }, { "packet_in_messages_received": 0, "packet_out_messages_sent": 0, "ports": 0, "flow_datapath_id": 55120148545555, "table_counters": [{ "flow_count": 5, "table_id": 10 }, { "flow_count": 3, "table_id": 20 }], }] } def test_meter_switch(self): expected_data = [ (1, "55120148545607", { 'controller': 'OpenDaylight_V2' }, ADMIN_ID), (1, "55120148545555", { 'controller': 'OpenDaylight_V2' }, ADMIN_ID), ] self._test_for_meter('switch', expected_data) def test_meter_switch_ports(self): expected_data = [ (3, "55120148545607", { 'controller': 'OpenDaylight_V2' }, ADMIN_ID), (0, "55120148545555", { 'controller': 'OpenDaylight_V2' }, ADMIN_ID), ] self._test_for_meter('switch.ports', expected_data) def test_meter_switch_port(self): expected_data = [ (1, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (1, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 'switch': '55120148545607' }, ADMIN_ID), (1, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port', expected_data) def test_meter_switch_port_uptime(self): expected_data = [ (200, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (150, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 
'switch': '55120148545607' }, ADMIN_ID), (100, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.uptime', expected_data) def test_meter_switch_port_receive_packets(self): expected_data = [ (10, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (20, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 'switch': '55120148545607' }, ADMIN_ID), (9900, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.packets', expected_data) def test_meter_switch_port_transmit_packets(self): expected_data = [ (0, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (0, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 'switch': '55120148545607' }, ADMIN_ID), (7890, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.transmit.packets', expected_data) def test_meter_switch_port_receive_bytes(self): expected_data = [ (0, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (9800, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 'switch': '55120148545607' }, ADMIN_ID), (100, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.bytes', 
expected_data) def test_meter_switch_port_transmit_bytes(self): expected_data = [ (512, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (6540, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 'switch': '55120148545607' }, ADMIN_ID), (840, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.transmit.bytes', expected_data) def test_meter_switch_port_receive_drops(self): expected_data = [ (0, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (0, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 'switch': '55120148545607' }, ADMIN_ID), (1500, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.drops', expected_data) def test_meter_switch_port_receive_errors(self): expected_data = [ (0, "55120148545607:3", { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 3, 'switch': '55120148545607' }, ADMIN_ID), (0, '55120148545607:2', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 2, 'neutron_port_id': PORT_2_ID, 'switch': '55120148545607' }, ADMIN_ID), (1000, '55120148545607:1', { 'controller': 'OpenDaylight_V2', 'port_number_on_switch': 1, 'neutron_port_id': PORT_1_ID, 'switch': '55120148545607' }, ADMIN_ID), ] self._test_for_meter('switch.port.receive.errors', expected_data) def test_meter_port(self): expected_data = [ (1, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (1, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port', 
expected_data) def test_meter_port_uptime(self): expected_data = [ (150, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (100, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.uptime', expected_data) def test_meter_port_receive_packets(self): expected_data = [ (20, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (9900, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.receive.packets', expected_data) def test_meter_port_transmit_packets(self): expected_data = [ (0, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (7890, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.transmit.packets', expected_data) def test_meter_port_receive_bytes(self): expected_data = [ (9800, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (100, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.receive.bytes', expected_data) def test_meter_port_transmit_bytes(self): expected_data = [ (6540, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (840, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.transmit.bytes', expected_data) def test_meter_port_receive_drops(self): expected_data = [ (0, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (1500, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.receive.drops', expected_data) def test_meter_port_receive_errors(self): expected_data = [ (0, str(PORT_2_ID), {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID), (1000, str(PORT_1_ID), {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID), ] self._test_for_meter('port.receive.errors', expected_data) def test_meter_switch_table_active_entries(self): expected_data = [ (90, 
"55120148545607:table:10", { 'switch': '55120148545607', 'controller': 'OpenDaylight_V2' }, ADMIN_ID), (80, "55120148545607:table:20", { 'switch': '55120148545607', 'controller': 'OpenDaylight_V2' }, ADMIN_ID), (5, "55120148545555:table:10", { 'switch': '55120148545555', 'controller': 'OpenDaylight_V2' }, ADMIN_ID), (3, "55120148545555:table:20", { 'switch': '55120148545555', 'controller': 'OpenDaylight_V2' }, ADMIN_ID), ] self._test_for_meter('switch.table.active.entries', expected_data) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.814714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/cmd/0000755000175000017500000000000000000000000025605 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/cmd/__init__.py0000644000175000017500000000000000000000000027704 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/cmd/test_analyze_journal.py0000644000175000017500000002215100000000000032414 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import random import string import fixtures import mock from six import StringIO from networking_odl.cmd import analyze_journal from networking_odl.journal import journal from networking_odl.tests import base def _random_string(): letters = string.ascii_lowercase return ''.join(random.choice(letters) for _ in range(random.randint(1, 10))) def _generate_log_entry(log_type=None, entry_id=None): entry = mock.Mock() entry.seqnum = entry_id if entry_id else _random_string() entry.operation = _random_string() entry.object_type = _random_string() entry.object_uuid = _random_string() logger = fixtures.FakeLogger() with logger: journal._log_entry(log_type if log_type else _random_string(), entry) return entry, "noise %s noise" % logger.output class TestAnalyzeJournal(base.DietTestCase): def setUp(self): super(TestAnalyzeJournal, self).setUp() self.output = StringIO() def _assert_nothing_printed(self): self.assertEqual('', self.output.getvalue()) def _assert_something_printed(self, expected=None): self.assertNotEqual('', self.output.getvalue()) if expected: self.assertIn(str(expected), self.output.getvalue()) def test_setup_conf_no_args(self): conf = analyze_journal.setup_conf(self.output, []) self.assertIsNotNone(conf) self._assert_nothing_printed() def test_setup_conf_h_flag(self): self.assertRaises( SystemExit, analyze_journal.setup_conf, self.output, ['-h']) self._assert_something_printed() def test_setup_conf_help_flag(self): self.assertRaises( SystemExit, analyze_journal.setup_conf, self.output, ['--help']) self._assert_something_printed() def test_setup_conf_file(self): file_name = _random_string() conf = analyze_journal.setup_conf(self.output, ['--file', file_name]) self.assertEqual(file_name, conf.file) def test_setup_conf_slowest(self): slowest = random.randint(1, 10000) conf = analyze_journal.setup_conf( self.output, ['--slowest', str(slowest)]) self.assertEqual(slowest, conf.slowest) def test_setup_conf_slowest_zero(self): 
self.assertRaises(SystemExit, analyze_journal.setup_conf, self.output, ['--slowest', '0']) self._assert_nothing_printed() def test_parse_log_no_matched_content(self): self.assertEqual({}, analyze_journal.parse_log([])) self.assertEqual({}, analyze_journal.parse_log(['dummy'])) def _test_parse_log_entry(self, recorded=False, completed=False): content = [] entry_id = _random_string() entry = None if recorded: entry, log = _generate_log_entry(log_type=journal.LOG_RECORDED, entry_id=entry_id) content.append(log) if completed: centry, log = _generate_log_entry(log_type=journal.LOG_COMPLETED, entry_id=entry_id) entry = centry if entry is None else entry content.append(log) entries = analyze_journal.parse_log(content) actual_entry = entries[entry_id] self.assertEqual(entry.operation, actual_entry['op']) self.assertEqual(entry.object_type, actual_entry['obj_type']) self.assertEqual(entry.object_uuid, actual_entry['obj_id']) if recorded: self.assertGreater(actual_entry[journal.LOG_RECORDED], 0) if completed: self.assertGreater(actual_entry[journal.LOG_COMPLETED], 0) def test_parse_log_entry_recorded(self): self._test_parse_log_entry(recorded=True) def test_parse_log_entry_completed(self): self._test_parse_log_entry(completed=True) def test_parse_log_entry_recorded_and_completed(self): self._test_parse_log_entry(recorded=True, completed=True) def test_analyze_entries_no_records(self): self.assertEqual([], analyze_journal.analyze_entries({})) def _generate_random_entry(self): return dict([(k, _random_string()) for k in analyze_journal.LOG_KEYS]) def _entry_for_analyze_entries(self, recorded=False, completed=False): entry = self._generate_random_entry() if recorded: entry[journal.LOG_RECORDED] = random.uniform(1, 10) if completed: entry[journal.LOG_COMPLETED] = random.uniform(10, 20) return entry def test_analyze_entries_no_completed_time(self): entry = self._entry_for_analyze_entries(recorded=True) entries = {entry['entry_id']: entry} self.assertEqual([], 
analyze_journal.analyze_entries(entries)) def test_analyze_entries_no_recorded_time(self): entry = self._entry_for_analyze_entries(completed=True) entries = {entry['entry_id']: entry} self.assertEqual([], analyze_journal.analyze_entries(entries)) def test_analyze_entries(self): entry = self._entry_for_analyze_entries(recorded=True, completed=True) entry_only_recorded = self._entry_for_analyze_entries(recorded=True) entry_only_completed = self._entry_for_analyze_entries(completed=True) entries = {e['entry_id']: e for e in (entry, entry_only_recorded, entry_only_completed)} entries_stats = analyze_journal.analyze_entries(entries) expected_time = (entry[journal.LOG_COMPLETED] - entry[journal.LOG_RECORDED]) expected_entry = analyze_journal.EntryStats( entry_id=entry['entry_id'], time=expected_time, op=entry['op'], obj_type=entry['obj_type'], obj_id=entry['obj_id']) self.assertIn(expected_entry, entries_stats) def _assert_percentile_printed(self, entries_stats, percentile): expected_percentile_format = "%sth percentile: %s" percentile_index = int(len(entries_stats) * (percentile / 100.0)) entry = entries_stats[percentile_index] self._assert_something_printed(expected_percentile_format % (percentile, entry.time)) def test_print_stats(self): entries_stats = [] entries_count = 10 slowest = random.randint(1, int(entries_count / 2)) for i in range(entries_count): entry = self._generate_random_entry() entries_stats.append( analyze_journal.EntryStats( entry_id=entry['entry_id'], time=i, op=entry['op'], obj_type=entry['obj_type'], obj_id=entry['obj_id'])) analyze_journal.print_stats(self.output, slowest, entries_stats) total_time = (entries_count * (entries_count - 1)) / 2 avg_time = total_time / entries_count self._assert_something_printed(avg_time) self._assert_something_printed(slowest) self._assert_percentile_printed(entries_stats, 90) self._assert_percentile_printed(entries_stats, 99) self._assert_percentile_printed(entries_stats, 99.9) expected = '' for i in 
reversed(range(entries_count - slowest, entries_count)): entry = entries_stats[i] expected += '\n' expected += (analyze_journal.ENTRY_LOG_TEMPLATE % (entry.entry_id, entry.time, entry.op, entry.obj_type, entry.obj_id)) self._assert_something_printed(expected) @contextlib.contextmanager def _setup_mocks_for_main(self, content): with mock.patch.object(analyze_journal, 'get_content') as mgc, \ mock.patch.object(analyze_journal, 'setup_conf') as msc: m = mock.MagicMock() m.__iter__.return_value = content mgc().__enter__.return_value = m conf = msc() conf.slowest = 10 yield def test_main(self): entry_id = _random_string() _, entry_recorded = _generate_log_entry(journal.LOG_RECORDED, entry_id) _, entry_completed = _generate_log_entry(journal.LOG_COMPLETED, entry_id) with self._setup_mocks_for_main((entry_recorded, entry_completed)): rc = analyze_journal.main(self.output) self.assertEqual(0, rc) self._assert_something_printed(entry_id) def test_main_no_entry_stats(self): with self._setup_mocks_for_main(('dummy',)): rc = analyze_journal.main(self.output) self.assertNotEqual(0, rc) self._assert_something_printed() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py0000644000175000017500000002502500000000000033312 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # pylint: disable=unused-argument, protected-access from contextlib import contextmanager import os import sys import tempfile import mock from oslo_serialization import jsonutils import six from networking_odl.cmd import set_ovs_hostconfigs from networking_odl.tests import base from networking_odl.tests import match LOGGING_ENABLED = "Logging Enabled!" LOGGING_PERMISSION_REQUIRED = "permissions are required to configure ovsdb" @contextmanager def capture(command, args): out, sys.stdout = sys.stdout, six.StringIO() try: command(args) sys.stdout.seek(0) yield sys.stdout.read() finally: sys.stdout = out class TestSetOvsHostconfigs(base.DietTestCase): maxDiff = None def test_given_ovs_hostconfigs(self): # given self.patch_os_geteuid() ovs_hostconfigs = { "ODL L2": {"allowed_network_types": ["a", "b", "c"]}} args = ['--ovs_hostconfigs=' + jsonutils.dumps(ovs_hostconfigs), '--bridge_mappings=a:1,b:2'] execute = self.patch_utils_execute() conf = set_ovs_hostconfigs.setup_conf(args) # when result = set_ovs_hostconfigs.main(args) # then self.assertEqual(0, result) execute.assert_has_calls([ mock.call( ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')), mock.call( ('ovs-vsctl', 'set', 'Open_vSwitch', '', 'external_ids:odl_os_hostconfig_hostid=' + conf.host)), mock.call( ('ovs-vsctl', 'set', 'Open_vSwitch', '', match.wildcard( 'external_ids:odl_os_hostconfig_config_odl_l2=*'))), ]) expected = ovs_hostconfigs['ODL L2'] _, actual_json = execute.call_args_list[2][0][0][4].split("=", 1) self.assertEqual(match.json(expected), actual_json) def test_given_no_args(self): self._test_given_args(tuple()) def test_given_default_values(self): self._test_given_args([]) def test_given_datapath_type_system(self): self._test_given_args(['--datapath_type=netdev']) def test_given_datapath_type_netdev(self): self._test_given_args(['--datapath_type=netdev']) def 
test_given_datapath_type_vhostuser(self): self._test_given_args(['--datapath_type=dpdkvhostuser']) def test_given_ovs_dpdk(self): self._test_given_args(['--ovs_dpdk']) def test_given_noovs_dpdk(self): self._test_given_args(['--noovs_dpdk']) def test_given_ovs_sriov_offload(self): self._test_given_args(['--noovs_dpdk', '--ovs_sriov_offload']) def test_given_vhostuser_ovs_plug(self): self._test_given_args(['--vhostuser_ovs_plug']) def test_given_novhostuser_ovs_plug(self): self._test_given_args(['--novhostuser_ovs_plug']) def test_given_allowed_network_types(self): self._test_given_args(['--allowed_network_types=a,b,c']) def test_given_local_ip(self): self._test_given_args(['--local_ip=192.168.1.10', '--host=']) def test_given_vhostuser_mode_server(self): self._test_given_args( ['--vhostuser_mode=server', '--datapath_type=netdev']) def test_given_vhostuser_mode_client(self): self._test_given_args( ['--vhostuser_mode=client', '--datapath_type=netdev']) def test_given_vhostuser_port_prefix_vhu(self): self._test_given_args( ['--vhostuser_port_prefix=vhu', '--datapath_type=netdev']) def test_given_vhostuser_port_prefix_socket(self): self._test_given_args( ['--vhostuser_port_prefix=socket', '--datapath_type=netdev']) def test_given_config_file(self): file_descriptor, file_path = tempfile.mkstemp() try: os.write(file_descriptor, six.b("# dummy neutron config file\n")) os.close(file_descriptor) self._test_given_args(['--config-file={}'.format(file_path)]) finally: os.remove(file_path) def _test_given_args(self, *args): # given self.patch_os_geteuid() execute = self.patch_utils_execute() conf = set_ovs_hostconfigs.setup_conf(*args) datapath_type = conf.datapath_type if datapath_type is None: if conf.ovs_dpdk is False: datapath_type = "system" else: datapath_type = "netdev" # when result = set_ovs_hostconfigs.main(*args) # then self.assertEqual(0, result) execute.assert_has_calls([ mock.call( ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')), mock.call( ('ovs-vsctl', 'get', 
'Open_vSwitch', '.', 'datapath_types')), mock.call( ('ovs-vsctl', 'set', 'Open_vSwitch', '', 'external_ids:odl_os_hostconfig_hostid=' + conf.host)), mock.call( ('ovs-vsctl', 'set', 'Open_vSwitch', '', match.wildcard( 'external_ids:odl_os_hostconfig_config_odl_l2=*'))), ]) host_addresses = [conf.host or conf.local_ip] if datapath_type == "system": vif_type = "ovs" vif_details = { "uuid": '', "host_addresses": host_addresses, "has_datapath_type_netdev": False, "support_vhost_user": False } else: # datapath_type in ["system", "netdev"] vif_type = "vhostuser" vif_details = { "uuid": '', "host_addresses": host_addresses, "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": conf.vhostuser_port_prefix, "vhostuser_mode": conf.vhostuser_mode, "vhostuser_ovs_plug": conf.vhostuser_ovs_plug, "vhostuser_socket_dir": conf.vhostuser_socket_dir, "vhostuser_socket": os.path.join( conf.vhostuser_socket_dir, conf.vhostuser_port_prefix + "$PORT_ID"), } _, actual_json = execute.call_args_list[3][0][0][4].split("=", 1) expected = { "allowed_network_types": conf.allowed_network_types, "bridge_mappings": conf.bridge_mappings, "datapath_type": datapath_type, "supported_vnic_types": [ { "vif_type": vif_type, "vnic_type": "normal", "vif_details": vif_details } ] } if vif_type == 'ovs' and conf.ovs_sriov_offload: direct_vnic = { "vif_details": vif_details, "vif_type": vif_type, "vnic_type": "direct", } expected["supported_vnic_types"].append(direct_vnic) self.assertEqual(match.json(expected), actual_json) def test_given_ovs_dpdk_undetected(self): # given LOG = self.patch(set_ovs_hostconfigs, 'LOG') args = ('--ovs_dpdk', '--bridge_mappings=a:1,b:2', '--debug') conf = set_ovs_hostconfigs.setup_conf(args) self.patch_os_geteuid() execute = self.patch_utils_execute(datapath_types="whatever") # when result = set_ovs_hostconfigs.main(args) # then self.assertEqual(1, result) execute.assert_has_calls([ mock.call( ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')), mock.call( 
('ovs-vsctl', 'get', 'Open_vSwitch', '.', 'datapath_types')), ]) LOG.error.assert_called_once_with( "Fatal error: %s", match.wildcard( "--ovs_dpdk option was specified but the 'netdev' " "datapath_type was not enabled. To override use option " "--datapath_type=netdev"), exc_info=conf.debug) def test_bridge_mappings(self): # when conf = set_ovs_hostconfigs.setup_conf(('--bridge_mappings=a:1,b:2',)) self.assertEqual({'a': '1', 'b': '2'}, conf.bridge_mappings) def test_allowed_network_types(self): # when conf = set_ovs_hostconfigs.setup_conf(('--allowed_network_types=a,b',)) self.assertEqual(['a', 'b'], conf.allowed_network_types) def patch_utils_execute( self, uuid='', datapath_types='netdev,dpdkvhostuser,system'): def execute(args): command, method, table, record, value = args self.assertEqual('ovs-vsctl', command) self.assertEqual('Open_vSwitch', table) self.assertIn(method, ['get', 'set']) if method == 'set': self.assertEqual(uuid, record) return "" elif method == 'get': self.assertEqual('.', record) self.assertIn(value, ['_uuid', 'datapath_types']) if value == '_uuid': return uuid elif value == 'datapath_types': return datapath_types self.fail('Unexpected command: ' + repr(args)) return self.patch( set_ovs_hostconfigs.subprocess, "check_output", side_effect=execute) def patch_os_geteuid(self, return_value=0): return self.patch( set_ovs_hostconfigs.os, "geteuid", return_value=return_value) @contextmanager def test_log_on_console_msg(self): with capture(set_ovs_hostconfigs.main, args=()) as output: self.assertNotEqual(-1, output.find(LOGGING_PERMISSION_REQUIRED)) def test_log_in_file(self): with tempfile.TemporaryFile() as fp: set_ovs_hostconfigs.main(("--log-file=%s" % fp.name,)) logs = [LOGGING_ENABLED, LOGGING_PERMISSION_REQUIRED] for line, count in fp.readline(): self.assertNotEqual(-1, line.find(logs[count])) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.818714 
networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/0000755000175000017500000000000000000000000026332 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/__init__.py0000644000175000017500000000000000000000000030431 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_callback.py0000644000175000017500000001727200000000000031510 0ustar00jamespagejamespage00000000000000# Copyright (c) 2013-2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging from networking_odl.common import callback from networking_odl.common import constants as odl_const from networking_odl.tests import base import mock from neutron_lib.callbacks import events from neutron_lib.callbacks import resources import testtools FAKE_ID = 'fakeid' class ODLCallbackTestCase(testtools.TestCase): def setUp(self): self.useFixture(base.OpenDaylightRestClientFixture()) super(ODLCallbackTestCase, self).setUp() self._precommit = mock.Mock() self._postcommit = mock.Mock() self.sgh = callback.OdlSecurityGroupsHandler(self._precommit, self._postcommit) def _test_callback_precommit_for_sg(self, event, op, sg, sg_id): plugin_context_mock = mock.Mock() expected_dict = ({resources.SECURITY_GROUP: sg} if sg is not None else None) self.sgh.sg_callback_precommit(resources.SECURITY_GROUP, event, None, context=plugin_context_mock, security_group=sg, security_group_id=sg_id) self._precommit.assert_called_with( plugin_context_mock, op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id, expected_dict, security_group=sg, security_group_id=sg_id) def _test_callback_postcommit_for_sg(self, event, op, sg, sg_id): plugin_context_mock = mock.Mock() expected_dict = ({resources.SECURITY_GROUP: sg} if sg is not None else None) self.sgh.sg_callback_postcommit(resources.SECURITY_GROUP, event, None, context=plugin_context_mock, security_group=sg, security_group_id=sg_id) self._postcommit.assert_called_with( plugin_context_mock, op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id, expected_dict, security_group=sg, security_group_id=sg_id) def test_callback_precommit_sg_create(self): sg = mock.Mock() sg_id = sg.get('id') self._test_callback_precommit_for_sg( events.PRECOMMIT_CREATE, odl_const.ODL_CREATE, sg, sg_id) def test_callback_postcommit_sg_create(self): sg = mock.Mock() sg_id = sg.get('id') self._test_callback_postcommit_for_sg( events.AFTER_CREATE, odl_const.ODL_CREATE, sg, sg_id) def test_callback_precommit_sg_update(self): 
self._test_callback_precommit_for_sg( events.PRECOMMIT_UPDATE, odl_const.ODL_UPDATE, mock.Mock(), FAKE_ID) def test_callback_postcommit_sg_update(self): self._test_callback_postcommit_for_sg( events.AFTER_UPDATE, odl_const.ODL_UPDATE, mock.Mock(), FAKE_ID) def test_callback_precommit_sg_delete(self): self._test_callback_precommit_for_sg( events.PRECOMMIT_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) def test_callback_postcommit_sg_delete(self): self._test_callback_postcommit_for_sg( events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) def _test_callback_precommit_for_sg_rules( self, event, op, sg_rule, sg_rule_id): plugin_context_mock = mock.Mock() expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule} if sg_rule is not None else None) self.sgh.sg_callback_precommit(resources.SECURITY_GROUP_RULE, event, None, context=plugin_context_mock, security_group_rule=sg_rule, security_group_rule_id=sg_rule_id) self._precommit.assert_called_with( plugin_context_mock, op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE], sg_rule_id, expected_dict, security_group_rule=sg_rule, security_group_rule_id=sg_rule_id) def _test_callback_postcommit_for_sg_rules( self, event, op, sg_rule, sg_rule_id): plugin_context_mock = mock.Mock() expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule} if sg_rule is not None else None) self.sgh.sg_callback_postcommit(resources.SECURITY_GROUP_RULE, event, None, context=plugin_context_mock, security_group_rule=sg_rule, security_group_rule_id=sg_rule_id) self._postcommit.assert_called_with( plugin_context_mock, op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE], sg_rule_id, expected_dict, security_group_rule=sg_rule, security_group_rule_id=sg_rule_id, ) def test_callback_precommit_sg_rules_create(self): rule = mock.Mock() rule_id = rule.get('id') self._test_callback_precommit_for_sg_rules( events.PRECOMMIT_CREATE, odl_const.ODL_CREATE, rule, rule_id) def test_callback_postcommit_sg_rules_create(self): rule = mock.Mock() 
rule_id = rule.get('id') self._test_callback_postcommit_for_sg_rules( events.AFTER_CREATE, odl_const.ODL_CREATE, rule, rule_id) def test_callback_precommit_sg_rules_delete(self): self._test_callback_precommit_for_sg_rules( events.PRECOMMIT_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) def test_callback_postcommit_sg_rules_delete(self): self._test_callback_postcommit_for_sg_rules( events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID) def test_callback_exception(self): class TestException(Exception): def __init__(self): pass self._precommit.side_effect = TestException() resource = callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE] op = callback._OPERATION_MAPPING[events.PRECOMMIT_CREATE] rule = mock.Mock() rule_id = rule.get('id') with mock.patch.object(callback, 'LOG') as log_mock: self.assertRaises(TestException, self._test_callback_precommit_for_sg_rules, events.PRECOMMIT_CREATE, odl_const.ODL_CREATE, rule, rule_id) log_mock.log.assert_called_with( logging.ERROR, callback.LOG_TEMPLATE, {'msg': 'Exception from callback', 'op': op, 'res_type': resource, 'res_id': rule_id, 'res_dict': {odl_const.ODL_SG_RULE: rule}, 'data': {odl_const.ODL_SG_RULE: rule, 'security_group_rule_id': rule_id}, 'exc_info': True}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_client.py0000644000175000017500000000416600000000000031230 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_config import fixture as config_fixture from networking_odl.common import client from neutron.tests import base class ClientTestCase(base.DietTestCase): def setUp(self): self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(mechanism_drivers=[ 'logger', 'opendaylight_v2'], group='ml2') super(ClientTestCase, self).setUp() def _set_config(self, url='http://127.0.0.1:9999', username='someuser', password='somepass'): self.cfg.config(url=url, group='ml2_odl') self.cfg.config(username=username, group='ml2_odl') self.cfg.config(password=password, group='ml2_odl') def _test_missing_config(self, **kwargs): self._set_config(**kwargs) self.assertRaisesRegex(cfg.RequiredOptError, r'value required for option \w+ in group ' r'\[ml2_odl\]', client.OpenDaylightRestClient._check_opt, cfg.CONF.ml2_odl.url) def test_valid_config(self): self._set_config() client.OpenDaylightRestClient._check_opt(cfg.CONF.ml2_odl.url) def test_missing_url_raises_exception(self): self._test_missing_config(url=None) def test_missing_username_raises_exception(self): self._test_missing_config(username=None) def test_missing_password_raises_exception(self): self._test_missing_config(password=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_filters.py0000644000175000017500000001374600000000000031426 0ustar00jamespagejamespage00000000000000# Copyright (C) 2016 Intel Corp. Isaku Yamahata # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron.tests import base from neutron_lib import constants as n_const from networking_odl.common import filters PROFILE = {"capabilities": ["switchdev"]} PROFILE_STR = '{"capabilities": ["switchdev"]}' FAKE_PORT = {'status': 'DOWN', 'binding:host_id': '', 'allowed_address_pairs': [], 'device_owner': 'fake_owner', 'binding:profile': {"capabilities": ["switchdev"]}, 'fixed_ips': [], 'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839', 'security_groups': [], 'device_id': 'fake_device', 'name': '', 'admin_state_up': True, 'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701', 'tenant_id': 'bad_tenant_id', 'binding:vif_details': {}, 'binding:vnic_type': 'normal', 'binding:vif_type': 'unbound', 'mac_address': '12:34:56:78:21:b6'} class TestFilters(base.DietTestCase): def _check_id(self, resource, project_id): filters._populate_project_id_and_tenant_id(resource) self.assertIn(resource['project_id'], project_id) self.assertIn(resource['tenant_id'], project_id) def _test_populate_project_id_and_tenant_id(self, project_id): self._check_id({'project_id': project_id}, project_id) self._check_id({'tenant_id': project_id}, project_id) self._check_id({'project_id': project_id, 'tenant_id': project_id}, project_id) def test_populate_project_id_and_tenant_id_with_id(self): self._test_populate_project_id_and_tenant_id( '01234567-890a-bcde-f012-3456789abcde') self._test_populate_project_id_and_tenant_id("") def 
test_populate_project_id_and_tenant_id_without_id(self): resource = {} filters._populate_project_id_and_tenant_id(resource) self.assertNotIn('project_id', resource) self.assertNotIn('tenant_id', resource) def test_populate_project_id_and_tenant_id_with_router(self): # test case for OpenDaylightL3RouterPlugin.delete_router() # it passes data as dependency_list as list, not dict resource0 = ['gw_port_id'] resource1 = resource0[:] filters._populate_project_id_and_tenant_id(resource1) self.assertEqual(resource0, resource1) def test_populate_project_id_and_tenant_id_with_floatingip(self): # test case for OpenDaylightL3RouterPlugin.delete_floatingip() # it passes data as dependency_list as list, not dict. resource0 = ['router_uuid', 'floatingip_uuid'] resource1 = resource0[:] filters._populate_project_id_and_tenant_id(resource1) self.assertEqual(resource0, resource1) def test_sgrule_scrub_unknown_protocol_name(self): KNOWN_PROTO_NAMES = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP, n_const.PROTO_NAME_ICMP, n_const.PROTO_NAME_IPV6_ICMP_LEGACY) for protocol_name in KNOWN_PROTO_NAMES: self.assertEqual( protocol_name, filters._sgrule_scrub_unknown_protocol_name(protocol_name)) self.assertEqual( n_const.PROTO_NUM_AH, filters._sgrule_scrub_unknown_protocol_name(n_const.PROTO_NAME_AH)) self.assertEqual("1", filters._sgrule_scrub_unknown_protocol_name("1")) def test_sgrule_scrub_icmpv6_name(self): for protocol_name in (n_const.PROTO_NAME_ICMP, n_const.PROTO_NAME_IPV6_ICMP, n_const.PROTO_NAME_IPV6_ICMP_LEGACY): sgrule = {'ethertype': n_const.IPv6, 'protocol': protocol_name} filters._sgrule_scrub_icmpv6_name(sgrule) self.assertEqual(n_const.PROTO_NAME_IPV6_ICMP_LEGACY, sgrule['protocol']) def test_convert_value_to_string(self): port = {"binding:profile": PROFILE, "other_param": ["some", "values"]} filters._convert_value_to_str(port, 'binding:profile') self.assertIs(type(port['binding:profile']), str) self.assertEqual(port['binding:profile'], PROFILE_STR) 
self.assertIsNot(type(port['other_param']), str) def test_convert_value_to_string_unicode(self): port = {"binding:profile": {u"capabilities": [u"switchdev"]}} filters._convert_value_to_str(port, "binding:profile") self.assertEqual(port["binding:profile"], PROFILE_STR) def test_convert_value_to_string_missing_key_is_logged(self): port = {} with mock.patch.object(filters, 'LOG') as mock_log: filters._convert_value_to_str(port, 'invalid_key') mock_log.warning.assert_called_once_with( "key %s is not present in dict %s", 'invalid_key', port) def _filter_port_func_binding_profile_to_string(self, func): port = copy.deepcopy(FAKE_PORT) func(port) self.assertEqual(port["binding:profile"], PROFILE_STR) def test_filter_port_create_binding_profile_string(self): self._filter_port_func_binding_profile_to_string( filters._filter_port_create) def test_filter_port_update_binding_profile_string(self): self._filter_port_func_binding_profile_to_string( filters._filter_port_update) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_lightweight_testing.py0000644000175000017500000002047000000000000034022 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests import base from oslo_config import fixture as config_fixture from networking_odl.common import lightweight_testing as lwt from networking_odl.tests import base as odl_base class LightweightTestingTestCase(base.DietTestCase): def setUp(self): self.useFixture(odl_base.OpenDaylightRestClientFixture()) self.cfg = self.useFixture(config_fixture.Config()) super(LightweightTestingTestCase, self).setUp() def test_create_client_with_lwt_enabled(self): """Have to do the importation here, otherwise there will be a loop""" from networking_odl.common import client as odl_client self.cfg.config(enable_lightweight_testing=True, group='ml2_odl') # DietTestCase does not automatically cleans configuration overrides self.addCleanup(odl_client.cfg.CONF.reset) client = odl_client.OpenDaylightRestClient.create_client() self.assertIsInstance(client, lwt.OpenDaylightLwtClient) def test_create_client_with_lwt_disabled(self): """Have to do the importation here, otherwise there will be a loop""" from networking_odl.common import client as odl_client self.cfg.config(enable_lightweight_testing=False, group='ml2_odl') # DietTestCase does not automatically cleans configuration overrides self.addCleanup(odl_client.cfg.CONF.reset) client = odl_client.OpenDaylightRestClient.create_client() self.assertIsInstance(client, odl_client.OpenDaylightRestClient) @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'networks': {}}, clear=True) def test_post_single_resource(self): client = lwt.OpenDaylightLwtClient.create_client() fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'} obj = {'networks': fake_network1} response = client.sendjson('post', 'networks', obj) self.assertEqual(lwt.NO_CONTENT, response.status_code) lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict self.assertEqual(lwt_dict['networks']['fakeid1'], fake_network1) @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'networks': {}}, clear=True) def test_post_multiple_resources(self): client = 
lwt.OpenDaylightLwtClient.create_client() fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'} fake_network2 = {'id': 'fakeid2', 'name': 'fake_network2'} obj = {'networks': [fake_network1, fake_network2]} response = client.sendjson('post', 'networks', obj) self.assertEqual(lwt.NO_CONTENT, response.status_code) lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict self.assertEqual(lwt_dict['networks']['fakeid1'], fake_network1) self.assertEqual(lwt_dict['networks']['fakeid2'], fake_network2) @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'ports': {'fakeid1': {'id': 'fakeid1', 'name': 'fake_port1'}}}, clear=True) def test_get_single_resource(self): client = lwt.OpenDaylightLwtClient.create_client() url_path = 'ports/fakeid1' response = client.sendjson('get', url_path, None) self.assertEqual(lwt.OK, response.status_code) res = response.json() # For single resource, the return value is a dict self.assertEqual(res['port']['name'], 'fake_port1') @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'ports': {'fakeid1': {'id': 'fakeid1', 'name': 'fake_port1'}, 'fakeid2': {'id': 'fakeid2', 'name': 'fake_port2'}}}, clear=True) def test_get_multiple_resources(self): client = lwt.OpenDaylightLwtClient.create_client() url_path = 'ports/' response = client.sendjson('get', url_path, None) self.assertEqual(lwt.OK, response.status_code) res = response.json() for port in res: self.assertIn(port['port']['name'], ['fake_port1', 'fake_port2']) @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'subnets': {'fakeid1': {'id': 'fakeid1', 'name': 'fake_subnet1'}}}, clear=True) def test_put_single_resource(self): client = lwt.OpenDaylightLwtClient.create_client() changed = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'} obj = {'subnets': changed} url_path = 'subnets/fakeid1' response = client.sendjson('put', url_path, obj) self.assertEqual(lwt.NO_CONTENT, response.status_code) lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict self.assertEqual('fake_subnet1_changed', 
lwt_dict['subnets']['fakeid1']['name']) """Check the client does not change the parameter""" self.assertEqual('fakeid1', changed['id']) self.assertEqual('fake_subnet1_changed', changed['name']) @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'subnets': {'fakeid1': {'id': 'fakeid1', 'name': 'fake_subnet1'}, 'fakeid2': {'id': 'fakeid2', 'name': 'fake_subnet2'}}}, clear=True) def test_put_multiple_resources(self): client = lwt.OpenDaylightLwtClient.create_client() changed1 = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'} changed2 = {'id': 'fakeid2', 'name': 'fake_subnet2_changed'} obj = {'subnets': [changed1, changed2]} url_path = 'subnets/' response = client.sendjson('put', url_path, obj) self.assertEqual(lwt.NO_CONTENT, response.status_code) lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict self.assertEqual('fake_subnet1_changed', lwt_dict['subnets']['fakeid1']['name']) self.assertEqual('fake_subnet2_changed', lwt_dict['subnets']['fakeid2']['name']) @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'networks': {'fakeid1': {'id': 'fakeid1', 'name': 'fake_network1'}}}, clear=True) def test_delete_single_resource(self): client = lwt.OpenDaylightLwtClient.create_client() url_path = 'networks/fakeid1' response = client.sendjson('delete', url_path, None) self.assertEqual(lwt.NO_CONTENT, response.status_code) lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict network = lwt_dict['networks'].get('fakeid1') self.assertIsNone(network) @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict, {'networks': {'fakeid1': {'id': 'fakeid1', 'name': 'fake_network1'}, 'fakeid2': {'id': 'fakeid2', 'name': 'fake_network2'}}}, clear=True) def test_delete_multiple_resources(self): client = lwt.OpenDaylightLwtClient.create_client() network1 = {'id': 'fakeid1'} network2 = {'id': 'fakeid2'} obj = {'networks': [network1, network2]} response = client.sendjson('delete', 'networks/', obj) self.assertEqual(lwt.NO_CONTENT, response.status_code) lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict network = 
lwt_dict['networks'].get('fakeid1') self.assertIsNone(network) network = lwt_dict['networks'].get('fakeid2') self.assertIsNone(network) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_odl_features.py0000644000175000017500000001607500000000000032430 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils from requests import exceptions from networking_odl.common.client import OpenDaylightRestClient from networking_odl.common import odl_features from networking_odl.tests import base class TestOdlFeatures(base.DietTestCase): """Basic tests for odl_features""" feature_json = """{"features": {"feature": [{"service-provider-feature": "neutron-extensions:operational-port-status"}, {"service-provider-feature": "neutron-extensions:feature-with-config", "configuration": "steal-your-face"}]}}""" feature_list = 'thing1, thing2' def setUp(self): self.features_fixture = base.OpenDaylightFeaturesFixture() self.useFixture(self.features_fixture) self.cfg = self.useFixture(config_fixture.Config()) super(TestOdlFeatures, self).setUp() self.addCleanup(odl_features.deinit) @mock.patch.object(OpenDaylightRestClient, 'request') def test_fetch_exception(self, mocked_client): mocked_client.side_effect = exceptions.ConnectionError() self.assertIsNone(odl_features._fetch_features()) @mock.patch.object(OpenDaylightRestClient, 'request') def test_fetch_404(self, mocked_client): mocked_client.return_value = mock.MagicMock(status_code=404) self.assertNotEqual(id(odl_features._fetch_features()), id(odl_features.EMPTY_FEATURES)) @mock.patch.object(OpenDaylightRestClient, 'request') def test_fetch_400(self, mocked_client): mocked_client.return_value = mock.MagicMock(status_code=400) self.assertNotEqual(id(odl_features._fetch_features()), id(odl_features.EMPTY_FEATURES)) @mock.patch.object(OpenDaylightRestClient, 'request') def test_fetch_500(self, mocked_client): mocked_client.return_value = mock.MagicMock(status_code=500) self.assertIsNone(odl_features._fetch_features()) @mock.patch.object(OpenDaylightRestClient, 'request') def test_init(self, mocked_client): self.cfg.config(odl_features=None, group='ml2_odl') self.cfg.config(odl_features_json=None, group='ml2_odl') response = mock.MagicMock() 
response.status_code = 200 response.json = mock.MagicMock( return_value=jsonutils.loads(self.feature_json)) mocked_client.return_value = response self._assert_odl_feature_config({ odl_features.OPERATIONAL_PORT_STATUS: '', 'feature-with-config': 'steal-your-face', }) def _assert_odl_feature_config(self, features): odl_features.init() for k, v in features.items(): self.assertTrue(odl_features.has(k)) self.assertEqual(odl_features.get_config(k), v) def test_init_from_config_json(self): self.cfg.config(odl_features_json=self.feature_json, group='ml2_odl') self._assert_odl_feature_config({ odl_features.OPERATIONAL_PORT_STATUS: '', 'feature-with-config': 'steal-your-face', }) @mock.patch.object(odl_features, '_fetch_features') def test_init_without_config_calls__fetch_features(self, mock_fetch): self.cfg.config(odl_features_json=None, group='ml2_odl') self.cfg.config(odl_features=None, group='ml2_odl') odl_features.init() mock_fetch.assert_called_once() @mock.patch.object(odl_features, '_fetch_features') def test_init_from_config_list(self, mock_fetch): self.cfg.config(odl_features_json=None, group='ml2_odl') self.cfg.config(odl_features=self.feature_list, group='ml2_odl') odl_features.init() self.assertTrue(odl_features.has('thing1')) self.assertTrue(odl_features.has('thing2')) mock_fetch.assert_not_called() @mock.patch.object(odl_features, '_fetch_features') def test_init_from_json_overrides_list(self, mock_fetch): self.cfg.config(odl_features=self.feature_list, group='ml2_odl') self.cfg.config(odl_features_json=self.feature_json, group='ml2_odl') odl_features.init() self.assertFalse(odl_features.has('thing1')) self.assertTrue(odl_features.has('operational-port-status')) mock_fetch.assert_not_called() @mock.patch.object(odl_features, '_fetch_features') def test_init_with_config_does_not_call__fetch_features(self, mock_fetch): self.cfg.config(odl_features_json=self.feature_json, group='ml2_odl') odl_features.init() mock_fetch.assert_not_called() def 
test_init_from_config_malformed_json_raises_exception(self): malformed_json = ")]}'" + self.feature_json self.cfg.config(odl_features_json=malformed_json, group='ml2_odl') self.assertRaises(ValueError, odl_features.init) def test_init_from_config_json_not_in_odl_format_raises_exception(self): self.cfg.config(odl_features_json='{}', group='ml2_odl') self.assertRaises(KeyError, odl_features.init) class TestOdlFeaturesNoFixture(base.DietTestCase): """Basic tests for odl_features that don't call init()""" def setUp(self): super(TestOdlFeaturesNoFixture, self).setUp() self.addCleanup(odl_features.deinit) def test_feature_configs_does_not_mutate_default_features(self): odl_features.deinit() self.assertEqual(odl_features.EMPTY_FEATURES, odl_features.feature_configs) odl_features.feature_configs['test'] = True self.assertNotEqual(odl_features.EMPTY_FEATURES, odl_features.feature_configs) def test_deinit_does_not_mutate_default_features(self): # we call it before initing anything, to force the reassignment # of the global features variable. odl_features.deinit() odl_features.feature_configs['test'] = True self.assertNotEqual(odl_features.EMPTY_FEATURES, odl_features.feature_configs) # now we do it again, to make sure that it assigns it to default # values odl_features.deinit() self.assertEqual(odl_features.EMPTY_FEATURES, odl_features.feature_configs) def test_deinit_resets_to_default_features(self): odl_features.deinit() self.assertEqual(odl_features.feature_configs, odl_features.EMPTY_FEATURES) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_postcommit.py0000644000175000017500000000507100000000000032144 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from networking_odl.common import postcommit from neutron.tests import base class BaseTest(object): def create_resource1_postcommit(self): pass update_resource1_postcommit = create_resource1_postcommit delete_resource1_postcommit = create_resource1_postcommit update_resource2_postcommit = create_resource1_postcommit delete_resource2_postcommit = create_resource1_postcommit create_resource2_postcommit = create_resource1_postcommit class TestPostCommit(base.DietTestCase): def _get_class(self, *args): @postcommit.add_postcommit(*args) class TestClass(BaseTest): pass return TestClass def _get_methods_name(self, resources): ops = ['create', 'update', 'delete'] m_names = [op + '_' + resource + '_postcommit' for op in ops for resource in resources] return m_names def test_with_one_resource(self): cls = self._get_class('resource1') m_names = self._get_methods_name(['resource1']) for m_name in m_names: self.assertEqual(m_name, getattr(cls, m_name).__name__) def test_with_two_resource(self): cls = self._get_class('resource1', 'resource2') m_names = self._get_methods_name(['resource1', 'resource2']) for m_name in m_names: self.assertEqual(m_name, getattr(cls, m_name).__name__) def test_with_two_resource_create_defined_for_one(self): m_names = self._get_methods_name(['resource1', 'resource2']) @postcommit.add_postcommit('resource1', 'resource2') class TestClass(BaseTest): def create_resource1_postcommit(self): pass create_resource1_postcommit.__name__ = 'test_method' for m_name in m_names[1:]: self.assertEqual(m_name, getattr(TestClass, m_name).__name__) 
self.assertEqual('test_method', getattr(TestClass, m_names[0]).__name__) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_utils.py0000644000175000017500000000536100000000000031110 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests import base from oslo_config import fixture as config_fixture from networking_odl.common import constants as odl_const from networking_odl.common import utils class TestUtils(base.DietTestCase): def setUp(self): self.cfg = self.useFixture(config_fixture.Config()) super(TestUtils, self).setUp() # TODO(manjeets) remove this test once neutronify is # consolidated with make_plural def test_neutronify(self): self.assertEqual('a-b-c', utils.neutronify('a_b_c')) def test_neutronify_empty(self): self.assertEqual('', utils.neutronify('')) @staticmethod def _get_resources(): return {odl_const.ODL_SG: 'security-groups', odl_const.ODL_SG_RULE: 'security-group-rules', odl_const.ODL_NETWORK: 'networks', odl_const.ODL_SUBNET: 'subnets', odl_const.ODL_ROUTER: 'routers', odl_const.ODL_PORT: 'ports', odl_const.ODL_FLOATINGIP: 'floatingips', odl_const.ODL_QOS_POLICY: 'qos/policies', odl_const.ODL_TRUNK: 'trunks', odl_const.ODL_BGPVPN: 'bgpvpns', odl_const.ODL_SFC_FLOW_CLASSIFIER: 'sfc/flowclassifiers', 
odl_const.ODL_SFC_PORT_PAIR: 'sfc/portpairs', odl_const.ODL_SFC_PORT_PAIR_GROUP: 'sfc/portpairgroups', odl_const.ODL_SFC_PORT_CHAIN: 'sfc/portchains', odl_const.ODL_L2GATEWAY: 'l2-gateways', odl_const.ODL_L2GATEWAY_CONNECTION: 'l2gateway-connections'} def test_all_resources_url(self): for obj, url in self._get_resources().items(): self.assertEqual(utils.make_url_object(obj), url) def test_get_odl_url(self): """test make uri.""" self.cfg.config(url='http://localhost:8080/controller/nb/v2/neutron', group='ml2_odl') test_path = '/restconf/neutron:neutron/hostconfigs' expected = "http://localhost:8080/restconf/neutron:neutron/hostconfigs" test_uri = utils.get_odl_url(path=test_path) self.assertEqual(expected, test_uri) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/common/test_websocket_client.py0000644000175000017500000003151400000000000033273 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils import requests import websocket from networking_odl.common.client import OpenDaylightRestClient as odl_client from networking_odl.common import websocket_client as wsc from networking_odl.tests import base class TestWebsocketClient(base.DietTestCase): """Test class for Websocket Client.""" FAKE_WEBSOCKET_STREAM_NAME_DATA = { 'output': { 'stream-name': 'data-change-event-subscription/neutron:neutron/' 'neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE' }} INVALID_WEBSOCKET_STREAM_NAME_DATA = { 'outputs': { 'stream-name': 'data-change-event-subscription/neutron:neutron/' 'neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE' }} FAKE_WEBSOCKET_SUBS_DATA = { 'location': 'ws://localhost:8185/data-change-event-subscription/' 'neutron:neutron/neutron:hostconfigs/datastore=OPERATIONAL' '/scope=SUBTREE'} ODL_URI = "http://localhost:8080/" WEBSOCKET_URI = ( "ws://localhost:8185/" + "data-change-event-subscription/neutron:neutron/" + "neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE") WEBSOCKET_SSL_URI = ( "wss://localhost:8185/" + "data-change-event-subscription/neutron:neutron/" + "neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE") mock_callback_handler = mock.MagicMock() def setUp(self): """Setup test.""" self.useFixture(base.OpenDaylightRestClientFixture()) mock.patch.object(wsc.OpenDaylightWebsocketClient, 'start_odl_websocket_thread').start() self.cfg = self.useFixture(config_fixture.Config()) super(TestWebsocketClient, self).setUp() self.mgr = wsc.OpenDaylightWebsocketClient.odl_create_websocket( TestWebsocketClient.ODL_URI, "restconf/operational/neutron:neutron/hostconfigs", wsc.ODL_OPERATIONAL_DATASTORE, wsc.ODL_NOTIFICATION_SCOPE_SUBTREE, TestWebsocketClient.mock_callback_handler ) def _get_raised_response(self, status_code): response = requests.Response() response.status_code = status_code return response @classmethod def 
_get_mock_request_response(cls, status_code): response = mock.Mock(status_code=status_code) side_effect = None # NOTE(rajivk): requests.codes.bad_request constant value is 400, # so it filters requests where client(4XX) or server(5XX) has erred. if status_code >= requests.codes.bad_request: side_effect = requests.exceptions.HTTPError() response.raise_for_status = mock.Mock(side_effect=side_effect) return response @mock.patch.object(odl_client, 'sendjson') def test_subscribe_websocket_sendjson(self, mocked_sendjson): request_response = self._get_raised_response( requests.codes.unauthorized) mocked_sendjson.return_value = request_response stream_url = self.mgr._subscribe_websocket() self.assertIsNone(stream_url) request_response = self._get_raised_response( requests.codes.bad_request) mocked_sendjson.return_value = request_response self.assertRaises(ValueError, self.mgr._subscribe_websocket) request_response = self._get_mock_request_response(requests.codes.ok) request_response.json = mock.Mock( return_value=(TestWebsocketClient. 
INVALID_WEBSOCKET_STREAM_NAME_DATA)) mocked_sendjson.return_value = request_response self.assertRaises(ValueError, self.mgr._subscribe_websocket) request_response = self._get_mock_request_response(requests.codes.ok) request_response.json = mock.Mock(return_value={""}) mocked_sendjson.return_value = request_response self.assertIsNone(self.mgr._subscribe_websocket()) @mock.patch.object(odl_client, 'get') def test_subscribe_websocket_get(self, mocked_get): request_response = self._get_raised_response(requests.codes.not_found) mocked_get.return_value = request_response self.assertRaises(ValueError, self.mgr._subscribe_websocket) request_response = self._get_raised_response( requests.codes.bad_request) mocked_get.return_value = request_response stream_url = self.mgr._subscribe_websocket() self.assertIsNone(stream_url) request_response = self._get_raised_response( requests.codes.unauthorized) mocked_get.return_value = request_response stream_url = self.mgr._subscribe_websocket() self.assertIsNone(stream_url) @mock.patch.object(odl_client, 'sendjson') @mock.patch.object(odl_client, 'get') def test_subscribe_websocket(self, mocked_get, mocked_sendjson): request_response = self._get_mock_request_response(requests.codes.ok) request_response.json = mock.Mock( return_value=TestWebsocketClient.FAKE_WEBSOCKET_STREAM_NAME_DATA) mocked_sendjson.return_value = request_response request_response = self._get_mock_request_response(requests.codes.ok) request_response.headers = TestWebsocketClient.FAKE_WEBSOCKET_SUBS_DATA mocked_get.return_value = request_response stream_url = self.mgr._subscribe_websocket() self.assertEqual(TestWebsocketClient.WEBSOCKET_URI, stream_url) @mock.patch.object(websocket, 'create_connection') def test_create_connection(self, mock_create_connection): mock_create_connection.return_value = None return_value = self.mgr._socket_create_connection("localhost") self.assertIsNone(return_value) @mock.patch.object(websocket, 'create_connection', 
side_effect=Exception("something went wrong")) def test_create_connection_handles_exception(self, mock_create_connection): self.assertIsNone(self.mgr._socket_create_connection("localhost")) def test_websocket_connect(self): self.mgr._subscribe_websocket = mock.MagicMock( return_value=TestWebsocketClient.WEBSOCKET_URI) self.mgr._socket_create_connection = mock.MagicMock(return_value=True) self.mgr._connect_ws() self.mgr._socket_create_connection.assert_called_with( TestWebsocketClient.WEBSOCKET_URI) def test_websocket_connect_ssl(self): self.mgr._subscribe_websocket = mock.MagicMock( return_value=TestWebsocketClient.WEBSOCKET_SSL_URI) self.mgr._socket_create_connection = mock.MagicMock(return_value=True) self.mgr._connect_ws() self.mgr._socket_create_connection.assert_called_with( TestWebsocketClient.WEBSOCKET_SSL_URI) def test_websocket_connect_ssl_negative_uri(self): self.mgr._subscribe_websocket = mock.MagicMock( return_value=TestWebsocketClient.WEBSOCKET_URI) self.mgr._socket_create_connection = mock.MagicMock(return_value=True) self.mgr.odl_rest_client.url = self.mgr.odl_rest_client.url.replace( 'http:', 'https:') self.mgr._connect_ws() self.mgr._socket_create_connection.assert_called_with( TestWebsocketClient.WEBSOCKET_SSL_URI) def test_run_websocket_thread(self): self.mgr._connect_ws = mock.MagicMock(return_value=None) self.cfg.config(restconf_poll_interval=0, group='ml2_odl') self.mgr.run_websocket_thread(True) assert self.mgr._connect_ws.call_count == 1 self.mgr.set_exit_flag(False) self.mgr._connect_ws = mock.MagicMock(return_value=1) with mock.patch.object(wsc, 'LOG') as mock_log: self.mgr.run_websocket_thread(True) self.assertTrue(mock_log.error.called) self.mgr.set_exit_flag(False) ws = mock.MagicMock() ws.recv.return_value = None self.mgr._connect_ws = mock.MagicMock(return_value=ws) self.mgr._close_ws = mock.MagicMock(return_value=None) with mock.patch.object(wsc, 'LOG') as mock_log: self.mgr.run_websocket_thread(True) 
self.assertTrue(mock_log.warning.called) self.mgr.set_exit_flag(False) ws = mock.MagicMock() ws.recv.return_value = "Test Data" self.mgr._connect_ws = mock.MagicMock(return_value=ws) self.mgr._close_ws = mock.MagicMock(return_value=None) self.mgr.run_websocket_thread(True) TestWebsocketClient.mock_callback_handler.assert_called_once() class TestEventDataParser(base.DietTestCase): """Test class for Websocket Client.""" # test data port status payload sample_port_status_payload = """{"notification": {"xmlns":"urn:ietf:params:xml:ns:netconf:notification:1.0", "data-changed-notification": { "xmlns": "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote", "data-change-event": [{"path": "/neutron:neutron/neutron:ports/neutron:port\ [neutron:uuid='a51e439f-4d02-4e76-9b0d-08f6c08855dd']\ /neutron:uuid", "data":{"uuid":{"xmlns":"urn:opendaylight:neutron", "content":"a51e439f-4d02-4e76-9b0d-08f6c08855dd"}}, "operation":"created"}, {"path": "/neutron:neutron/neutron:ports/neutron:port\ [neutron:uuid='a51e439f-4d02-4e76-9b0d-08f6c08855dd']\ /neutron:status", "data":{"status":{"xmlns":"urn:opendaylight:neutron", "content":"ACTIVE"}}, "operation":"created"} ]}, "eventTime":"2017-03-23T09:28:55.379-07:00"}}""" sample_port_status_payload_one_item = """{"notification": {"xmlns": "urn:ietf:params:xml:ns:netconf:notification:1.0", "data-changed-notification": { "data-change-event": { "data": { "status": { "content": "ACTIVE", "xmlns": "urn:opendaylight:neutron" }}, "operation": "updated", "path": "/neutron:neutron/neutron:ports/neutron:port\ [neutron:uuid='d6e6335d-9568-4949-aef1-4107e34c5f28']\ /neutron:status" }, "xmlns": "urn:opendaylight:params:xml:ns:yang:controller:md:\ sal:remote" }, "eventTime": "2017-02-22T02:27:32+02:00" }}""" def setUp(self): """Setup test.""" super(TestEventDataParser, self).setUp() def test_get_item_port_status_payload(self): sample = jsonutils.loads(self.sample_port_status_payload) expected_events = (sample [wsc.EventDataParser.NOTIFICATION_TAG] 
[wsc.EventDataParser.DC_NOTIFICATION_TAG] [wsc.EventDataParser.DC_EVENT_TAG]) event_0 = expected_events[0] event = wsc.EventDataParser.get_item(self.sample_port_status_payload) operation, path, data = next(event).get_fields() self.assertEqual(event_0.get('operation'), operation) self.assertEqual(event_0.get('path'), path) self.assertEqual(event_0.get('data'), data) uuid = wsc.EventDataParser.extract_field(path, "neutron:uuid") self.assertEqual("'a51e439f-4d02-4e76-9b0d-08f6c08855dd'", uuid) uuid = wsc.EventDataParser.extract_field(path, "invalidkey") self.assertIsNone(uuid) def test_get_item_port_status_payload_one_item(self): sample = jsonutils.loads(self.sample_port_status_payload_one_item) expected_events = (sample [wsc.EventDataParser.NOTIFICATION_TAG] [wsc.EventDataParser.DC_NOTIFICATION_TAG] [wsc.EventDataParser.DC_EVENT_TAG]) event = (wsc.EventDataParser. get_item(self.sample_port_status_payload_one_item)) operation, path, data = next(event).get_fields() self.assertEqual(expected_events.get('operation'), operation) self.assertEqual(expected_events.get('path'), path) self.assertEqual(expected_events.get('data'), data) uuid = wsc.EventDataParser.extract_field(path, "neutron:uuid") self.assertEqual("'d6e6335d-9568-4949-aef1-4107e34c5f28'", uuid) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.818714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/db/0000755000175000017500000000000000000000000025427 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/db/__init__.py0000644000175000017500000000000000000000000027526 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/db/test_db.py0000644000175000017500000004634100000000000027435 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from datetime import timedelta import functools import mock from neutron_lib.db import api as db_api from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.db import models from networking_odl.tests.unit import test_base_db def in_session(fn): @functools.wraps(fn) def wrapper(self, *args, **kwargs): with db_api.CONTEXT_WRITER.using(self.db_context): return fn(self, *args, **kwargs) return wrapper class DbTestCase(test_base_db.ODLBaseDbTestCase): def setUp(self): super(DbTestCase, self).setUp() # NOTE(mpeterson): Due to how the pecan lib does introspection # to find a non-decorated function, it needs a function that will # be found in the end. The line below workarounds this limitation. 
self._mock_function = mock.MagicMock() def _update_row(self, row): self.db_context.session.merge(row) self.db_context.session.flush() def _test_validate_updates(self, first_entry, second_entry, expected_deps, state=None): db.create_pending_row(self.db_context, *first_entry) if state: row = db.get_all_db_rows(self.db_context)[0] row.state = state self._update_row(row) deps = db.get_pending_or_processing_ops( self.db_context, second_entry[1], second_entry[2]) self.assertEqual(expected_deps, len(deps) != 0) def _test_retry_count(self, retry_num, max_retry, expected_retry_count, expected_state): # add new pending row db.create_pending_row(self.db_context, *self.UPDATE_ROW) # update the row with the requested retry_num row = db.get_all_db_rows(self.db_context)[0] row.retry_count = retry_num - 1 db.update_pending_db_row_retry(self.db_context, row, max_retry) # validate the state and the retry_count of the row row = db.get_all_db_rows(self.db_context)[0] self.assertEqual(expected_state, row.state) self.assertEqual(expected_retry_count, row.retry_count) def _test_retry_wrapper(self, decorated_function): # NOTE(mpeterson): we want to make sure that it's configured # to MAX_RETRIES. self.assertEqual(db_api._retry_db_errors.max_retries, db_api.MAX_RETRIES) self._test_retry_exceptions(decorated_function, self._mock_function, False) # NOTE(mpeterson): The following function serves to workaround a # limitation in the discovery mechanism of pecan lib that does not allow # us to create a generic function that decorates on the fly. 
It needs to # be decorated through the decorator directive and not via function # composition @db_api.retry_if_session_inactive() def _decorated_retry_if_session_inactive(self, context): self._mock_function() def test_retry_if_session_inactive(self): self._test_retry_wrapper(self._decorated_retry_if_session_inactive) @in_session def _test_update_row_state(self, from_state, to_state, dry_flush=False): # add new pending row db.create_pending_row(self.db_context, *self.UPDATE_ROW) mock_flush = mock.MagicMock( side_effect=self.db_context.session.flush) if dry_flush: patch_flush = mock.patch.object(self.db_context.session, 'flush', side_effect=mock_flush) row = db.get_all_db_rows(self.db_context)[0] for state in [from_state, to_state]: if dry_flush: patch_flush.start() try: # update the row state db.update_db_row_state(self.db_context, row, state, flush=not dry_flush) finally: if dry_flush: patch_flush.stop() # validate the new state row = db.get_all_db_rows(self.db_context)[0] self.assertEqual(state, row.state) return mock_flush def test_updates_same_object_uuid(self): self._test_validate_updates(self.UPDATE_ROW, self.UPDATE_ROW, True) def test_validate_updates_different_object_uuid(self): other_row = list(self.UPDATE_ROW) other_row[1] += 'a' self._test_validate_updates(self.UPDATE_ROW, other_row, False) def test_validate_updates_different_object_type(self): other_row = list(self.UPDATE_ROW) other_row[0] = odl_const.ODL_PORT other_row[1] += 'a' self._test_validate_updates(self.UPDATE_ROW, other_row, False) def test_check_for_older_ops_processing(self): self._test_validate_updates(self.UPDATE_ROW, self.UPDATE_ROW, True, state=odl_const.PROCESSING) @in_session def test_get_oldest_pending_row_none_when_no_rows(self): row = db.get_oldest_pending_db_row_with_lock(self.db_context) self.assertIsNone(row) @in_session def _test_get_oldest_pending_row_none(self, state): db.create_pending_row(self.db_context, *self.UPDATE_ROW) row = db.get_all_db_rows(self.db_context)[0] 
row.state = state self._update_row(row) row = db.get_oldest_pending_db_row_with_lock(self.db_context) self.assertIsNone(row) def test_get_oldest_pending_row_none_when_row_processing(self): self._test_get_oldest_pending_row_none(odl_const.PROCESSING) def test_get_oldest_pending_row_none_when_row_failed(self): self._test_get_oldest_pending_row_none(odl_const.FAILED) def test_get_oldest_pending_row_none_when_row_completed(self): self._test_get_oldest_pending_row_none(odl_const.COMPLETED) def test_get_oldest_pending_row(self): db.create_pending_row(self.db_context, *self.UPDATE_ROW) row = db.get_oldest_pending_db_row_with_lock(self.db_context) self.assertIsNotNone(row) self.assertEqual(odl_const.PROCESSING, row.state) @in_session def test_get_oldest_pending_row_order(self): db.create_pending_row(self.db_context, *self.UPDATE_ROW) older_row = db.get_all_db_rows(self.db_context)[0] older_row.last_retried -= timedelta(minutes=1) self._update_row(older_row) db.create_pending_row(self.db_context, *self.UPDATE_ROW) row = db.get_oldest_pending_db_row_with_lock(self.db_context) self.assertEqual(older_row, row) def _test_get_oldest_pending_row_with_dep(self, dep_state): db.create_pending_row(self.db_context, *self.UPDATE_ROW) parent_row = db.get_all_db_rows(self.db_context)[0] db.update_db_row_state(self.db_context, parent_row, dep_state) db.create_pending_row(self.db_context, *self.UPDATE_ROW, depending_on=[parent_row]) row = db.get_oldest_pending_db_row_with_lock(self.db_context) if row is not None: self.assertNotEqual(parent_row.seqnum, row.seqnum) return row def test_get_oldest_pending_row_when_dep_completed(self): row = self._test_get_oldest_pending_row_with_dep(odl_const.COMPLETED) self.assertEqual(odl_const.PROCESSING, row.state) def test_get_oldest_pending_row_when_dep_failed(self): row = self._test_get_oldest_pending_row_with_dep(odl_const.FAILED) self.assertEqual(odl_const.PROCESSING, row.state) @in_session def 
test_get_oldest_pending_row_returns_parent_when_dep_pending(self): db.create_pending_row(self.db_context, *self.UPDATE_ROW) parent_row = db.get_all_db_rows(self.db_context)[0] db.create_pending_row(self.db_context, *self.UPDATE_ROW, depending_on=[parent_row]) row = db.get_oldest_pending_db_row_with_lock(self.db_context) self.assertEqual(parent_row, row) def test_get_oldest_pending_row_none_when_dep_processing(self): row = self._test_get_oldest_pending_row_with_dep(odl_const.PROCESSING) self.assertIsNone(row) def test_get_oldest_pending_row_retries_exceptions(self): with mock.patch.object(db, 'aliased') as m: self._test_retry_exceptions(db.get_oldest_pending_db_row_with_lock, m) @in_session def _test_delete_row(self, by_row=False, by_row_id=False, dry_flush=False): db.create_pending_row(self.db_context, *self.UPDATE_ROW) db.create_pending_row(self.db_context, *self.UPDATE_ROW) rows = db.get_all_db_rows(self.db_context) self.assertEqual(len(rows), 2) row = rows[-1] params = {'flush': not dry_flush} if by_row: params['row'] = row elif by_row_id: params['row_id'] = row.seqnum mock_flush = None if dry_flush: patch_flush = mock.patch.object( self.db_context.session, 'flush', side_effect=self.db_context.session.flush ) mock_flush = patch_flush.start() try: db.delete_row(self.db_context, **params) finally: if dry_flush: patch_flush.stop() self.db_context.session.flush() rows = db.get_all_db_rows(self.db_context) self.assertEqual(len(rows), 1) self.assertNotEqual(row.seqnum, rows[0].seqnum) return mock_flush def test_delete_row_by_row(self): self._test_delete_row(by_row=True) def test_delete_row_by_row_id(self): self._test_delete_row(by_row_id=True) def test_delete_row_by_row_without_flushing(self): mock_flush = self._test_delete_row(by_row=True, dry_flush=True) mock_flush.assert_not_called() def test_create_pending_row(self): row = db.create_pending_row(self.db_context, *self.UPDATE_ROW) self.assertIsNotNone(row) rows = db.get_all_db_rows(self.db_context) 
self.assertTrue(row in rows) def _test_delete_rows_by_state_and_time(self, last_retried, row_retention, state, expected_rows, dry_delete=False): db.create_pending_row(self.db_context, *self.UPDATE_ROW) # update state and last retried row = db.get_all_db_rows(self.db_context)[-1] row.state = state row.last_retried = row.last_retried - timedelta(seconds=last_retried) self._update_row(row) if not dry_delete: db.delete_rows_by_state_and_time(self.db_context, odl_const.COMPLETED, timedelta(seconds=row_retention)) # validate the number of rows in the journal rows = db.get_all_db_rows(self.db_context) self.assertEqual(expected_rows, len(rows)) def test_delete_completed_rows_no_new_rows(self): self._test_delete_rows_by_state_and_time(0, 10, odl_const.COMPLETED, 1) def test_delete_completed_rows_one_new_row(self): self._test_delete_rows_by_state_and_time(6, 5, odl_const.COMPLETED, 0) def test_delete_completed_rows_wrong_state(self): self._test_delete_rows_by_state_and_time(10, 8, odl_const.PENDING, 1) @in_session def test_delete_completed_rows_individually(self): self._test_delete_rows_by_state_and_time( 6, 5, odl_const.COMPLETED, 1, True ) patch_delete = mock.patch.object( self.db_context.session, 'delete', side_effect=self.db_context.session.delete ) mock_delete = patch_delete.start() self.addCleanup(patch_delete.stop) self._test_delete_rows_by_state_and_time( 6, 5, odl_const.COMPLETED, 0 ) self.assertEqual(mock_delete.call_count, 2) @mock.patch.object(db, 'delete_row', side_effect=db.delete_row) def test_delete_completed_rows_without_flush(self, mock_delete_row): self._test_delete_rows_by_state_and_time(6, 5, odl_const.COMPLETED, 0) self.assertEqual({'flush': False}, mock_delete_row.call_args[1]) @in_session def _test_reset_processing_rows(self, last_retried, max_timedelta, quantity, dry_reset=False): db.create_pending_row(self.db_context, *self.UPDATE_ROW) expected_state = odl_const.PROCESSING row = db.get_all_db_rows(self.db_context)[-1] row.state = expected_state 
row.last_retried = row.last_retried - timedelta(seconds=last_retried) self._update_row(row) if not dry_reset: expected_state = odl_const.PENDING reset = db.reset_processing_rows(self.db_context, max_timedelta) self.assertIsInstance(reset, int) self.assertEqual(reset, quantity) rows = db.get_all_db_rows_by_state(self.db_context, expected_state) self.assertEqual(len(rows), quantity) for row in rows: self.assertEqual(row.state, expected_state) def test_reset_processing_rows(self): self._test_reset_processing_rows(6, 5, 1) def test_reset_processing_rows_no_new_rows(self): self._test_reset_processing_rows(0, 10, 0) @mock.patch.object(db, 'update_db_row_state', side_effect=db.update_db_row_state) def test_reset_processing_rows_individually(self, mock_update_row): self._test_reset_processing_rows(6, 5, 1, True) self._test_reset_processing_rows(6, 5, 2) self.assertEqual(mock_update_row.call_count, 2) self.assertEqual(mock_update_row.call_args[1], {'flush': False}) def test_valid_retry_count(self): self._test_retry_count(1, 1, 1, odl_const.PENDING) def test_invalid_retry_count(self): self._test_retry_count(2, 1, 1, odl_const.FAILED) def test_update_row_state_to_pending(self): self._test_update_row_state(odl_const.PROCESSING, odl_const.PENDING) def test_update_row_state_to_processing(self): self._test_update_row_state(odl_const.PENDING, odl_const.PROCESSING) def test_update_row_state_to_failed(self): self._test_update_row_state(odl_const.PROCESSING, odl_const.FAILED) def test_update_row_state_to_completed(self): self._test_update_row_state(odl_const.PROCESSING, odl_const.COMPLETED) def test_update_row_state_to_status_without_flush(self): mock_flush = self._test_update_row_state(odl_const.PROCESSING, odl_const.COMPLETED, dry_flush=True) # NOTE(mpeterson): call_count=2 because session.merge() calls flush() # and we are changing the status twice self.assertEqual(mock_flush.call_count, 2) def _test_periodic_task_lock_unlock(self, db_func, existing_state, expected_state, 
expected_result, task='test_task'): row = models.OpenDaylightPeriodicTask(state=existing_state, task=task) self.db_context.session.add(row) self.db_context.session.flush() self.assertEqual(expected_result, db_func(self.db_context, task)) row = self.db_context.session.query( models.OpenDaylightPeriodicTask).filter_by(task=task).one() self.assertEqual(expected_state, row['state']) def test_lock_periodic_task(self): self._test_periodic_task_lock_unlock(db.lock_periodic_task, odl_const.PENDING, odl_const.PROCESSING, True) def test_lock_periodic_task_fails_when_processing(self): self._test_periodic_task_lock_unlock(db.lock_periodic_task, odl_const.PROCESSING, odl_const.PROCESSING, False) def test_unlock_periodic_task(self): self._test_periodic_task_lock_unlock(db.unlock_periodic_task, odl_const.PROCESSING, odl_const.PENDING, True) def test_unlock_periodic_task_fails_when_pending(self): self._test_periodic_task_lock_unlock(db.unlock_periodic_task, odl_const.PENDING, odl_const.PENDING, False) def test_multiple_row_tasks(self): self._test_periodic_task_lock_unlock(db.unlock_periodic_task, odl_const.PENDING, odl_const.PENDING, False) def _add_tasks(self, tasks): row = [] for count, task in enumerate(tasks): row.append(models.OpenDaylightPeriodicTask(state=odl_const.PENDING, task=task)) self.db_context.session.add(row[count]) self.db_context.session.flush() rows = self.db_context.session.query( models.OpenDaylightPeriodicTask).all() self.assertEqual(len(tasks), len(rows)) def _perform_ops_on_all_rows(self, tasks, to_lock): if to_lock: curr_state = odl_const.PENDING exp_state = odl_const.PROCESSING func = db.lock_periodic_task else: exp_state = odl_const.PENDING curr_state = odl_const.PROCESSING func = db.unlock_periodic_task processed = [] for task in tasks: row = self.db_context.session.query( models.OpenDaylightPeriodicTask).filter_by(task=task).one() self.assertEqual(row['state'], curr_state) self.assertTrue(func(self.db_context, task)) rows = 
self.db_context.session.query( models.OpenDaylightPeriodicTask).filter_by().all() processed.append(task) for row in rows: if row['task'] in processed: self.assertEqual(exp_state, row['state']) else: self.assertEqual(curr_state, row['state']) self.assertFalse(func(self.db_context, tasks[-1])) def test_multiple_row_tasks_lock_unlock(self): task1 = 'test_random_task' task2 = 'random_task_random' task3 = 'task_test_random' tasks = [task1, task2, task3] self._add_tasks(tasks) self._perform_ops_on_all_rows(tasks, to_lock=True) self._perform_ops_on_all_rows(tasks, to_lock=False) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.818714 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/dhcp/0000755000175000017500000000000000000000000025760 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/dhcp/__init__.py0000644000175000017500000000000000000000000030057 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py0000644000175000017500000001012300000000000032515 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import testscenarios from networking_odl.common import constants as odl_const from networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config import cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase, self).setUp() self.cfg.config(enable_dhcp_service=True, group='ml2_odl') self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_port_create_on_v6subnet_event(self): data = self.get_network_and_subnet_context('2001:db8:abcd:0012::0/64', True, True, True, False) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port) def test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', 
True, True, True) subnet_context = data['subnet_context'] plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port = plugin.get_port(data['context'], port_id) port['fixed_ips'] = [] ports = {'port': port} plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/dhcp/test_odl_dhcp_driver_base.py0000644000175000017500000003074600000000000033524 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import testscenarios from neutron_lib import constants as n_const from neutron_lib.plugins import directory from networking_odl.dhcp import odl_dhcp_driver_base as driver_base from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests import base as odl_base from networking_odl.tests.unit import base_v2 from oslo_config import cfg from oslo_utils import uuidutils # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios ODL_TENANT_ID = uuidutils.generate_uuid() cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestBase(base_v2.OpenDaylightConfigBase): def setUp(self): self.useFixture(odl_base.OpenDaylightFeaturesFixture()) self.useFixture(odl_base.OpenDaylightJournalThreadFixture()) self.useFixture(odl_base.OpenDaylightPseudoAgentPrePopulateFixture()) super(OdlDhcpDriverTestBase, self).setUp() def get_network_and_subnet_context(self, cidr, dhcp_flag, create_subnet, create_network, ipv4=True): data = {} network_id = uuidutils.generate_uuid() subnet_id = uuidutils.generate_uuid() plugin = directory.get_plugin() data['network_id'] = network_id data['subnet_id'] = subnet_id data['context'] = self.context data['plugin'] = plugin network, network_context = \ self.get_network_context(network_id, create_network, ipv4) if create_network: data['network_context'] = network_context data['network'] = network subnet, subnet_context = \ self.get_subnet_context(network_id, subnet_id, cidr, dhcp_flag, create_subnet, ipv4) if create_subnet: data['subnet_context'] = subnet_context data['subnet'] = subnet return data def get_subnet_context(self, network_id, subnet_id, cidr, dhcp_flag, create_subnet, ipv4=True): if ipv4: index = cidr.rfind('.') + 1 ip_range = cidr[:index] cidr_end = ip_range + str(254) ipv6_ramode = None ipv6_addmode = None ipversion = 4 else: index = cidr.rfind(':') + 1 ip_range = cidr[:index] cidr_end = cidr[:index - 1] + 'ffff:ffff:ffff:fffe' ipv6_ramode 
= 'slaac' ipv6_addmode = 'slaac' ipversion = 6 current = {'ipv6_ra_mode': ipv6_ramode, 'allocation_pools': [{'start': ip_range + str(2), 'end': cidr_end}], 'host_routes': [], 'ipv6_address_mode': ipv6_addmode, 'cidr': cidr, 'id': subnet_id, 'name': '', 'enable_dhcp': dhcp_flag, 'network_id': network_id, 'tenant_id': ODL_TENANT_ID, 'project_id': ODL_TENANT_ID, 'dns_nameservers': [], 'gateway_ip': ip_range + str(1), 'ip_version': ipversion, 'shared': False} subnet = {'subnet': AttributeDict(current)} if create_subnet: plugin = directory.get_plugin() result, subnet_context = plugin._create_subnet_db(self.context, subnet) return subnet, subnet_context else: return subnet def get_network_context(self, network_id, create_network, ipv4=True): netwrk = 'netv4' if not ipv4: netwrk = 'netv6' current = {'status': 'ACTIVE', 'subnets': [], 'name': netwrk, 'provider:physical_network': None, 'admin_state_up': True, 'tenant_id': ODL_TENANT_ID, 'project_id': ODL_TENANT_ID, 'provider:network_type': 'local', 'router:external': False, 'shared': False, 'id': network_id, 'provider:segmentation_id': None} network = {'network': AttributeDict(current)} if create_network: plugin = directory.get_plugin() result, network_context = plugin._create_network_db( self.context, network) return [network, network_context] return network def get_port_id(self, plugin, plugin_context, network_id, subnet_id): device_id = driver_base.OPENDAYLIGHT_DEVICE_ID + '-' + subnet_id filters = { 'network_id': [network_id], 'device_id': [device_id], 'device_owner': [n_const.DEVICE_OWNER_DHCP] } ports = plugin.get_ports(plugin_context, filters=filters) if ports: port = ports[0] return port['id'] class OdlDhcpDriverBaseTestCase(OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverBaseTestCase, self).setUp() def test_dhcp_driver_not_loaded_without_flag(self): mech = mech_driver_v2.OpenDaylightMechanismDriver() mech.initialize() args = [mech, 'dhcp_driver'] self.assertRaises(AttributeError, getattr, *args) def 
test_dhcp_port_create(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.10.0/24', True, True, True) dhcp_driver.create_or_delete_dhcp_port(data['subnet_context']) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_port_create_v6network(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('2001:db8:abcd:0012::0/64', True, True, True, False) dhcp_driver.create_or_delete_dhcp_port(data['subnet_context']) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port) def test_dhcp_create_without_dhcp_flag(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.20.0/24', False, True, True) dhcp_driver.create_or_delete_dhcp_port(data['subnet_context']) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port) def test_dhcp_port_create_with_multiple_create_request(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.30.0/24', True, True, True) dhcp_driver.create_or_delete_dhcp_port(data['subnet_context']) dhcp_driver.create_or_delete_dhcp_port(data['subnet_context']) # If there are multiple ports will one_or_none wiill throw error # MultipleResultsFound port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_update_from_disable_to_enable(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.40.0/24', False, True, True) subnet_context = data['subnet_context'] dhcp_driver.create_or_delete_dhcp_port(subnet_context) subnet_context.current['enable_dhcp'] = True dhcp_driver.create_or_delete_dhcp_port(subnet_context) port = self.get_port_id(data['plugin'], 
data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_update_from_enable_to_enable(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] dhcp_driver.create_or_delete_dhcp_port(subnet_context) subnet_context.current['enable_dhcp'] = True dhcp_driver.create_or_delete_dhcp_port(subnet_context) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_update_from_enable_to_disable(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.60.0/24', True, True, True) subnet_context = data['subnet_context'] dhcp_driver.create_or_delete_dhcp_port(subnet_context) subnet_context.current['enable_dhcp'] = False dhcp_driver.create_or_delete_dhcp_port(subnet_context) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port) def test_dhcp_update_from_disable_to_disable(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.70.0/24', False, True, True) subnet_context = data['subnet_context'] dhcp_driver.create_or_delete_dhcp_port(subnet_context) subnet_context.current['enable_dhcp'] = False dhcp_driver.create_or_delete_dhcp_port(subnet_context) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port) def test_dhcp_delete_when_dhcp_enabled(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.80.0/24', True, True, True) subnet_context = data['subnet_context'] dhcp_driver.create_or_delete_dhcp_port(subnet_context) subnet_context.current['enable_dhcp'] = False dhcp_driver.create_or_delete_dhcp_port(subnet_context) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], 
data['subnet_id']) self.assertIsNone(port) def test_dhcp_delete_when_dhcp_delete(self): dhcp_driver = driver_base.OdlDhcpDriverBase() data = self.get_network_and_subnet_context('10.0.90.0/24', False, True, True) subnet_context = data['subnet_context'] dhcp_driver.create_or_delete_dhcp_port(subnet_context) dhcp_driver.create_or_delete_dhcp_port(subnet_context) port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port) class AttributeDict(dict): def __init__(self, *args, **kwargs): super(AttributeDict, self).__init__(*args, **kwargs) self.__dict__ = self ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/0000755000175000017500000000000000000000000026514 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/__init__.py0000644000175000017500000000000000000000000030613 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/helper.py0000644000175000017500000000310400000000000030343 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from networking_odl.journal import base_driver TEST_UUID = 'bd8db3a8-2b30-4083-a8b3-b3fd46401142' TEST_PLUGIN = 'test_plugin' TEST_RESOURCE1 = 'test_resource1' TEST_RESOURCE2 = 'test_resource2' TEST_RESOURCE1_SUFFIX = 'test_resource1s' TEST_RESOURCE2_SUFFIX = 'test_resource2s' INVALID_RESOURCE = 'invalid_resource' INVALID_PLUGIN = 'invalid_plugin' INVALID_METHOD = 'invalid_method_name' class TestPlugin(object): def get_test_resource1s(self, context): return [{'id': 'test_id1'}, {'id': 'test_id2'}] def get_test_resource2s(self, context): return [{'id': 'test_id3'}, {'id': 'test_id4'}] def get_test_resource1(self, context, id_): return {'id': id_} class TestDriver(base_driver.ResourceBaseDriver): RESOURCES = { TEST_RESOURCE1: TEST_RESOURCE1_SUFFIX, TEST_RESOURCE2: TEST_RESOURCE2_SUFFIX } plugin_type = TEST_PLUGIN def __init__(self): super(TestDriver, self).__init__() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/test_base_driver.py0000644000175000017500000000720400000000000032415 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 NEC Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.plugins import directory from networking_odl.common import constants from networking_odl.common import exceptions from networking_odl.db import db from networking_odl.journal import base_driver from networking_odl.tests.unit.journal import helper from networking_odl.tests.unit import test_base_db class BaseDriverTestCase(test_base_db.ODLBaseDbTestCase): def setUp(self): super(BaseDriverTestCase, self).setUp() self.test_driver = helper.TestDriver() self.plugin = helper.TestPlugin() directory.add_plugin(helper.TEST_PLUGIN, self.plugin) self.addCleanup(directory.add_plugin, helper.TEST_PLUGIN, None) def test_get_resource_driver(self): for resource, resource_suffix in self.test_driver.RESOURCES.items(): driver = base_driver.get_driver(resource) self.assertEqual(driver, self.test_driver) self.assertEqual(driver.plugin_type, helper.TEST_PLUGIN) self.assertEqual(self.test_driver.RESOURCES.get(resource), resource_suffix) def non_existing_plugin_cleanup(self): self.test_driver.plugin_type = helper.TEST_PLUGIN def test_non_existing_plugin(self): self.test_driver.plugin_type = helper.INVALID_PLUGIN self.addCleanup(self.non_existing_plugin_cleanup) self.assertIsNone(self.test_driver.plugin) def test_get_non_existing_resource_driver(self): self.assertRaises(exceptions.ResourceNotRegistered, base_driver.get_driver, helper.INVALID_RESOURCE) def test_get_resources_for_full_sync(self): received_resources = self.test_driver.get_resources_for_full_sync( self.db_context, helper.TEST_RESOURCE1) resources = self.plugin.get_test_resource1s(self.db_context) for resource in resources: self.assertIn(resource, received_resources) def test_get_non_existing_resources_for_full_sync(self): self.assertRaises(exceptions.UnsupportedResourceType, self.test_driver.get_resources_for_full_sync, self.db_context, helper.INVALID_RESOURCE) def test_get_resource(self): row = db.create_pending_row(self.db_context, helper.TEST_RESOURCE1, helper.TEST_UUID, constants.ODL_CREATE, {'id': 
helper.TEST_UUID}) resource = self.test_driver.get_resource_for_recovery(self.db_context, row) self.assertEqual(resource['id'], helper.TEST_UUID) def test_get_unsupported_resource(self): row = db.create_pending_row(self.db_context, helper.INVALID_RESOURCE, helper.TEST_UUID, constants.ODL_CREATE, {'id': helper.TEST_UUID}) self.assertRaises(exceptions.PluginMethodNotFound, self.test_driver.get_resource_for_recovery, self.db_context, row) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/test_cleanup.py0000644000175000017500000000367300000000000031565 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2017 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import mock from networking_odl.db import db from networking_odl.journal import cleanup from networking_odl.tests.unit import test_base_db class JournalCleanupTestCase(test_base_db.ODLBaseDbTestCase): def test_delete_completed_rows_retries_exceptions(self): self.cfg.config(completed_rows_retention=1, group='ml2_odl') with mock.patch.object(db, 'delete_rows_by_state_and_time') as m: self._test_retry_exceptions( cleanup.delete_completed_rows, m) def test_cleanup_processsing_rows_retries_exceptions(self): with mock.patch.object(db, 'reset_processing_rows') as m: self._test_retry_exceptions( cleanup.cleanup_processing_rows, m) @mock.patch.object(db, 'delete_rows_by_state_and_time') def _test_delete_completed_rows(self, retention, expected_call, mock_db): self.cfg.config(completed_rows_retention=retention, group='ml2_odl') cleanup.delete_completed_rows(self.db_context) self.assertEqual(expected_call, mock_db.called) def test_delete_completed_rows_with_retention(self): self._test_delete_completed_rows(1, True) def test_delete_completed_rows_zero_retention(self): self._test_delete_completed_rows(0, False) def test_delete_completed_rows_indefinite_retention(self): self._test_delete_completed_rows(-1, False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/test_dependency_validations.py0000644000175000017500000005552100000000000034650 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Intel Corp. Isaku Yamahata # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import testscenarios from networking_odl.common import constants as const from networking_odl.db import db from networking_odl.journal import dependency_validations from networking_odl.tests.unit import test_base_db load_tests = testscenarios.load_tests_apply_scenarios _NET_ID = 'NET_ID' _NET_DATA = {'id': _NET_ID} _SUBNET_ID = 'SUBNET_ID' _SUBNET_DATA = {'network_id': _NET_ID} _PORT_ID = 'PORT_ID' _PORT_DATA = {'network_id': _NET_ID, 'fixed_ips': [{'subnet_id': _SUBNET_ID}]} _PORT_DATA_DUPLICATE_SUBNET = { 'network_id': _NET_ID, 'fixed_ips': [{'subnet_id': _SUBNET_ID}, {'subnet_id': _SUBNET_ID}] } _ROUTER_ID = 'ROUTER_ID' _ROUTER_DATA = {'id': 'ROUTER_ID', 'gw_port_id': 'GW_PORT_ID'} _L2GW_ID = 'l2gw_id' _L2GW_DATA = {'id': _L2GW_ID} _L2GWCONN_ID = 'l2gwconn_id' _L2GWCONN_DATA = {'id': _L2GWCONN_ID, 'network_id': _NET_ID, 'gateway_id': _L2GW_ID} _TRUNK_ID = 'TRUNK_ID' _SUBPORT_ID = 'CPORT_ID' _TRUNK_DATA = {'trunk_id': _TRUNK_ID, 'port_id': _PORT_ID, 'sub_ports': [{'port_id': _SUBPORT_ID}]} _BGPVPN_ID = 'BGPVPN_ID' _SG_ID = 'SG_ID' _SG_DATA = {'id': _SG_ID} _SG_RULE_ID = 'SG_RULE_ID' _SG_RULE_DATA = {'id': _SG_RULE_ID, 'security_group_id': _SG_ID} def get_data(res_type, operation): if res_type == const.ODL_NETWORK: return [_NET_DATA] elif res_type == const.ODL_SUBNET: if operation == const.ODL_DELETE: return [[_NET_ID]] return [_SUBNET_DATA] elif res_type == const.ODL_PORT: # TODO(yamahata): test case of (ODL_port, ODL_DELETE) is missing if operation == const.ODL_DELETE: return [[_NET_ID, _SUBNET_ID]] return [_PORT_DATA, _PORT_DATA_DUPLICATE_SUBNET] elif res_type 
== const.ODL_ROUTER: return [_ROUTER_DATA] elif res_type == const.ODL_L2GATEWAY: return [_L2GW_DATA] elif res_type == const.ODL_L2GATEWAY_CONNECTION: return [_L2GWCONN_DATA] elif res_type == const.ODL_TRUNK: if operation == const.ODL_DELETE: return [[_PORT_ID, _SUBPORT_ID]] return [_TRUNK_DATA] elif res_type == const.ODL_BGPVPN: if operation == const.ODL_DELETE: return [[_NET_ID, _ROUTER_ID]] else: routers = [] networks = [] if operation == const.ODL_UPDATE: routers = [_ROUTER_ID] networks = [_NET_ID] return [{'id': _BGPVPN_ID, 'networks': networks, 'routers': routers, 'route_distinguishers': ['100:1']}] elif res_type == const.ODL_SG: return [_SG_DATA] elif res_type == const.ODL_SG_RULE: if operation == const.ODL_DELETE: return [[_SG_RULE_ID]] return [_SG_RULE_DATA] return [[]] def subnet_fail_network_dep(net_op, subnet_op): return {'expected': 1, 'first_type': const.ODL_NETWORK, 'first_operation': net_op, 'first_id': _NET_ID, 'second_type': const.ODL_SUBNET, 'second_operation': subnet_op, 'second_id': _SUBNET_ID} def subnet_succeed_network_dep(net_op, subnet_op): return {'expected': 0, 'first_type': const.ODL_SUBNET, 'first_operation': subnet_op, 'first_id': _SUBNET_ID, 'second_type': const.ODL_NETWORK, 'second_operation': net_op, 'second_id': _NET_ID} # TODO(vthapar) add tests for l2gw dependency validations class BaseDependencyValidationsTestCase(object): def test_dependency(self): db.create_pending_row( self.db_context, self.first_type, self.first_id, self.first_operation, get_data(self.first_type, self.first_operation)) for data in get_data(self.second_type, self.second_operation): deps = dependency_validations.calculate( self.db_context, self.second_operation, self.second_type, self.second_id, data) self.assertEqual(self.expected, len(deps)) class SubnetDependencyValidationsTestCase( test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): scenarios = ( ("subnet_create_depends_on_older_network_create", subnet_fail_network_dep(const.ODL_CREATE, 
const.ODL_CREATE)), ("subnet_create_depends_on_older_network_update", subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("subnet_create_depends_on_older_network_delete", subnet_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)), ("subnet_create_doesnt_depend_on_newer_network_create", subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)), ("subnet_create_doesnt_depend_on_newer_network_update", subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("subnet_create_doesnt_depend_on_newer_network_delete", subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)), ("subnet_update_depends_on_older_network_create", subnet_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), ("subnet_update_depends_on_older_network_update", subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), ("subnet_update_depends_on_older_network_delete", subnet_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), ("subnet_update_doesnt_depend_on_newer_network_create", subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), ("subnet_update_doesnt_depend_on_newer_network_update", subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), ("subnet_update_doesnt_depend_on_newer_network_delete", subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), ("subnet_delete_doesnt_depend_on_older_network_create", subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)), ("subnet_delete_doesnt_depend_on_older_network_update", subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)), ("subnet_delete_doesnt_depend_on_newer_network_create", subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)), ("subnet_delete_doesnt_depend_on_newer_network_update", subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)), ) def security_rule_fail_security_group_dep(sg_op, sgr_op): return {'expected': 1, 'first_type': const.ODL_SG, 'first_operation': sg_op, 'first_id': _SG_ID, 'second_type': const.ODL_SG_RULE, 
'second_operation': sgr_op, 'second_id': _SG_RULE_ID} def security_rule_succeed_security_group_dep(sg_op, sgr_op): return {'expected': 0, 'first_type': const.ODL_SG_RULE, 'first_operation': sgr_op, 'first_id': _SG_RULE_ID, 'second_type': const.ODL_SG, 'second_operation': sg_op, 'second_id': _SG_ID} class SecurityRuleDependencyValidationsTestCase( test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): scenarios = ( ("security_rule_create_depends_on_older_security_group_create", security_rule_fail_security_group_dep(const.ODL_CREATE, const.ODL_CREATE)), ("security_rule_create_depends_on_older_security_group_update", security_rule_fail_security_group_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("security_rule_create_depends_on_older_security_group_delete", security_rule_fail_security_group_dep(const.ODL_DELETE, const.ODL_CREATE)), ("security_rule_create_doesnt_depend_on_newer_security_group_create", security_rule_succeed_security_group_dep(const.ODL_CREATE, const.ODL_CREATE)), ("security_rule_create_doesnt_depend_on_newer_security_group_update", security_rule_succeed_security_group_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("security_rule_create_doesnt_depend_on_newer_security_group_delete", security_rule_succeed_security_group_dep(const.ODL_DELETE, const.ODL_CREATE)), ("security_rule_update_depends_on_older_security_group_create", security_rule_fail_security_group_dep(const.ODL_CREATE, const.ODL_UPDATE)), ("security_rule_update_depends_on_older_security_group_update", security_rule_fail_security_group_dep(const.ODL_UPDATE, const.ODL_UPDATE)), ("security_rule_update_depends_on_older_security_group_delete", security_rule_fail_security_group_dep(const.ODL_DELETE, const.ODL_UPDATE)), ("security_rule_update_doesnt_depend_on_newer_security_group_create", security_rule_succeed_security_group_dep(const.ODL_CREATE, const.ODL_UPDATE)), ("security_rule_update_doesnt_depend_on_newer_security_group_update", security_rule_succeed_security_group_dep(const.ODL_UPDATE, 
const.ODL_UPDATE)), ("security_rule_update_doesnt_depend_on_newer_security_group_delete", security_rule_succeed_security_group_dep(const.ODL_DELETE, const.ODL_UPDATE)), ("security_rule_delete_doesnt_depend_on_older_security_group_create", security_rule_succeed_security_group_dep(const.ODL_CREATE, const.ODL_DELETE)), ("security_rule_delete_doesnt_depend_on_older_security_group_update", security_rule_succeed_security_group_dep(const.ODL_UPDATE, const.ODL_DELETE)), ("security_rule_delete_doesnt_depend_on_newer_security_group_create", security_rule_succeed_security_group_dep(const.ODL_CREATE, const.ODL_DELETE)), ("security_rule_delete_doesnt_depend_on_newer_security_group_update", security_rule_succeed_security_group_dep(const.ODL_UPDATE, const.ODL_DELETE)), ) def port_fail_network_dep(net_op, port_op): return {'expected': 1, 'first_type': const.ODL_NETWORK, 'first_operation': net_op, 'first_id': _NET_ID, 'second_type': const.ODL_PORT, 'second_operation': port_op, 'second_id': _PORT_ID} def port_succeed_network_dep(net_op, port_op): return {'expected': 0, 'first_type': const.ODL_PORT, 'first_operation': port_op, 'first_id': _PORT_ID, 'second_type': const.ODL_NETWORK, 'second_operation': net_op, 'second_id': _NET_ID} def port_fail_subnet_dep(subnet_op, port_op): return {'expected': 1, 'first_type': const.ODL_SUBNET, 'first_operation': subnet_op, 'first_id': _SUBNET_ID, 'second_type': const.ODL_PORT, 'second_operation': port_op, 'second_id': _PORT_ID} def port_succeed_subnet_dep(subnet_op, port_op): return {'expected': 0, 'first_type': const.ODL_PORT, 'first_operation': port_op, 'first_id': _PORT_ID, 'second_type': const.ODL_SUBNET, 'second_operation': subnet_op, 'second_id': _SUBNET_ID} class PortDependencyValidationsTestCase( test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): scenarios = ( ("port_create_depends_on_older_network_create", port_fail_network_dep(const.ODL_CREATE, const.ODL_CREATE)), ("port_create_depends_on_older_network_update", 
port_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("port_create_depends_on_older_network_delete", port_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)), ("port_create_doesnt_depend_on_newer_network_create", port_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)), ("port_create_doesnt_depend_on_newer_network_update", port_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("port_create_doesnt_depend_on_newer_network_delete", port_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)), ("port_update_depends_on_older_network_create", port_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), ("port_update_depends_on_older_network_update", port_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), ("port_update_depends_on_older_network_delete", port_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), ("port_update_doesnt_depend_on_newer_network_create", port_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)), ("port_update_doesnt_depend_on_newer_network_update", port_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)), ("port_update_doesnt_depend_on_newer_network_delete", port_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)), ("port_create_depends_on_older_subnet_create", port_fail_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)), ("port_create_depends_on_older_subnet_update", port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("port_create_depends_on_older_subnet_delete", port_fail_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)), ("port_create_doesnt_depend_on_newer_subnet_create", port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)), ("port_create_doesnt_depend_on_newer_subnet_update", port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)), ("port_create_doesnt_depend_on_newer_subnet_delete", port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)), ("port_update_depends_on_older_subnet_create", port_fail_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)), 
("port_update_depends_on_older_subnet_update", port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)), ("port_update_depends_on_older_subnet_delete", port_fail_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)), ("port_update_doesnt_depend_on_newer_subnet_create", port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)), ("port_update_doesnt_depend_on_newer_subnet_update", port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)), ("port_update_doesnt_depend_on_newer_subnet_delete", port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)), ) def trunk_dep(first_type, second_type, first_op, second_op, result, sub_port=False): expected = {'fail': 1, 'pass': 0} port_id = _SUBPORT_ID if sub_port else _PORT_ID type_id = {const.ODL_PORT: port_id, const.ODL_TRUNK: _TRUNK_ID} return {'expected': expected[result], 'first_type': first_type, 'first_operation': first_op, 'first_id': type_id[first_type], 'second_type': second_type, 'second_operation': second_op, 'second_id': type_id[second_type]} class TrunkDependencyValidationsTestCase( test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): scenarios = ( ("trunk_create_depends_on_older_port_create", trunk_dep(const.ODL_PORT, const.ODL_TRUNK, const.ODL_CREATE, const.ODL_CREATE, 'fail')), ("trunk_create_doesnt_depend_on_newer_port_create", trunk_dep(const.ODL_TRUNK, const.ODL_PORT, const.ODL_CREATE, const.ODL_CREATE, 'pass')), ("trunk_create_doesnt_depend_on_port_update", trunk_dep(const.ODL_TRUNK, const.ODL_PORT, const.ODL_CREATE, const.ODL_UPDATE, 'pass')), ("trunk_create_doesnt_depend_on_newer_port_delete", trunk_dep(const.ODL_TRUNK, const.ODL_PORT, const.ODL_CREATE, const.ODL_DELETE, 'pass')), # TODO(vthapar): add more/better validations for subport # trunk update means subport add/delete ("trunk_update_depends_on_older_trunk_create", trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK, const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)), ("trunk_update_depends_on_older_port_create", trunk_dep(const.ODL_PORT, 
const.ODL_TRUNK, const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)), ("trunk_update_doesnt_depend_on_newer_port_create", trunk_dep(const.ODL_TRUNK, const.ODL_PORT, const.ODL_UPDATE, const.ODL_CREATE, 'pass', True)), ("trunk_update_doesnt_depend_on_port_update", trunk_dep(const.ODL_TRUNK, const.ODL_PORT, const.ODL_UPDATE, const.ODL_UPDATE, 'pass', True)), ("trunk_update_doesnt_depend_on_newer_port_delete", trunk_dep(const.ODL_TRUNK, const.ODL_PORT, const.ODL_UPDATE, const.ODL_DELETE, 'pass', True)), # trunk delete cases ("trunk_delete_depends_on_older_trunk_create", trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK, const.ODL_CREATE, const.ODL_DELETE, 'fail', True)), ("trunk_delete_depends_on_older_trunk_update", trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK, const.ODL_UPDATE, const.ODL_DELETE, 'fail', True)), ("trunk_delete_doesnt_depend_on_older_port_create", trunk_dep(const.ODL_PORT, const.ODL_TRUNK, const.ODL_CREATE, const.ODL_DELETE, 'pass')), ) def l2gw_dep(first_type, second_type, first_op, second_op, result): expected = {'fail': 1, 'pass': 0} type_id = {const.ODL_NETWORK: _NET_ID, const.ODL_L2GATEWAY: _L2GW_ID, const.ODL_L2GATEWAY_CONNECTION: _L2GWCONN_ID} return {'expected': expected[result], 'first_type': first_type, 'first_operation': first_op, 'first_id': type_id[first_type], 'second_type': second_type, 'second_operation': second_op, 'second_id': type_id[second_type]} class L2GWDependencyValidationsTestCase( test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): scenarios = ( ("L2GWConn_create_depends_on_older_network_create", l2gw_dep(const.ODL_NETWORK, const.ODL_L2GATEWAY_CONNECTION, const.ODL_CREATE, const.ODL_CREATE, 'fail')), ("L2GWConn_create_depends_on_older_L2GW_create", l2gw_dep(const.ODL_L2GATEWAY, const.ODL_L2GATEWAY_CONNECTION, const.ODL_CREATE, const.ODL_CREATE, 'fail')), ("L2GWConn_create_doesnt_depend_on_newer_network_create", l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_NETWORK, const.ODL_CREATE, const.ODL_CREATE, 'pass')), 
("L2GWConn_create_doesnt_depend_on_newer_L2GW_create", l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_L2GATEWAY, const.ODL_CREATE, const.ODL_CREATE, 'pass')), ) # TODO(vthapar): Refactor *_dep into a common method def bgpvpn_dep(first_type, second_type, first_op, second_op, result): expected = {'fail': 1, 'pass': 0} type_id = {const.ODL_NETWORK: _NET_ID, const.ODL_ROUTER: _ROUTER_ID, const.ODL_BGPVPN: _BGPVPN_ID} return {'expected': expected[result], 'first_type': first_type, 'first_operation': first_op, 'first_id': type_id[first_type], 'second_type': second_type, 'second_operation': second_op, 'second_id': type_id[second_type]} class BGPVPNDependencyValidationsTestCase( test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase): scenarios = ( ("bgpvpn_create_doesnt_depend_on_older_network_create", bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_CREATE, 'pass')), ("bgpvpn_create_doesnt_depend_on_newer_network_create", bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK, const.ODL_CREATE, const.ODL_CREATE, 'pass')), ("bgpvpn_create_doesnt_depend_on_older_router_create", bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_CREATE, 'pass')), ("bgpvpn_create_doesnt_depend_on_newer_router_create", bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER, const.ODL_CREATE, const.ODL_CREATE, 'pass')), ("bgpvpn_update_depends_on_older_bgpvpn_create", bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_UPDATE, 'fail')), ("bgpvpn_update_depends_on_older_network_create", bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_UPDATE, 'fail')), ("bgpvpn_update_doesnt_depend_on_newer_network_create", bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK, const.ODL_UPDATE, const.ODL_CREATE, 'pass')), ("bgpvpn_update_depends_on_older_router_create", bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_UPDATE, 'fail')), ("bgpvpn_update_doesnt_depend_on_newer_router_create", 
bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER, const.ODL_UPDATE, const.ODL_CREATE, 'pass')), # bgpvpn delete cases ("bgpvpn_delete_depends_on_older_bgpvpn_create", bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_DELETE, 'fail')), ("bgpvpn_delete_depends_on_older_bgpvpn_update", bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN, const.ODL_UPDATE, const.ODL_DELETE, 'fail')), ("bgpvpn_delete_doesnt_depend_on_older_network_create", bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_DELETE, 'pass')), ("bgpvpn_delete_doesnt_depend_on_older_router_create", bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN, const.ODL_CREATE, const.ODL_DELETE, 'pass')), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/test_full_sync.py0000644000175000017500000004647700000000000032145 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import mock import requests from networking_l2gw.services.l2gateway.common import constants as l2gw_const from networking_sfc.extensions import flowclassifier as fc_const from networking_sfc.extensions import sfc as sfc_const from neutron_lib.api.definitions import bgpvpn as bgpvpn_const from neutron_lib.callbacks import resources from neutron_lib.plugins import constants from neutron_lib.plugins import directory from networking_odl.bgpvpn import odl_v2 as bgpvpn_driver from networking_odl.common import constants as odl_const from networking_odl.common import exceptions from networking_odl.db import db from networking_odl.journal import base_driver from networking_odl.journal import full_sync from networking_odl.journal import journal from networking_odl.l2gateway import driver_v2 as l2gw_driver from networking_odl.l3 import l3_odl_v2 from networking_odl.ml2 import mech_driver_v2 from networking_odl.qos import qos_driver_v2 as qos_driver from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v2 from networking_odl.sfc import sfc_driver_v2 as sfc_driver from networking_odl.tests import base from networking_odl.tests.unit.journal import helper from networking_odl.tests.unit import test_base_db from networking_odl.trunk import trunk_driver_v2 as trunk_driver class FullSyncTestCase(test_base_db.ODLBaseDbTestCase): def setUp(self): self.useFixture( base.OpenDaylightRestClientGlobalFixture(full_sync._CLIENT)) super(FullSyncTestCase, self).setUp() self._CLIENT = full_sync._CLIENT.get_client() self.addCleanup(full_sync.FULL_SYNC_RESOURCES.clear) # NOTE(rajivk) workaround, Fixture defined are executed after complete # tests cases, but cleanup is needed after each test case. 
self.addCleanup(self._clean_registered_plugins) def _clean_registered_plugins(self): for plugin_type in self._get_all_plugins().keys(): directory.add_plugin(plugin_type, None) def test_no_full_sync_when_canary_exists(self): full_sync.full_sync(self.db_context) self.assertEqual([], db.get_all_db_rows(self.db_context)) def _filter_out_canary(self, rows): return [row for row in rows if row['object_uuid'] != full_sync._CANARY_NETWORK_ID] def _mock_l2_resources(self): expected_journal = {odl_const.ODL_NETWORK: '1', odl_const.ODL_SUBNET: '2', odl_const.ODL_PORT: '3'} network_id = expected_journal[odl_const.ODL_NETWORK] plugin = mock.Mock() plugin.get_networks.return_value = [{'id': network_id}] plugin.get_subnets.return_value = [ {'id': expected_journal[odl_const.ODL_SUBNET], 'network_id': network_id}] port = {'id': expected_journal[odl_const.ODL_PORT], odl_const.ODL_SGS: None, 'tenant_id': '123', 'fixed_ips': [], 'network_id': network_id} plugin.get_ports.side_effect = ([port], []) directory.add_plugin(constants.CORE, plugin) return expected_journal def _test_no_full_sync_when_canary_in_journal(self, state): self._mock_canary_missing() self._mock_l2_resources() db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, full_sync._CANARY_NETWORK_ID, odl_const.ODL_CREATE, {}) row = db.get_all_db_rows(self.db_context)[0] db.update_db_row_state(self.db_context, row, state) full_sync.full_sync(self.db_context) rows = db.get_all_db_rows(self.db_context) self.assertEqual([], self._filter_out_canary(rows)) def test_no_full_sync_when_canary_pending_creation(self): self._test_no_full_sync_when_canary_in_journal(odl_const.PENDING) def test_no_full_sync_when_canary_is_processing(self): self._test_no_full_sync_when_canary_in_journal(odl_const.PROCESSING) @staticmethod def _get_all_resources(): return ( (odl_const.ODL_SG, constants.CORE), (odl_const.ODL_SG_RULE, constants.CORE), (odl_const.ODL_NETWORK, constants.CORE), (odl_const.ODL_SUBNET, constants.CORE), (odl_const.ODL_ROUTER, 
constants.L3), (odl_const.ODL_PORT, constants.CORE), (odl_const.ODL_FLOATINGIP, constants.L3), (odl_const.ODL_QOS_POLICY, constants.QOS), (odl_const.ODL_TRUNK, resources.TRUNK), (odl_const.ODL_BGPVPN, bgpvpn_const.ALIAS), (odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION, bgpvpn_const.ALIAS), (odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION, bgpvpn_const.ALIAS), (odl_const.ODL_SFC_FLOW_CLASSIFIER, fc_const.FLOW_CLASSIFIER_EXT), (odl_const.ODL_SFC_PORT_PAIR, sfc_const.SFC_EXT), (odl_const.ODL_SFC_PORT_PAIR_GROUP, sfc_const.SFC_EXT), (odl_const.ODL_SFC_PORT_CHAIN, sfc_const.SFC_EXT), (odl_const.ODL_L2GATEWAY, l2gw_const.L2GW), (odl_const.ODL_L2GATEWAY_CONNECTION, l2gw_const.L2GW)) @mock.patch.object(db, 'delete_pending_rows') @mock.patch.object(full_sync, '_full_sync_needed') @mock.patch.object(full_sync, '_sync_resources') @mock.patch.object(journal, 'record') def test_sync_resource_order( self, record_mock, _sync_resources_mock, _full_sync_needed_mock, delete_pending_rows_mock): all_resources = self._get_all_resources() full_sync.FULL_SYNC_RESOURCES = {resource_type: mock.Mock() for resource_type, _ in all_resources} _full_sync_needed_mock._full_sync_needed.return_value = True context = mock.MagicMock() full_sync.full_sync(context) _sync_resources_mock.assert_has_calls( [mock.call(mock.ANY, object_type, mock.ANY) for object_type, _ in all_resources]) def test_client_error_propagates(self): class TestException(Exception): def __init__(self): pass self._CLIENT.get.side_effect = TestException() self.assertRaises(TestException, full_sync.full_sync, self.db_context) def _mock_canary_missing(self): get_return = mock.MagicMock() get_return.status_code = requests.codes.not_found self._CLIENT.get.return_value = get_return def _assert_canary_created(self): rows = db.get_all_db_rows(self.db_context) self.assertTrue(any(r['object_uuid'] == full_sync._CANARY_NETWORK_ID for r in rows)) return rows def _test_full_sync_resources(self, expected_journal): self._mock_canary_missing() 
directory.add_plugin(constants.CORE, mock.Mock()) full_sync.full_sync(self.db_context) rows = self._assert_canary_created() rows = self._filter_out_canary(rows) self.assertItemsEqual(expected_journal.keys(), [row['object_type'] for row in rows]) for row in rows: self.assertEqual(expected_journal[row['object_type']], row['object_uuid']) def test_full_sync_removes_pending_rows(self): db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, "uuid", odl_const.ODL_CREATE, {'foo': 'bar'}) self._test_full_sync_resources({}) def test_full_sync_no_resources(self): self._test_full_sync_resources({}) @staticmethod def _get_mocked_security_groups(context): return [{'description': u'description', 'security_group_rules': ['security_grp_rules'], 'id': 'test_uuid', 'name': u'default'}] @staticmethod def _get_mocked_security_group_rules(context): return [{'direction': 'egress', 'protocol': None, 'description': 'description', 'port_range_max': None, 'id': 'test_uuid', 'security_group_id': 'test_uuid'}] @staticmethod def _get_mocked_networks(context): return [{'id': 'test_uuid', 'project_id': u'project_id', 'status': u'ACTIVE', 'subnets': [], 'description': u'', 'name': u'network0'}] @staticmethod def _get_mocked_subnets(context): return [{'description': u'', 'cidr': u'test-cidr', 'id': 'test_uuid', 'name': u'test-subnet', 'network_id': 'test_uuid', 'gateway_ip': u'gateway_ip'}] @staticmethod def _get_mocked_routers(context): return [{'status': u'ACTIVE', 'description': u'', 'name': u'router1', 'id': 'test_uuid'}] @staticmethod def _get_mocked_ports(context): return [{'status': u'DOWN', 'description': None, 'id': 'test_uuid', 'name': u'loadbalancer-27', 'network_id': 'test_uuid', 'mac_address': u'fa:16:3e:69:4e:33'}] @staticmethod def _get_mocked_loadbalancers(context): return [{'description': '', 'tenant_id': 'tenant_id', 'vip_subnet_id': 'subnet_id', 'listeners': [], 'vip_address': '10.1.0.11', 'vip_port_id': 'port_id', 'pools': [], 'id': 'test_uuid', 'name': 'test-lb'}] 
@staticmethod def _get_mocked_listeners(context): return [{'admin_state_up': True, 'project_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_trunks(context): return [{'routers': [], 'id': 'test_uuid', 'name': u'', 'tenant_id': u'project_id', 'networks': [], 'route_targets': [ u'64512:1'], 'project_id': u'project_id', 'type': 'l3'}, {'routers': [], 'id': 'test_uuid', 'name': u'', 'tenant_id': u'tenant_id', 'networks': [], 'route_targets': [ u'64512:1'], 'project_id': u'project_id', 'type': 'l3'}] @staticmethod def _get_mocked_bgpvpns(context): return [{'network_id': 'test_uuid', 'bgpvpn_id': 'test_uuid', 'project_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_l2_gateways(context): return [{'tenant_id': u'test_tenant_id', 'id': 'test_uuid', 'devices': [{'interfaces': [{'name': u'eth3'}], 'id': 'test_uuid', 'device_name': u'vtep0'}], 'name': u'test-gateway'}] @staticmethod def _get_mocked_l2_gateway_connections(context): return [{'network_id': 'test_uuid', 'tenant_id': 'test_uuid', 'l2_gateway_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_pools(context): return [{'name': 'pool1', 'admin_state_up': True, 'project_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_pool_members(context, pool_id): return [{'name': 'pool1', 'admin_state_up': True, 'project_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_healthmonitors(context): return [{'type': 'HTTP', 'admin_state_up': True, 'project_id': 'test_uuid', 'id': 'test_uuid', 'name': 'monitor1'}] @staticmethod def _get_mocked_listener(context): return [{'admin_state_up': True, 'project_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_floatingips(context): return [{'floating_network_id': 'test_uuid', 'tenant_id': 'test_uuid', 'dns_name': '', 'dns_domain': '', 'id': 'test_uuid'}] @staticmethod def _get_mocked_policies(context): return [{'id': 'test_uuid', 'project_id': 'test_uuid', 'name': 'test-policy', 
'description': 'Policy description', 'shared': True, 'is_default': False}] @staticmethod def _get_mocked_bgpvpn_network_associations(context, bgpvpn_id): return [{'network_id': 'test_uuid', 'tenant_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_bgpvpn_router_associations(context, bgpvpn_id): return [{'router_id': 'test_uuid', 'tenant_id': 'test_uuid', 'id': 'test_uuid'}] @staticmethod def _get_mocked_port_chains(context): tenant_id = 'test_uuid' return [{'tenant_id': tenant_id, 'project_id': tenant_id, 'id': 'test_uuid'}] @staticmethod def _get_mocked_port_pair_groups(context): tenant_id = 'test_uuid' return [{'tenant_id': tenant_id, 'project_id': tenant_id, 'id': 'test_uuid'}] @staticmethod def _get_mocked_port_pairs(context): tenant_id = 'test_uuid' return [{'tenant_id': tenant_id, 'project_id': tenant_id, 'id': 'test_uuid'}] @staticmethod def _get_mocked_flowclassifiers(context): tenant_id = 'test_uuid' return [{'tenant_id': tenant_id, 'project_id': tenant_id, 'id': 'test_uuid'}] @staticmethod def _get_all_plugins(): return { constants.CORE: (mock.Mock(), mech_driver_v2.OpenDaylightMechanismDriver), constants.L3: (mock.Mock(), l3_odl_v2.OpenDaylightL3RouterPlugin), resources.TRUNK: (mock.Mock(), trunk_driver.OpenDaylightTrunkHandlerV2), constants.QOS: (mock.Mock(), qos_driver.OpenDaylightQosDriver), sfc_const.SFC_EXT: (mock.Mock(), sfc_driver.OpenDaylightSFCDriverV2), bgpvpn_const.ALIAS: (mock.Mock(), bgpvpn_driver.OpenDaylightBgpvpnDriver), fc_const.FLOW_CLASSIFIER_EXT: ( mock.Mock(), sfc_flowclassifier_v2.OpenDaylightSFCFlowClassifierDriverV2), l2gw_const.L2GW: (mock.Mock(), l2gw_driver.OpenDaylightL2gwDriver) } @staticmethod def _get_name(resource_type): mapping = { odl_const.ODL_QOS_POLICY: odl_const.ODL_QOS_POLICIES, odl_const.ODL_SFC_PORT_PAIR: odl_const.NETWORKING_SFC_FLOW_CLASSIFIERS, odl_const.ODL_SFC_PORT_PAIR: odl_const.NETWORKING_SFC_PORT_PAIRS, odl_const.ODL_SFC_PORT_PAIR_GROUP: odl_const.NETWORKING_SFC_PORT_PAIR_GROUPS, 
odl_const.ODL_SFC_PORT_CHAIN: odl_const.NETWORKING_SFC_PORT_CHAINS, odl_const.ODL_L2GATEWAY_CONNECTION: odl_const.ODL_L2GATEWAY_CONNECTIONS} return ('_get_mocked_%s' % mapping.get( resource_type, resource_type + 's')) def _add_side_effect(self): plugins = self._get_all_plugins() resources = self._get_all_resources() for resource_type, plugin_name in resources: name = self._get_name(resource_type) setattr(plugins[plugin_name][0], "get_%s" % name[12:], getattr(self, name)) if directory.get_plugin(plugin_name) is None: directory.add_plugin(plugin_name, plugins[plugin_name][0]) @mock.patch.object(journal, 'record') def _test_sync_resources(self, object_type, plugin_type, mocked_record): plugins = self._get_all_plugins() driver = plugins[plugin_type][1] args = [mock.Mock()] if object_type in [odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION, odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION]: args.append(mock.Mock()) resources = getattr(self, self._get_name(object_type))(*args) context = mock.Mock() def _test_get_default_handler(context, resource_type, plugin_type=plugin_type): resource_type = self._get_name(resource_type)[12:] return full_sync.get_resources(context, plugin_type=plugin_type, resource_type=resource_type) handler = getattr(driver, 'get_resources', _test_get_default_handler) full_sync._sync_resources(context, object_type, handler) mocked_record.assert_has_calls( [mock.call(context, object_type, resource['id'], odl_const.ODL_CREATE, resource) for resource in resources]) def test_sync_all_resources(self): self._add_side_effect() resources = self._get_all_resources() for obj_type, plugin_name in resources: self._test_sync_resources(obj_type, plugin_name) def test_full_sync_retries_exceptions(self): with mock.patch.object(full_sync, '_full_sync_needed') as m: self._test_retry_exceptions(full_sync.full_sync, m) def test_object_not_registered(self): self.assertRaises(exceptions.ResourceNotRegistered, full_sync.sync_resources, self.db_context, 'test-object-type') 
self.assertEqual([], db.get_all_db_rows(self.db_context)) def _register_resources(self): helper.TestDriver() self.addCleanup(base_driver.ALL_RESOURCES.clear) def add_plugin(self, plugin_type, plugin): directory.add_plugin(plugin_type, plugin) def test_plugin_not_registered(self): self._register_resources() # NOTE(rajivk): workaround, as we don't have delete method for plugin plugin = directory.get_plugin(helper.TEST_PLUGIN) directory.add_plugin(helper.TEST_PLUGIN, None) self.addCleanup(self.add_plugin, helper.TEST_PLUGIN, plugin) self.assertRaises(exceptions.PluginMethodNotFound, full_sync.sync_resources, self.db_context, helper.TEST_RESOURCE1) self.assertEqual([], db.get_all_db_rows(self.db_context)) def test_sync_resources(self): self._register_resources() plugin = helper.TestPlugin() self.add_plugin(helper.TEST_PLUGIN, plugin) resources = plugin.get_test_resource1s(self.db_context) full_sync.sync_resources(self.db_context, helper.TEST_RESOURCE1) entries = [entry.data for entry in db.get_all_db_rows(self.db_context)] for resource in resources: self.assertIn(resource, entries) self.assertEqual(len(resources), len(entries)) @mock.patch.object(base_driver.ResourceBaseDriver, 'get_resources_for_full_sync') def test_get_resources_failed(self, mock_get_resources): self._register_resources() mock_get_resources.side_effect = exceptions.UnsupportedResourceType() resource_name = helper.TEST_RESOURCE1 self.assertRaises(exceptions.UnsupportedResourceType, full_sync.sync_resources, self.db_context, resource_name) mock_get_resources.assert_called_once_with(self.db_context, resource_name) self.assertEqual([], db.get_all_db_rows(self.db_context)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/test_journal.py0000644000175000017500000005430100000000000031602 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 NEC Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import signal import fixtures import mock from neutron.common import utils from oslo_db import exception from oslo_log import log as logging from oslo_service.tests import test_service from oslo_utils import uuidutils from networking_odl.common import client from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.db import models from networking_odl.journal import cleanup from networking_odl.journal import dependency_validations from networking_odl.journal import full_sync from networking_odl.journal import journal from networking_odl.journal import periodic_task from networking_odl.journal import recovery from networking_odl.journal import worker from networking_odl.tests import base from networking_odl.tests.unit import base_v2 from networking_odl.tests.unit.db import test_db PROCESS_RUNNING_STATUSES = ('S', 'R', 'D') class JournalPeriodicProcessorTest(base_v2.OpenDaylightConfigBase, test_service.ServiceTestBase): def setUp(self): super(JournalPeriodicProcessorTest, self).setUp() self.periodic_task_fixture = self.useFixture( base.OpenDaylightPeriodicTaskFixture()) self.cfg.config(sync_timeout=0.1, group='ml2_odl') def _create_periodic_processor(self): periodic_processor = worker.JournalPeriodicProcessor() self.addCleanup(periodic_processor.stop) return periodic_processor def _get_pid_status(self, pid): """Allows to query a system process based on the PID It will use 
`ps` to query the pid, it's state and the command. :param pid: An integer with the Process ID number :returns: A tuple of strings with the command and the running status in a single char as defined in the manpage PS(1) under PROCESS STATE CODES. """ with os.popen('ps ax -o pid,state,cmd') as f: # Skip ps header f.readline() processes = (l.strip().split()[:3] for l in f) return next(((c, s) for p, s, c in processes if int(p) == pid), (None, None)) def _kill_process(self, pid): if self._get_pid_status(pid)[1] in PROCESS_RUNNING_STATUSES: os.kill(pid, signal.SIGKILL) def mock_object_with_ipc(self, target, attribute, pre_hook=None): patcher = mock.patch.object(target, attribute, autospec=True) c2p_read = self.create_ipc_for_mock(patcher, pre_hook) return c2p_read def create_ipc_for_mock(self, patcher, pre_hook=None): # NOTE(mpeterson): The following pipe is being used because this is # testing something inter processeses and we need to have a value on # the side of the test processes to know it succeeded with the # operation. A pipe provide a way for two processes to communicate. # The was_called method will be called by the worker process while # the test process will read the result on c2p_read. c2p_read, c2p_write = os.pipe() def close_pipe_end(fd): try: os.close(fd) except OSError: print('failed closing: %s' % fd) # First we want to close the write, to unlock any running read() self.addCleanup(close_pipe_end, c2p_read) self.addCleanup(close_pipe_end, c2p_write) mock_ = patcher.start() self.addCleanup(patcher.stop) def was_called(*args, **kwargs): # OSError is caught because start is called twice on the worker # and the second time the pipe is already closed. try: os.close(c2p_read) try: if pre_hook: pre_hook(*args, **kwargs) os.write(c2p_write, b'1') except Exception: # This is done so any read on the pipe is unblocked. 
os.write(c2p_write, b'0') finally: os.close(c2p_write) except OSError: pass mock_.side_effect = was_called return c2p_read def assert_ipc_mock_called(self, c2p_read): # If it timeouts on the read then it means the function was # not called. called = int(os.read(c2p_read, 1)) self.assertEqual(called, 1, 'The IPC mock was called but during the ' 'execution an exception was raised') @mock.patch.object(journal.OpenDaylightJournalThread, 'set_sync_event') def test_processing(self, mock_journal): periodic_processor = self._create_periodic_processor() periodic_processor.start() utils.wait_until_true(lambda: mock_journal.call_count > 1, 5, 0.1) @mock.patch.object(journal.OpenDaylightJournalThread, 'start') @mock.patch.object(journal.OpenDaylightJournalThread, 'stop') def test_stops_journal_sync_thread(self, mock_stop, mock_start): periodic_processor = self._create_periodic_processor() periodic_processor.start() periodic_processor.stop() mock_stop.assert_called_once() mock_start.assert_called_once() def test_allow_multiple_starts_gracefully(self): periodic_processor = self._create_periodic_processor() periodic_processor.start() periodic_processor.stop() try: periodic_processor.start() except RuntimeError: self.fail('Calling a start() after a stop() should be allowed') def test_multiple_starts_without_stop_throws_exception(self): periodic_processor = self._create_periodic_processor() periodic_processor.start() self.assertRaises(RuntimeError, periodic_processor.start) def test_call_stop_without_calling_start(self): periodic_processor = self._create_periodic_processor() try: periodic_processor.stop() except AttributeError: self.fail('start() was not called before calling stop()') def assert_process_running(self, pid): cmd, state = self._get_pid_status(pid) self.assertIn(state, PROCESS_RUNNING_STATUSES) return cmd def _create_periodic_processor_ipc_fork(self, target, pre_hook=None): self._setup_mocks_for_periodic_task() real_start = worker.JournalPeriodicProcessor.start 
pipe_start = self.mock_object_with_ipc(worker.JournalPeriodicProcessor, 'start', real_start) c2p_read = self.mock_object_with_ipc(worker.JournalPeriodicProcessor, target, pre_hook) pid = self._spawn_service( service_maker=lambda: worker.JournalPeriodicProcessor()) self.addCleanup(self._kill_process, pid) # Allow the process to spawn and signal handling to be registered self.assert_ipc_mock_called(pipe_start) return pid, c2p_read @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops', new=mock.Mock()) @mock.patch.object(journal.OpenDaylightJournalThread, 'sync_pending_entries', new=mock.Mock()) def test_handle_sighup_gracefully(self): real_reset = worker.JournalPeriodicProcessor.reset pid, c2p_read = self._create_periodic_processor_ipc_fork('reset', real_reset) cmd = self.assert_process_running(pid) os.kill(pid, signal.SIGHUP) self.assert_ipc_mock_called(c2p_read) new_cmd = self.assert_process_running(pid) self.assertEqual(cmd, new_cmd) def _setup_mocks_for_periodic_task(self, executed_recently=False): mock_db_module = mock.MagicMock(spec=db) mock_db_module.was_periodic_task_executed_recently.return_value = \ executed_recently mock_db = mock.patch('networking_odl.journal.periodic_task.db', mock_db_module) mock_db.start() self.addCleanup(mock_db.stop) @mock.patch.object(cleanup, 'delete_completed_rows') @mock.patch.object(cleanup, 'cleanup_processing_rows') @mock.patch.object(full_sync, 'full_sync') @mock.patch.object(recovery, 'journal_recovery') # ^^ The above mocks represent the required calling order starting from # top. Use decorators *only* to specify the stack order. 
def test_maintenance_task_correctly_registered(self, *stack_order): calls = [] for item in reversed(stack_order): calls.append(mock.call(item)) with mock.patch.object( periodic_task.PeriodicTask, 'register_operation') as register_operation_mock: periodic_processor = self._create_periodic_processor() periodic_processor._start_maintenance_task() register_operation_mock.assert_has_calls(calls) def test_maintenance_task_started(self): self.periodic_task_fixture.task_start_mock.stop() mock_start = self.periodic_task_fixture.task_start_mock.start() periodic_processor = self._create_periodic_processor() periodic_processor.start() periodic_processor._maintenance_task = mock.MagicMock() mock_start.assert_called_once() @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops', new=mock.Mock()) def test_reset_called_on_sighup(self): pid, c2p_read = self._create_periodic_processor_ipc_fork('reset') self.assert_process_running(pid) os.kill(pid, signal.SIGHUP) self.assert_ipc_mock_called(c2p_read) @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops') def test_reset_fires_maintenance_task(self, execute_mock): periodic_processor = self._create_periodic_processor() periodic_processor._start_maintenance_task() execute_mock.reset_mock() periodic_processor.reset() execute_mock.assert_has_calls([mock.call(forced=True)]) def test_reset_succeeeds_when_maintenance_task_not_setup(self): periodic_processor = self._create_periodic_processor() # NOTE(mpeterson): This tests that if calling reset without setting up # the maintenance task then it would not raise an exception and just # proceed as usual. 
periodic_processor.reset() @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops') def test_start_fires_maintenance_task(self, execute_mock): periodic_processor = self._create_periodic_processor() periodic_processor.start() execute_mock.called_once_with([mock.call(forced=True)]) def test_creates_pidfile(self): periodic_processor = self._create_periodic_processor() periodic_processor._create_pidfile() pidfile = str(periodic_processor.pidfile) self.assertTrue(os.path.isfile(pidfile)) with open(pidfile) as f: pid = int(f.readline()) self.assertEqual(pid, os.getpid()) # NOTE(mpeterson): to avoid showing an expected exception while # running the next assert with mock.patch('neutron.agent.linux.daemon.LOG', autospec=True): self.assertRaises( SystemExit, worker.JournalPeriodicProcessor()._create_pidfile ) @mock.patch.object(worker.JournalPeriodicProcessor, '_create_pidfile') @mock.patch.object(worker.JournalPeriodicProcessor, '_delete_pidfile') def test_pidfile_handling_on_start_stop(self, mock_create, mock_delete): periodic_processor = self._create_periodic_processor() periodic_processor.start() periodic_processor.stop() mock_create.assert_called_once() mock_delete.assert_called_once() def test_deletes_pidfile(self): atexit_mock = self.journal_thread_fixture.remock_atexit() periodic_processor = self._create_periodic_processor() periodic_processor.start() pidfile = str(periodic_processor.pidfile) self.assertTrue(os.path.isfile(pidfile)) periodic_processor._delete_pidfile() self.assertFalse(os.path.isfile(pidfile)) atexit_mock.assert_called_once_with(periodic_processor._delete_pidfile) def test_atexit_delete_pidfile_registered_only_once(self): atexit_mock = self.journal_thread_fixture.remock_atexit() periodic_processor = self._create_periodic_processor() for _ in range(0, 2): periodic_processor.start() periodic_processor.stop() atexit_mock.assert_called_once() class OpenDaylightJournalThreadTest(base_v2.OpenDaylightTestCase): def setUp(self): 
super(OpenDaylightJournalThreadTest, self).setUp() self.journal = journal.OpenDaylightJournalThread() self.addCleanup(self.cleanup) @staticmethod def cleanup(): journal.MAKE_URL.clear() def test_json_data(self): object_type = 'testobject' data = 'testdata' row = models.OpenDaylightJournal(object_type=object_type, object_uuid=uuidutils.generate_uuid(), operation=odl_const.ODL_CREATE, data=data) self.assertEqual("%ss" % object_type, self.journal._json_data(row)[1]) def test_json_data_customized_url(self): object_type = 'randomtestobject' data = 'testdata' journal.register_url_builder(object_type, lambda row: row.object_type) row = models.OpenDaylightJournal(object_type=object_type, object_uuid=uuidutils.generate_uuid(), operation=odl_const.ODL_CREATE, data=data) url_param = self.journal._json_data(row) self.assertEqual(object_type, url_param[1]) def test_entry_reset_retries_exceptions(self): with mock.patch.object(db, 'update_db_row_state') as m: self._test_retry_exceptions(journal.entry_reset, m) @test_db.in_session @mock.patch.object(client.OpenDaylightRestClient, 'sendjson', mock.Mock(side_effect=Exception)) def test__sync_entry_update_state_by_retry_count_on_exception(self): entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW) self.journal._max_retry_count = 1 self.assertEqual(entry.retry_count, 0) self.journal._sync_entry(self.db_context, entry) self.assertEqual(entry.retry_count, 1) self.assertEqual(entry.state, odl_const.PENDING) self.journal._sync_entry(self.db_context, entry) self.assertEqual(entry.retry_count, 1) self.assertEqual(entry.state, odl_const.FAILED) def _test__sync_entry_logs(self, log_type): entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW) logger = self.useFixture(fixtures.FakeLogger()) self.journal._sync_entry(self.db_context, entry) self.assertIn(log_type, logger.output) def test__sync_entry_logs_processing(self): self._test__sync_entry_logs(journal.LOG_PROCESSING) def test__sync_entry_logs_completed(self): 
self._test__sync_entry_logs(journal.LOG_COMPLETED) @mock.patch.object(client.OpenDaylightRestClient, 'sendjson', mock.Mock(side_effect=Exception)) def test__sync_entry_logs_failed(self): self._test__sync_entry_logs(journal.LOG_ERROR_PROCESSING) @mock.patch.object(journal.OpenDaylightJournalThread, 'sync_pending_entries') def test_terminate_journal_thread_correctly(self, mock_journal): self.journal_thread_fixture.journal_thread_mock.stop() self.addCleanup(self.journal_thread_fixture.journal_thread_mock.start) journal_thread = journal.OpenDaylightJournalThread(start_thread=True) journal_thread.stop(5) self.assertTrue(not journal_thread._odl_sync_thread.is_alive()) mock_journal.assert_called_once() @mock.patch.object(journal.OpenDaylightJournalThread, 'sync_pending_entries') def test_allow_multiple_starts_gracefully(self, mock_journal): self.journal_thread_fixture.journal_thread_mock.stop() self.addCleanup(self.journal_thread_fixture.journal_thread_mock.start) journal_thread = journal.OpenDaylightJournalThread(start_thread=False) self.addCleanup(journal_thread.stop) journal_thread.start() try: journal_thread.start() except RuntimeError: self.fail('OpenDaylightJournalThread started twice') def _raise_DBReferenceError(*args, **kwargs): args = [mock.Mock(unsafe=True)] * 4 e = exception.DBReferenceError(*args) raise e class JournalTest(base_v2.OpenDaylightTestCase): @mock.patch.object(dependency_validations, 'calculate') @mock.patch.object(journal.db, 'create_pending_row', side_effect=_raise_DBReferenceError) def test_record_triggers_retry_on_reference_error(self, mock_create_row, mock_calculate): args = [mock.Mock(unsafe=True)] * 5 self.assertRaises(exception.RetryRequest, journal.record, *args) def test_entry_complete_retries_exceptions(self): with mock.patch.object(db, 'update_db_row_state') as m: self._test_retry_exceptions(journal.entry_complete, m) @test_db.in_session def _test_entry_complete(self, retention, expected_length): 
self.cfg.config(completed_rows_retention=retention, group='ml2_odl') db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW) entry = db.get_all_db_rows(self.db_context)[-1] journal.entry_complete(self.db_context, entry) rows = db.get_all_db_rows(self.db_context) self.assertEqual(expected_length, len(rows)) self.assertTrue( all(row.state == odl_const.COMPLETED for row in rows)) def test_entry_complete_no_retention(self): self._test_entry_complete(0, 0) def test_entry_complete_with_retention(self): self._test_entry_complete(1, 1) def test_entry_complete_with_indefinite_retention(self): self._test_entry_complete(-1, 1) @test_db.in_session def test_entry_complete_with_retention_deletes_dependencies(self): self.cfg.config(completed_rows_retention=1, group='ml2_odl') db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW) entry = db.get_all_db_rows(self.db_context)[-1] db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW, depending_on=[entry]) dependant = db.get_all_db_rows(self.db_context)[-1] journal.entry_complete(self.db_context, entry) rows = db.get_all_db_rows(self.db_context) self.assertIn(entry, rows) self.assertEqual([], entry.dependencies) self.assertEqual([], dependant.depending_on) def test_entry_reset_retries_exceptions(self): with mock.patch.object(db, 'update_db_row_state') as m: self._test_retry_exceptions(journal.entry_reset, m) @test_db.in_session def test_entry_reset(self): db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW) db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW) entry = db.get_all_db_rows(self.db_context)[-1] entry.state = odl_const.PROCESSING self.db_context.session.merge(entry) self.db_context.session.flush() entry = db.get_all_db_rows(self.db_context)[-1] self.assertEqual(entry.state, odl_const.PROCESSING) journal.entry_reset(self.db_context, entry) rows = db.get_all_db_rows(self.db_context) self.assertEqual(2, len(rows)) self.assertTrue(all(row.state 
== odl_const.PENDING for row in rows)) def test_entry_set_retry_count_retries_exceptions(self): with mock.patch.object(db, 'update_pending_db_row_retry') as m: self._test_retry_exceptions( journal.entry_update_state_by_retry_count, m) @test_db.in_session def test_entry_set_retry_count(self): db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW) entry_baseline = db.get_all_db_rows(self.db_context)[-1] db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW) entry_target = db.get_all_db_rows(self.db_context)[-1] self.assertEqual(entry_target.retry_count, 0) self.assertEqual(entry_target.retry_count, entry_baseline.retry_count) self.assertEqual(entry_target.state, entry_baseline.state) journal.entry_update_state_by_retry_count( self.db_context, entry_target, 1) self.assertEqual(entry_target.retry_count, 1) self.assertEqual(entry_target.state, odl_const.PENDING) journal.entry_update_state_by_retry_count( self.db_context, entry_target, 1) self.assertEqual(entry_target.retry_count, 1) self.assertEqual(entry_target.state, odl_const.FAILED) self.assertNotEqual(entry_target.state, entry_baseline.state) self.assertNotEqual(entry_target.retry_count, entry_baseline.retry_count) def test_record_logs_recording(self): logger = self.useFixture(fixtures.FakeLogger()) journal.record(self.db_context, *self.UPDATE_ROW) for arg in self.UPDATE_ROW[0:3]: self.assertIn(arg, logger.output) def test_record_logs_dependencies(self): entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW) logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG)) journal.record(self.db_context, *self.UPDATE_ROW) self.assertIn(str(entry.seqnum), logger.output) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/test_periodic_task.py0000644000175000017500000002020200000000000032741 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 
Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import threading import mock from neutron.common import utils from neutron_lib import context from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.db import models from networking_odl.journal import periodic_task from networking_odl.tests.unit import test_base_db TEST_TASK_NAME = 'test-maintenance' TEST_TASK_INTERVAL = 0.1 class PeriodicTaskThreadTestCase(test_base_db.ODLBaseDbTestCase): def setUp(self): super(PeriodicTaskThreadTestCase, self).setUp() row = models.OpenDaylightPeriodicTask(task=TEST_TASK_NAME, state=odl_const.PENDING) self.db_context.session.add(row) self.db_context.session.flush() self.thread = periodic_task.PeriodicTask(TEST_TASK_NAME, TEST_TASK_INTERVAL) self.addCleanup(self.thread.cleanup) def test__execute_op_no_exception(self): with mock.patch.object(periodic_task, 'LOG') as mock_log: operation = mock.MagicMock() operation.__name__ = "test" self.thread.register_operation(operation) self.thread._execute_op(operation, self.db_context) operation.assert_called() mock_log.info.assert_called() mock_log.exception.assert_not_called() def test__execute_op_with_exception(self): with mock.patch.object(periodic_task, 'LOG') as mock_log: operation = mock.MagicMock(side_effect=Exception()) operation.__name__ = "test" self.thread._execute_op(operation, self.db_context) mock_log.exception.assert_called() def test_thread_works(self): callback_event = 
threading.Event() count = 0 def callback_op(*args): nonlocal count count += 1 # The following should be true on the second call, so we're making # sure that the thread runs more than once. if count > 1: callback_event.set() self.thread.register_operation(callback_op) self.thread.start() # Make sure the callback event was called and not timed out self.assertTrue(callback_event.wait(timeout=5)) def test_thread_continues_after_exception(self): exception_event = threading.Event() callback_event = threading.Event() def exception_op(*args): if not exception_event.is_set(): exception_event.set() raise Exception() def callback_op(*args): callback_event.set() for op in [exception_op, callback_op]: self.thread.register_operation(op) self.thread.start() # Make sure the callback event was called and not timed out self.assertTrue(callback_event.wait(timeout=5)) def test_multiple_thread_work(self): self.thread1 = periodic_task.PeriodicTask(TEST_TASK_NAME + '1', TEST_TASK_INTERVAL) callback_event = threading.Event() callback_event1 = threading.Event() self.addCleanup(self.thread1.cleanup) def callback_op(*args): callback_event.set() def callback_op1(*args): callback_event1.set() self.thread.register_operation(callback_op) self.thread.register_operation(callback_op1) self.thread.start() self.assertTrue(callback_event.wait(timeout=5)) self.thread1.start() self.assertTrue(callback_event1.wait(timeout=5)) @mock.patch.object(db, "was_periodic_task_executed_recently") def test_back_to_back_job(self, mock_status_method): callback_event = threading.Event() continue_event = threading.Event() def callback_op(*args): callback_event.set() return_value = True def continue_(*args, **kwargs): continue_event.set() return return_value mock_status_method.side_effect = continue_ self.thread.register_operation(callback_op) msg = ("Periodic %s task executed after periodic " "interval Skipping execution.") with mock.patch.object(periodic_task.LOG, 'info') as mock_log_info: self.thread.start() 
self.assertTrue(continue_event.wait(timeout=1)) continue_event.clear() mock_log_info.assert_called_with(msg, TEST_TASK_NAME) self.assertFalse(callback_event.is_set()) self.assertTrue(continue_event.wait(timeout=1)) continue_event.clear() mock_log_info.assert_called_with(msg, TEST_TASK_NAME) return_value = False self.assertTrue(callback_event.wait(timeout=2)) def test_set_operation_retries_exceptions(self): with mock.patch.object(db, 'update_periodic_task') as m: self._test_retry_exceptions(self.thread._set_operation, m) def test_lock_task_retries_exceptions(self): with mock.patch.object(db, 'lock_periodic_task') as m: self._test_retry_exceptions(self.thread._lock_task, m) def test_clear_and_unlock_task_retries_exceptions(self): with mock.patch.object(db, 'update_periodic_task') as m: self._test_retry_exceptions(self.thread._clear_and_unlock_task, m) @mock.patch.object(db, "was_periodic_task_executed_recently", return_value=False) def test_no_multiple_executions_simultaneously(self, mock_exec_recently): continue_event = threading.Event() trigger_event = threading.Event() count = 0 def wait_until_event(context): nonlocal count trigger_event.set() if continue_event.wait(2): count += 1 self.thread.register_operation(wait_until_event) def task_locked(): session = self.db_context.session row = (session.query(models.OpenDaylightPeriodicTask) .filter_by(state=odl_const.PROCESSING, task=TEST_TASK_NAME) .one_or_none()) return (row is not None) self.thread.start() utils.wait_until_true(trigger_event.is_set, 5, 0.01) self.assertEqual(count, 0) self.assertTrue(task_locked()) self.thread.execute_ops() self.assertEqual(count, 0) self.assertTrue(task_locked()) continue_event.set() trigger_event.clear() utils.wait_until_true(trigger_event.is_set, 5, 0.01) self.thread.cleanup() self.assertFalse(task_locked()) self.assertGreaterEqual(count, 1) @mock.patch.object(db, "was_periodic_task_executed_recently", return_value=True) def test_forced_execution(self, mock_status_method): 
operation = mock.MagicMock() operation.__name__ = "test" self.thread.register_operation(operation) self.thread.execute_ops(forced=True) operation.assert_called() @mock.patch.object(db, "was_periodic_task_executed_recently", return_value=True) def test_context_is_passed_as_args(self, _): operation = mock.MagicMock() operation.__name__ = 'test' self.thread.register_operation(operation) self.thread.execute_ops(forced=True) # This tests that only ONE args is passed, and no kwargs operation.assert_called_with(mock.ANY) # This tests that it's a context kall = operation.call_args args, kwargs = kall self.assertIsInstance(args[0], context.Context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/journal/test_recovery.py0000644000175000017500000002376000000000000031773 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import mock from neutron_lib import exceptions as nexc from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from networking_odl.common import constants as odl_const from networking_odl.common import exceptions from networking_odl.db import db from networking_odl.journal import full_sync from networking_odl.journal import recovery from networking_odl.l3 import l3_odl_v2 from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests import base from networking_odl.tests.unit.db import test_db from networking_odl.tests.unit.journal import helper from networking_odl.tests.unit import test_base_db class RecoveryTestCase(test_base_db.ODLBaseDbTestCase): def setUp(self): self.useFixture( base.OpenDaylightRestClientGlobalFixture(recovery._CLIENT)) super(RecoveryTestCase, self).setUp() self._CLIENT = recovery._CLIENT.get_client() self.addCleanup(self.clean_registered_resources) @staticmethod def clean_registered_resources(): full_sync.ALL_RESOURCES = {} def _mock_resource(self, plugin, resource_type): mock_resource = mock.MagicMock() get_func = getattr(plugin, 'get_{}'.format(resource_type)) get_func.return_value = mock_resource return mock_resource def _mock_row(self, resource_type): return mock.MagicMock(object_type=resource_type) def _test__get_latest_resource(self, plugin, resource_type): l2 = mech_driver_v2.OpenDaylightMechanismDriver.RESOURCES full_sync.ALL_RESOURCES[plugin_constants.CORE] = l2 mock_resource = self._mock_resource(plugin, resource_type) mock_row = self._mock_row(resource_type) resource = recovery._get_latest_resource(self.db_context.session, mock_row) self.assertEqual(mock_resource, resource) @mock.patch.object(directory, 'get_plugin') def test__get_latest_resource_l2(self, plugin_mock): for resource_type in( mech_driver_v2.OpenDaylightMechanismDriver.RESOURCES): plugin = plugin_mock.return_value self._test__get_latest_resource(plugin, resource_type) @mock.patch.object(directory, 
'get_plugin') def test__get_latest_resource_l3(self, plugin_mock): full_sync.ALL_RESOURCES[plugin_constants.L3] = l3_odl_v2.L3_RESOURCES for resource_type in l3_odl_v2.L3_RESOURCES: plugin = plugin_mock.return_value self._test__get_latest_resource(plugin, resource_type) def test__get_latest_resource_unsupported(self): mock_row = self._mock_row('aaa') self.assertRaises( exceptions.UnsupportedResourceType, recovery._get_latest_resource, self.db_context.session, mock_row) @mock.patch.object(directory, 'get_plugin') def test__get_latest_resource_none(self, plugin_mock): plugin_mock.return_value.get_network.side_effect = nexc.NotFound() l2 = mech_driver_v2.OpenDaylightMechanismDriver.RESOURCES full_sync.ALL_RESOURCES[plugin_constants.CORE] = l2 mock_row = self._mock_row(odl_const.ODL_NETWORK) self.assertRaises( nexc.NotFound, recovery._get_latest_resource, self.db_context.session, mock_row) def test_journal_recovery_retries_exceptions(self): db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, 'id', odl_const.ODL_DELETE, {}) created_row = db.get_all_db_rows(self.db_context)[0] db.update_db_row_state(self.db_context, created_row, odl_const.FAILED) with mock.patch.object(db, 'update_db_row_state') as m: self._test_retry_exceptions(recovery.journal_recovery, m) def test_journal_recovery_no_rows(self): recovery.journal_recovery(self.db_context) self.assertFalse(self._CLIENT.get_resource.called) @test_db.in_session def _test_recovery(self, operation, odl_resource, expected_state): db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, 'id', operation, {}) created_row = db.get_all_db_rows(self.db_context)[0] db.update_db_row_state(self.db_context, created_row, odl_const.FAILED) self._CLIENT.get_resource.return_value = odl_resource recovery.journal_recovery(self.db_context) if expected_state is None: completed_rows = db.get_all_db_rows_by_state( self.db_context, odl_const.COMPLETED) self.assertEqual([], completed_rows) else: row = 
db.get_all_db_rows_by_state(self.db_context, expected_state)[0] self.assertEqual(created_row['seqnum'], row['seqnum']) return created_row def _disable_retention(self): self.cfg.config(completed_rows_retention=0, group='ml2_odl') def test_journal_recovery_handles_failure_quietly(self): class TestException(Exception): pass self._CLIENT.get_resource.side_effect = TestException('') self._test_recovery( odl_const.ODL_DELETE, None, odl_const.FAILED) def test_journal_recovery_deleted_row_not_in_odl(self): self._test_recovery(odl_const.ODL_DELETE, None, odl_const.COMPLETED) def test_journal_recovery_deleted_row_not_in_odl_purged(self): self._disable_retention() self._test_recovery(odl_const.ODL_DELETE, None, None) def test_journal_recovery_created_row_exists_in_odl(self): self._test_recovery(odl_const.ODL_CREATE, {}, odl_const.COMPLETED) def test_journal_recovery_created_row_exists_in_odl_purged(self): self._disable_retention() self._test_recovery(odl_const.ODL_CREATE, {}, None) def test_journal_recovery_deleted_row_exists_in_odl(self): self._test_recovery(odl_const.ODL_DELETE, {}, odl_const.PENDING) @mock.patch.object(recovery, '_get_latest_resource') def _test_recovery_creates_operation( self, operation, resource, odl_resource, expected_operation, recovery_mock): if resource is not None: recovery_mock.return_value = resource else: recovery_mock.side_effect = nexc.NotFound original_row = self._test_recovery( operation, odl_resource, odl_const.COMPLETED) pending_row = db.get_all_db_rows_by_state( self.db_context, odl_const.PENDING)[0] self.assertEqual(expected_operation, pending_row['operation']) self.assertEqual(original_row['object_type'], pending_row['object_type']) self.assertEqual(original_row['object_uuid'], pending_row['object_uuid']) def test_recovery_created_row_not_in_odl(self): self._test_recovery_creates_operation( odl_const.ODL_CREATE, {}, None, odl_const.ODL_CREATE) def test_recovery_updated_row_not_in_odl(self): self._test_recovery_creates_operation( 
odl_const.ODL_UPDATE, {}, None, odl_const.ODL_CREATE) def test_recovery_updated_resource_missing_but_exists_in_odl(self): self._test_recovery_creates_operation( odl_const.ODL_UPDATE, None, {}, odl_const.ODL_DELETE) @mock.patch.object(recovery, '_get_latest_resource') def test_recovery_created_resource_missing_and_not_in_odl(self, rmock): rmock.side_effect = nexc.NotFound self._test_recovery(odl_const.ODL_CREATE, None, odl_const.COMPLETED) @mock.patch.object(recovery, '_get_latest_resource') def test_recovery_created_resource_missing_and_not_in_odl_purged( self, rmock): rmock.side_effect = nexc.NotFound self._disable_retention() self._test_recovery(odl_const.ODL_CREATE, None, None) @mock.patch.object(recovery, '_get_latest_resource') def test_recovery_updated_resource_missing_and_not_in_odl(self, rmock): rmock.side_effect = nexc.NotFound self._test_recovery(odl_const.ODL_UPDATE, None, odl_const.COMPLETED) @mock.patch.object(recovery, '_get_latest_resource') def test_recovery_updated_resource_missing_and_not_in_odl_purged( self, rmock): rmock.side_effect = nexc.NotFound self._disable_retention() self._test_recovery(odl_const.ODL_UPDATE, None, None) def _test_get_latest_resource(self, resource_type): # Drivers needs to be initialized to register resources for recovery # and full sync mechasnim. 
helper.TestDriver() directory.add_plugin(helper.TEST_PLUGIN, helper.TestPlugin()) self.addCleanup(directory.add_plugin, helper.TEST_PLUGIN, None) return db.create_pending_row(self.db_context, resource_type, 'id', odl_const.ODL_DELETE, {}) def test_get_latest_resource(self): row = self._test_get_latest_resource(helper.TEST_RESOURCE1) plugin = directory.get_plugin(helper.TEST_PLUGIN) resource = recovery.get_latest_resource(self.db_context, row) self.assertDictEqual(resource, plugin.get_test_resource1(self.db_context, 'id')) def test_get_unsupported_latest_resource(self): row = self._test_get_latest_resource(helper.TEST_RESOURCE1) row.object_type = helper.INVALID_RESOURCE self.assertRaises(exceptions.UnsupportedResourceType, recovery.get_latest_resource, self.db_context, row) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/l2gateway/0000755000175000017500000000000000000000000026741 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/l2gateway/__init__.py0000644000175000017500000000000000000000000031040 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/l2gateway/test_driver_v2.py0000644000175000017500000001356300000000000032264 0ustar00jamespagejamespage00000000000000# # Copyright (C) 2017 Ericsson India Global Services Pvt Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.l2gateway import driver_v2 as driverv2 from networking_odl.tests.unit import base_v2 class OpenDaylightL2GWDriverTestCase(base_v2.OpenDaylightConfigBase): def setUp(self): super(OpenDaylightL2GWDriverTestCase, self).setUp() self.driver = driverv2.OpenDaylightL2gwDriver(service_plugin=None) def _get_fake_l2_gateway(self): fake_l2_gateway = { "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820", "id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1", "name": "test-gateway", "devices": [ { "device_name": "switch1", "interfaces": [ { "name": "port1", "segmentation_id": [100] }, { "name": "port2", "segmentation_id": [151, 152] } ] }, { "device_name": "switch2", "interfaces": [ { "name": "port5", "segmentation_id": [200] }, { "name": "port6", "segmentation_id": [251, 252] } ] } ] } return fake_l2_gateway def _get_fake_l2_gateway_connection(self): fake_l2_gateway_connection = { "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820", "id": "5227c228-6bba-4bbe-bdb8-6942768ff02f", "network_id": "be0a7495-05c4-4be0-b796-1412835c6830", "default_segmentation_id": 77, "l2_gateway_id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1" } return fake_l2_gateway_connection def _assert_op(self, operation, object_type, data, precommit=True): row = db.get_oldest_pending_db_row_with_lock(self.db_context) if precommit: self.db_context.session.flush() self.assertEqual(operation, row['operation']) self.assertEqual(object_type, row['object_type']) self.assertEqual(data['id'], row['object_uuid']) else: 
self.assertIsNone(row) def test_create_l2_gateway(self): fake_data = self._get_fake_l2_gateway() self.driver.create_l2_gateway_precommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY, fake_data) self.driver.create_l2_gateway_postcommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY, fake_data, False) def test_delete_l2_gateway(self): fake_data = self._get_fake_l2_gateway() self.driver.delete_l2_gateway_precommit(self.db_context, fake_data['id']) self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY, fake_data) self.driver.delete_l2_gateway_postcommit(self.db_context, fake_data['id']) self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY, fake_data, False) def test_update_l2_gateway(self): fake_data = self._get_fake_l2_gateway() self.driver.update_l2_gateway_precommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_L2GATEWAY, fake_data) self.driver.update_l2_gateway_postcommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_L2GATEWAY, fake_data, False) def test_create_l2_gateway_connection(self): fake_data = self._get_fake_l2_gateway_connection() self.driver.create_l2_gateway_connection_precommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY_CONNECTION, fake_data) self.driver.create_l2_gateway_connection_postcommit(self.db_context, fake_data) self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY_CONNECTION, fake_data, False) def test_delete_l2_gateway_connection(self): fake_data = self._get_fake_l2_gateway_connection() self.driver.delete_l2_gateway_connection_precommit(self.db_context, fake_data['id']) self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY_CONNECTION, fake_data) self.driver.delete_l2_gateway_connection_postcommit(self.db_context, fake_data['id']) self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY_CONNECTION, 
fake_data, False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/l3/0000755000175000017500000000000000000000000025360 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/l3/__init__.py0000644000175000017500000000000000000000000027457 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/l3/test_l3_flavor.py0000644000175000017500000002053400000000000030664 0ustar00jamespagejamespage00000000000000# Copyright (c) 2018 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.objects import router as l3_obj from neutron_lib.callbacks import events from neutron_lib.callbacks import resources from oslo_config import fixture as config_fixture from oslo_utils import uuidutils from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.l3 import l3_flavor from networking_odl.tests import base from networking_odl.tests.unit import base_v2 _operation_map = {'del': odl_const.ODL_DELETE, 'update': odl_const.ODL_UPDATE, 'add': odl_const.ODL_CREATE} class OpenDaylightL3FlavorTestCase(base_v2.OpenDaylightConfigBase): def setUp(self): self.useFixture(base.OpenDaylightJournalThreadFixture()) self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(service_plugins=['router']) super(OpenDaylightL3FlavorTestCase, self).setUp() self.flavor_driver = l3_flavor.ODLL3ServiceProvider(mock.MagicMock()) def _get_mock_fip_kwargs(self): fipid = uuidutils.generate_uuid() fip_db = mock.Mock(floating_ip_address='192.168.1.2', router_id=None, id=fipid, floating_network_id=fipid) projectid = uuidutils.generate_uuid() floating_data = {'floatingip_id': str(fipid), 'router_id': None, 'context': self.db_context, 'floatingip_db': fip_db, 'floatingip': {'project_id': str(projectid), 'floating_ip_address': '172.24.5.4', 'port_id': None, 'id': fip_db.id, 'router_id': None, 'status': 'DOWN', 'floating_network_id': str(fipid) }} return floating_data def _get_mock_router_kwargs(self): router_db = mock.Mock(gw_port_id=uuidutils.generate_uuid(), id=uuidutils.generate_uuid()) router = {odl_const.ODL_ROUTER: {'name': 'router1', 'admin_state_up': True, 'tenant_id': uuidutils.generate_uuid(), 'id': router_db.id, 'external_gateway_info': {'network_id': uuidutils.generate_uuid()}}, 'context': self.db_context, "router_db": router_db} return router def _test_fip_operation(self, event, operation, fip, ops=True): method = getattr(self.flavor_driver, '_floatingip_%s_%s' % (operation, event)) 
method(odl_const.ODL_FLOATINGIP, mock.ANY, mock.ANY, **fip) row = db.get_oldest_pending_db_row_with_lock(self.db_context) if ops: if operation != odl_const.ODL_DELETE: self.assertEqual(fip['floatingip'], row.data) self.assertEqual(odl_const.ODL_FLOATINGIP, row.object_type) self.assertEqual(fip['floatingip_id'], row.object_uuid) else: self.assertIsNone(row) def _test_router_operation(self, event, operation, router, ops=True): method = getattr(self.flavor_driver, '_router_%s_%s' % (operation, event)) if event == 'precommit': method(odl_const.ODL_ROUTER, mock.ANY, mock.ANY, **router) else: payload = events.DBEventPayload( router.get('context'), states=(router.get('router_db'),), request_body=router.get(resources.ROUTER), resource_id=router.get(resources.ROUTER).get('id')) method(odl_const.ODL_ROUTER, mock.ANY, mock.ANY, payload=payload) row = db.get_oldest_pending_db_row_with_lock(self.db_context) if ops: if operation in ['del', odl_const.ODL_DELETE]: self.assertEqual(router['router_id'], row.object_uuid) else: self.assertEqual(router['router'], row.data) self.assertEqual(_operation_map[operation], row.operation) else: self.assertIsNone(row) def test_router_add_association(self): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=True): router = self._get_mock_router_kwargs() # Driver Association payload is different and expects # router_id router['router_id'] = router['router']['id'] self._test_router_operation("association", "add", router) def test_l3_operations_for_different_flavor(self): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=False): router = self._get_mock_router_kwargs() router['router_id'] = router['router']['id'] self._test_router_operation("association", "add", router, False) self._test_router_operation("association", "del", router, False) def test_l3_router_update_precommit(self): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=True): router = 
self._get_mock_router_kwargs() router['router_id'] = router['router']['id'] self._test_router_operation("precommit", "update", router) def test_router_del_association(self): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=True): router = self._get_mock_router_kwargs() router['router_id'] = router['router']['id'] self._test_router_operation("association", "del", router) def test_fip_precommit_create(self): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=True): fip = self._get_mock_fip_kwargs() self._test_fip_operation("precommit", odl_const.ODL_CREATE, fip) def test_l3_fip_different_flavor(self): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=False): fip = self._get_mock_fip_kwargs() fip['old_floatingip'] = fip['floatingip'] self._test_fip_operation("precommit", odl_const.ODL_CREATE, fip, False) self._test_fip_operation("precommit", odl_const.ODL_UPDATE, fip, False) def test_fip_precommit_delete(self): # As precommit delete gets port data fip = self._get_mock_fip_kwargs() port = {'port': {'id': uuidutils.generate_uuid()}, 'context': self.db_context, 'floatingip_id': fip['floatingip_id']} with mock.patch.object(l3_obj.FloatingIP, 'get_objects', return_value=[fip['floatingip_db']]): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=True): self._test_fip_operation("precommit", odl_const.ODL_DELETE, port) def test_fip_precommit_update(self): with mock.patch.object(self.flavor_driver, '_validate_l3_flavor', return_value=True): fip = self._get_mock_fip_kwargs() self._test_fip_operation("precommit", odl_const.ODL_UPDATE, fip) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/l3/test_l3_odl_v2.py0000644000175000017500000005274100000000000030565 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights 
Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock import requests from neutron.db import l3_db from neutron.plugins.ml2 import plugin from neutron.tests import base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit import testlib_api from neutron_lib.api.definitions import external_net from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils from oslo_utils import uuidutils from networking_odl.common import client from networking_odl.common import constants as odl_const from networking_odl.common import filters from networking_odl.db import db from networking_odl.journal import journal from networking_odl.l3 import l3_odl_v2 from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests import base as odl_base from networking_odl.tests.unit import test_base_db EMPTY_DEP = {'gw_port_id': None} FLOATINGIP_ID = uuidutils.generate_uuid() NETWORK_ID = uuidutils.generate_uuid() ROUTER_ID = uuidutils.generate_uuid() SUBNET_ID = uuidutils.generate_uuid() PORT_ID = uuidutils.generate_uuid() class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase): def setUp(self): self.useFixture(odl_base.OpenDaylightRestClientFixture()) self.useFixture(odl_base.OpenDaylightFeaturesFixture()) self.cfg = self.useFixture(config_fixture.Config()) 
self.useFixture(odl_base.OpenDaylightJournalThreadFixture()) super(OpenDayLightMechanismConfigTests, self).setUp() self.cfg.config(mechanism_drivers=[ 'logger', 'opendaylight_v2'], group='ml2') self.cfg.config( port_binding_controller='legacy-port-binding', group='ml2_odl') def _set_config(self, url='http://127.0.0.1:9999', username='someuser', password='somepass'): self.cfg.config(url=url, group='ml2_odl') self.cfg.config(username=username, group='ml2_odl') self.cfg.config(password=password, group='ml2_odl') def _test_missing_config(self, **kwargs): self._set_config(**kwargs) self.assertRaisesRegex(cfg.RequiredOptError, r'value required for option \w+ in group ' r'\[ml2_odl\]', plugin.Ml2Plugin) def test_valid_config(self): self._set_config() plugin.Ml2Plugin() def test_missing_url_raises_exception(self): self._test_missing_config(url=None) def test_missing_username_raises_exception(self): self._test_missing_config(username=None) def test_missing_password_raises_exception(self): self._test_missing_config(password=None) class DataMatcher(object): def __init__(self, operation, object_type, object_dict): self._data = object_dict.copy() self._object_type = object_type filters.filter_for_odl(object_type, operation, self._data) def __eq__(self, s): data = jsonutils.loads(s) return self._data == data[self._object_type] def __ne__(self, s): return not self.__eq__(s) class OpenDaylightL3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_base_db.ODLBaseDbTestCase, base.BaseTestCase): def setUp(self): self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(core_plugin='neutron.plugins.ml2.plugin.Ml2Plugin') self.cfg.config(mechanism_drivers=[ 'logger', 'opendaylight_v2'], group='ml2') self.useFixture(odl_base.OpenDaylightRestClientFixture()) self.cfg.config(service_plugins=['odl-router_v2']) core_plugin = cfg.CONF.core_plugin service_plugins = {'l3_plugin_name': 'odl-router_v2'} self.useFixture(odl_base.OpenDaylightJournalThreadFixture()) 
mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver, '_record_in_journal').start() mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver, 'sync_from_callback_precommit').start() mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver, 'sync_from_callback_postcommit').start() self.useFixture(odl_base.OpenDaylightPeriodicTaskFixture()) self.useFixture(odl_base.OpenDaylightFeaturesFixture()) self.useFixture(odl_base.OpenDaylightPseudoAgentPrePopulateFixture()) super(OpenDaylightL3TestCase, self).setUp( plugin=core_plugin, service_plugins=service_plugins) self.plugin = directory.get_plugin() self.plugin._network_is_external = mock.Mock(return_value=True) self.driver = directory.get_plugin(constants.L3) self.thread = journal.OpenDaylightJournalThread() @staticmethod def _get_mock_router_operation_info(network, subnet): router = {odl_const.ODL_ROUTER: {'name': 'router1', 'admin_state_up': True, 'tenant_id': network['network']['tenant_id'], 'external_gateway_info': {'network_id': network['network']['id']}}} return router @staticmethod def _get_mock_floatingip_operation_info(network, subnet): floatingip = {odl_const.ODL_FLOATINGIP: {'floating_network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id'], 'subnet_id': None, 'floating_ip_address': None}} return floatingip @staticmethod def _get_mock_router_interface_operation_info(network, subnet): router_intf_dict = {'subnet_id': subnet['subnet']['id'], 'id': network['network']['id']} return router_intf_dict @classmethod def _get_mock_operation_info(cls, object_type, *args): getter = getattr(cls, '_get_mock_' + object_type + '_operation_info') return getter(*args) @classmethod def _get_mock_request_response(cls, status_code): response = mock.Mock(status_code=status_code) response.raise_for_status = mock.Mock() if status_code < 400 else ( mock.Mock(side_effect=requests.exceptions.HTTPError(status_code))) return response def _test_operation(self, status_code, expected_calls, *args, 
**kwargs): request_response = self._get_mock_request_response(status_code) with mock.patch('requests.sessions.Session.request', return_value=request_response) as mock_method: self.thread.sync_pending_entries() if expected_calls: mock_method.assert_called_with( headers={'Content-Type': 'application/json'}, timeout=cfg.CONF.ml2_odl.timeout, *args, **kwargs) self.assertEqual(expected_calls, mock_method.call_count) def _call_operation_object(self, operation, object_type, object_id, network, subnet): object_dict = self._get_mock_operation_info( object_type, network, subnet) method = getattr(self.driver, operation + '_' + object_type) if operation == odl_const.ODL_CREATE: new_object_dict = method(self.db_context, object_dict) elif operation == odl_const.ODL_UPDATE: new_object_dict = method(self.db_context, object_id, object_dict) else: new_object_dict = method(self.db_context, object_id) return new_object_dict def _test_operation_thread_processing(self, object_type, operation, network, subnet, object_id, expected_calls=1): http_requests = {odl_const.ODL_CREATE: 'post', odl_const.ODL_UPDATE: 'put', odl_const.ODL_DELETE: 'delete'} status_codes = {odl_const.ODL_CREATE: requests.codes.created, odl_const.ODL_UPDATE: requests.codes.ok, odl_const.ODL_DELETE: requests.codes.no_content} http_request = http_requests[operation] status_code = status_codes[operation] # Create database entry. new_object_dict = self._call_operation_object( operation, object_type, object_id, network, subnet) # Setup expected results. if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]: url = (cfg.CONF.ml2_odl.url + '/' + object_type + 's/' + object_id) else: url = cfg.CONF.ml2_odl.url + '/' + object_type + 's' if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]: kwargs = { 'url': url, 'data': DataMatcher(operation, object_type, new_object_dict)} else: kwargs = {'url': url, 'data': None} # Call threading routine to process database entry. Test results. 
self._test_operation(status_code, expected_calls, http_request, **kwargs) return new_object_dict def _test_thread_processing(self, object_type): # Create network and subnet. kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as network: with self.subnet(network=network, cidr='10.0.0.0/24'): # Add and process create request. new_object_dict = self._test_operation_thread_processing( object_type, odl_const.ODL_CREATE, network, None, None) object_id = new_object_dict['id'] rows = db.get_all_db_rows_by_state(self.db_context, odl_const.COMPLETED) self.assertEqual(1, len(rows)) # Add and process 'update' request. Adds to database. self._test_operation_thread_processing( object_type, odl_const.ODL_UPDATE, network, None, object_id) rows = db.get_all_db_rows_by_state(self.db_context, odl_const.COMPLETED) self.assertEqual(2, len(rows)) # Add and process 'delete' request. Adds to database. self._test_operation_thread_processing( object_type, odl_const.ODL_DELETE, network, None, object_id) rows = db.get_all_db_rows_by_state(self.db_context, odl_const.COMPLETED) self.assertEqual(3, len(rows)) def _test_db_results(self, object_id, operation, object_type): rows = db.get_all_db_rows(self.db_context) self.assertEqual(1, len(rows)) self.assertEqual(operation, rows[0]['operation']) self.assertEqual(object_type, rows[0]['object_type']) self.assertEqual(object_id, rows[0]['object_uuid']) self._db_cleanup() @contextlib.contextmanager def _prepare_resource(self, resource_type): # Create network and subnet for testing. kwargs = {'arg_list': (external_net.EXTERNAL,), external_net.EXTERNAL: True} with self.network(**kwargs) as network: with self.subnet(network=network): yield self._get_mock_operation_info( resource_type, network, None) def _test_object_db(self, object_type): with self._prepare_resource(object_type) as object_dict: # Add and test 'create' database entry. 
method = getattr(self.driver, odl_const.ODL_CREATE + '_' + object_type) new_object_dict = method(self.db_context, object_dict) object_id = new_object_dict['id'] self._test_db_results(object_id, odl_const.ODL_CREATE, object_type) # Add and test 'update' database entry. method = getattr(self.driver, odl_const.ODL_UPDATE + '_' + object_type) method(self.db_context, object_id, object_dict) self._test_db_results(object_id, odl_const.ODL_UPDATE, object_type) # Add and test 'delete' database entry. method = getattr(self.driver, odl_const.ODL_DELETE + '_' + object_type) method(self.db_context, object_id) self._test_db_results(object_id, odl_const.ODL_DELETE, object_type) def _test_dependency_processing( self, test_operation, test_object, test_id, test_data, dep_operation, dep_object, dep_id, dep_data): # Mock sendjson to verify that it never gets called. mock_sendjson = mock.patch.object(client.OpenDaylightRestClient, 'sendjson').start() # Create dependency db row and mark as 'processing' so it won't # be processed by the journal thread. ctxt = self.db_context journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data) row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING) db.update_db_row_state(self.db_context, row[0], odl_const.PROCESSING) # Create test row with dependent ID. journal.record(ctxt, test_object, test_id, test_operation, test_data) # Call journal thread. self.thread.sync_pending_entries() # Verify that dependency row is still set at 'processing'. rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PROCESSING) self.assertEqual(1, len(rows)) # Verify that the test row was processed and set back to 'pending' # to be processed again. rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING) self.assertEqual(1, len(rows)) # Verify that _json_data was not called. 
self.assertFalse(mock_sendjson.call_count) def test_router_db(self): self._test_object_db(odl_const.ODL_ROUTER) def test_floatingip_db(self): self._test_object_db(odl_const.ODL_FLOATINGIP) def test_router_threading(self): self._test_thread_processing(odl_const.ODL_ROUTER) def test_floatingip_threading(self): self._test_thread_processing(odl_const.ODL_FLOATINGIP) def test_delete_network_validate_ext_delete_router_dep(self): router_context = [NETWORK_ID] self._test_dependency_processing( odl_const.ODL_DELETE, odl_const.ODL_NETWORK, NETWORK_ID, None, odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, router_context) def test_create_router_validate_ext_create_port_dep(self): router_context = {'gw_port_id': PORT_ID} self._test_dependency_processing( odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, router_context, odl_const.ODL_CREATE, odl_const.ODL_PORT, PORT_ID, {'fixed_ips': [], 'network_id': None, odl_const.ODL_SGS: None, 'tenant_id': 'tenant'}) def test_delete_router_validate_ext_delete_floatingip_dep(self): floatingip_context = [ROUTER_ID] self._test_dependency_processing( odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, None, odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, floatingip_context) def test_delete_router_validate_self_create_dep(self): self._test_dependency_processing( odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP, odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP) def test_delete_router_validate_self_update_dep(self): self._test_dependency_processing( odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP, odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP) def test_update_router_validate_self_create_dep(self): self._test_dependency_processing( odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP, odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP) def test_create_floatingip_validate_ext_create_network_dep(self): floatingip_context = 
{'floating_network_id': NETWORK_ID} self._test_dependency_processing( odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, floatingip_context, odl_const.ODL_CREATE, odl_const.ODL_NETWORK, NETWORK_ID, {}) def test_update_floatingip_validate_self_create_dep(self): floatingip_context = {'floating_network_id': NETWORK_ID} self._test_dependency_processing( odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, floatingip_context, odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, EMPTY_DEP) def test_delete_floatingip_validate_self_create_dep(self): self._test_dependency_processing( odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, EMPTY_DEP, odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, {}) def test_delete_floatingip_validate_self_update_dep(self): self._test_dependency_processing( odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, EMPTY_DEP, odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID, {}) @mock.patch.object(journal, 'record') def test__record_in_journal_retries(self, record_mock): self._test_retry_exceptions( l3_odl_v2._record_in_journal, record_mock, True) def _assert_record_in_journal(self, record_in_journal, resource_type, operation): record_in_journal.assert_called_with( mock.ANY, resource_type, operation, mock.ANY, mock.ANY) def _call_and_assert_recorded_in_journal( self, resource_type, operation, function, *args): with mock.patch.object(l3_odl_v2, '_record_in_journal') as record_in_journal: function(self.db_context, *args) record_in_journal.assert_called_with( mock.ANY, resource_type, operation, mock.ANY, mock.ANY) def test_create_router_records_in_journal(self): with self._prepare_resource(odl_const.ODL_ROUTER) as router: self._call_and_assert_recorded_in_journal( odl_const.ODL_ROUTER, odl_const.ODL_CREATE, self.driver.create_router, router) def test_update_router_records_in_journal(self): with self._prepare_resource(odl_const.ODL_ROUTER) as router: result = 
self.driver.create_router(self.db_context, router) self._call_and_assert_recorded_in_journal( odl_const.ODL_ROUTER, odl_const.ODL_UPDATE, self.driver.update_router, result['id'], router) def test_delete_router_records_in_journal(self): with self._prepare_resource(odl_const.ODL_ROUTER) as router: result = self.driver.create_router(self.db_context, router) self._call_and_assert_recorded_in_journal( odl_const.ODL_ROUTER, odl_const.ODL_DELETE, self.driver.delete_router, result['id']) def test_create_fip_records_in_journal(self): with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip: self._call_and_assert_recorded_in_journal( odl_const.ODL_FLOATINGIP, odl_const.ODL_CREATE, self.driver.create_floatingip, fip) def test_update_fip_records_in_journal(self): with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip: result = self.driver.create_floatingip(self.db_context, fip) self._call_and_assert_recorded_in_journal( odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE, self.driver.update_floatingip, result['id'], fip) def test_delete_fip_records_in_journal(self): with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip: result = self.driver.create_floatingip(self.db_context, fip) self._call_and_assert_recorded_in_journal( odl_const.ODL_FLOATINGIP, odl_const.ODL_DELETE, self.driver.delete_floatingip, result['id']) @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, 'disassociate_floatingips') @mock.patch.object(l3_odl_v2.OpenDaylightL3RouterPlugin, 'get_floatingips') def test_disassociate_floatingips_records_in_journal( self, get_fips, disassociate_floatingips): with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip: result = self.driver.create_floatingip(self.db_context, fip) get_fips.return_value = [result] self._call_and_assert_recorded_in_journal( odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE, self.driver.disassociate_floatingips, 'fake_id') self.assertTrue(disassociate_floatingips.called) ././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/0000755000175000017500000000000000000000000025534 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/__init__.py0000644000175000017500000000000000000000000027633 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh0000755000175000017500000000230200000000000032621 0ustar00jamespagejamespage00000000000000#!/bin/sh # Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. uuid=$(sudo ovs-vsctl get Open_vSwitch . 
_uuid) # Test data sudo ovs-vsctl set Open_vSwitch $uuid \ external_ids:odl_os_hostconfig_hostid="devstack" # sudo ovs-vsctl set Open_vSwitch $uuid \ # external_ids:odl_os_hostconfig_hosttype="ODL L2" config=$(cat <<____CONFIG {"supported_vnic_types":[ {"vnic_type":"normal","vif_type":"ovs","vif_details":{}}], "allowed_network_types":["local","vlan","vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}} ____CONFIG ) echo config: $config sudo ovs-vsctl set Open_vSwitch $uuid \ external_ids:odl_os_hostconfig_config_odl_l2="$config" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/odl_teststub.js0000644000175000017500000000400700000000000030606 0ustar00jamespagejamespage00000000000000/* * Copyright (c) 2016 OpenStack Foundation * All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. 
* * $nodejs odl_teststub.js * * local.conf or ml2_conf.ini should be set to the following: * * [ml2_odl] * port_binding_controller = pseudo-agentdb-binding * password = admin * username = admin * url = http://localhost:8080/controller/nb/v2/neutron * restconf_uri = http://localhost:8125/ # for this stub * * To test with ODL *end to end* use below URL for restconf_uri and configure * ovsdb external_ids using the test script: config-ovs-external_ids.sh * * http://localhost:8181/restconf/operational/neutron:neutron/hostconfigs */ var http = require('http'); const PORT=8125; __test_odl_hconfig = {"hostconfigs": {"hostconfig": [ {"host-id": "devstack", "host-type": "ODL L2", "config": { "supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "ovs", "vif_details": {}}], "allowed_network_types": ["local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1":"br-ex"} } }] }} function handleRequest(req, res){ res.setHeader('Content-Type', 'application/json'); res.end(JSON.stringify(__test_odl_hconfig)); } var server = http.createServer(handleRequest); server.listen(PORT, function(){ console.log("Server listening on: http://localhost:%s", PORT); }); ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/test_legacy_port_binding.py0000644000175000017500000000644600000000000033161 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.plugins.ml2 import driver_context as ctx from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_constants from neutron_lib.plugins.ml2 import api from networking_odl.ml2 import legacy_port_binding from networking_odl.tests import base class TestLegacyPortBindingManager(base.DietTestCase): # valid and invalid segments valid_segment = { api.ID: 'API_ID', api.NETWORK_TYPE: n_constants.TYPE_LOCAL, api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} invalid_segment = { api.ID: 'API_ID', api.NETWORK_TYPE: n_constants.TYPE_NONE, api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} def test_check_segment(self): """Validate the _check_segment method.""" all_network_types = [n_constants.TYPE_FLAT, n_constants.TYPE_GRE, n_constants.TYPE_LOCAL, n_constants.TYPE_VXLAN, n_constants.TYPE_VLAN, n_constants.TYPE_NONE] mgr = legacy_port_binding.LegacyPortBindingManager() valid_types = { network_type for network_type in all_network_types if mgr._check_segment({api.NETWORK_TYPE: network_type})} self.assertEqual({ n_constants.TYPE_FLAT, n_constants.TYPE_LOCAL, n_constants.TYPE_GRE, n_constants.TYPE_VXLAN, n_constants.TYPE_VLAN}, valid_types) def test_bind_port(self): network = mock.MagicMock(spec=api.NetworkContext) port_context = mock.MagicMock( spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'}, segments_to_bind=[self.valid_segment, self.invalid_segment], network=network) mgr = legacy_port_binding.LegacyPortBindingManager() vif_type = mgr._get_vif_type(port_context) mgr.bind_port(port_context) port_context.set_binding.assert_called_once_with( self.valid_segment[api.ID], vif_type, mgr.vif_details, status=n_constants.PORT_STATUS_ACTIVE) def test_bind_port_unsupported_vnic_type(self): network = mock.MagicMock(spec=api.NetworkContext) 
port_context = mock.MagicMock( spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID', portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}, segments_to_bind=[self.valid_segment, self.invalid_segment], network=network) mgr = legacy_port_binding.LegacyPortBindingManager() mgr.bind_port(port_context) port_context.set_binding.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py0000644000175000017500000010510400000000000032357 0ustar00jamespagejamespage00000000000000# Copyright (c) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import operator import mock import requests import testscenarios from neutron.db.models import securitygroup from neutron.db import segments_db from neutron.plugins.ml2 import plugin from neutron.tests.unit import testlib_api from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as providernet from neutron_lib import constants as n_constants from neutron_lib.db import api as db_api from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils from oslo_utils import uuidutils from networking_odl.common import callback from networking_odl.common import constants as odl_const from networking_odl.common import filters from networking_odl.common import utils from networking_odl.db import db from networking_odl.journal import base_driver from networking_odl.journal import cleanup from networking_odl.journal import journal from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests import base from networking_odl.tests.unit import base_v2 # Required to generate tests from scenarios. Not compatible with nose. 
load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') SECURITY_GROUP = '2f9244b4-9bee-4e81-bc4a-3f3c2045b3d7' SG_FAKE_ID = uuidutils.generate_uuid() SG_RULE_FAKE_ID = uuidutils.generate_uuid() class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase): def setUp(self): self.useFixture(base.OpenDaylightFeaturesFixture()) self.useFixture(base.OpenDaylightJournalThreadFixture()) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) self.cfg = self.useFixture(config_fixture.Config()) super(OpenDayLightMechanismConfigTests, self).setUp() self.cfg.config(mechanism_drivers=[ 'logger', 'opendaylight_v2'], group='ml2') self.cfg.config( port_binding_controller='legacy-port-binding', group='ml2_odl') def _set_config(self, url='http://127.0.0.1:9999', username='someuser', password='somepass'): self.cfg.config(url=url, group='ml2_odl') self.cfg.config(username=username, group='ml2_odl') self.cfg.config(password=password, group='ml2_odl') def _test_missing_config(self, **kwargs): self._set_config(**kwargs) self.assertRaisesRegex(cfg.RequiredOptError, r'value required for option \w+ in group ' r'\[ml2_odl\]', plugin.Ml2Plugin) def test_valid_config(self): self._set_config() plugin.Ml2Plugin() def test_missing_url_raises_exception(self): self._test_missing_config(url=None) def test_missing_username_raises_exception(self): self._test_missing_config(username=None) def test_missing_password_raises_exception(self): self._test_missing_config(password=None) class _OpenDaylightMechanismBase(base_v2.OpenDaylightTestCase): _mechanism_drivers = ['logger', 'opendaylight_v2'] # TODO(mpeterson): Add a test to make sure extension_drivers are honored. 
_extension_drivers = ['port_security', 'qos'] def setUp(self): mech_initialize_patcher = mock.patch.object( mech_driver_v2.OpenDaylightMechanismDriver, 'initialize', autospec=True, side_effect=mech_driver_v2.OpenDaylightMechanismDriver.initialize ) self.mech_initialize_mock = mech_initialize_patcher.start() mock.patch('networking_odl.common.odl_features.init').start() # NOTE(mpeterson): We cannot use stop in the following cleanup because # several of the following fixtures and setUp() add a cleanup for # stopall. The reason to add the stopall ourselves is to make sure # that it will be stopped if anything were to change in the future. self.addCleanup(mock.patch.stopall) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(extension_drivers=self._extension_drivers, group='ml2') super(_OpenDaylightMechanismBase, self).setUp() def test_mechanism_driver_is_initialized(self): """Test that the mech driver is initialized. 
This test will allow us know if the mech driver is not initialized in case there is a change in the way Ml2PluginV2TestCase instantiate them """ # NOTE(mpeterson): Because of the autospec the mock lacks # the helper assert_called_once msg = "The opendaylight_v2 ML2 Mechanism Driver was not initialized" self.assertTrue(self.mech_initialize_mock.called, msg) class DataMatcher(object): def __init__(self, operation, object_type, context): if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]: self._data = copy.deepcopy(context[object_type]) elif object_type == odl_const.ODL_PORT: # NOTE(yamahata): work around for journal._enrich_port() self._data = copy.deepcopy(context.current) if self._data.get(odl_const.ODL_SGS): self._data[odl_const.ODL_SGS] = [ {'id': id_} for id_ in self._data[odl_const.ODL_SGS]] else: self._data = copy.deepcopy(context.current) self._object_type = object_type filters.filter_for_odl(object_type, operation, self._data) def __eq__(self, s): data = jsonutils.loads(s) return self._data == data[self._object_type] def __ne__(self, s): return not self.__eq__(s) def __repr__(self): # for debugging return 'DataMatcher(%(object_type)s, %(data)s)' % { 'object_type': self._object_type, 'data': self._data} class AttributeDict(dict): def __init__(self, *args, **kwargs): super(AttributeDict, self).__init__(*args, **kwargs) self.__dict__ = self class OpenDaylightMechanismDriverTestCase(base_v2.OpenDaylightConfigBase): def setUp(self): self.useFixture(base.OpenDaylightFeaturesFixture()) self.useFixture(base.OpenDaylightJournalThreadFixture()) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) super(OpenDaylightMechanismDriverTestCase, self).setUp() self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_registered_plugin_type(self): self.assertEqual(self.mech.plugin_type, plugin_const.CORE) def test_registered_resources(self): for resource in self.mech.RESOURCES: self.assertIn(resource, 
    def _get_mock_network_operation_context(self):
        # Build a mock ML2 network context carrying a minimal 'local'
        # network dict, bound to this test's DB context.
        current = {'status': 'ACTIVE',
                   'subnets': [],
                   'name': 'net1',
                   'provider:physical_network': None,
                   'admin_state_up': True,
                   'tenant_id': 'test-tenant',
                   'provider:network_type': 'local',
                   'router:external': False,
                   'shared': False,
                   'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'provider:segmentation_id': None}
        context = mock.Mock(current=current)
        context._plugin_context = self.db_context
        return context

    def _get_mock_subnet_operation_context(self):
        # Mock subnet context; network_id matches the network context above.
        current = {'ipv6_ra_mode': None,
                   'allocation_pools': [{'start': '10.0.0.2',
                                         'end': '10.0.1.254'}],
                   'host_routes': [],
                   'ipv6_address_mode': None,
                   'cidr': '10.0.0.0/23',
                   'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
                   'name': '',
                   'enable_dhcp': True,
                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'tenant_id': 'test-tenant',
                   'dns_nameservers': [],
                   'gateway_ip': '10.0.0.1',
                   'ip_version': 4,
                   'shared': False}
        context = mock.Mock(current=current)
        context._plugin_context = self.db_context
        return context

    def _get_mock_port_operation_context(self):
        # Mock port context; also stubs the core plugin getters the journal
        # consults while enriching the port.
        current = {'status': 'DOWN',
                   'binding:host_id': '',
                   'allowed_address_pairs': [],
                   'device_owner': 'fake_owner',
                   'binding:profile': {},
                   'fixed_ips': [{
                       'subnet_id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839'}],
                   'id': '83d56c48-e9b8-4dcf-b3a7-0813bb3bd940',
                   'security_groups': [SECURITY_GROUP],
                   'device_id': 'fake_device',
                   'name': '',
                   'admin_state_up': True,
                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'tenant_id': 'test-tenant',
                   'binding:vif_details': {},
                   'binding:vnic_type': 'normal',
                   'binding:vif_type': 'unbound',
                   'mac_address': '12:34:56:78:21:b6'}
        _network = self._get_mock_network_operation_context().current
        _plugin = directory.get_plugin()
        _plugin.writer_get_security_group = mock.Mock(
            return_value=SECURITY_GROUP)
        _plugin.get_port = mock.Mock(return_value=current)
        _plugin.get_network = mock.Mock(return_value=_network)
        _plugin_context_mock = {'session': self.db_context.session}
        _network_context_mock = {'_network': _network}
        context = {'current': AttributeDict(current),
                   '_plugin': _plugin,
                   '_plugin_context': AttributeDict(_plugin_context_mock),
                   '_network_context': AttributeDict(_network_context_mock)}
        return AttributeDict(context)

    def _get_mock_security_group_operation_context(self):
        # Security groups are delivered via callbacks as plain dicts, not
        # ML2 contexts.
        context = {odl_const.ODL_SG: {'name': 'test_sg',
                                      'project_id': 'test-tenant',
                                      'tenant_id': 'test-tenant',
                                      'description': 'test-description',
                                      'security_group_rules': [],
                                      'id': SG_FAKE_ID}}
        return context

    def _get_mock_security_group_rule_operation_context(self):
        context = {odl_const.ODL_SG_RULE: {'security_group_id': SG_FAKE_ID,
                                           'id': SG_RULE_FAKE_ID}}
        _plugin = directory.get_plugin()
        _plugin._get_security_group_rule = mock.Mock(
            return_value=AttributeDict(context[odl_const.ODL_SG_RULE]))
        return context

    def _get_mock_operation_context(self, object_type):
        # Dispatch to the matching _get_mock_<object_type>_operation_context.
        getter = getattr(self, '_get_mock_%s_operation_context' % object_type)
        return getter()

    # Canned messages used to fake requests' raise_for_status() errors.
    _status_code_msgs = {
        200: '',
        201: '',
        204: '',
        400: '400 Client Error: Bad Request',
        401: '401 Client Error: Unauthorized',
        403: '403 Client Error: Forbidden',
        404: '404 Client Error: Not Found',
        409: '409 Client Error: Conflict',
        501: '501 Server Error: Not Implemented',
        503: '503 Server Error: Service Unavailable',
    }

    @classmethod
    def _get_mock_request_response(cls, status_code):
        # Success codes get a no-op raise_for_status; error codes raise
        # HTTPError with the canned message above.
        response = mock.Mock(status_code=status_code)
        response.raise_for_status = mock.Mock() if status_code < 400 else (
            mock.Mock(side_effect=requests.exceptions.HTTPError(
                cls._status_code_msgs[status_code])))
        return response

    def _test_operation(self, status_code, expected_calls, *args, **kwargs):
        # Run one journal pass against a mocked HTTP session and verify
        # how many requests were issued and with what arguments.
        request_response = self._get_mock_request_response(status_code)
        with mock.patch('requests.sessions.Session.request',
                        return_value=request_response) as mock_method:
            self.run_journal_processing()

        if expected_calls:
            mock_method.assert_called_with(
                headers={'Content-Type': 'application/json'},
                timeout=cfg.CONF.ml2_odl.timeout, *args, **kwargs)
        self.assertEqual(expected_calls, mock_method.call_count)

    def _call_operation_object(self, operation, object_type):
        # Invoke the driver's precommit path for (operation, object_type),
        # which records a row in the journal DB.  SG/SG-rule objects go
        # through sync_from_callback_precommit; core resources go through
        # the <operation>_<object>_precommit methods.
        context = self._get_mock_operation_context(object_type)

        if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
            res_type = [rt for rt in callback._RESOURCE_MAPPING.values()
                        if rt.singular == object_type][0]
            res_id = context[object_type]['id']
            # Deletes carry no payload.
            context_ = (copy.deepcopy(context)
                        if operation != odl_const.ODL_DELETE else None)
            plugin_context = self.db_context

            if (object_type == odl_const.ODL_SG and
                    operation in [odl_const.ODL_CREATE,
                                  odl_const.ODL_DELETE]):
                # TODO(yamahata): remove this work around once
                # https://review.opendev.org/#/c/281693/
                # is merged.
                if operation == odl_const.ODL_CREATE:
                    # Persist a real SecurityGroup row so the precommit
                    # hook can read it back.
                    sg = securitygroup.SecurityGroup(
                        id=res_id, name=context_[object_type]['name'],
                        tenant_id=context_[object_type]['tenant_id'],
                        description=context_[object_type]['description'])
                    plugin_context.session.add(sg)
                    sg_dict = dict(sg)
                    sg_dict['security_group_rules'] = []
                    with db_api.CONTEXT_WRITER.using(plugin_context):
                        self.mech.sync_from_callback_precommit(
                            plugin_context, operation, res_type, res_id,
                            context_, security_group=sg_dict)
                if operation == odl_const.ODL_DELETE:
                    with db_api.CONTEXT_WRITER.using(plugin_context):
                        self.mech.sync_from_callback_precommit(
                            plugin_context, operation, res_type, res_id,
                            context_,
                            security_group={
                                'security_group_rules':
                                    {'id': SG_RULE_FAKE_ID}},
                            security_group_rule_ids=[SG_RULE_FAKE_ID])
            elif (object_type == odl_const.ODL_SG_RULE and
                    operation == odl_const.ODL_DELETE):
                with db_api.CONTEXT_WRITER.using(plugin_context):
                    self.mech.sync_from_callback_precommit(
                        plugin_context, operation, res_type, res_id,
                        context_, security_group_id=SG_FAKE_ID)
            else:
                with db_api.CONTEXT_WRITER.using(plugin_context):
                    self.mech.sync_from_callback_precommit(
                        plugin_context, operation, res_type, res_id,
                        context_)
        else:
            method = getattr(self.mech,
                             '%s_%s_precommit' % (operation, object_type))
            with db_api.CONTEXT_WRITER.using(context):
                method(context)

    def _test_operation_object(self, operation, object_type):
        # Record the operation and verify the resulting journal row's
        # operation/type/uuid fields.
        self._call_operation_object(operation, object_type)

        context = self._get_mock_operation_context(object_type)
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertEqual(operation, row['operation'])
        self.assertEqual(object_type, row['object_type'])
        self.assertEqual(context.current['id'], row['object_uuid'])

        self._db_cleanup()

    def _test_thread_processing(self, operation, object_type,
                                expected_calls=1):
        # Record the operation, run the journal, and verify the HTTP verb,
        # URL and payload the journal thread sends to ODL.
        http_requests = {odl_const.ODL_CREATE: 'post',
                         odl_const.ODL_UPDATE: 'put',
                         odl_const.ODL_DELETE: 'delete'}
        status_codes = {odl_const.ODL_CREATE: requests.codes.created,
                        odl_const.ODL_UPDATE: requests.codes.ok,
                        odl_const.ODL_DELETE: requests.codes.no_content}

        http_request = http_requests[operation]
        status_code = status_codes[operation]

        self._call_operation_object(operation, object_type)

        context = self._get_mock_operation_context(object_type)
        url_object_type = utils.neutronify(object_type)
        if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]:
            # Update/delete URLs address the specific object by uuid.
            if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
                uuid = context[object_type]['id']
            else:
                uuid = context.current['id']
            url = '%s/%ss/%s' % (cfg.CONF.ml2_odl.url, url_object_type, uuid)
        else:
            url = '%s/%ss' % (cfg.CONF.ml2_odl.url, url_object_type)

        if (object_type == odl_const.ODL_SG and
                operation == odl_const.ODL_CREATE):
            context = copy.deepcopy(context)

        if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]:
            kwargs = {
                'url': url,
                'data': DataMatcher(operation, object_type, context)}
        else:
            kwargs = {'url': url, 'data': None}
        self._test_operation(status_code, expected_calls, http_request,
                             **kwargs)
self._test_thread_processing(odl_const.ODL_UPDATE, object_type) rows = db.get_all_db_rows_by_state(self.db_context, odl_const.COMPLETED) self.assertEqual(2, len(rows)) # Add and process update request. Adds to database. self._test_thread_processing(odl_const.ODL_DELETE, object_type, delete_expected_calls) rows = db.get_all_db_rows_by_state(self.db_context, odl_const.COMPLETED) self.assertEqual(2 + delete_expected_calls, len(rows)) def _test_object_type_pending_network(self, object_type): # Create a network (creates db row in pending state). self._call_operation_object(odl_const.ODL_CREATE, odl_const.ODL_NETWORK) # Create object_type database row and process. This results in both # the object_type and network rows being processed. self._test_thread_processing(odl_const.ODL_CREATE, object_type, expected_calls=2) # Verify both rows are now marked as completed. rows = db.get_all_db_rows_by_state(self.db_context, odl_const.COMPLETED) self.assertEqual(2, len(rows)) def _test_object_type_processing_network(self, object_type): self._test_object_operation_pending_another_object_operation( object_type, odl_const.ODL_CREATE, odl_const.ODL_NETWORK, odl_const.ODL_CREATE) def _test_object_operation_pending_object_operation( self, object_type, operation, pending_operation): self._test_object_operation_pending_another_object_operation( object_type, operation, object_type, pending_operation) def _test_object_operation_pending_another_object_operation( self, object_type, operation, pending_type, pending_operation): # Create the object_type (creates db row in pending state). self._call_operation_object(pending_operation, pending_type) # Get pending row and mark as processing so that # this row will not be processed by journal thread. row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING) db.update_db_row_state(self.db_context, row[0], odl_const.PROCESSING) # Create the object_type database row and process. 
# Verify that object request is not processed because the # dependent object operation has not been marked as 'completed'. self._test_thread_processing(operation, object_type, expected_calls=0) # Verify that all rows are still in the database. rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PROCESSING) self.assertEqual(1, len(rows)) rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING) self.assertEqual(1, len(rows)) def _test_parent_delete_pending_child_delete(self, parent, child): self._test_object_operation_pending_another_object_operation( parent, odl_const.ODL_DELETE, child, odl_const.ODL_DELETE) def _test_cleanup_processing_rows(self, last_retried, expected_state): # Create a dummy network (creates db row in pending state). self._call_operation_object(odl_const.ODL_CREATE, odl_const.ODL_NETWORK) # Get pending row and mark as processing and update # the last_retried time row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)[0] row.last_retried = last_retried db.update_db_row_state(self.db_context, row, odl_const.PROCESSING) # Test if the cleanup marks this in the desired state # based on the last_retried timestamp cleanup.cleanup_processing_rows(self.db_context) # Verify that the Db row is in the desired state rows = db.get_all_db_rows_by_state(self.db_context, expected_state) self.assertEqual(1, len(rows)) def test_driver(self): for operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE, odl_const.ODL_DELETE]: for object_type in [odl_const.ODL_NETWORK, odl_const.ODL_SUBNET, odl_const.ODL_PORT]: self._test_operation_object(operation, object_type) def test_port_precommit_no_tenant(self): context = self._get_mock_operation_context(odl_const.ODL_PORT) context.current['tenant_id'] = '' method = getattr(self.mech, 'create_port_precommit') method(context) self.db_context.session.flush() # Verify that the Db row has a tenant rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING) self.assertEqual(1, 
len(rows)) _network = self._get_mock_network_operation_context().current self.assertEqual(_network['tenant_id'], rows[0]['data']['tenant_id']) def test_network(self): self._test_object_type(odl_const.ODL_NETWORK) def test_network_update_pending_network_create(self): self._test_object_operation_pending_object_operation( odl_const.ODL_NETWORK, odl_const.ODL_UPDATE, odl_const.ODL_CREATE) def test_network_delete_pending_network_create(self): self._test_object_operation_pending_object_operation( odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_CREATE) def test_network_delete_pending_network_update(self): self._test_object_operation_pending_object_operation( odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_UPDATE) def test_network_delete_pending_subnet_delete(self): self._test_parent_delete_pending_child_delete( odl_const.ODL_NETWORK, odl_const.ODL_SUBNET) def test_network_delete_pending_port_delete(self): self._test_parent_delete_pending_child_delete( odl_const.ODL_NETWORK, odl_const.ODL_PORT) def test_subnet(self): self._test_object_type(odl_const.ODL_SUBNET) def test_subnet_update_pending_subnet_create(self): self._test_object_operation_pending_object_operation( odl_const.ODL_SUBNET, odl_const.ODL_UPDATE, odl_const.ODL_CREATE) def test_subnet_delete_pending_subnet_create(self): self._test_object_operation_pending_object_operation( odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_CREATE) def test_subnet_delete_pending_subnet_update(self): self._test_object_operation_pending_object_operation( odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_UPDATE) def test_subnet_pending_network(self): self._test_object_type_pending_network(odl_const.ODL_SUBNET) def test_subnet_processing_network(self): self._test_object_type_processing_network(odl_const.ODL_SUBNET) def test_subnet_delete_pending_port_delete(self): self._test_parent_delete_pending_child_delete( odl_const.ODL_SUBNET, odl_const.ODL_PORT) def test_port(self): 
self._test_object_type(odl_const.ODL_PORT) def test_port_update_pending_port_create(self): self._test_object_operation_pending_object_operation( odl_const.ODL_PORT, odl_const.ODL_UPDATE, odl_const.ODL_CREATE) def test_port_delete_pending_port_create(self): self._test_object_operation_pending_object_operation( odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_CREATE) def test_port_delete_pending_port_update(self): self._test_object_operation_pending_object_operation( odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_UPDATE) def test_port_pending_network(self): self._test_object_type_pending_network(odl_const.ODL_PORT) def test_port_processing_network(self): self._test_object_type_processing_network(odl_const.ODL_PORT) def test_cleanup_processing_rows_time_not_expired(self): self._test_cleanup_processing_rows(datetime.datetime.utcnow(), odl_const.PROCESSING) def test_cleanup_processing_rows_time_expired(self): old_time = datetime.datetime.utcnow() - datetime.timedelta(hours=24) self._test_cleanup_processing_rows(old_time, odl_const.PENDING) def test_thread_call(self): """Verify that the sync thread method is called.""" with mock.patch.object( journal.OpenDaylightJournalThread, 'start') as mock_sync_thread: self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() # Create any object that would spin up the sync thread via the # decorator call_thread_on_end() used by all the event handlers. self._call_operation_object(odl_const.ODL_CREATE, odl_const.ODL_NETWORK) # Verify that the thread call was made. 
mock_sync_thread.assert_called() def test_sg(self): self._test_object_type(odl_const.ODL_SG, 2) def test_sg_rule(self): self._test_object_type(odl_const.ODL_SG_RULE) def test_sg_delete(self): with mock.patch.object(journal, 'record') as record: context = self._get_mock_operation_context(odl_const.ODL_SG) res_id = context[odl_const.ODL_SG]['id'] rule = mock.Mock() rule.id = SG_RULE_FAKE_ID rule.security_group_id = SG_FAKE_ID sg = mock.Mock() sg.id = SG_FAKE_ID sg.security_group_rules = [rule] kwargs = {'security_group': sg, 'security_group_rule_ids': [SG_RULE_FAKE_ID]} with db_api.CONTEXT_WRITER.using(self.db_context): self.mech.sync_from_callback_precommit( self.db_context, odl_const.ODL_DELETE, callback._RESOURCE_MAPPING[odl_const.ODL_SG], res_id, context, **kwargs) record.assert_has_calls( [mock.call(mock.ANY, 'security_group_rule', SG_RULE_FAKE_ID, 'delete', [SG_FAKE_ID]), mock.call(mock.ANY, 'security_group', SG_FAKE_ID, 'delete', {'description': 'test-description', 'project_id': 'test-tenant', 'security_group_rules': [], 'tenant_id': 'test-tenant', 'id': SG_FAKE_ID, 'name': 'test_sg'})]) def test_sg_rule_delete(self): with mock.patch.object(journal, 'record') as record: context = self._get_mock_operation_context(odl_const.ODL_SG_RULE) res_id = context[odl_const.ODL_SG_RULE]['id'] rule = mock.Mock() rule.id = SG_RULE_FAKE_ID rule.security_group_id = SG_FAKE_ID kwargs = {'security_group_rule_id': SG_RULE_FAKE_ID, 'security_group_id': SG_FAKE_ID} with db_api.CONTEXT_WRITER.using(self.db_context): self.mech.sync_from_callback_precommit( self.db_context, odl_const.ODL_DELETE, callback._RESOURCE_MAPPING[odl_const.ODL_SG_RULE], res_id, context, **kwargs) record.assert_has_calls( [mock.call(mock.ANY, 'security_group_rule', SG_RULE_FAKE_ID, 'delete', [SG_FAKE_ID])]) def test_subnet_allocation_pools(self): context = self._get_mock_operation_context(odl_const.ODL_SUBNET) alloc_pool = context.current['allocation_pools'] self._call_operation_object(odl_const.ODL_UPDATE, 
odl_const.ODL_SUBNET) row = db.get_oldest_pending_db_row_with_lock(self.db_context) self.assertEqual(alloc_pool, row.data['allocation_pools']) def test_sync_multiple_updates(self): # add 2 updates for i in range(2): self._call_operation_object(odl_const.ODL_UPDATE, odl_const.ODL_NETWORK) # get the last update row rows = db.get_all_db_rows(self.db_context) rows.sort(key=operator.attrgetter("seqnum")) first_row = rows[0] # change the state to processing db.update_db_row_state(self.db_context, first_row, odl_const.PROCESSING) # create 1 more operation to trigger the sync thread # verify that there are no calls to ODL controller, because the # first row was processing (exit_after_run = true) self._test_thread_processing(odl_const.ODL_UPDATE, odl_const.ODL_NETWORK, expected_calls=0) # validate that all the pending rows stays in 'pending' state # first row should be 'processing' because it was not processed processing = db.get_all_db_rows_by_state(self.db_context, 'processing') self.assertEqual(1, len(processing)) rows = db.get_all_db_rows_by_state(self.db_context, 'pending') self.assertEqual(2, len(rows)) def test_update_port_filter(self): """Validate the filter code on update port operation""" expected_items = ['fixed_ips', 'security_groups', 'device_id', 'security_groups', 'admin_state_up'] subnet = self._get_mock_operation_context(odl_const.ODL_SUBNET).current port = self._get_mock_operation_context(odl_const.ODL_PORT).current port['fixed_ips'] = [{'subnet_id': subnet['id'], 'ip_address': '10.0.0.10'}] port['mac_address'] = port['mac_address'].upper() orig_port = copy.deepcopy(port) with mock.patch.object(segments_db, 'get_network_segments'): filters.filter_for_odl(odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) for key, value in orig_port.items(): if key in expected_items: self.assertEqual(port[key], value) class _OpenDaylightDriverVlanTransparencyBase(_OpenDaylightMechanismBase): def _driver_context(self, network): return mock.MagicMock(current=network) class 
TestOpenDaylightDriverVlanTransparencyNetwork( _OpenDaylightDriverVlanTransparencyBase): def _test_network_type(self, expected, network_type): context = self._driver_context({providernet.NETWORK_TYPE: network_type}) self.assertEqual(expected, self.mech.check_vlan_transparency(context)) def test_none_network_type(self): context = self._driver_context({}) self.assertTrue(self.mech.check_vlan_transparency(context)) def test_vlan_transparency(self): for network_type in [n_constants.TYPE_VXLAN]: self._test_network_type(True, network_type) for network_type in [n_constants.TYPE_FLAT, n_constants.TYPE_GENEVE, n_constants.TYPE_GRE, n_constants.TYPE_LOCAL, n_constants.TYPE_VLAN]: self._test_network_type(False, network_type) class TestOpenDaylightDriverVlanTransparency( _OpenDaylightDriverVlanTransparencyBase): scenarios = [ ("vxlan_vxlan", {'expected': True, 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VXLAN]}), ("gre_vxlan", {'expected': False, 'network_types': [n_constants.TYPE_GRE, n_constants.TYPE_VXLAN]}), ("vxlan_vlan", {'expected': False, 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VLAN]}), ("vxlan_flat", {'expected': False, 'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_FLAT]}), ("vlan_vlan", {'expected': False, 'network_types': [n_constants.TYPE_VLAN, n_constants.TYPE_VLAN]}), ] def test_network_segments(self): segments = [{providernet.NETWORK_TYPE: type_} for type_ in self.network_types] context = self._driver_context({mpnet_apidef.SEGMENTS: segments}) self.assertEqual(self.expected, self.mech.check_vlan_transparency(context)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/test_port_binding.py0000644000175000017500000000330500000000000031624 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from networking_odl.ml2 import legacy_port_binding from networking_odl.ml2 import port_binding from networking_odl.tests import base class TestPortBindingManager(base.DietTestCase): def test_create(self): mgr = port_binding.PortBindingManager.create( name="legacy-port-binding") self.assertEqual("legacy-port-binding", mgr.name) self.assertIsInstance(mgr.controller, legacy_port_binding.LegacyPortBindingManager) def test_create_with_nonexist_name(self): self.assertRaises(AssertionError, port_binding.PortBindingManager.create, name="nonexist-port-binding") @mock.patch.object(legacy_port_binding.LegacyPortBindingManager, "bind_port") def test_bind_port(self, mock_method): port_context = mock.Mock() mgr = port_binding.PortBindingManager.create( name="legacy-port-binding") mgr.controller.bind_port(port_context) mock_method.assert_called_once_with(port_context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/ml2/test_port_status_update.py0000644000175000017500000000754000000000000033104 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
class TestOdlPortStatusUpdate(base.DietTestCase):
    # Sample ODL data-change websocket notification announcing that port
    # d6e6335d-... transitioned to ACTIVE.  All whitespace is stripped to
    # mimic the compact wire format.
    WEBSOCK_NOTIFICATION = re.sub(r'\s*', '', """
        {
            "notification": {
                "data-changed-notification": {
                    "data-change-event": {
                        "data": {
                            "status": {
                                "content": "ACTIVE",
                                "xmlns": "urn:opendaylight:neutron"
                            }
                        },
                        "operation": "updated",
                        "path": "/neutron:neutron/neutron:ports/neutron:port[
                            neutron:uuid='d6e6335d-9568-4949-aef1-4107e34c5f28']
                            /neutron:status"
                    },
                    "xmlns": "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote"
                },
                "eventTime": "2017-02-22T02:27:32+02:00",
                "xmlns": "urn:ietf:params:xml:ns:netconf:notification:1.0"
            }
        }""")

    def setUp(self):
        self.useFixture(base.OpenDaylightFeaturesFixture())
        # NOTE(review): this patcher is created but never started or
        # stopped here -- confirm whether .start() was intended.
        self.mock_ws_client = mock.patch.object(
            OpenDaylightWebsocketClient, 'odl_create_websocket')
        super(TestOdlPortStatusUpdate, self).setUp()

    def test_object_create(self):
        # Smoke test: constructing the updater must not raise.
        OdlPortStatusUpdate()

    @mock.patch.object(provisioning_blocks, 'provisioning_complete')
    def test_websock_recv(self, mocked_provisioning_complete):
        updater = OdlPortStatusUpdate()
        updater._process_websocket_recv(self.WEBSOCK_NOTIFICATION, False)
        # The ACTIVE status in the notification must complete the
        # provisioning block for the port uuid embedded in its path.
        mocked_provisioning_complete.assert_called_once()
        self.assertEqual(mocked_provisioning_complete.call_args[0][1],
                         'd6e6335d-9568-4949-aef1-4107e34c5f28')

    @mock.patch.object(provisioning_blocks, 'provisioning_complete')
    @mock.patch.object(neutron_lib.context, 'get_admin_context')
    @mock.patch.object(OpenDaylightRestClient, 'get')
    @mock.patch.object(neutron_lib.plugins.directory, 'get_plugin')
    def test_pull_missed_statuses(self, mocked_get_plugin, mocked_get,
                                  ac, pc):
        uuid = 'd6e6335d-9568-4949-aef1-4107e34c5f28'
        plugin = mock.MagicMock()
        plugin.get_ports = mock.MagicMock(return_value=[{'id': uuid}])
        mocked_get_plugin.return_value = plugin
        updater = OdlPortStatusUpdate()
        updater._pull_missed_statuses()
        # Each port reported by the plugin must be fetched from ODL by id.
        mocked_get.assert_called_with(uuid)

    @mock.patch.object(threading, 'Thread')
    def test_process_websocket_reconnect(self, mocked_thread):
        updater = OdlPortStatusUpdate()
        updater._process_websocket_reconnect(
            odl_ws_client.ODL_WEBSOCKET_CONNECTED)
        # A CONNECTED event must spawn and start a worker thread.
        mocked_thread.assert_called()
        mocked_thread.return_value.start.assert_called()
from copy import deepcopy from os import path as os_path from string import Template import fixtures import mock from oslo_serialization import jsonutils from requests.exceptions import HTTPError from neutron.db import provisioning_blocks from neutron.plugins.ml2 import driver_context as ctx from neutron.plugins.ml2 import plugin as ml2_plugin from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit import testlib_api from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib import fixture from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from oslo_config import fixture as config_fixture from networking_odl.common import odl_features from networking_odl.common import websocket_client from networking_odl.journal import periodic_task from networking_odl.ml2 import pseudo_agentdb_binding from networking_odl.tests import base AGENTDB_BINARY = 'neutron-odlagent-portbinding' L2_TYPE = "ODL L2" # test data hostconfig and hostconfig-dbget SAMPLE_ODL_HCONFIGS = {"hostconfigs": {"hostconfig": [ {"host-id": "devstack", "host-type": "ODL L2", "config": """{"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "ovs", "vif_details": {}}], "allowed_network_types": [ "local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}"""} ]}} class OpenDaylightAgentDBFixture(fixtures.Fixture): def _setUp(self): super(OpenDaylightAgentDBFixture, self)._setUp() fake_agents_db = mock.MagicMock() fake_agents_db.create_or_update_agent = mock.MagicMock() self.useFixture(fixture.PluginDirectoryFixture()) directory.add_plugin(plugin_constants.CORE, fake_agents_db) class TestPseudoAgentDBBindingTaskBase(base.DietTestCase): """Test class for 
AgentDBPortBindingTaskBase.""" def setUp(self): """Setup test.""" self.useFixture(base.OpenDaylightRestClientFixture()) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) self.useFixture(OpenDaylightAgentDBFixture()) super(TestPseudoAgentDBBindingTaskBase, self).setUp() self.worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker() self.task = pseudo_agentdb_binding.PseudoAgentDBBindingTaskBase( self.worker) def _get_raised_response(self, json_data, status_code): class MockHTTPError(HTTPError): def __init__(self, json_data, status_code): self.json_data = json_data self.status_code = status_code self.response = self class MockResponse(object): def __init__(self, json_data, status_code): self.raise_obj = MockHTTPError(json_data, status_code) def raise_for_status(self): raise self.raise_obj return MockResponse(json_data, status_code) def test_hostconfig_response_404(self): with mock.patch.object(self.task.odl_rest_client, 'get', return_value=self. _get_raised_response({}, 404)): self.assertEqual(self.task._rest_get_hostconfigs(), []) class TestPseudoAgentDBBindingPrePopulate(base.DietTestCase): KNOWN_HOST = 'known_host' AGENT_TYPE = pseudo_agentdb_binding.PseudoAgentDBBindingWorker.L2_TYPE def setUp(self): self.useFixture(base.OpenDaylightRestClientFixture()) self.useFixture(OpenDaylightAgentDBFixture()) super(TestPseudoAgentDBBindingPrePopulate, self).setUp() self.useFixture(fixture.CallbackRegistryFixture()) self.ml2_plugin = mock.Mock() self.ml2_plugin.get_agents = mock.Mock(return_value=[]) self.worker = mock.Mock() self.worker.known_agent = mock.Mock(return_value=False) self.worker.add_known_agent = mock.Mock() self.worker.update_agetns_db_row = mock.Mock() self.prepopulate = (pseudo_agentdb_binding. 
PseudoAgentDBBindingPrePopulate(self.worker)) def _call_before_port_binding(self, host): kwargs = { 'context': mock.Mock(), 'port': { portbindings.HOST_ID: host } } registry.notify(resources.PORT, events.BEFORE_CREATE, self.ml2_plugin, **kwargs) def test_unspecified(self): self._call_before_port_binding(n_const.ATTR_NOT_SPECIFIED) self.worker.known_agent.assert_not_called() def test_empty_host(self): self._call_before_port_binding('') self.worker.known_agent.assert_not_called() def test_known_agent(self): self.worker.known_agent = mock.Mock(return_value=True) self._call_before_port_binding(self.KNOWN_HOST) self.worker.known_agent.assert_called() self.ml2_plugin.get_agents.assert_not_called() def test_agentdb_alive(self): self.ml2_plugin.get_agents = mock.Mock(return_value=[ {'host': self.KNOWN_HOST, 'agent_type': self.AGENT_TYPE, 'alive': True}]) self._call_before_port_binding(self.KNOWN_HOST) self.worker.known_agent.assert_called() self.ml2_plugin.get_agents.assert_called() self.worker.add_known_agents.assert_called_with([ {'host': self.KNOWN_HOST, 'agent_type': self.AGENT_TYPE, 'alive': True}]) self.worker.update_agents_db_row.assert_not_called() def test_agentdb_dead(self): self.ml2_plugin.get_agents = mock.Mock(return_value=[ {'host': self.KNOWN_HOST, 'agent_type': self.AGENT_TYPE, 'alive': False}]) self._call_before_port_binding(self.KNOWN_HOST) self.worker.known_agent.assert_called() self.ml2_plugin.get_agents.assert_called() self.worker.add_known_agents.assert_not_called() def test_unkown_hostconfig(self): with mock.patch.object(self.prepopulate, 'odl_rest_client') as mock_rest_client: mock_response = mock.Mock() mock_response.json = mock.Mock( return_value=SAMPLE_ODL_HCONFIGS['hostconfigs']) mock_rest_client.get = mock.Mock(return_value=mock_response) self._call_before_port_binding(self.KNOWN_HOST) self.worker.known_agent.assert_called() self.ml2_plugin.get_agents.assert_called() self.worker.add_known_agent.assert_not_called() 
self.worker.update_agents_db_row.assert_called_once() def test_http_error(self): with mock.patch.object(self.prepopulate, 'odl_rest_client') as mock_rest_client: mock_rest_client.get = mock.Mock(side_effect=Exception('error')) self._call_before_port_binding(self.KNOWN_HOST) self.worker.known_agent.assert_called() self.ml2_plugin.get_agents.assert_called() self.worker.add_known_agent.assert_not_called() self.worker.update_agents_db_row.assert_not_called() class TestPseudoAgentDBBindingWorker(base.DietTestCase): """Test class for AgentDBPortBinding.""" def setUp(self): """Setup test.""" self.useFixture(base.OpenDaylightRestClientFixture()) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) self.useFixture(OpenDaylightAgentDBFixture()) super(TestPseudoAgentDBBindingWorker, self).setUp() self.worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker() def test_update_agents_db(self): """test agent update.""" self.worker.update_agents_db( hostconfigs=SAMPLE_ODL_HCONFIGS['hostconfigs']['hostconfig']) self.worker.agents_db.create_or_update_agent.assert_called_once() class TestPseudoAgentDBBindingController(base.DietTestCase): """Test class for AgentDBPortBinding.""" # Test data for string interpolation of substitutable identifers # e.g. $PORT_ID identifier in the configurations JSON string below shall # be substituted with portcontext.current['id'] eliminating the check # for specific vif_type making port-binding truly switch agnostic. 
# Refer: Python string templates and interpolation (string.Template) sample_hconf_str_tmpl_subs_vpp = { "host": "devstack", # host-id in ODL JSON "agent_type": "ODL L2", # host-type in ODL JSON # config in ODL JSON "alive": True, "configurations": {"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "vhostuser", "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "socket_", "vhostuser_socket_dir": "/tmp", "vhostuser_ovs_plug": True, "vhostuser_mode": "server", "vhostuser_socket": "/tmp/socket_$PORT_ID" }}], "allowed_network_types": [ "local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}} } sample_hconf_str_tmpl_subs_ovs = { "host": "devstack", # host-id in ODL JSON "agent_type": "ODL L2", # host-type in ODL JSON # config in ODL JSON "alive": True, "configurations": {"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "vhostuser", "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "vhu", "vhostuser_socket_dir": "/var/run/openvswitch", "vhostuser_ovs_plug": True, "vhostuser_mode": "client", "vhostuser_socket": "/var/run/openvswitch/vhu$PORT_ID" }}], "allowed_network_types": [ "local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}} } sample_hconf_str_tmpl_nosubs = { "host": "devstack", # host-id in ODL JSON "agent_type": "ODL L2", # host-type in ODL JSON # config in ODL JSON "configurations": {"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "ovs", "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "socket_", "vhostuser_socket_dir": "/tmp", "vhostuser_ovs_plug": True, "vhostuser_mode": "server", "vhostuser_socket": "/var/run/openvswitch/PORT_NOSUBS" }}], "allowed_network_types": [ "local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}} } # Test data for vanilla OVS 
sample_hconfig_dbget_ovs = {"configurations": {"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_OVS, "vif_details": { "some_test_details": None }}], "allowed_network_types": ["local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}} # Test data for vanilla OVS with SR-IOV offload sample_hconfig_dbget_ovs_sriov_offload = {"configurations": { "supported_vnic_types": [{ "vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_OVS, "vif_details": { "some_test_details": None}}, { "vnic_type": "direct", "vif_type": portbindings.VIF_TYPE_OVS, "vif_details": { "some_test_details": None }}, ], "allowed_network_types": ["local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}} # Test data for OVS-DPDK sample_hconfig_dbget_ovs_dpdk = {"configurations": { "supported_vnic_types": [{ "vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_VHOST_USER, "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "vhu", # Assumption: /var/run mounted as tmpfs "vhostuser_socket_dir": "/var/run/openvswitch", "vhostuser_ovs_plug": True, "vhostuser_mode": "client", "vhostuser_socket": "/var/run/openvswitch/vhu$PORT_ID"}}], "allowed_network_types": ["local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}} # Test data for VPP sample_hconfig_dbget_vpp = {"configurations": {"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_VHOST_USER, "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "socket_", "vhostuser_socket_dir": "/tmp", "vhostuser_ovs_plug": True, "vhostuser_mode": "server", "vhostuser_socket": "/tmp/socket_$PORT_ID" }}], "allowed_network_types": ["local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}} # Test data for length of string sample_odl_hconfigs_length = { "host": "devstack", # host-id in ODL JSON "agent_type": 
"ODL L2", # host-type in ODL JSON # config in ODL JSON "configurations": {"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "vhostuser", "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": True, "support_vhost_user": True, "port_prefix": "longprefix_", "vhostuser_socket_dir": "/tmp", "vhostuser_ovs_plug": True, "vhostuser_mode": "server", "vhostuser_socket": "/tmp/longprefix_$PORT_ID" }}], "allowed_network_types": [ "local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}} } # Raw test data for unicode/string comparison sample_odl_hconfigs_length_raw = { "host": "devstack", "agent_type": "ODL L2", "configurations": """{"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "vhostuser", "vif_details": { "uuid": "TEST_UUID", "has_datapath_type_netdev": true, "support_vhost_user": true, "port_prefix": "prefix_", "vhostuser_socket_dir": "/tmp", "vhostuser_ovs_plug": true, "vhostuser_mode": "server", "vhostuser_socket": "/tmp/prefix_$PORT_ID" }}], "allowed_network_types": [ "local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}""" } # test data valid and invalid segments test_valid_segment = { api.ID: 'API_ID', api.NETWORK_TYPE: n_const.TYPE_LOCAL, api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} test_invalid_segment = { api.ID: 'API_ID', api.NETWORK_TYPE: n_const.TYPE_NONE, api.SEGMENTATION_ID: 'API_SEGMENTATION_ID', api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'} def setUp(self): """Setup test.""" self.useFixture(base.OpenDaylightRestClientFixture()) self.useFixture(base.OpenDaylightFeaturesFixture()) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) self.useFixture(OpenDaylightAgentDBFixture()) super(TestPseudoAgentDBBindingController, self).setUp() self.useFixture(fixture.CallbackRegistryFixture()) self.cfg = self.useFixture(config_fixture.Config()) self.mgr = pseudo_agentdb_binding.PseudoAgentDBBindingController() def 
test_is_valid_segment(self): """Validate the _check_segment method.""" all_network_types = [n_const.TYPE_FLAT, n_const.TYPE_GRE, n_const.TYPE_LOCAL, n_const.TYPE_VXLAN, n_const.TYPE_VLAN, n_const.TYPE_NONE] valid_types = { network_type for network_type in all_network_types if self.mgr._is_valid_segment( {api.NETWORK_TYPE: network_type}, {'allowed_network_types': [ n_const.TYPE_LOCAL, n_const.TYPE_GRE, n_const.TYPE_VXLAN, n_const.TYPE_VLAN]})} self.assertEqual({ n_const.TYPE_LOCAL, n_const.TYPE_GRE, n_const.TYPE_VXLAN, n_const.TYPE_VLAN}, valid_types) def test_bind_port_with_vif_type_ovs(self): """test bind_port with vanilla ovs.""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment]) vif_type = portbindings.VIF_TYPE_OVS vif_details = {'some_test_details': None} self.mgr._hconfig_bind_port( port_context, self.sample_hconfig_dbget_ovs) port_context.set_binding.assert_called_once_with( self.test_valid_segment[api.ID], vif_type, vif_details, status=n_const.PORT_STATUS_ACTIVE) def test_bind_port_with_vif_type_ovs_with_sriov_offload(self): """test bind_port with vanilla ovs with SR-IOV offload""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment]) vif_type = portbindings.VIF_TYPE_OVS vif_details = {'some_test_details': None} self.mgr._hconfig_bind_port( port_context, self.sample_hconfig_dbget_ovs_sriov_offload) port_context.set_binding.assert_called_once_with( self.test_valid_segment[api.ID], vif_type, vif_details, status=n_const.PORT_STATUS_ACTIVE) def _set_pass_vif_details(self, port_context, vif_details): """extract vif_details and update vif_details if needed.""" vhostuser_socket_dir = vif_details.get( 'vhostuser_socket_dir', '/var/run/openvswitch') port_spec = vif_details.get( 'port_prefix', 'vhu') + port_context.current['id'] socket_path = os_path.join(vhostuser_socket_dir, port_spec) vif_details.update({portbindings.VHOST_USER_SOCKET: socket_path}) return 
vif_details def test_bind_port_with_vif_type_vhost_user(self): """test bind_port with ovs-dpdk.""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment], host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_ovs)]) self.mgr.bind_port(port_context) pass_vif_type = portbindings.VIF_TYPE_VHOST_USER pass_vif_details = self.sample_hconfig_dbget_ovs_dpdk[ 'configurations']['supported_vnic_types'][0]['vif_details'] self._set_pass_vif_details(port_context, pass_vif_details) port_context.set_binding.assert_called_once_with( self.test_valid_segment[api.ID], pass_vif_type, pass_vif_details, status=n_const.PORT_STATUS_ACTIVE) def _test_bind_port_succeed_when_agent_status(self, hconfig, agent_status): hconfig['alive'] = agent_status port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment], host_agents=[hconfig]) self.mgr.bind_port(port_context) port_context.set_binding.assert_called() def test_bind_port_succeed_when_agent_dead_vpp(self): hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_vpp) self._test_bind_port_succeed_when_agent_status(hconfig, False) def test_bind_port_succeed_when_agent_dead_ovs(self): hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_ovs) self._test_bind_port_succeed_when_agent_status(hconfig, False) def test_bind_port_succeed_when_agent_alive_vpp(self): hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_vpp) self._test_bind_port_succeed_when_agent_status(hconfig, True) def test_bind_port_succeed_when_agent_alive_ovs(self): hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_ovs) self._test_bind_port_succeed_when_agent_status(hconfig, True) def test_bind_port_with_vif_type_vhost_user_vpp(self): """test bind_port with vpp.""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment], host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_vpp)]) self.mgr.bind_port(port_context) pass_vif_type = 
portbindings.VIF_TYPE_VHOST_USER pass_vif_details = self.sample_hconfig_dbget_vpp['configurations'][ 'supported_vnic_types'][0]['vif_details'] self._set_pass_vif_details(port_context, pass_vif_details) port_context.set_binding.assert_called_once_with( self.test_valid_segment[api.ID], pass_vif_type, pass_vif_details, status=n_const.PORT_STATUS_ACTIVE) def test_bind_port_without_valid_segment(self): """test bind_port without a valid segment.""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment]) self.mgr._hconfig_bind_port( port_context, self.sample_hconfig_dbget_ovs) port_context.set_binding.assert_not_called() def test_no_str_template_substitution_in_configuration_string(self): """Test for no identifier substituion in config JSON string.""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment]) hconf_dict = self.mgr._substitute_hconfig_tmpl( port_context, self.sample_hconf_str_tmpl_nosubs) test_string = hconf_dict['configurations'][ 'supported_vnic_types'][0][ 'vif_details'][portbindings.VHOST_USER_SOCKET] expected_str = '/var/run/openvswitch/PORT_NOSUBS' self.assertEqual(expected_str, test_string) def test_str_template_substitution_in_configuration_string(self): """Test for identifier substitution in config JSON string.""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment]) hconf_dict = self.mgr._substitute_hconfig_tmpl( port_context, self.sample_hconf_str_tmpl_subs_vpp) test_string = hconf_dict['configurations'][ 'supported_vnic_types'][0][ 'vif_details'][portbindings.VHOST_USER_SOCKET] expected_str = Template('/tmp/socket_$PORT_ID') expected_str = expected_str.safe_substitute({ 'PORT_ID': port_context.current['id']}) self.assertEqual(expected_str, test_string) def test_str_template_substitution_length_in_configuration_string(self): """Test for identifier substitution in config JSON string.""" port_context = 
self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment]) hconf_dict = self.mgr._substitute_hconfig_tmpl( port_context, self.sample_odl_hconfigs_length) test_string = hconf_dict['configurations'][ 'supported_vnic_types'][0][ 'vif_details'][portbindings.VHOST_USER_SOCKET] expected_str = Template('/tmp/longprefix_$PORT_ID') expected_str = expected_str.safe_substitute({ 'PORT_ID': port_context.current['id']}) self.assertNotEqual(expected_str, test_string) self.assertEqual(len(test_string) - len('/tmp/'), 14) def test_template_substitution_in_raw_configuration(self): """Test for identifier substitution in config string.""" port_context = self._fake_port_context( fake_segments=[self.test_invalid_segment, self.test_valid_segment]) # Substitute raw string configuration with json raw_configurations = self.sample_odl_hconfigs_length_raw[ 'configurations'] raw_configurations_json = jsonutils.loads(raw_configurations) self.sample_odl_hconfigs_length_raw['configurations'] = ( raw_configurations_json) hconf_dict = self.mgr._substitute_hconfig_tmpl( port_context, self.sample_odl_hconfigs_length_raw) test_string = hconf_dict['configurations'][ 'supported_vnic_types'][0][ 'vif_details'][portbindings.VHOST_USER_SOCKET] expected_str = Template('/tmp/prefix_$PORT_ID') expected_str = expected_str.safe_substitute({ 'PORT_ID': port_context.current['id']}) self.assertEqual(expected_str, test_string) def _fake_port_context(self, fake_segments, host_agents=None): network = mock.MagicMock(spec=api.NetworkContext) return mock.MagicMock( spec=ctx.PortContext, current={'id': 'PORTID', portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL}, segments_to_bind=fake_segments, network=network, host_agents=lambda agent_type: host_agents, _plugin_context=mock.MagicMock() ) @mock.patch.object(provisioning_blocks, 'add_provisioning_component') def test_prepare_inital_port_status_no_websocket( self, mocked_add_provisioning_component): port_ctx = self._fake_port_context( 
fake_segments=[self.test_valid_segment]) initial_port_status = self.mgr._prepare_initial_port_status(port_ctx) self.assertEqual(initial_port_status, n_const.PORT_STATUS_ACTIVE) mocked_add_provisioning_component.assert_not_called() @mock.patch.object(provisioning_blocks, 'add_provisioning_component') def test_prepare_inital_port_status_with_websocket( self, mocked_add_provisioning_component): feature_json = """{"features": {"feature": [{"service-provider-feature": "neutron-extensions:operational-port-status"}]}}""" self.cfg.config(odl_features_json=feature_json, group='ml2_odl') self.addCleanup(odl_features.deinit) odl_features.init() port_ctx = self._fake_port_context( fake_segments=[self.test_valid_segment]) initial_port_status = self.mgr._prepare_initial_port_status(port_ctx) self.assertEqual(initial_port_status, n_const.PORT_STATUS_DOWN) mocked_add_provisioning_component.assert_called() class TestPseudoAgentDBBindingControllerBug1608659( test_plugin.NeutronDbPluginV2TestCase): """Test class for Bug1608659.""" # test data hostconfig sample_odl_hconfigs = {"hostconfigs": {"hostconfig": [ {"host-id": "devstack-control", "host-type": "ODL L2", "config": """{"supported_vnic_types": [ {"vnic_type": "normal", "vif_type": "vhostuser", "vif_details": {"port_filter": "False", "vhostuser_socket": "/var/run/openvswitch"}}], "allowed_network_types": [ "local", "vlan", "vxlan", "gre"], "bridge_mappings": {"physnet1": "br-ex"}}"""}, {"host-id": "devstack-control", "host-type": "ODL L3", "config": """{ "some_details": "dummy_details" }"""} ]}} def setUp(self): self.useFixture(base.OpenDaylightRestClientFixture()) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) self.useFixture(OpenDaylightAgentDBFixture()) super(TestPseudoAgentDBBindingControllerBug1608659, self).setUp( plugin='ml2') self.worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker() def test_execute_no_exception(self): with mock.patch.object(pseudo_agentdb_binding, 'LOG') as mock_log: 
self.worker.update_agents_db( self.sample_odl_hconfigs['hostconfigs']['hostconfig']) # Assert no exception happened self.assertFalse(mock_log.exception.called) class TestPseudoAgentNeutronWorker(testlib_api.SqlTestCase): def setUp(self): self.useFixture(base.OpenDaylightRestClientFixture()) self.useFixture(base.OpenDaylightJournalThreadFixture()) self.useFixture(base.OpenDaylightFeaturesFixture()) self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture()) self.cfg = self.useFixture(config_fixture.Config()) self.mock_periodic_thread = mock.patch.object( periodic_task.PeriodicTask, 'start').start() super(TestPseudoAgentNeutronWorker, self).setUp() self.cfg.config(mechanism_drivers=['opendaylight_v2'], group='ml2') self.cfg.config( port_binding_controller='pseudo-agentdb-binding', group='ml2_odl') def test_get_worker(self): workers = ml2_plugin.Ml2Plugin().get_workers() self.assertTrue(any( isinstance(worker, pseudo_agentdb_binding.PseudoAgentDBBindingWorker) for worker in workers)) def test_worker(self): worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker() worker.wait() worker.stop() worker.reset() def test_worker_start_websocket(self): self.cfg.config(enable_websocket_pseudo_agentdb=True, group='ml2_odl') worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker() with mock.patch.object( websocket_client.OpenDaylightWebsocketClient, 'odl_create_websocket') as mock_odl_create_websocket: worker.start() mock_odl_create_websocket.assert_called_once() def test_worker_start_periodic(self): self.cfg.config(enable_websocket_pseudo_agentdb=False, group='ml2_odl') worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker() with mock.patch.object( periodic_task.PeriodicTask, 'start') as mock_start: worker.start() mock_start.assert_called_once() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 
networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/qos/0000755000175000017500000000000000000000000025644 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/qos/__init__.py0000644000175000017500000000000000000000000027743 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/qos/test_qos_driver_v2.py0000644000175000017500000001003200000000000032035 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron_lib.db import api as db_api from networking_odl.common import constants as odl_const from networking_odl.common import odl_features from networking_odl.db import db from networking_odl.qos import qos_driver_v2 as qos_driver from networking_odl.tests import base from networking_odl.tests.unit import base_v2 class OpenDaylightQosDriverTestCase(base_v2.OpenDaylightConfigBase): def setUp(self): self.useFixture(base.OpenDaylightJournalThreadFixture()) super(OpenDaylightQosDriverTestCase, self).setUp() self.qos_driver = qos_driver.OpenDaylightQosDriver({}) self.addCleanup(odl_features.deinit) def test_qos_supported_rules_are_fetched_from_odl_feature(self): feature_json = """{"features": {"feature": [{"service-provider-feature": "neutron-extensions:operational-port-status"}, {"service-provider-feature": "neutron-extensions:qos-rules", "configuration": {"key": "value"}}]}}""" self.cfg.config(odl_features_json=feature_json, group='ml2_odl') odl_features.init() qos_driver_object = qos_driver.OpenDaylightQosDriver.create() self.assertDictEqual(qos_driver_object.supported_rules, {'key': 'value'}) def test_default_values_for_supported_rules(self): self.cfg.config(odl_features='key', group='ml2_odl') odl_features.init() qos_driver_object = qos_driver.OpenDaylightQosDriver.create() self.assertDictEqual(qos_driver_object.supported_rules, qos_driver.DEFAULT_QOS_RULES) def _get_mock_context(self, session=None): current = {'tenant_id': 'tenant_id'} context = mock.Mock(current=current) context.session = session return context def _get_mock_qos_operation_data(self): data = {'description': u"qos_policy", 'rules': [], 'tenant_id': 'test-tenant', 'shared': False, 'id': 'qos-policy1', 'name': u"policy1"} qos_data = mock.Mock() to_dict = mock.Mock(return_value=data) qos_data.to_dict = to_dict return qos_data def _call_operation_object(self, operation, object_type): qos_data = self._get_mock_qos_operation_data() method = getattr(self.qos_driver, '%s_%s' % (operation, 
object_type)) assert object_type.endswith("precommit") with db_api.CONTEXT_WRITER.using(self.db_context): context = self._get_mock_context(self.db_context.session) method(context, qos_data) def _test_qos_policy(self, operation): self._call_operation_object(operation=operation, object_type='policy_precommit') qos_data = self._get_mock_qos_operation_data() row = db.get_oldest_pending_db_row_with_lock(self.db_context) self.assertEqual(operation, row['operation']) self.assertEqual(qos_data.to_dict()['id'], row['object_uuid']) def test_qos_policy_create(self): self._test_qos_policy(odl_const.ODL_CREATE) def test_qos_policy_update(self): self._test_qos_policy(odl_const.ODL_UPDATE) def test_qos_policy_delete(self): self._test_qos_policy(odl_const.ODL_DELETE) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/0000755000175000017500000000000000000000000025615 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/__init__.py0000644000175000017500000000000000000000000027714 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/constants.py0000644000175000017500000000507700000000000030214 0ustar00jamespagejamespage00000000000000# Copyright (c) 2016 Brocade Communication Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. CLASSIFIERS_BASE_URI = 'sfc/flowclassifiers' FAKE_FLOW_CLASSIFIER_ID = "4a334cd4-fe9c-4fae-af4b-321c5e2eb051" FAKE_FLOW_CLASSIFIER = { "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "name": "FC1", "tenant_id": "1814726e2d22407b8ca76db5e567dcf1", "description": "Flow rule for classifying TCP traffic", "protocol": "TCP", "source_port_range_min": 22, "source_port_range_max": 4000, "destination_port_range_min": 80, "destination_port_range_max": 80, "source_ip_prefix": "22.12.34.44", "destination_ip_prefix": "22.12.34.45" } PORT_PAIRS_BASE_URI = 'sfc/portpairs' FAKE_PORT_PAIR_ID = "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae" FAKE_PORT_PAIR = { "name": "SF1", "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Firewall SF instance", "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1", "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345" } PORT_PAIR_GROUPS_BASE_URI = 'sfc/portpairgroups' FAKE_PORT_PAIR_GROUP_ID = "4512d643-24fc-4fae-af4b-321c5e2eb3d1" FAKE_PORT_PAIR_GROUP = { "name": "Firewall_PortPairGroup", "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Grouping Firewall SF instances", "port_pairs": ["78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae"] } PORT_CHAINS_BASE_URI = 'sfc/portchains' FAKE_PORT_CHAIN_ID = "1278dcd4-459f-62ed-754b-87fc5e4a6751" FAKE_PORT_CHAIN = { "name": "PC2", "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751", "tenant_id": "d382007aa9904763a801f68ecf065cf5", "description": "Steering TCP and UDP traffic first to Firewall " "and then to Loadbalancer", 
"flow_classifiers": ["4a334cd4-fe9c-4fae-af4b-321c5e2eb051", "105a4b0a-73d6-11e5-b392-2c27d72acb4c"], "port_pair_groups": ["4512d643-24fc-4fae-af4b-321c5e2eb3d1", "4a634d49-76dc-4fae-af4b-321c5e23d651"] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/flowclassifier/0000755000175000017500000000000000000000000030631 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/flowclassifier/__init__.py0000644000175000017500000000000000000000000032730 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_0000644000175000017500000000634400000000000035630 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Brocade Communication Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from mock import patch from neutron_lib.db import api as db_api from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v2 as sfc_fc from networking_odl.tests import base as odl_base from networking_odl.tests.unit import base_v2 from networking_odl.tests.unit.sfc import constants as sfc_const class TestOpenDaylightSFCFlowClassifierDriverV2( base_v2.OpenDaylightConfigBase): def setUp(self): self.useFixture(odl_base.OpenDaylightRestClientFixture()) super(TestOpenDaylightSFCFlowClassifierDriverV2, self).setUp() self.handler = sfc_fc.OpenDaylightSFCFlowClassifierDriverV2() self.handler.initialize() def _get_mock_context(self): mocked_fc_context = patch( 'networking_sfc.services.flowclassifier.common.context' '.FlowClassifierContext').start().return_value mocked_fc_context.current = sfc_const.FAKE_FLOW_CLASSIFIER mocked_fc_context.session = self.db_context.session mocked_fc_context._plugin_context = mocked_fc_context return mocked_fc_context def _call_operation_object(self, operation, timing): method = getattr(self.handler, '%s_flow_classifier_%s' % (operation, timing)) method(self._get_mock_context()) def _test_event(self, operation, timing): with db_api.CONTEXT_WRITER.using(self.db_context): self._call_operation_object(operation, timing) if timing == 'precommit': self.db_context.session.flush() row = db.get_oldest_pending_db_row_with_lock(self.db_context) if timing == 'precommit': self.assertEqual(operation, row['operation']) self.assertEqual( odl_const.ODL_SFC_FLOW_CLASSIFIER, row['object_type']) elif timing == 'after': self.assertIsNone(row) # TODO(yamahata): utilize test scenarios def test_create_flow_classifier_precommit(self): self._test_event("create", "precommit") def test_create_flow_classifier_postcommit(self): self._test_event("create", "postcommit") def test_update_flow_classifier_precommit(self): self._test_event("update", "precommit") def 
test_update_flow_classifier_postcommit(self): self._test_event("update", "postcommit") def test_delete_flow_classifier_precommit(self): self._test_event("delete", "precommit") def test_delete_flow_classifier_postcommit(self): self._test_event("delete", "postcommit") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/sfc/test_sfc_driver_v2.py0000644000175000017500000001523000000000000031764 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Brocade Communication Systems # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from mock import patch from neutron_lib.db import api as db_api from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.sfc import sfc_driver_v2 as sfc from networking_odl.tests import base as odl_base from networking_odl.tests.unit import base_v2 from networking_odl.tests.unit.sfc import constants as sfc_const class TestOpenDaylightSFCDriverV2(base_v2.OpenDaylightConfigBase): def setUp(self): self.useFixture(odl_base.OpenDaylightRestClientFixture()) super(TestOpenDaylightSFCDriverV2, self).setUp() self.handler = sfc.OpenDaylightSFCDriverV2() self.handler.initialize() def _get_mock_portpair_operation_context(self): mocked_fc_context = patch( 'networking_sfc.services.sfc.common.context.PortPairContext' ).start().return_value mocked_fc_context.current = sfc_const.FAKE_PORT_PAIR mocked_fc_context.session = self.db_context.session mocked_fc_context._plugin_context = mocked_fc_context return mocked_fc_context def _get_mock_portpairgroup_operation_context(self): mocked_fc_context = patch( 'networking_sfc.services.sfc.common.context.PortPairGroupContext' ).start().return_value mocked_fc_context.current = sfc_const.FAKE_PORT_PAIR_GROUP mocked_fc_context.session = self.db_context.session mocked_fc_context._plugin_context = mocked_fc_context return mocked_fc_context def _get_mock_portchain_operation_context(self): mocked_fc_context = patch( 'networking_sfc.services.sfc.common.context.PortChainContext' ).start().return_value mocked_fc_context.current = sfc_const.FAKE_PORT_CHAIN mocked_fc_context.session = self.db_context.session mocked_fc_context._plugin_context = mocked_fc_context return mocked_fc_context def _get_mock_operation_context(self, object_type): getter = getattr(self, '_get_mock_%s_operation_context' % object_type) return getter() def _call_operation_object(self, operation, timing, resource_str, context): method = getattr(self.handler, '%s_%s_%s' % (operation, resource_str, timing)) method(context) def 
_test_event(self, operation, timing, resource_str, object_type): with db_api.CONTEXT_WRITER.using(self.db_context): context = self._get_mock_operation_context(object_type) self._call_operation_object(operation, timing, resource_str, context) if timing == 'precommit': self.db_context.session.flush() row = db.get_oldest_pending_db_row_with_lock(self.db_context) if timing == 'precommit': self.assertEqual(operation, row['operation']) self.assertEqual(object_type, row['object_type']) elif timing == 'after': self.assertIsNone(row) # TODO(yamahata): utilize test scenarios def test_create_port_pair_precommit(self): self._test_event("create", "precommit", "port_pair", odl_const.ODL_SFC_PORT_PAIR) def test_create_port_pair_postcommit(self): self._test_event("create", "postcommit", "port_pair", odl_const.ODL_SFC_PORT_PAIR) def test_update_port_pair_precommit(self): self._test_event("update", "precommit", "port_pair", odl_const.ODL_SFC_PORT_PAIR) def test_update_port_pair_postcommit(self): self._test_event("update", "postcommit", "port_pair", odl_const.ODL_SFC_PORT_PAIR) def test_delete_port_pair_precommit(self): self._test_event("delete", "precommit", "port_pair", odl_const.ODL_SFC_PORT_PAIR) def test_delete_port_pair_postcommit(self): self._test_event("delete", "postcommit", "port_pair", odl_const.ODL_SFC_PORT_PAIR) def test_create_port_pair_group_precommit(self): self._test_event("create", "precommit", "port_pair_group", odl_const.ODL_SFC_PORT_PAIR_GROUP) def test_create_port_pair_group_postcommit(self): self._test_event("create", "postcommit", "port_pair_group", odl_const.ODL_SFC_PORT_PAIR_GROUP) def test_update_port_pair_group_precommit(self): self._test_event("update", "precommit", "port_pair_group", odl_const.ODL_SFC_PORT_PAIR_GROUP) def test_update_port_pair_group_postcommit(self): self._test_event("update", "postcommit", "port_pair_group", odl_const.ODL_SFC_PORT_PAIR_GROUP) def test_delete_port_pair_group_precommit(self): self._test_event("delete", "precommit", 
"port_pair_group", odl_const.ODL_SFC_PORT_PAIR_GROUP) def test_delete_port_pair_group_postcommit(self): self._test_event("delete", "postcommit", "port_pair_group", odl_const.ODL_SFC_PORT_PAIR_GROUP) def test_create_port_chain_precommit(self): self._test_event("create", "precommit", "port_chain", odl_const.ODL_SFC_PORT_CHAIN) def test_create_port_chain_postcommit(self): self._test_event("create", "postcommit", "port_chain", odl_const.ODL_SFC_PORT_CHAIN) def test_update_port_chain_precommit(self): self._test_event("update", "precommit", "port_chain", odl_const.ODL_SFC_PORT_CHAIN) def test_update_port_chain_postcommit(self): self._test_event("update", "postcommit", "port_chain", odl_const.ODL_SFC_PORT_CHAIN) def test_delete_port_chain_precommit(self): self._test_event("delete", "precommit", "port_chain", odl_const.ODL_SFC_PORT_CHAIN) def test_delete_port_chain_postcommit(self): self._test_event("delete", "postcommit", "port_chain", odl_const.ODL_SFC_PORT_CHAIN) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/test_base_db.py0000644000175000017500000001647300000000000030045 0ustar00jamespagejamespage00000000000000# Copyright 2016 Intel Corporation. # Copyright 2016 Isaku Yamahata # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging import mock from pecan import util as p_util from neutron.tests.unit.testlib_api import SqlTestCaseLight from neutron_lib import context as neutron_context from neutron_lib.db import api as db_api from neutron_lib import fixture as lib_fixtures from oslo_config import fixture as config_fixture from oslo_db import exception as db_exc import sqlalchemy from sqlalchemy.orm import exc from networking_odl.common import constants from networking_odl.db import models RETRIABLE_EXCEPTIONS = (db_exc.DBDeadlock, exc.StaleDataError, db_exc.DBConnectionError, db_exc.DBDuplicateEntry, db_exc.RetryRequest) RETRY_INTERVAL = 0.001 RETRY_MAX = 2 class _InnerException(Exception): pass class ODLBaseDbTestCase(SqlTestCaseLight): UPDATE_ROW = [constants.ODL_NETWORK, 'id', constants.ODL_UPDATE, {'test': 'data'}] def setUp(self): super(ODLBaseDbTestCase, self).setUp() self.db_context = neutron_context.get_admin_context() self.cfg = self.useFixture(config_fixture.Config()) self.cfg.config(completed_rows_retention=-1, group='ml2_odl') self._setup_retry_tracker_table() def _setup_retry_tracker_table(self): metadata = sqlalchemy.MetaData() self.retry_table = sqlalchemy.Table( 'retry_tracker', metadata, sqlalchemy.Column( 'id', sqlalchemy.Integer, autoincrement=True, primary_key=True, ), ) metadata.create_all(self.engine) self.addCleanup(metadata.drop_all, self.engine) class RetryTracker(object): pass sqlalchemy.orm.mapper(RetryTracker, self.retry_table) self.retry_tracker = RetryTracker def _db_cleanup(self): self.db_context.session.query(models.OpenDaylightJournal).delete() self.db_context.session.query(models.OpenDaylightPeriodicTask).delete() row0 = models.OpenDaylightPeriodicTask( task='maintenance', state=constants.PENDING) row1 = models.OpenDaylightPeriodicTask( task='hostconfig', state=constants.PENDING) self.db_context.session.merge(row0) self.db_context.session.merge(row1) self.db_context.session.flush() def _test_db_exceptions_handled(self, method, mock_object, 
expect_retries): # NOTE(mpeterson): make retries faster so it doesn't take a lot. retry_fixture = lib_fixtures.DBRetryErrorsFixture( max_retries=RETRY_MAX, retry_interval=RETRY_INTERVAL) retry_fixture.setUp() # NOTE(mpeterson): this test is very verbose, disabling logging logging.disable(logging.CRITICAL) self.addCleanup(logging.disable, logging.NOTSET) exceptions = RETRIABLE_EXCEPTIONS r_method = getattr(method, '__wrapped__', method) r_method_args = p_util.getargspec(r_method).args args_number = len(r_method_args) - (2 if r_method_args[0] == 'self' else 1) mock_arg = mock.MagicMock(unsafe=True) # NOTE(mpeterson): workarounds for py3 compatibility and behavior # expected by particular functions mock_arg.__name__ = 'mock_arg' mock_arg.retry_count = 1 mock_arg.__ge__.return_value = True mock_arg.__gt__.return_value = True mock_arg.__le__.return_value = True mock_arg.__lt__.return_value = True args = (mock_arg,) * args_number def _assertRaises(exceptions, method, context, *args, **kwargs): try: method(context, *args, **kwargs) except Exception as e: if not isinstance(e, exceptions): raise e # TODO(mpeterson): For now the check with session.is_active is # accepted, but when the enginefacade is the only accepted # pattern then it should be changed to check that a session is # attached to the context session = context.session if session.is_active and isinstance(e, _InnerException): self.assertTrue(getattr(e, '_RETRY_EXCEEDED', False)) return exc_names = (tuple(exc.__name__ for exc in exceptions) if hasattr(exceptions, '__iter__') else exceptions.__name__) self.fail('%s did not raise %s' % (method.__name__, exc_names)) try: raise _InnerException except _InnerException as e: _e = e expected_retries = RETRY_MAX if expect_retries else 0 retry_counter = 0 for exception in exceptions: def increase_retry_counter_and_except(*args, **kwargs): nonlocal retry_counter retry_counter += 1 self.db_context.session.add(self.retry_tracker()) self.db_context.session.flush() raise 
exception(_e) mock_object.side_effect = increase_retry_counter_and_except _assertRaises((exception, _InnerException), method, self.db_context, *args) self.assertEqual(expected_retries, mock_object.call_count - 1) mock_object.reset_mock() retry_fixture.cleanUp() return retry_counter def _assertRetryCount(self, expected_count): actual_count = \ self.db_context.session.query(self.retry_tracker).count() self.assertEqual(expected_count, actual_count) def _test_retry_exceptions(self, method, mock_object, assert_transaction=True): retries = self._test_db_exceptions_handled(method, mock_object, True) if assert_transaction: # It should be 0 as long as the retriable method creates save # points or transactions, which is the correct behavior self._assertRetryCount(0) # RETRIABLE * 3 when expect_retries=True since it will retry # twice as per the test, plus the original call. self.assertEqual( len(RETRIABLE_EXCEPTIONS) * (RETRY_MAX + 1), retries ) with db_api.CONTEXT_WRITER.using(self.db_context): retries = self._test_db_exceptions_handled( method, mock_object, False ) if assert_transaction: self._assertRetryCount(0) # only once per exception when expect_retries=False self.assertEqual(len(RETRIABLE_EXCEPTIONS), retries) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/trunk/0000755000175000017500000000000000000000000026205 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/trunk/__init__.py0000644000175000017500000000000000000000000030304 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/networking_odl/tests/unit/trunk/test_trunk_driver_v2.py0000644000175000017500000002364200000000000032752 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.services.trunk import callbacks from neutron.services.trunk import models from neutron_lib.callbacks import events from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from neutron_lib.services.trunk import constants as trunk_consts from networking_odl.common import constants as odl_const from networking_odl.db import db from networking_odl.tests.unit import base_v2 from networking_odl.trunk import trunk_driver_v2 as trunk_driver FAKE_TRUNK = { 'status': 'ACTIVE', 'sub_ports': [{'segmentation_type': 'vlan', 'port_id': 'fake_port_id', 'segmentation_id': 101}, {'segmentation_type': 'vlan', 'port_id': 'fake_port_id', 'segmentation_id': 102}], 'name': 'trunk0', 'admin_state_up': 'true', 'tenant_id': 'fake_tenant_id', 'updated_at': '2016-11-16T10:17:44Z', 'revision_number': 2, 'project_id': 'fake_project_id', 'port_id': 'fake_port_id', 'id': 'fake_id', 'description': 'fake trunk port'} FAKE_PARENT = { 'id': 'fake_parent_id', 'tenant_id': 'fake_tenant_id', 'name': 'parent_port', 'admin_state_up': 'true', 'status': 'ACTIVE'} class 
TestTrunkHandler(base_v2.OpenDaylightConfigBase): def setUp(self): super(TestTrunkHandler, self).setUp() self.handler = (trunk_driver. OpenDaylightTrunkHandlerV2()) def _fake_trunk_payload(self): payload = callbacks.TrunkPayload( self.db_context, 'fake_id', mock.Mock(return_value=FAKE_TRUNK), mock.Mock(return_value=FAKE_TRUNK), mock.Mock(return_value=FAKE_TRUNK['sub_ports'])) payload.current_trunk.status = trunk_consts.TRUNK_DOWN_STATUS payload.current_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK) payload.original_trunk.status = trunk_consts.TRUNK_DOWN_STATUS payload.original_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK) return payload def _call_operation_object(self, operation, timing, fake_payload): method = getattr(self.handler, 'trunk_%s_%s' % (operation, timing)) method(mock.ANY, mock.ANY, mock.ANY, fake_payload) def _test_event(self, operation, timing): with db_api.CONTEXT_WRITER.using(self.db_context): fake_payload = self._fake_trunk_payload() self._call_operation_object(operation, timing, fake_payload) if timing == 'precommit': self.db_context.session.flush() row = db.get_oldest_pending_db_row_with_lock(self.db_context) if timing == 'precommit': self.assertEqual(operation, row['operation']) self.assertEqual(odl_const.ODL_TRUNK, row['object_type']) self.assertEqual(fake_payload.trunk_id, row['object_uuid']) elif timing == 'after': self.assertIsNone(row) def test_trunk_create_precommit(self): self._test_event("create", "precommit") def test_trunk_create_postcommit(self): self._test_event("create", "postcommit") def test_trunk_update_precommit(self): self._test_event("update", "precommit") def test_trunk_update_postcommit(self): self._test_event("update", "postcommit") def test_trunk_delete_precommit(self): self._test_event("delete", "precommit") def test_trunk_delete_postcommit(self): self._test_event("delete", "postcommit") @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2, '_set_subport_status') def 
test_trunk_subports_set_status_create_parent_active( self, mock_set_subport_status): resource = resources.SUBPORTS event_type = events.AFTER_CREATE fake_payload = self._fake_trunk_payload() core_plugin = directory.get_plugin() fake_payload.subports = [models.SubPort(port_id='fake_port_id', segmentation_id=101, segmentation_type='vlan', trunk_id='fake_id')] parent_port = FAKE_PARENT with mock.patch.object(core_plugin, '_get_port') as gp: gp.return_value = parent_port self.handler.trunk_subports_set_status(resource, event_type, mock.ANY, fake_payload) mock_set_subport_status.assert_called_once_with( core_plugin, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_ACTIVE) @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2, '_set_subport_status') def test_trunk_subports_set_status_create_parent_down( self, mock_set_subport_status): resource = resources.SUBPORTS event_type = events.AFTER_CREATE fake_payload = self._fake_trunk_payload() core_plugin = directory.get_plugin() fake_payload.subports = [models.SubPort(port_id='fake_port_id', segmentation_id=101, segmentation_type='vlan', trunk_id='fake_id')] parent_port = FAKE_PARENT.copy() parent_port['status'] = n_const.PORT_STATUS_DOWN with mock.patch.object(core_plugin, '_get_port') as gp: gp.return_value = parent_port self.handler.trunk_subports_set_status(resource, event_type, mock.ANY, fake_payload) mock_set_subport_status.assert_called_once_with( core_plugin, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_DOWN) @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2, '_set_subport_status') def test_trunk_subports_set_status_delete(self, mock_set_subport_status): resource = resources.SUBPORTS event_type = events.AFTER_DELETE fake_payload = self._fake_trunk_payload() fake_payload.subports = [models.SubPort(port_id='fake_port_id', segmentation_id=101, segmentation_type='vlan', trunk_id='fake_id')] self.handler.trunk_subports_set_status(resource, event_type, mock.ANY, fake_payload) 
mock_set_subport_status.assert_called_once_with( mock.ANY, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_DOWN) @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2, '_get_subports_ids') @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2, '_set_subport_status') def test_trunk_subports_update_status_parent_down_to_active( self, mock_set_subport_status, mock_get_subports_ids): resource = resources.PORT event_type = events.AFTER_UPDATE core_plugin = directory.get_plugin() port = FAKE_PARENT.copy() original_port = FAKE_PARENT.copy() original_port['status'] = n_const.PORT_STATUS_DOWN port_kwargs = {'port': port, 'original_port': original_port} mock_get_subports_ids.return_value = ['fake_port_id'] self.handler.trunk_subports_update_status(resource, event_type, mock.ANY, **port_kwargs) mock_set_subport_status.assert_called_once_with( core_plugin, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_ACTIVE) @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2, '_get_subports_ids') @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2, '_set_subport_status') def test_trunk_subports_update_status_parent_active_to_down( self, mock_set_subport_status, mock_get_subports_ids): resource = resources.PORT event_type = events.AFTER_UPDATE core_plugin = directory.get_plugin() port = FAKE_PARENT.copy() original_port = FAKE_PARENT.copy() port['status'] = n_const.PORT_STATUS_DOWN port_kwargs = {'port': port, 'original_port': original_port} mock_get_subports_ids.return_value = ['fake_port_id'] self.handler.trunk_subports_update_status(resource, event_type, mock.ANY, **port_kwargs) mock_set_subport_status.assert_called_once_with( core_plugin, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_DOWN) class TestTrunkDriver(base_v2.OpenDaylightConfigBase): def setUp(self): super(TestTrunkDriver, self).setUp() def test_is_loaded(self): driver = trunk_driver.OpenDaylightTrunkDriverV2.create() self.cfg.config(mechanism_drivers=["logger", odl_const.ODL_ML2_MECH_DRIVER_V2], 
group='ml2') self.assertTrue(driver.is_loaded) self.cfg.config(mechanism_drivers=['logger'], group='ml2') self.assertFalse(driver.is_loaded) self.cfg.config(core_plugin='some_plugin') self.assertFalse(driver.is_loaded) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/networking_odl/trunk/0000755000175000017500000000000000000000000024064 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/trunk/__init__.py0000644000175000017500000000000000000000000026163 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/trunk/constants.py0000644000175000017500000000163600000000000026460 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.api.definitions import portbindings from neutron_lib.services.trunk import constants as t_consts SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER, ) SUPPORTED_SEGMENTATION_TYPES = ( t_consts.SEGMENTATION_TYPE_VLAN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/networking_odl/trunk/trunk_driver_v2.py0000644000175000017500000002113100000000000027561 0ustar00jamespagejamespage00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_const from neutron_lib import context from neutron_lib.plugins import directory from neutron_lib.services.trunk import constants as t_consts from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron.services.trunk.drivers import base as trunk_base from networking_odl.common import config as odl_conf from networking_odl.common import constants as odl_const from networking_odl.journal import full_sync from networking_odl.journal import journal from networking_odl.trunk import constants as odltrunk_const LOG = logging.getLogger(__name__) TRUNK_RESOURCES = { odl_const.ODL_TRUNK: odl_const.ODL_TRUNKS } @registry.has_registry_receivers class OpenDaylightTrunkHandlerV2(object): def __init__(self): cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl") self.journal = journal.OpenDaylightJournalThread() full_sync.register(resources.TRUNK, TRUNK_RESOURCES) LOG.info('initialized trunk driver for OpendayLight') @staticmethod def _record_in_journal(context, trunk_id, operation, data): journal.record(context, odl_const.ODL_TRUNK, trunk_id, operation, data) # TODO(vthapar) Revisit status updates once websockets are fully # implemented - https://review.opendev.org/#/c/421127/ @log_helpers.log_method_call def trunk_create_precommit(self, resource, event, trunk_plugin, payload): data = payload.current_trunk.to_dict() data['status'] = t_consts.TRUNK_ACTIVE_STATUS self._record_in_journal(payload.context, payload.trunk_id, odl_const.ODL_CREATE, data) @log_helpers.log_method_call def trunk_update_precommit(self, resource, event, trunk_plugin, payload=None): if isinstance(payload, events.EventPayload): # TODO(boden): remove shim once all callbacks use lib paylaods payload.desired_state.update(status=t_consts.TRUNK_ACTIVE_STATUS) data = 
payload.desired_state.to_dict() trunk_id = payload.resource_id else: payload.current_trunk.update(status=t_consts.TRUNK_ACTIVE_STATUS) data = payload.current_trunk.to_dict() trunk_id = payload.trunk_id self._record_in_journal(payload.context, trunk_id, odl_const.ODL_UPDATE, data) @log_helpers.log_method_call def trunk_delete_precommit(self, resource, event, trunk_plugin, payload): # fill in data with parent ids, will be used in parent validations trunk_dict = payload.original_trunk.to_dict() data = [subport['port_id'] for subport in trunk_dict['sub_ports']] data.append(trunk_dict['port_id']) self._record_in_journal(payload.context, payload.trunk_id, odl_const.ODL_DELETE, data) @log_helpers.log_method_call def trunk_create_postcommit(self, resource, event, trunk_plugin, payload): payload.current_trunk.update(status=t_consts.TRUNK_ACTIVE_STATUS) self.journal.set_sync_event() @log_helpers.log_method_call def trunk_update_postcommit(self, resource, event, trunk_plugin, payload): payload.current_trunk.update(status=t_consts.TRUNK_ACTIVE_STATUS) self.journal.set_sync_event() @log_helpers.log_method_call def trunk_delete_postcommit(self, resource, event, trunk_plugin, payload): self.journal.set_sync_event() @log_helpers.log_method_call def trunk_subports_set_status(self, resource, event, trunk_plugin, payload): core_plugin = directory.get_plugin() admin_context = context.get_admin_context() if event == events.AFTER_DELETE: status = n_const.PORT_STATUS_DOWN else: parent_id = payload.current_trunk.port_id parent_port = core_plugin._get_port(admin_context, parent_id) status = parent_port['status'] for subport in payload.subports: self._set_subport_status(core_plugin, admin_context, subport.port_id, status) @log_helpers.log_method_call def trunk_subports_update_status(self, resource, event, trigger, **kwargs): core_plugin = directory.get_plugin() admin_context = context.get_admin_context() port = kwargs['port'] original_port = kwargs['original_port'] if port['status'] == 
original_port['status']: return for subport_id in self._get_subports_ids(port['id']): self._set_subport_status(core_plugin, admin_context, subport_id, port['status']) def _set_subport_status(self, plugin, admin_context, port_id, status): plugin.update_port_status(admin_context, port_id, status) def _get_subports_ids(self, port_id): trunk_plugin = directory.get_plugin('trunk') filters = {'port_id': port_id} trunks = trunk_plugin.get_trunks(context.get_admin_context(), filters=filters) if not trunks: return () trunk = trunks[0] return (subport['port_id'] for subport in trunk['sub_ports']) @registry.has_registry_receivers class OpenDaylightTrunkDriverV2(trunk_base.DriverBase): @property def is_loaded(self): try: return (odl_const.ODL_ML2_MECH_DRIVER_V2 in cfg.CONF.ml2.mechanism_drivers) except cfg.NoSuchOptError: return False @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, payload=None): super(OpenDaylightTrunkDriverV2, self).register( resource, event, trigger, payload=payload) self._handler = OpenDaylightTrunkHandlerV2() registry.subscribe(self._handler.trunk_create_precommit, resources.TRUNK, events.PRECOMMIT_CREATE) registry.subscribe(self._handler.trunk_create_postcommit, resources.TRUNK, events.AFTER_CREATE) registry.subscribe(self._handler.trunk_update_precommit, resources.TRUNK, events.PRECOMMIT_UPDATE) registry.subscribe(self._handler.trunk_update_postcommit, resources.TRUNK, events.AFTER_UPDATE) registry.subscribe(self._handler.trunk_delete_precommit, resources.TRUNK, events.PRECOMMIT_DELETE) registry.subscribe(self._handler.trunk_delete_postcommit, resources.TRUNK, events.AFTER_DELETE) for event_ in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE): registry.subscribe(self._handler.trunk_update_precommit, resources.SUBPORTS, event_) for event_ in (events.AFTER_CREATE, events.AFTER_DELETE): registry.subscribe(self._handler.trunk_update_postcommit, resources.SUBPORTS, event_) # Upon subport 
creation/deletion we need to set the right port # status: # 1. Set it to parent status when it is attached to the trunk # 2. Set it to down when is removed from the trunk registry.subscribe(self._handler.trunk_subports_set_status, resources.SUBPORTS, event_) # NOTE(ltomasbo): if the status of the parent port changes, the # subports need to update their status too registry.subscribe(self._handler.trunk_subports_update_status, resources.PORT, events.AFTER_UPDATE) @classmethod def create(cls): return cls(odl_const.ODL_ML2_MECH_DRIVER_V2, odltrunk_const.SUPPORTED_INTERFACES, odltrunk_const.SUPPORTED_SEGMENTATION_TYPES, None, can_trunk_bound_port=True) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.794714 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/0000755000175000017500000000000000000000000024413 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/PKG-INFO0000644000175000017500000000430400000000000025511 0ustar00jamespagejamespage00000000000000Metadata-Version: 2.1 Name: networking-odl Version: 16.0.0.0b2.dev1 Summary: OpenStack Networking Home-page: https://docs.openstack.org/networking-odl/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ========================== Welcome to networking-odl! ========================== .. Team and repository tags .. image:: http://governance.openstack.org/badges/networking-odl.svg :target: http://governance.openstack.org/reference/tags/index.html .. Change things from this point on Summary ------- OpenStack networking-odl is a library of drivers and plugins that integrates OpenStack Neutron API with OpenDaylight Backend. For example it has ML2 driver and L3 plugin to enable communication of OpenStack Neutron L2 and L3 resources API to OpenDayLight Backend. 
To report and discover bugs in networking-odl the following link can be used: https://bugs.launchpad.net/networking-odl Any new code submission or proposal must follow the development guidelines detailed in HACKING.rst and for further details this link can be checked: https://docs.openstack.org/networking-odl/latest/ The OpenDaylight homepage: https://www.opendaylight.org/ Release notes for the project can be found at: https://docs.openstack.org/releasenotes/networking-odl/ The project source code repository is located at: https://opendev.org/openstack/networking-odl Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 Provides-Extra: ceilometer Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/SOURCES.txt0000644000175000017500000003577600000000000026321 0ustar00jamespagejamespage00000000000000.coveragerc .mailmap .pylintrc .stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst TESTING.rst babel.cfg bindep.txt lower-constraints.txt requirements.txt setup.cfg setup.py tempest-blacklist.txt test-requirements.txt tox.ini .zuul.d/jobs.yaml .zuul.d/project.yaml devstack/README.rst devstack/devstackgaterc devstack/entry_points devstack/functions devstack/jetty-legacy.patch devstack/local.conf.example devstack/override-defaults devstack/plugin.sh devstack/post_test_hook.sh devstack/pre_test_hook.sh devstack/settings devstack/settings.odl devstack/setup_java.sh 
devstack/files/debs/networking-odl devstack/files/rpms/networking-odl devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml devstack/odl-releases/README.rst devstack/odl-releases/common devstack/odl-releases/fluorine-latest devstack/odl-releases/fluorine-snapshot-0.9 devstack/odl-releases/fluorine-snapshot-0.9.0 devstack/odl-releases/latest-release devstack/odl-releases/latest-snapshot devstack/odl-releases/neon-latest devstack/odl-releases/neon-snapshot-0.10.2 devstack/odl-releases/sodium-latest devstack/upgrade/resources.sh devstack/upgrade/settings devstack/upgrade/upgrade.sh doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/admin/index.rst doc/source/admin/reference_architecture.rst doc/source/configuration/index.rst doc/source/configuration/samples/ml2_odl.rst doc/source/contributor/contributing.rst doc/source/contributor/drivers_architecture.rst doc/source/contributor/hostconfig.rst doc/source/contributor/index.rst doc/source/contributor/maintenance.rst doc/source/contributor/quickstart.rst doc/source/contributor/testing.rst doc/source/contributor/usage.rst doc/source/contributor/specs/index.rst doc/source/contributor/specs/newton/qos-driver.rst doc/source/contributor/specs/newton/sfc-driver.rst doc/source/contributor/specs/ocata/journal-recovery.rst doc/source/contributor/specs/pike/dep-validations-on-create.rst doc/source/contributor/specs/pike/neutron-port-dhcp.rst doc/source/install/devstack.rst doc/source/install/index.rst doc/source/install/installation.rst doc/source/reference/index.rst doc/source/reference/newton.rst doc/source/reference/ocata.rst doc/source/reference/pike.rst etc/policy.json etc/neutron/plugins/ml2/ml2_conf_odl.ini networking_odl/__init__.py networking_odl/_i18n.py networking_odl.egg-info/PKG-INFO networking_odl.egg-info/SOURCES.txt networking_odl.egg-info/dependency_links.txt networking_odl.egg-info/entry_points.txt networking_odl.egg-info/not-zip-safe 
networking_odl.egg-info/pbr.json networking_odl.egg-info/requires.txt networking_odl.egg-info/top_level.txt networking_odl/bgpvpn/__init__.py networking_odl/bgpvpn/odl_v2.py networking_odl/ceilometer/__init__.py networking_odl/ceilometer/network/__init__.py networking_odl/ceilometer/network/statistics/__init__.py networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__.py networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py networking_odl/cmd/__init__.py networking_odl/cmd/analyze_journal.py networking_odl/cmd/set_ovs_hostconfigs.py networking_odl/cmd/test_setup_hostconfigs.sh networking_odl/common/__init__.py networking_odl/common/callback.py networking_odl/common/client.py networking_odl/common/config.py networking_odl/common/constants.py networking_odl/common/exceptions.py networking_odl/common/filters.py networking_odl/common/lightweight_testing.py networking_odl/common/odl_features.py networking_odl/common/postcommit.py networking_odl/common/utils.py networking_odl/common/websocket_client.py networking_odl/db/__init__.py networking_odl/db/db.py networking_odl/db/head.py networking_odl/db/models.py networking_odl/db/migration/__init__.py networking_odl/db/migration/alembic_migrations/README networking_odl/db/migration/alembic_migrations/__init__.py networking_odl/db/migration/alembic_migrations/env.py networking_odl/db/migration/alembic_migrations/script.py.mako networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py 
networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.py networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.py networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py networking_odl/db/migration/alembic_migrations/versions/pike/contract/7cbef5a56298_drop_created_at_column.py networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.py networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.py networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.py networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.py networking_odl/dhcp/__init__.py networking_odl/dhcp/odl_dhcp_driver.py networking_odl/dhcp/odl_dhcp_driver_base.py networking_odl/hacking/__init__.py networking_odl/hacking/checks.py networking_odl/journal/__init__.py networking_odl/journal/base_driver.py networking_odl/journal/cleanup.py networking_odl/journal/dependency_validations.py networking_odl/journal/full_sync.py networking_odl/journal/journal.py networking_odl/journal/periodic_task.py networking_odl/journal/recovery.py networking_odl/journal/worker.py networking_odl/l2gateway/__init__.py networking_odl/l2gateway/driver_v2.py networking_odl/l3/__init__.py networking_odl/l3/l3_flavor.py networking_odl/l3/l3_odl_v2.py networking_odl/locale/en_GB/LC_MESSAGES/networking_odl.po networking_odl/ml2/README.odl networking_odl/ml2/__init__.py networking_odl/ml2/legacy_port_binding.py networking_odl/ml2/mech_driver_v2.py networking_odl/ml2/port_binding.py 
networking_odl/ml2/port_status_update.py networking_odl/ml2/pseudo_agentdb_binding.py networking_odl/qos/__init__.py networking_odl/qos/qos_driver_v2.py networking_odl/qos/qos_utils.py networking_odl/sfc/__init__.py networking_odl/sfc/sfc_driver_v2.py networking_odl/sfc/flowclassifier/__init__.py networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py networking_odl/tests/__init__.py networking_odl/tests/base.py networking_odl/tests/match.py networking_odl/tests/functional/__init__.py networking_odl/tests/functional/base.py networking_odl/tests/functional/requirements.txt networking_odl/tests/functional/test_bgpvpn.py networking_odl/tests/functional/test_l2gateway.py networking_odl/tests/functional/test_l3.py networking_odl/tests/functional/test_ml2_drivers.py networking_odl/tests/functional/test_odl_dhcp_driver.py networking_odl/tests/functional/test_qos.py networking_odl/tests/functional/test_trunk_drivers.py networking_odl/tests/functional/db/__init__.py networking_odl/tests/functional/db/test_migrations.py networking_odl/tests/unit/__init__.py networking_odl/tests/unit/base_v2.py networking_odl/tests/unit/test_base_db.py networking_odl/tests/unit/bgpvpn/__init__.py networking_odl/tests/unit/bgpvpn/test_odl_v2.py networking_odl/tests/unit/ceilometer/__init__.py networking_odl/tests/unit/ceilometer/network/__init__.py networking_odl/tests/unit/ceilometer/network/statistics/__init__.py networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init__.py networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.py networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py networking_odl/tests/unit/cmd/__init__.py networking_odl/tests/unit/cmd/test_analyze_journal.py networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py networking_odl/tests/unit/common/__init__.py networking_odl/tests/unit/common/test_callback.py networking_odl/tests/unit/common/test_client.py 
networking_odl/tests/unit/common/test_filters.py networking_odl/tests/unit/common/test_lightweight_testing.py networking_odl/tests/unit/common/test_odl_features.py networking_odl/tests/unit/common/test_postcommit.py networking_odl/tests/unit/common/test_utils.py networking_odl/tests/unit/common/test_websocket_client.py networking_odl/tests/unit/db/__init__.py networking_odl/tests/unit/db/test_db.py networking_odl/tests/unit/dhcp/__init__.py networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py networking_odl/tests/unit/dhcp/test_odl_dhcp_driver_base.py networking_odl/tests/unit/journal/__init__.py networking_odl/tests/unit/journal/helper.py networking_odl/tests/unit/journal/test_base_driver.py networking_odl/tests/unit/journal/test_cleanup.py networking_odl/tests/unit/journal/test_dependency_validations.py networking_odl/tests/unit/journal/test_full_sync.py networking_odl/tests/unit/journal/test_journal.py networking_odl/tests/unit/journal/test_periodic_task.py networking_odl/tests/unit/journal/test_recovery.py networking_odl/tests/unit/l2gateway/__init__.py networking_odl/tests/unit/l2gateway/test_driver_v2.py networking_odl/tests/unit/l3/__init__.py networking_odl/tests/unit/l3/test_l3_flavor.py networking_odl/tests/unit/l3/test_l3_odl_v2.py networking_odl/tests/unit/ml2/__init__.py networking_odl/tests/unit/ml2/config-ovs-external_ids.sh networking_odl/tests/unit/ml2/odl_teststub.js networking_odl/tests/unit/ml2/test_legacy_port_binding.py networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py networking_odl/tests/unit/ml2/test_port_binding.py networking_odl/tests/unit/ml2/test_port_status_update.py networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py networking_odl/tests/unit/qos/__init__.py networking_odl/tests/unit/qos/test_qos_driver_v2.py networking_odl/tests/unit/sfc/__init__.py networking_odl/tests/unit/sfc/constants.py networking_odl/tests/unit/sfc/test_sfc_driver_v2.py networking_odl/tests/unit/sfc/flowclassifier/__init__.py 
networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py networking_odl/tests/unit/trunk/__init__.py networking_odl/tests/unit/trunk/test_trunk_driver_v2.py networking_odl/trunk/__init__.py networking_odl/trunk/constants.py networking_odl/trunk/trunk_driver_v2.py playbooks/devstack/pre.yaml playbooks/devstack-tox/post.yaml playbooks/devstack-tox/pre.yaml playbooks/devstack-tox/run.yaml playbooks/functional/pre.yaml playbooks/legacy/grenade-dsvm-networking-odl/post.yaml playbooks/legacy/grenade-dsvm-networking-odl/run.yaml playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snapshot/post.yaml playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snapshot/run.yaml playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapshot/post.yaml playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapshot/run.yaml playbooks/tempest/post.yaml playbooks/tempest/pre.yaml playbooks/tempest/run.yaml rally-jobs/README.rst rally-jobs/odl.yaml rally-jobs/extra/README.rst rally-jobs/plugins/README.rst rally-jobs/plugins/__init__.py releasenotes/notes/.placeholder releasenotes/notes/add-analyze-journal-cmd-189eae2cac4d60a5.yaml releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml releasenotes/notes/bgpvpn-vni-support-0804d0c0789cd1db.yaml releasenotes/notes/delete-completed-rows-immediately-d3aee2ff5278b3f4.yaml releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml releasenotes/notes/deprecate-v1-0dd4f07c68a4a0a4.yaml releasenotes/notes/deprecate_ceilometer-0d2830fa1fc6ba4e.yaml releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml releasenotes/notes/drop-py27-support-3bc8094e1823cfcf.yaml releasenotes/notes/fix-sfc-full-sync-4eafe97d27b8b33e.yaml releasenotes/notes/fix-sfcv2-urlpath-f339357bed1a538c.yaml releasenotes/notes/fix-tls-websocket-3bee50093c3e90cf.yaml 
releasenotes/notes/fix-ws-ssl-timeout-e16cd41779c05d42.yaml releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml releasenotes/notes/functional-test-b0855d6f1d85da30.yaml releasenotes/notes/ignore_agent_aliveness-935a1aa8c285dfa2.yaml releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml releasenotes/notes/make-ceilometer-dependency-optional-fb0407dd2d367599.yaml releasenotes/notes/make_sync_timeout_float-490072005e3f3413.yaml releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml releasenotes/notes/neutron-dhcp-port-dcbc3a1008f45cc2.yaml releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml releasenotes/notes/odl-l3-flavor-f093e6c0fb4e9dd8.yaml releasenotes/notes/odl_features-option-type-change-367385ae7d1e949e.yaml releasenotes/notes/ovs_hardware_offload_support-38d2b0b7386b8ca7.yaml releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml releasenotes/notes/remove-neutron-lbaas-6afe0b0f7b61290a.yaml releasenotes/notes/remove-v1-driver-df408f9916fc5e5d.yaml releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml releasenotes/notes/set-ovs-hostconfig-flat-default-a3c189858304e2ed.yaml releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml releasenotes/notes/version-bump-16230eadac71cbb0.yaml 
releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml releasenotes/notes/websocket-client-7c8117671aeea181.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po roles/show-odl-info/README.rst roles/show-odl-info/defaults/main.yaml roles/show-odl-info/tasks/main.yaml roles/show-odl-info/tasks/ovs_flows.yaml tools/check_bash.sh tools/check_i18n.py tools/check_i18n_test_case.txt tools/clean.sh tools/coding-checks.sh tools/configure_for_func_testing.sh tools/i18n_cfg.py tools/install_venv.py tools/with_venv.sh././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/dependency_links.txt0000644000175000017500000000000100000000000030461 0ustar00jamespagejamespage00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/entry_points.txt0000644000175000017500000000217600000000000027717 0ustar00jamespagejamespage00000000000000[console_scripts] neutron-odl-analyze-journal-logs = networking_odl.cmd.analyze_journal:main neutron-odl-ovs-hostconfig = networking_odl.cmd.set_ovs_hostconfigs:main [network.statistics.drivers] opendaylight.v2 = networking_odl.ceilometer.network.statistics.opendaylight_v2.driver:OpenDaylightDriver [networking_odl.ml2.port_binding_controllers] legacy-port-binding = networking_odl.ml2.legacy_port_binding:LegacyPortBindingManager pseudo-agentdb-binding = 
networking_odl.ml2.pseudo_agentdb_binding:PseudoAgentDBBindingController [networking_sfc.flowclassifier.drivers] odl_v2 = networking_odl.sfc.flowclassifier.sfc_flowclassifier_v2:OpenDaylightSFCFlowClassifierDriverV2 [networking_sfc.sfc.drivers] odl_v2 = networking_odl.sfc.sfc_driver_v2:OpenDaylightSFCDriverV2 [neutron.db.alembic_migrations] networking-odl = networking_odl.db.migration:alembic_migrations [neutron.ml2.mechanism_drivers] opendaylight_v2 = networking_odl.ml2.mech_driver_v2:OpenDaylightMechanismDriver [neutron.service_plugins] odl-router_v2 = networking_odl.l3.l3_odl_v2:OpenDaylightL3RouterPlugin [oslo.config.opts] ml2_odl = networking_odl.common.config:list_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/not-zip-safe0000644000175000017500000000000100000000000026641 0ustar00jamespagejamespage00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/pbr.json0000644000175000017500000000006100000000000026066 0ustar00jamespagejamespage00000000000000{"git_version": "3de47a829", "is_release": false}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/requires.txt0000644000175000017500000000104000000000000027006 0ustar00jamespagejamespage00000000000000Babel>=2.5.3 debtcollector>=1.19.0 networking-bgpvpn>=10.0.0b1 networking-l2gw>=12.0.0 networking-sfc>=10.0.0.0b1 neutron-lib>=2.0.0 neutron>=16.0.0.0b1 pbr>=4.0.0 stevedore>=1.28.0 websocket-client>=0.47.0 [ceilometer] ceilometer>=11.0.0 [test] astroid==2.1.0 bandit!=1.6.0,>=1.4.0 bashate>=0.5.1 ceilometer>=11.0.0 coverage>=4.5.1 doc8>=0.8.0 flake8-import-order>=0.17.1 hacking!=0.13.0,<0.14,>=0.12.0 oslotest>=3.3.0 pecan>=1.3.2 pylint==2.2.0 python-subunit>=1.2.0 
stestr>=2.0.0 testresources>=2.0.1 testscenarios>=0.5.0 testtools>=2.3.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130283.0 networking-odl-16.0.0.0b2.dev1/networking_odl.egg-info/top_level.txt0000644000175000017500000000001700000000000027143 0ustar00jamespagejamespage00000000000000networking_odl ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.782714 networking-odl-16.0.0.0b2.dev1/playbooks/0000755000175000017500000000000000000000000021677 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/playbooks/devstack/0000755000175000017500000000000000000000000023503 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/devstack/pre.yaml0000644000175000017500000000026700000000000025162 0ustar00jamespagejamespage00000000000000- hosts: all roles: - role: run-devstack when: run_devstack == True - role: bindep bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8227139 networking-odl-16.0.0.0b2.dev1/playbooks/devstack-tox/0000755000175000017500000000000000000000000024313 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/devstack-tox/post.yaml0000644000175000017500000000011000000000000026154 0ustar00jamespagejamespage00000000000000- hosts: all roles: - fetch-tox-output - fetch-subunit-output ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 
networking-odl-16.0.0.0b2.dev1/playbooks/devstack-tox/pre.yaml0000644000175000017500000000004700000000000025766 0ustar00jamespagejamespage00000000000000- hosts: all roles: - ensure-tox ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/devstack-tox/run.yaml0000644000175000017500000000004000000000000025775 0ustar00jamespagejamespage00000000000000- hosts: all roles: - tox ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/playbooks/functional/0000755000175000017500000000000000000000000024041 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/functional/pre.yaml0000644000175000017500000000165300000000000025520 0ustar00jamespagejamespage00000000000000- hosts: all tasks: - include_role: name: setup-devstack-log-dir # TODO(mpeterson): Remove when https://github.com/ansible/ansible/issues/21890 is fixed - set_fact: devstack_base_dir: /opt/stack when: devstack_base_dir is not defined - name: Create networking-odl functional logs dir file: path: '{{ devstack_base_dir }}/logs/functional-logs' state: directory owner: zuul become: yes - name: Configure the environment for functional testing shell: cmd: | VENV="dsvm-functional" GATE_DEST="{{ devstack_base_dir }}" DEVSTACK_PATH=$GATE_DEST/devstack NETWORKING_ODL_DIR=$GATE_DEST/networking-odl IS_GATE=True source ./tools/configure_for_func_testing.sh configure_host_for_func_testing executable: /bin/bash chdir: "{{ zuul.project.src_dir }}" become: yes ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.782714 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/0000755000175000017500000000000000000000000023143 
5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/grenade-dsvm-networking-odl/0000755000175000017500000000000000000000000030460 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/grenade-dsvm-networking-odl/post.yaml0000644000175000017500000000455100000000000032336 0ustar00jamespagejamespage00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' 
mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/grenade-dsvm-networking-odl/run.yaml0000644000175000017500000000355700000000000032162 0ustar00jamespagejamespage00000000000000- hosts: all name: Autoconverted job legacy-grenade-dsvm-networking-odl from old job gate-grenade-dsvm-networking-odl-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x # Set this variable for grenade job to be identified and use lib/neutron-legacy # for base release and lib/neutron for target release export IS_GRENADE_JOB=True export PROJECTS="openstack/grenade openstack/networking-odl $PROJECTS" export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_GRENADE=pullup export GRENADE_PLUGINRC="enable_grenade_plugin networking-odl https://opendev.org/openstack/networking-odl" export DEVSTACK_GATE_NEUTRON=1 export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export DEVSTACK_GATE_SETTINGS=/opt/stack/new/networking-odl/devstack/devstackgaterc cp 
devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000114 path=networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snapshot/ 28 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snaps0000755000175000017500000000000000000000000035416 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snapshot/post.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snaps0000644000175000017500000000455100000000000035425 0ustar00jamespagejamespage00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from 
{{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snapshot/run.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-fluorine-snaps0000644000175000017500000000607100000000000035424 0ustar00jamespagejamespage00000000000000- hosts: primary name: Autoconverted job legacy-tempest-dsvm-networking-odl-multinode-fluorine-snapshot from old job gate-tempest-dsvm-networking-odl-multinode-fluorine-snapshot-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x cat << 'EOF' >>"/tmp/dg-local.conf" 
[[local|localrc]] enable_plugin networking-odl https://opendev.org/openstack/networking-odl EOF executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_NEUTRON_DVR=1 # Make sure tempest is installed, but run it out of the post_test_hooks.sh # script from the networking-odl repository. export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi # Because we are testing a non standard project, add # our project repository. This makes zuul do the right # reference magic for testing changes. export PROJECTS="openstack/networking-odl $PROJECTS" # Keep localrc to be able to set some vars in pre_test_hook export KEEP_LOCALRC=1 # specify ODL release to use export ODL_RELEASE_BASE=fluorine-snapshot function pre_test_hook { if [ -f $BASE/new/networking-odl/devstack/pre_test_hook.sh ] ; then . $BASE/new/networking-odl/devstack/pre_test_hook.sh fi } export -f pre_test_hook function post_test_hook { if [ -f $BASE/new/networking-odl/devstack/post_test_hook.sh ] ; then . 
$BASE/new/networking-odl/devstack/post_test_hook.sh fi } export -f post_test_hook export DEVSTACK_GATE_TOPOLOGY="multinode" export DEVSTACK_GATE_SETTINGS=/opt/stack/new/networking-odl/devstack/devstackgaterc cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000112 path=networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapshot/ 28 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapsho0000755000175000017500000000000000000000000035433 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapshot/post.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapsho0000644000175000017500000000455100000000000035442 0ustar00jamespagejamespage00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*nose_results.html - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testr_results.html.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir 
}}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.testrepository/tmp* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=**/*testrepository.subunit.gz - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}/tox' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/.tox/*/log/* - --include=*/ - --exclude=* - --prune-empty-dirs - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapshot/run.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/legacy/tempest-dsvm-networking-odl-multinode-oxygen-snapsho0000644000175000017500000000606300000000000035442 0ustar00jamespagejamespage00000000000000- hosts: primary name: Autoconverted job legacy-tempest-dsvm-networking-odl-multinode-oxygen-snapshot from old job gate-tempest-dsvm-networking-odl-multinode-oxygen-snapshot-nv tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ 
https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x cat << 'EOF' >>"/tmp/dg-local.conf" [[local|localrc]] enable_plugin networking-odl https://opendev.org/openstack/networking-odl EOF executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_NEUTRON_DVR=1 # Make sure tempest is installed, but run it out of the post_test_hooks.sh # script from the networking-odl repository. export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi # Because we are testing a non standard project, add # our project repository. This makes zuul do the right # reference magic for testing changes. export PROJECTS="openstack/networking-odl $PROJECTS" # Keep localrc to be able to set some vars in pre_test_hook export KEEP_LOCALRC=1 # specify ODL release to use export ODL_RELEASE_BASE=oxygen-snapshot function pre_test_hook { if [ -f $BASE/new/networking-odl/devstack/pre_test_hook.sh ] ; then . $BASE/new/networking-odl/devstack/pre_test_hook.sh fi } export -f pre_test_hook function post_test_hook { if [ -f $BASE/new/networking-odl/devstack/post_test_hook.sh ] ; then . 
$BASE/new/networking-odl/devstack/post_test_hook.sh fi } export -f post_test_hook export DEVSTACK_GATE_TOPOLOGY="multinode" export DEVSTACK_GATE_SETTINGS=/opt/stack/new/networking-odl/devstack/devstackgaterc cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/playbooks/tempest/0000755000175000017500000000000000000000000023360 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/tempest/post.yaml0000644000175000017500000000121400000000000025227 0ustar00jamespagejamespage00000000000000- hosts: all tasks: - include_role: name: show-odl-info # TODO(mpeterson): Remove when https://github.com/ansible/ansible/issues/21890 is fixed - set_fact: devstack_base_dir: /opt/stack when: devstack_base_dir is not defined - name: Stop ODL shell: cmd: "{{devstack_base_dir}}/opendaylight/*karaf-*/bin/stop" executable: /bin/bash chdir: "{{ zuul.project.src_dir }}" # TODO(mpeterson): We can use pause when https://github.com/ansible/ansible/issues/31694 # is merged to the ansible version Zuul uses. 
- name: Pause to give ODL time to finish command: sleep 5s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/tempest/pre.yaml0000644000175000017500000000121700000000000025033 0ustar00jamespagejamespage00000000000000- hosts: all tasks: # TODO(mpeterson): Remove when https://github.com/ansible/ansible/issues/21890 is fixed - set_fact: devstack_base_dir: /opt/stack when: devstack_base_dir is not defined # delete and recreate network to workaroud netvirt bug: # https://bugs.opendaylight.org/show_bug.cgi?id=7456 # https://bugs.opendaylight.org/show_bug.cgi?id=8133 - name: Purge and recreate initial networks shell: cmd: | source ./devstack/functions purge_and_recreate_initial_networks "{{devstack_base_dir}}/devstack" executable: /bin/bash chdir: "{{ zuul.project.src_dir }}" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/playbooks/tempest/run.yaml0000644000175000017500000000223500000000000025052 0ustar00jamespagejamespage00000000000000# This is taken from https://github.com/openstack/tempest/blob/7541031d1742f31b467134816202485d5a630e05/playbooks/devstack-tempest.yaml # TODO(mpeterson): Ideally we want to use their run playbook but until https://review.opendev.org/#/c/530642/ is resolved. # Changes that run through devstack-tempest are likely to have an impact on # the devstack part of the job, so we keep devstack in the main play to # avoid zuul retrying on legitimate failures. 
- hosts: all tasks: - include_role: name: run-devstack - include_role: name: show-odl-info # TODO(mpeterson): Remove when https://github.com/ansible/ansible/issues/21890 is fixed - set_fact: devstack_base_dir: /opt/stack when: devstack_base_dir is not defined - name: Print karaf feature list shell: cmd: "{{devstack_base_dir}}/opendaylight/*karaf-*/bin/client 'feature:list -i'" executable: /bin/bash chdir: "{{ zuul.project.src_dir }}" # We run tests only on one node, regardless how many nodes are in the system - hosts: tempest roles: - setup-tempest-run-dir - setup-tempest-data-dir - acl-devstack-files - run-tempest ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/rally-jobs/0000755000175000017500000000000000000000000021752 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/rally-jobs/README.rst0000644000175000017500000000177100000000000023447 0ustar00jamespagejamespage00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic cleanup resources, .... * extra - all files from this directory will be copy pasted to gates, so you are able to use absolute paths in rally tasks. 
Files will be located in ~/.rally/extra/* * odl.yaml is a task that is run in gates against OpenStack with Neutron service configured with ODL plugin Useful links ------------ * More about Rally: https://rally.readthedocs.org/en/latest/ * Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html * How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html * About plugins: https://rally.readthedocs.org/en/latest/plugins.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/rally-jobs/extra/0000755000175000017500000000000000000000000023075 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/rally-jobs/extra/README.rst0000644000175000017500000000025500000000000024566 0ustar00jamespagejamespage00000000000000Extra files =========== All files from this directory will be copy pasted to gates, so you are able to use absolute path in rally tasks. 
Files will be in ~/.rally/extra/* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/rally-jobs/odl.yaml0000644000175000017500000001407100000000000023417 0ustar00jamespagejamespage00000000000000--- NeutronNetworks.create_and_list_networks: - runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_list_subnets: - args: subnets_per_network: 2 runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: subnet: -1 network: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_list_routers: - args: network_create_args: subnet_create_args: subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 router_create_args: runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_list_ports: - args: network_create_args: port_create_args: ports_per_network: 2 runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 port: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_update_networks: - args: network_create_args: {} network_update_args: admin_state_up: False name: "_updated" runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_update_subnets: - args: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.4.0.0/16" subnets_per_network: 2 subnet_update_args: enable_dhcp: False name: "_subnet_updated" runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 5 users_per_tenant: 5 quotas: neutron: network: -1 subnet: -1 sla: 
failure_rate: max: 0 NeutronNetworks.create_and_update_routers: - args: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 router_create_args: {} router_update_args: admin_state_up: False name: "_router_updated" runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_update_ports: - args: network_create_args: {} port_create_args: {} ports_per_network: 5 port_update_args: admin_state_up: False device_id: "dummy_id" device_owner: "dummy_owner" name: "_port_updated" runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 port: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_delete_networks: - args: network_create_args: {} runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_delete_subnets: - args: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_delete_routers: - args: network_create_args: {} subnet_create_args: {} subnet_cidr_start: "1.1.0.0/30" subnets_per_network: 2 router_create_args: {} runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 subnet: -1 router: -1 sla: failure_rate: max: 0 NeutronNetworks.create_and_delete_ports: - args: network_create_args: {} port_create_args: {} ports_per_network: 5 runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 1 users_per_tenant: 1 quotas: neutron: network: -1 port: -1 sla: 
failure_rate: max: 0 Quotas.neutron_update: - args: max_quota: 1024 runner: type: "constant" times: 40 concurrency: 20 context: users: tenants: 20 users_per_tenant: 1 sla: failure_rate: max: 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1585130284.8267138 networking-odl-16.0.0.0b2.dev1/rally-jobs/plugins/0000755000175000017500000000000000000000000023433 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/rally-jobs/plugins/README.rst0000644000175000017500000000060700000000000025125 0ustar00jamespagejamespage00000000000000Rally plugins ============= All \*.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/rally-jobs/plugins/__init__.py0000644000175000017500000000000000000000000025532 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.782714 networking-odl-16.0.0.0b2.dev1/releasenotes/0000755000175000017500000000000000000000000022365 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.830714 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/0000755000175000017500000000000000000000000023515 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/.placeholder0000644000175000017500000000000000000000000025766 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/add-analyze-journal-cmd-189eae2cac4d60a5.yaml0000644000175000017500000000053100000000000033351 0ustar00jamespagejamespage00000000000000--- prelude: > Add command line tool to analyze logs features: - This tool can be used to analyze logs and determine the journal's operation efficiency. The tool tracks a journal entry's recording and processing to determine how much time it took since a journal entry was recorded until it was processed and sent to ODL. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml0000644000175000017500000000037500000000000032306 0ustar00jamespagejamespage00000000000000--- prelude: > Add ODL Beryllium SR4 release definition. 
features: - Add OpenDaylight Beryllium SR4 release and Beryllium 0.4.5 snapshot definition and remove Beryllium 0.4.4 snapshot as OpenDaylight Beryllium 0.4.4 SR4 has been released. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml0000644000175000017500000000063500000000000031535 0ustar00jamespagejamespage00000000000000--- prelude: > Host Configuration data population from agentless OpenDayLight. features: - This configuration is used to get the information about physical host type and other config data like supported vnic types stored in ovsdb. Networking-odl can fetch this info from OpenDaylight via REST API request and feed agents_db table in neutron, which will be used by neutron scheduler. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml0000644000175000017500000000066000000000000031516 0ustar00jamespagejamespage00000000000000--- prelude: > BGPVPN Version 2 Driver for OpenDaylight. features: - | A new version of BGPVPN driver that integrate OpenStack Neutron BGPVPN API with OpenDaylight backend. It supports CRUD operations for BGPVPN and enables networks and routers to be associated to such BGPVPNs. This driver uses journaling mechanism, unlike v1 driver, which will first log the operation in journal table before execution. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/bgpvpn-vni-support-0804d0c0789cd1db.yaml0000644000175000017500000000112200000000000032403 0ustar00jamespagejamespage00000000000000--- prelude: > Support for bgpvpn-vni. features: - | BGPVPN OpenDaylight v2 driver will be enhanced to support bgpvpn-vni extension. 
Bgpvpn VNI resource represents the VNI to use on VXLAN encapsulated packets transferred to or from ODL managed computes themselves or for traffic from ODL-managed computers towards the DC-Gateway. Acceptance and realisation of the vni attribute in a bgpvpn is available in OpenDaylight(ODL) controller from Neon release of ODL other: - OpenDaylight changes are available at https://git.opendaylight.org/gerrit/#/c/63405/ ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=networking-odl-16.0.0.0b2.dev1/releasenotes/notes/delete-completed-rows-immediately-d3aee2ff5278b3f4.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/delete-completed-rows-immediately-d3aee2ff5278b3f40000644000175000017500000000035600000000000034533 0ustar00jamespagejamespage00000000000000--- prelude: > Completed rows are deleted by default. upgrade: - Completed rows will now be immediately deleted upon completion. To retain the completed rows, set the completed_rows_retention configuration value explicitly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml0000644000175000017500000000016600000000000033033 0ustar00jamespagejamespage00000000000000--- deprecations: - The QoS V1 driver is deprecated in the Pike cycle and will be removed in the Queens release.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/deprecate-v1-0dd4f07c68a4a0a4.yaml0000644000175000017500000000110200000000000031135 0ustar00jamespagejamespage00000000000000--- prelude: > V1 drivers are marked depracated beginning with Queens cycle, to be removed in Rocky cycle. deprecations: - The V1 drivers are not actively maintained by the networking-odl team for a few cycles already and aren't guaranteed to even work. 
As such, the networking-odl team has decided that the drivers will be marked as deprecated beginning Queens cycle, and removed in the beginning of the Rocky cycle. If you're still using the V1 drivers, please switch to using the V2 drivers by updating the appropriate configuration values. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/deprecate_ceilometer-0d2830fa1fc6ba4e.yaml0000644000175000017500000000066700000000000033117 0ustar00jamespagejamespage00000000000000--- prelude: > Ceilometer is marked as deprecated beginning with Stein cycle to be removed in release T deprecations: - | Ceilometer driver should not be part of networking-odl because it forces a hard dependency with ceilometer into the project which does not really make sense. As such, it was decided the driver will be marked as deprecated beginning with Stein cycle and removed in the beginning of release T ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml0000644000175000017500000000040600000000000034040 0ustar00jamespagejamespage00000000000000--- prelude: > Changed devstack default to V2 driver. other: - Starting with Ocata, Devstack will use V2 drivers (where available) by default. To force the use of V1 architecture drivers you can specify 'ODL_V2DRIVER=False' in the local.conf file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/drop-py27-support-3bc8094e1823cfcf.yaml0000644000175000017500000000033200000000000032153 0ustar00jamespagejamespage00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of networking-odl to support python 2.7 is OpenStack Train. 
The minimum version of Python now supported by networking-odl is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/fix-sfc-full-sync-4eafe97d27b8b33e.yaml0000644000175000017500000000007100000000000032236 0ustar00jamespagejamespage00000000000000--- fixes: - Fixes full sync errors with SFCv2 driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/fix-sfcv2-urlpath-f339357bed1a538c.yaml0000644000175000017500000000010600000000000032103 0ustar00jamespagejamespage00000000000000--- fixes: - | Fixes ODL Neutron NB URL path for SFC v2 Driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/fix-tls-websocket-3bee50093c3e90cf.yaml0000644000175000017500000000013200000000000032241 0ustar00jamespagejamespage00000000000000--- fixes: - | Fixes using TLS secured websocket when HTTPS is used in ML2 ODL URL. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/fix-ws-ssl-timeout-e16cd41779c05d42.yaml0000644000175000017500000000034600000000000032242 0ustar00jamespagejamespage00000000000000--- fixes: - | Fixes an issue with SSL websocket connections where a read timeout was causing the client to close the connection. Read timeout is normal when no port status update is being sent by the server (ODL). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml0000644000175000017500000000024100000000000032711 0ustar00jamespagejamespage00000000000000--- prelude: > Added FLAT type networks support. 
features: - In addition to existing supported types, networks of type FLAT can be also used with ODL. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml0000644000175000017500000000110500000000000030746 0ustar00jamespagejamespage00000000000000--- prelude: > Full sync supports and ODL controller with no Neutron resources on it. This support is for the V2 driver, as V1 driver already supports this. features: - The full sync process looks for a "canary" network on the ODL controller side. If such a network is found, it doesn't do anything. If the network is missing then all the neutron resources are re-created on ODL. This supports cases when ODL controller comes online with no Neutron resources on it (also referred to as "cold reboot", but can happen on various cases). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/functional-test-b0855d6f1d85da30.yaml0000644000175000017500000000057100000000000031736 0ustar00jamespagejamespage00000000000000--- prelude: > The new class of test cases, functional test, has been added. So was help scripts to setup necessary environment. other: - The functional tests were added. It's new class of test cases, which requires pre-configured environment. Environment to run such tests can be configured by tool in networking-odl/tools.configure_for_func_testing.sh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/ignore_agent_aliveness-935a1aa8c285dfa2.yaml0000644000175000017500000000137400000000000033411 0ustar00jamespagejamespage00000000000000--- prelude: > Agent aliveness will be ignored during port binding. 
other: - | During scale tests we saw that neutron agent aliveness mechanism is not working properly and was marking agents as down and thus failing on port binding. We assessed that aliveness in our context is not actually interesting, as we only use the agentdb mechanism to store the information we need for port binding. As a result of this assessment we decided to remove the aliveness awareness from the code and try to bind the port disregarding that. The consequence of this is that a "neutron agent-list" call might show the agent as dead or alive but that's not information we should depend on to understand if we are binding to that node. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml0000644000175000017500000000047400000000000032267 0ustar00jamespagejamespage00000000000000--- prelude: > Journal recovery for the V2 driver handles failed journal entries. features: - The journal recovery mechanism handles failed journal entries by inspecting ODL and deciding on the correct course of action. This support should be sufficient for the majority of entry failures. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml0000644000175000017500000000061600000000000031450 0ustar00jamespagejamespage00000000000000--- prelude: > L2Gateway Driver v2 or networking-odl. features: - | A new version of L2Gateway driver that integrate OpenStack neutron L2Gateway API with OpenDaylight backend. It supports CRUD operations for l2gateway and l2gateway_connection. This driver uses journalling mechanism, unlike v1 driver, which will first log the operation in journal table before execution. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml0000644000175000017500000000105500000000000031377 0ustar00jamespagejamespage00000000000000--- prelude: > Complement the implementation of odl lbaas driver_v2. features: - Complement the implementation of odl lbaas driver_v2. It supports CRUD operations for loadbalancer, listener, pool, member and healthmonitor. fixes: - Includes the following bug fixes Bug 1640076 - Using odl lbaas driver_v2 to create listener failed. Bug 1633030 - Using odl lbaas driver_v2 to create loadbalancer failed. Bug 1613583 - Odl lbaas driver_v2 Line 61 url_path error. Bug 1613583 - Using ODL lbaas driver_v2 to create member failed.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml0000644000175000017500000000103000000000000032333 0ustar00jamespagejamespage00000000000000--- prelude: > Maintenance thread for the V2 driver. features: - The maintenance thread was introduced in the V2 driver in order to perform various journal maintenance tasks, such as * Stale lock release * Completed entry cleanup * Full sync * Journal recovery The thread runs in a configurable interval and is HA safe so at most one will be executing regardless of how many threads are running concurrently. upgrade: - Maintenace lock table was added to synchronize multiple threads. 
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=networking-odl-16.0.0.0b2.dev1/releasenotes/notes/make-ceilometer-dependency-optional-fb0407dd2d367599.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/make-ceilometer-dependency-optional-fb0407dd2d36750000644000175000017500000000074700000000000034435 0ustar00jamespagejamespage00000000000000--- prelude: > Ceilometer becomes an optional dependency features: - | If a user wants to utilize the ceilometer driver when installing networking-odl, the user should install networking-odl with the following syntax ==> networking-odl[ceilometer] In case, that driver is not needed, then only networking-odl should be used This makes sense because ceilometer driver is only loaded when neutron cfg requires network.statistics.driver and not by default ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/make_sync_timeout_float-490072005e3f3413.yaml0000644000175000017500000000045200000000000033207 0ustar00jamespagejamespage00000000000000--- prelude: > The config parameter sync_timeout sometimes uses the fraction value and because it is set to Intopt, that use of fraction value may end up in error. upgrade: - | Making config paramter sync_timeout of FloatOpt type to allow use of fraction values for timeouts. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=networking-odl-16.0.0.0b2.dev1/releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/network-statistics-from-opendaylight-057a6b3c306260000644000175000017500000000032700000000000034502 0ustar00jamespagejamespage00000000000000--- prelude: > Network Statistics From OpenDaylight. 
features: - Add a ceilometer driver to collect network statistics information using REST APIs exposed by network-statistics module in OpenDaylight. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/neutron-dhcp-port-dcbc3a1008f45cc2.yaml0000644000175000017500000000121300000000000032323 0ustar00jamespagejamespage00000000000000--- prelude: > Allocate a neutron port for each subnet to service DHCP requests within OpenDaylight controller DHCP service. features: - | The feature is to be enabled only for ml2 mechanism V2 Driver, when config parameter enable_dhcp_service is set to True in ml2_conf.ini. Creates a new DHCP Neutron port to be serviced by OpenDaylight Netvirt when a Subnet is created or updated with enable-dhcp parameter. The allocated port is to be removed when the Subnet is deleted or updated with disbale-dhcp parameter. The port is identifed with device-id as OpenDaylight- and device-owner as network:dhcp. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml0000644000175000017500000000066400000000000032611 0ustar00jamespagejamespage00000000000000--- prelude: > The default setting for OpenDayligut openstack service provider was changed from ovsdb netvirt (odl-ovsdb-openstack) to new netvirt(odl-netvirt-openstack) for OpenDaylight Boron/Carbon or later. other: - With devstack by default with OpenDaylight after Boron version, new netvirt openstack service provider(odl-netvirt-openstack) is used instead of legacy netvirt(odl-ovsdb-openstack). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml0000644000175000017500000000035400000000000032316 0ustar00jamespagejamespage00000000000000--- prelude: > Remove LbaaS v1 driver, as LbaaS removed v1 API. upgrade: - Upgrade to use LBaaS v2 driver and migrate to use LBaaS v2 driver. deprecations: - LBaaS v1 API driver for ODL is removed. * LBaaS v2 API driver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml0000644000175000017500000000234500000000000033436 0ustar00jamespagejamespage00000000000000--- prelude: > OpenDaylight feature negotiation allows for networking_odl to adapt its behavior to the features supported by the specific ODL version. features: - Networking-odl first attempts to read the ODL features from the odl_features config value. If this config value is not present, networking-odl requests the features from ODL via REST call. Note that this occurs during the plugin initialize and if ODL is unreachable networking-odl will keep trying until successful, essentially blocking networking-odl initialization (and functionality) until successful. As such, it is recommended that in production environments you manually configure the odl_features config value. If you are not sure which features your ODL supports, please consult the ODL documentation or you can retrieve the list like this, $ curl -u : http://:8080/restconf/operational/neutron:neutron/features | python -mjson.tool Note that the features returned in the json have a namespace which should be omitted from the config value. 
So, if you got to features, say neutron-extensions:feature1 and neutron-extensions:feature2, the config file should have, odl_features=feature1,feature2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/odl-l3-flavor-f093e6c0fb4e9dd8.yaml0000644000175000017500000000102600000000000031350 0ustar00jamespagejamespage00000000000000--- prelude: > OpenStack neutron allows L3 flavors to enable multiple L3 backends in the same cloud. This is ODL L3 flavor driver to implement L3 flavors for OpenStack Neutron and OpenDaylight integration. features: - | L3 flavor driver to implement L3 resource operation callbacks related to router and floating ip create delete and update. upgrade: - | The configuration upgrades are required to enable l3 flavors, service_providers should be added to neutron.conf based on flavor used. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=networking-odl-16.0.0.0b2.dev1/releasenotes/notes/odl_features-option-type-change-367385ae7d1e949e.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/odl_features-option-type-change-367385ae7d1e949e.y0000644000175000017500000000117600000000000034257 0ustar00jamespagejamespage00000000000000--- prelude: > The config option odl_features_json has been added to allow specifying features in the same format ODL returns during negotiation. features: - | The odl_features_json option accepts a JSON compatible with the JSON response from ODL's API for retrieving features ("/restconf/operational/neutron:neutron/features"). If this option is configured, networking_odl will not query ODL for its feature support and will instead use the configured value. If odl_features and odl_features_json are both specified, odl_features_json will take precedence and odl_features will not be used at all. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/ovs_hardware_offload_support-38d2b0b7386b8ca7.yaml0000644000175000017500000000045300000000000034574 0ustar00jamespagejamespage00000000000000--- features: - The ``opendaylight`` mechanism driver now supports hardware offload via SR-IOV. It allows binding direct (SR-IOV) ports. Using ``openvswitch`` 2.8.0 and 'Linux Kernel' 4.12 allows to control the SR-IOV VF via OpenFlow control plane and gain accelerated 'Open vSwitch'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml0000644000175000017500000000117100000000000032700 0ustar00jamespagejamespage00000000000000--- prelude: > Change the default value of port_binding_controller from network-topology to pseudo-agentdb-binding as networking-topology will be deprecated. upgrade: - pseudo-agentdb-binding is supported by the version of OpenDaylight Boron(0.5.x) or later. So for the version of OpenDaylight Beryllium or earlier, the option, port_binding_controller, needs to be explicitly configured to be legacy-port-binding or network-topology(deprecated). deprecations: - port binding controller, network-topology, is deprecated with OpenStack Ocata and will be removed in future openstack version. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml0000644000175000017500000000106600000000000033527 0ustar00jamespagejamespage00000000000000--- prelude: > Agentless Port binding controller using agentdb for persistency with ODL provided host configuration. features: - Reads host configuration from ODL using a REST/get and stores the information in Neutron agentdb for persistency. 
This host configuration is read back from agentdb and applied during port binding. Without this feature several out-of-sync race conditions were caused due to incorrect host information. fixes: - Includes the following bug fixes Bug 1608659 - pseudo_agentdb_binding AttributeError. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml0000644000175000017500000000051300000000000031030 0ustar00jamespagejamespage00000000000000--- prelude: > QoS Driver V1 for networking-odl. features: - A new driver to integrate OpenStack neutron QoS API with OpenDayLight backend. It supports CRUD operations for QoS policy and its associated rules. The QoS driver in tree is of version v1, which does not log the operation request in journal table. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml0000644000175000017500000000045400000000000031327 0ustar00jamespagejamespage00000000000000--- prelude: > QoS Driver V2 for networking-odl features: - A new version of QoS driver that integrate OpenStack neutron QoS API with OpenDaylight backend. This driver uses journaling mechanism unlike v1 driver, which will first log the operation in journal table before execution. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml0000644000175000017500000000062000000000000033624 0ustar00jamespagejamespage00000000000000--- prelude: > Eliminate network topology based port binding upgrade: - If network topology based port binding, network-topology, is used, migrate to pseodu agent based port binding, pseudo-agentdb-binding. 
deprecations: - network topology based port binding was removed. So is network-topology value for port_binding_controllers. Migrate pseudo-agentdb-binding port binding. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/remove-neutron-lbaas-6afe0b0f7b61290a.yaml0000644000175000017500000000036000000000000032730 0ustar00jamespagejamespage00000000000000--- deprecations: - | Neutron-lbaas is retired in Train cycle, so all the related dependencies must be removed from networking-odl, for details see: http://lists.openstack.org/pipermail/openstack-discuss/2019-May/006158.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/remove-v1-driver-df408f9916fc5e5d.yaml0000644000175000017500000000070300000000000032031 0ustar00jamespagejamespage00000000000000--- prelude: > The v1 drivers, which were deprecated in the Queens cycle, are removed. All existing usages should be updated to use the v2 drivers. upgrade: - | If you've been using v1 drivers, update your configuration to use the v2 drivers. Otherwise, neutron won't boot properly if v1 drivers are still used. critical: - | The v1 drivers are removed. If you're still using v1 drivers, migrate to use the v2 drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml0000644000175000017500000000050100000000000032757 0ustar00jamespagejamespage00000000000000--- prelude: > As the QoS v2 driver adapted new framework from OpenStack neutron's qos driver framework, QoS v1 driver using notification_drivers is no longer needed. upgrade: - Removing QoS V1 driver which is using deprecated notification driver framework from OpenStack Neutron's QoS driver base. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130284.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/reno.cache0000644000175000017500000000075600000000000025455 0ustar00jamespagejamespage00000000000000--- file-contents: releasenotes/notes/drop-py27-support-3bc8094e1823cfcf.yaml: upgrade: - 'Python 2.7 support has been dropped. Last release of networking-odl to support python 2.7 is OpenStack Train. The minimum version of Python now supported by networking-odl is Python 3.6. ' notes: - files: - - releasenotes/notes/drop-py27-support-3bc8094e1823cfcf.yaml - !!binary | MzU4ZGE4NjIzYWQ4Y2I4ZGY0MGYyYTkzY2VhNjRjM2NlNjYzMDVkYg== version: 16.0.0.0b1 ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=networking-odl-16.0.0.0b2.dev1/releasenotes/notes/set-ovs-hostconfig-flat-default-a3c189858304e2ed.yaml 22 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/set-ovs-hostconfig-flat-default-a3c189858304e2ed.y0000644000175000017500000000077300000000000034167 0ustar00jamespagejamespage00000000000000--- prelude: > update the default value of supported network type for ovs-set-hostconfig. enable 'flat' by default 'flat' type wasn't enabled because legacy netvirt doesn't support it. Now new netvirt is introduced to deprecate legacy netvirt and New netvirt supports flat. So update default value for network type to reflect it. upgrade: - If you're still using legacy netvirt, you need to disable flat network type explicitly when issuing set-ovs-hostconfig command. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml0000644000175000017500000000056500000000000031264 0ustar00jamespagejamespage00000000000000--- prelude: > Networking SFC V1 driver for networking-odl. 
features: - First version of the driver to support networking-sfc API through OpenDaylight controller. This driver support CRUD operation for flow classifier, port-pair, port-pair-group and port-pair-chain. This is version 1 driver and does not support the journal based implementation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml0000644000175000017500000000130500000000000031265 0ustar00jamespagejamespage00000000000000--- prelude: > Networking SFC V2 driver for networking-odl. features: - Second version of the driver to support networking-sfc API through OpenDaylight controller. This driver support CRUD operation for flow classifier, port-pair, port-pair-group and port-pair-chain. This is version 2 driver and it does support the journal based implementation, where operations are committed in the data store first and then journal thread sycn it with OpenDaylight. This implementation guarantee the ordering of the CRUD events. networking-sfc ocata or later is required. https://review.opendev.org/#/c/363893/ is the corresponding patch of networking-sfc in Ocata cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml0000644000175000017500000000052200000000000031312 0ustar00jamespagejamespage00000000000000--- prelude: > Trunk Drivers v1 and v2 for networking-odl. features: - | A new driver to integrate OpenStack TrunkPort API with OpenDayLight backend. It supports CRUD operations for TrunkPorts. The version v2 driver will first log the call in journal table before execution. Version v1 driver doesn't log any calls. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/version-bump-16230eadac71cbb0.yaml0000644000175000017500000000030300000000000031351 0ustar00jamespagejamespage00000000000000--- prelude: > networking-odl adopts version number aligned with neutron from Pike release. The version number is bumped 11.x.x. other: - version is bumped to 11:pike from 4:ocata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml0000644000175000017500000000145500000000000032334 0ustar00jamespagejamespage00000000000000--- prelude: > Support for vlan-transparency. features: - The extension `vlan-transparent` is supported for Newton release, unconditionally only vxlan is considered to support its extension independent of ODL openstack provider. It's future work to allow ODL openstack provider to report list of supported network types at start up statically. issues: - Currently only network type of VXLAN is statically considered to support vlan-transparent independently of OpenDaylight openstack provider. It should use capability report by OpenDaylight openstack provider statically instead of static hard code. other: - For details please read 'VLAN trunking networks for NFV '_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/notes/websocket-client-7c8117671aeea181.yaml0000644000175000017500000000034300000000000031777 0ustar00jamespagejamespage00000000000000--- prelude: > Websocket-client provides framework to create webscket clients for ODL. 
features: - Features include callback on new notifications and callback on reconnection which includes status information.././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/0000755000175000017500000000000000000000000023665 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/_static/0000755000175000017500000000000000000000000025313 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/_static/.placeholder0000644000175000017500000000000000000000000027564 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/_templates/0000755000175000017500000000000000000000000026022 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/_templates/.placeholder0000644000175000017500000000000000000000000030273 0ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/conf.py0000644000175000017500000002064200000000000025170 0ustar00jamespagejamespage00000000000000# -*- coding: utf-8 -*- # # Networking OpenDaylight Release Notes documentation build configuration file, created by # sphinx-quickstart on Fri Jul 22 14:54:21 2016. # # This file is execfile()d with the current directory set to its # containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/networking-odl' bug_project = 'networking-odl' bug_tag = 'doc' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Networking OpenDaylight Release Notes' copyright = u'2016, networking-odl developers' # Release notes are version independent. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
#html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'NetworkingOpenDaylightReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'NetworkingOpenDaylightReleaseNotes.tex', u'Networking OpenDaylight Release Notes Documentation', u'networking-odl developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'networkingopendaylightreleasenotes', u'Networking OpenDaylight Release Notes Documentation', [u'networking-odl developers'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'NetworkingOpenDaylightReleaseNotes', u'Networking OpenDaylight Release Notes Documentation', u'networking-odl developers', 'NetworkingOpenDaylightReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/index.rst0000644000175000017500000000076400000000000025535 0ustar00jamespagejamespage00000000000000.. Networking OpenDaylight Release Notes documentation master file, created by sphinx-quickstart on Fri Jul 22 14:54:21 2016. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to Networking OpenDaylight Release Notes's documentation! ================================================================= Contents: .. 
toctree:: :maxdepth: 2 unreleased train stein rocky queens pike ocata newton ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.782714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/locale/0000755000175000017500000000000000000000000025124 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.782714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/locale/en_GB/0000755000175000017500000000000000000000000026076 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/locale/en_GB/LC_MESSAGES/0000755000175000017500000000000000000000000027663 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000644000175000017500000006577400000000000032737 0ustar00jamespagejamespage00000000000000# Andi Chandler , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: Networking OpenDaylight Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-03-07 20:20+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-12-13 01:16+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "11.0.0" msgstr "11.0.0" msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid "4.0.0" msgstr "4.0.0" msgid "" "A new driver to integrate OpenStack TrunkPort API with OpenDayLight backend. " "It supports CRUD operations for TrunkPorts. 
The version v2 driver will first " "log the call in journal table before execution. Version v1 driver doesn't " "log any calls." msgstr "" "A new driver to integrate OpenStack TrunkPort API with OpenDayLight backend. " "It supports CRUD operations for TrunkPorts. The version v2 driver will first " "log the call in journal table before execution. Version v1 driver doesn't " "log any calls." msgid "" "A new driver to integrate OpenStack neutron QoS API with OpenDayLight " "backend. It supports CRUD operations for QoS policy and its associated " "rules. The QoS driver in tree is of version v1, which does not log the " "operation request in journal table." msgstr "" "A new driver to integrate OpenStack neutron QoS API with OpenDayLight " "backend. It supports CRUD operations for QoS policy and its associated " "rules. The QoS driver in tree is of version v1, which does not log the " "operation request in journal table." msgid "" "A new version of BGPVPN driver that integrate OpenStack Neutron BGPVPN API " "with OpenDaylight backend. It supports CRUD operations for BGPVPN and " "enables networks and routers to be associated to such BGPVPNs. This driver " "uses journaling mechanism, unlike v1 driver, which will first log the " "operation in journal table before execution." msgstr "" "A new version of BGPVPN driver that integrate OpenStack Neutron BGPVPN API " "with OpenDaylight backend. It supports CRUD operations for BGPVPN and " "enables networks and routers to be associated to such BGPVPNs. This driver " "uses journaling mechanism, unlike v1 driver, which will first log the " "operation in journal table before execution." msgid "" "A new version of L2Gateway driver that integrate OpenStack neutron L2Gateway " "API with OpenDaylight backend. It supports CRUD operations for l2gateway and " "l2gateway_connection. This driver uses journalling mechanism, unlike v1 " "driver, which will first log the operation in journal table before execution." 
msgstr "" "A new version of L2Gateway driver that integrate OpenStack neutron L2Gateway " "API with OpenDaylight backend. It supports CRUD operations for l2gateway and " "l2gateway_connection. This driver uses journalling mechanism, unlike v1 " "driver, which will first log the operation in journal table before execution." msgid "" "A new version of QoS driver that integrate OpenStack neutron QoS API with " "OpenDaylight backend. This driver uses journaling mechanism unlike v1 " "driver, which will first log the operation in journal table before execution." msgstr "" "A new version of QoS driver that integrate OpenStack neutron QoS API with " "OpenDaylight backend. This driver uses journaling mechanism unlike v1 " "driver, which will first log the operation in journal table before execution." msgid "Add ODL Beryllium SR4 release definition." msgstr "Add ODL Beryllium SR4 release definition." msgid "" "Add OpenDaylight Beryllium SR4 release and Beryllium 0.4.5 snapshot " "definition and remove Beryllium 0.4.4 snapshot as OpenDaylight Beryllium " "0.4.4 SR4 has been released." msgstr "" "Add OpenDaylight Beryllium SR4 release and Beryllium 0.4.5 snapshot " "definition and remove Beryllium 0.4.4 snapshot as OpenDaylight Beryllium " "0.4.4 SR4 has been released." msgid "" "Add a ceilometer driver to collect network statistics information using REST " "APIs exposed by network-statistics module in OpenDaylight." msgstr "" "Add a Ceilometer driver to collect network statistics information using REST " "APIs exposed by network-statistics module in OpenDaylight." msgid "Added FLAT type networks support." msgstr "Added FLAT type networks support." msgid "" "Agentless Port binding controller using agentdb for persistency with ODL " "provided host configuration." msgstr "" "Agentless Port binding controller using agentdb for persistence with ODL " "provided host configuration." 
msgid "" "Allocate a neutron port for each subnet to service DHCP requests within " "OpenDaylight controller DHCP service." msgstr "" "Allocate a Neutron port for each subnet to service DHCP requests within " "OpenDaylight controller DHCP service." msgid "" "As the QoS v2 driver adapted new framework from OpenStack neutron's qos " "driver framework, QoS v1 driver using notification_drivers is no longer " "needed." msgstr "" "As the QoS v2 driver adapted new framework from OpenStack Neutron's QoS " "driver framework, QoS v1 driver using notification_drivers is no longer " "needed." msgid "BGPVPN Version 2 Driver for OpenDaylight." msgstr "BGPVPN Version 2 Driver for OpenDaylight." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Change the default value of port_binding_controller from network-topology to " "pseudo-agentdb-binding as networking-topology will be deprecated." msgstr "" "Change the default value of port_binding_controller from network-topology to " "pseudo-agentdb-binding as networking-topology will be deprecated." msgid "Changed devstack default to V2 driver." msgstr "Changed devstack default to V2 driver." msgid "Complement the implementation of odl lbaas driver_v2." msgstr "Complement the implementation of ODL LBaaS driver_v2." msgid "" "Complement the implementation of odl lbaas driver_v2. It supports CRUD " "operations for loadbalancer, listener, pool, member and healthmonitor." msgstr "" "Complement the implementation of ODL LBaaS driver_v2. It supports CRUD " "operations for load balancer, listener, pool, member and health monitor." msgid "Completed rows are deleted by default." msgstr "Completed rows are deleted by default." msgid "" "Completed rows will now be immediately deleted upon completion. To retain " "the completed rows, set the completed_rows_retention configuration value " "explicitly." msgstr "" "Completed rows will now be immediately deleted upon completion. 
To retain " "the completed rows, set the completed_rows_retention configuration value " "explicitly." msgid "Contents:" msgstr "Contents:" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Currently only network type of VXLAN is statically considered to support " "vlan-transparent independently of OpenDaylight openstack provider. It should " "use capability report by OpenDaylight openstack provider statically instead " "of static hard code." msgstr "" "Currently only network type of VXLAN is statically considered to support " "vlan-transparent independently of OpenDaylight openstack provider. It should " "use capability report by OpenDaylight openstack provider statically instead " "of static hard code." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "Eliminate network topology based port binding" msgstr "Eliminate network topology based port binding" msgid "" "Features include callback on new notifications and callback on reconnection " "which includes status information." msgstr "" "Features include callback on new notifications and callback on reconnection " "which includes status information." msgid "" "First version of the driver to support networking-sfc API through " "OpenDaylight controller. This driver support CRUD operation for flow " "classifier, port-pair, port-pair-group and port-pair-chain. This is version " "1 driver and does not support the journal based implementation." msgstr "" "First version of the driver to support networking-sfc API through " "OpenDaylight controller. This driver support CRUD operation for flow " "classifier, port-pair, port-pair-group and port-pair-chain. This is version " "1 driver and does not support the journal based implementation." msgid "Fixes ODL Neutron NB URL path for SFC v2 Driver." msgstr "Fixes ODL Neutron NB URL path for SFC v2 Driver." msgid "Fixes full sync errors with SFCv2 driver." msgstr "Fixes full sync errors with SFCv2 driver." 
msgid "" "For details please read 'VLAN trunking networks for NFV '_." msgstr "" "For details please read 'VLAN trunking networks for NFV '_." msgid "" "Full sync supports and ODL controller with no Neutron resources on it. This " "support is for the V2 driver, as V1 driver already supports this." msgstr "" "Full sync supports an ODL controller with no Neutron resources on it. This " "support is for the V2 driver, as V1 driver already supports this." msgid "Host Configuration data population from agentless OpenDayLight." msgstr "Host Configuration data populated from agentless OpenDayLight." msgid "" "If network topology based port binding, network-topology, is used, migrate " "to pseodu agent based port binding, pseudo-agentdb-binding." msgstr "" "If network topology based port binding, network-topology, is used, migrate " "to pseudo agent based port binding, pseudo-agentdb-binding." msgid "" "If you're still using legacy netvirt, you need to disable flat network type " "explicitly when issuing set-ovs-hostconfig command." msgstr "" "If you're still using legacy netvirt, you need to disable flat network type " "explicitly when issuing set-ovs-hostconfig command." msgid "" "In addition to existing supported types, networks of type FLAT can be also " "used with ODL." msgstr "" "In addition to existing supported types, networks of type FLAT can be also " "used with ODL." msgid "" "Includes the following bug fixes Bug 1608659 - pseudo_agentdb_binding " "AttributeError." msgstr "" "Includes the following bug fixes Bug 1608659 - pseudo_agentdb_binding " "AttributeError." msgid "Journal recovery for the V2 driver handles failed journal entries." msgstr "Journal recovery for the V2 driver handles failed journal entries." msgid "Known Issues" msgstr "Known Issues" msgid "L2Gateway Driver v2 or networking-odl." msgstr "L2Gateway Driver v2 or networking-odl." msgid "LBaaS v1 API driver for ODL is removed. * LBaaS v2 API driver" msgstr "LBaaS v1 API driver for ODL is removed. 
* LBaaS v2 API driver" msgid "Maintenace lock table was added to synchronize multiple threads." msgstr "Maintenance lock table was added to synchronise multiple threads." msgid "Maintenance thread for the V2 driver." msgstr "Maintenance thread for the V2 driver." msgid "Network Statistics From OpenDaylight." msgstr "Network Statistics From OpenDaylight." msgid "Networking SFC V1 driver for networking-odl." msgstr "Networking SFC V1 driver for networking-odl." msgid "Networking SFC V2 driver for networking-odl." msgstr "Networking SFC V2 driver for networking-odl." msgid "" "Networking-odl first attempts to read the ODL features from the odl_features " "config value. If this config value is not present, networking-odl requests " "the features from ODL via REST call. Note that this occurs during the plugin " "initialize and if ODL is unreachable networking-odl will keep trying until " "successful, essentially blocking networking-odl initialization (and " "functionality) until successful. As such, it is recommended that in " "production environments you manually configure the odl_features config " "value. If you are not sure which features your ODL supports, please consult " "the ODL documentation or you can retrieve the list like this, $ curl -u " ": http://:8080/restconf/operational/neutron:neutron/" "features | python -mjson.tool Note that the features returned in the json " "have a namespace which should be omitted from the config value. So, if you " "got to features, say neutron-extensions:feature1 and neutron-extensions:" "feature2, the config file should have, odl_features=feature1,feature2" msgstr "" "Networking-odl first attempts to read the ODL features from the odl_features " "config value. If this config value is not present, networking-odl requests " "the features from ODL via REST call. 
Note that this occurs during the plugin " "initialise and if ODL is unreachable networking-odl will keep trying until " "successful, essentially blocking networking-odl initialisation (and " "functionality) until successful. As such, it is recommended that in " "production environments you manually configure the odl_features config " "value. If you are not sure which features your ODL supports, please consult " "the ODL documentation or you can retrieve the list like this, $ curl -u " ": http://:8080/restconf/operational/neutron:neutron/" "features | python -mjson.tool Note that the features returned in the JSON " "have a namespace which should be omitted from the config value. So, if you " "got to features, say neutron-extensions:feature1 and neutron-extensions:" "feature2, the config file should have, odl_features=feature1,feature2" msgid "New Features" msgstr "New Features" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "OpenDaylight feature negotiation allows for networking_odl to adapt its " "behavior to the features supported by the specific ODL version." msgstr "" "OpenDaylight feature negotiation allows for networking_odl to adapt its " "behaviour to the features supported by the specific ODL version." msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Prelude" msgstr "Prelude" msgid "QoS Driver V1 for networking-odl." msgstr "QoS Driver V1 for networking-odl." msgid "QoS Driver V2 for networking-odl" msgstr "QoS Driver V2 for networking-odl" msgid "" "Reads host configuration from ODL using a REST/get and stores the " "information in Neutron agentdb for persistency. This host configuration is " "read back from agentdb and applied during port binding. Without this feature " "several out-of-sync race conditions were caused due to incorrect host " "information." 
msgstr "" "Reads host configuration from ODL using a REST/get and stores the " "information in Neutron agentdb for persistence. This host configuration is " "read back from agentdb and applied during port binding. Without this feature " "several out-of-sync race conditions were caused due to incorrect host " "information." msgid "Remove LbaaS v1 driver, as LbaaS removed v1 API." msgstr "Remove LBaaS v1 driver, as LBaaS removed v1 API." msgid "" "Removing QoS V1 driver which is using deprecated notification driver " "framework from OpenStack Neutron's QoS driver base." msgstr "" "Removing QoS V1 driver which is using deprecated notification driver " "framework from OpenStack Neutron's QoS driver base." msgid "" "Second version of the driver to support networking-sfc API through " "OpenDaylight controller. This driver support CRUD operation for flow " "classifier, port-pair, port-pair-group and port-pair-chain. This is version " "2 driver and it does support the journal based implementation, where " "operations are committed in the data store first and then journal thread " "sycn it with OpenDaylight. This implementation guarantee the ordering of the " "CRUD events. networking-sfc ocata or later is required. https://review." "openstack.org/#/c/363893/ is the corresponding patch of networking-sfc in " "Ocata cycle." msgstr "" "Second version of the driver to support networking-sfc API through " "OpenDaylight controller. This driver support CRUD operation for flow " "classifier, port-pair, port-pair-group and port-pair-chain. This is version " "2 driver and it does support the journal based implementation, where " "operations are committed in the data store first and then journal thread " "sycn it with OpenDaylight. This implementation guarantee the ordering of the " "CRUD events. networking-sfc ocata or later is required. https://review." "openstack.org/#/c/363893/ is the corresponding patch of networking-sfc in " "Ocata cycle." 
msgid "" "Starting with Ocata, Devstack will use V2 drivers (where available) by " "default. To force the use of V1 architecture drivers you can specify " "'ODL_V2DRIVER=False' in the local.conf file." msgstr "" "Starting with Ocata, Devstack will use V2 drivers (where available) by " "default. To force the use of V1 architecture drivers you can specify " "'ODL_V2DRIVER=False' in the local.conf file." msgid "Support for vlan-transparency." msgstr "Support for VLAN-transparency." msgid "" "The QoS V1 driver is deprecated in the Pike cycle and will be removed in the " "Queens release." msgstr "" "The QoS V1 driver is deprecated in the Pike cycle and will be removed in the " "Queens release." msgid "" "The V1 drivers are not actively maintained by the networking-odl team for a " "few cycles already and aren't guaranteed to even work. As such, the " "networking-odl team has decided that the drivers will be marked as " "deprecated beginning Queens cycle, and removed in the beginning of the Rocky " "cycle. If you're still using the V1 drivers, please switch to using the V2 " "drivers by updating the appropriate configuration values." msgstr "" "The V1 drivers are not actively maintained by the networking-odl team for a " "few cycles already and aren't guaranteed to even work. As such, the " "networking-odl team has decided that the drivers will be marked as " "deprecated beginning Queens cycle, and removed in the beginning of the Rocky " "cycle. If you're still using the V1 drivers, please switch to using the V2 " "drivers by updating the appropriate configuration values." msgid "" "The ``opendaylight`` mechanism driver now supports hardware offload via SR-" "IOV. It allows binding direct (SR-IOV) ports. Using ``openvswitch`` 2.8.0 " "and 'Linux Kernel' 4.12 allows to control the SR-IOV VF via OpenFlow control " "plane and gain accelerated 'Open vSwitch'." msgstr "" "The ``opendaylight`` mechanism driver now supports hardware offload via SR-" "IOV. 
It allows binding direct (SR-IOV) ports. Using ``openvswitch`` 2.8.0 " "and 'Linux Kernel' 4.12 allows to control the SR-IOV VF via OpenFlow control " "plane and gain accelerated 'Open vSwitch'." msgid "" "The default setting for OpenDayligut openstack service provider was changed " "from ovsdb netvirt (odl-ovsdb-openstack) to new netvirt(odl-netvirt-" "openstack) for OpenDaylight Boron/Carbon or later." msgstr "" "The default setting for OpenDayligut openstack service provider was changed " "from ovsdb netvirt (odl-ovsdb-openstack) to new netvirt(odl-netvirt-" "openstack) for OpenDaylight Boron/Carbon or later." msgid "" "The extension `vlan-transparent` is supported for Newton release, " "unconditionally only vxlan is considered to support its extension " "independent of ODL openstack provider. It's future work to allow ODL " "openstack provider to report list of supported network types at start up " "statically." msgstr "" "The extension `vlan-transparent` is supported for the Newton release. " "Unconditionally only VXLAN is considered to support its extension " "independent of ODL openstack provider. It's future work to allow ODL " "openstack provider to report list of supported network types at start up " "statically." msgid "" "The feature is to be enabled only for ml2 mechanism V2 Driver, when config " "parameter enable_dhcp_service is set to True in ml2_conf.ini. Creates a new " "DHCP Neutron port to be serviced by OpenDaylight Netvirt when a Subnet is " "created or updated with enable-dhcp parameter. The allocated port is to be " "removed when the Subnet is deleted or updated with disbale-dhcp parameter. " "The port is identifed with device-id as OpenDaylight- and device-" "owner as network:dhcp." msgstr "" "The feature is to be enabled only for ml2 mechanism V2 Driver, when config " "parameter enable_dhcp_service is set to True in ml2_conf.ini. 
It creates a " "new DHCP Neutron port to be serviced by OpenDaylight Netvirt when a Subnet " "is created or updated with enable-dhcp parameter. The allocated port is " "removed when the Subnet is deleted or updated with disbale-dhcp parameter. " "The port is identifed with device-id as OpenDaylight- and device-" "owner as network:dhcp." msgid "" "The full sync process looks for a \"canary\" network on the ODL controller " "side. If such a network is found, it doesn't do anything. If the network is " "missing then all the neutron resources are re-created on ODL. This supports " "cases when ODL controller comes online with no Neutron resources on it (also " "referred to as \"cold reboot\", but can happen on various cases)." msgstr "" "The full sync process looks for a \"canary\" network on the ODL controller " "side. If such a network is found, it doesn't do anything. If the network is " "missing then all the Neutron resources are re-created on ODL. This supports " "cases when the ODL controller comes online with no Neutron resources on it " "(also referred to as \"cold reboot\", but can happen in various cases)." msgid "" "The functional tests were added. It's new class of test cases, which " "requires pre-configured environment. Environment to run such tests can be " "configured by tool in networking-odl/tools.configure_for_func_testing.sh" msgstr "" "The functional tests were added. They are a new class of test cases, which " "requires pre-configured environment. The environment to run such tests can " "be configured by tool in networking-odl/tools.configure_for_func_testing.sh" msgid "" "The journal recovery mechanism handles failed journal entries by inspecting " "ODL and deciding on the correct course of action. This support should be " "sufficient for the majority of entry failures." msgstr "" "The journal recovery mechanism handles failed journal entries by inspecting " "ODL and deciding on the correct course of action. 
This support should be " "sufficient for the majority of entry failures." msgid "" "The maintenance thread was introduced in the V2 driver in order to perform " "various journal maintenance tasks, such as * Stale lock release * Completed " "entry cleanup * Full sync * Journal recovery The thread runs in a " "configurable interval and is HA safe so at most one will be executing " "regardless of how many threads are running concurrently." msgstr "" "The maintenance thread was introduced in the V2 driver in order to perform " "various journal maintenance tasks, such as * Stale lock release * Completed " "entry cleanup * Full sync * Journal recovery The thread runs in a " "configurable interval and is HA safe so at most one will be executing " "regardless of how many threads are running concurrently." msgid "" "The new class of test cases, functional test, has been added. So was help " "scripts to setup necessary environment." msgstr "" "The new class of test cases, functional test, has been added. So was help " "scripts to setup necessary environment." msgid "" "This configuration is used to get the information about physical host type " "and other config data like supported vnic types stored in ovsdb. Networking-" "odl can fetch this info from OpenDaylight via REST API request and feed " "agents_db table in neutron, which will be used by neutron scheduler." msgstr "" "This configuration is used to get the information about physical host type " "and other config data like supported VNIC types stored in ovsdb. Networking-" "odl can fetch this info from OpenDaylight via REST API request and feed " "agents_db table in neutron, which will be used by Neutron scheduler." msgid "Trunk Drivers v1 and v2 for networking-odl." msgstr "Trunk Drivers v1 and v2 for networking-odl." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "Upgrade to use LBaaS v2 driver and migrate to use LBaaS v2 driver." msgstr "Upgrade to use LBaaS v2 driver and migrate to use LBaaS v2 driver." 
msgid "" "V1 drivers are marked depracated beginning with Queens cycle, to be removed " "in Rocky cycle." msgstr "" "V1 drivers are marked deprecated beginning with the Queens cycle, to be " "removed in Rocky cycle." msgid "Websocket-client provides framework to create webscket clients for ODL." msgstr "" "Websocket-client provides a framework to create websocket clients for ODL." msgid "Welcome to Networking OpenDaylight Release Notes's documentation!" msgstr "Welcome to Networking OpenDaylight release notes documentation!" msgid "" "With devstack by default with OpenDaylight after Boron version, new netvirt " "openstack service provider(odl-netvirt-openstack) is used instead of legacy " "netvirt(odl-ovsdb-openstack)." msgstr "" "With devstack by default with OpenDaylight after Boron version, new netvirt " "openstack service provider(odl-netvirt-openstack) is used instead of legacy " "netvirt(odl-ovsdb-openstack)." msgid "" "network topology based port binding was removed. So is network-topology " "value for port_binding_controllers. Migrate pseudo-agentdb-binding port " "binding." msgstr "" "network topology based port binding was removed. So is network-topology " "value for port_binding_controllers. Migrate pseudo-agentdb-binding port " "binding." msgid "" "networking-odl adopts version number aligned with neutron from Pike release. " "The version number is bumped 11.x.x." msgstr "" "networking-odl adopts version number aligned with neutron from Pike release. " "The version number is bumped 11.x.x." msgid "" "port binding controller, network-topology, is deprecated with OpenStack " "Ocata and will be removed in future openstack version." msgstr "" "port binding controller, network-topology, is deprecated with OpenStack " "Ocata and will be removed in future openstack version." msgid "" "pseudo-agentdb-binding is supported by the version of OpenDaylight Boron(0.5." "x) or later. 
So for the version of OpenDaylight Beryllium or earlier, the " "option, port_binding_controller, needs to be explicitly configured to be " "legacy-port-binding or network-topology(deprecated)." msgstr "" "pseudo-agentdb-binding is supported by the version of OpenDaylight Boron(0.5." "x) or later. So for the version of OpenDaylight Beryllium or earlier, the " "option, port_binding_controller, needs to be explicitly configured to be " "legacy-port-binding or network-topology(deprecated)." msgid "" "update the default value of supported network type for ovs-set-hostconfig. " "enable 'flat' by default 'flat' type wasn't enabled because legacy netvirt " "doesn't support it. Now new netvirt is introduced to deprecate legacy " "netvirt and New netvirt supports flat. So update default value for network " "type to reflect it." msgstr "" "update the default value of supported network type for ovs-set-hostconfig. " "enable 'flat' by default 'flat' type wasn't enabled because legacy netvirt " "doesn't support it. Now new netvirt is introduced to deprecate legacy " "netvirt and New netvirt supports flat. So update default value for network " "type to reflect it." msgid "version is bumped to 11:pike from 4:ocata." msgstr "version is bumped to 11:pike from 4:ocata." 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.782714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/locale/fr/0000755000175000017500000000000000000000000025533 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/releasenotes/source/locale/fr/LC_MESSAGES/0000755000175000017500000000000000000000000027320 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000644000175000017500000000145300000000000032354 0ustar00jamespagejamespage00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Networking OpenDaylight Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-02-09 19:46+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:33+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "Contents:" msgstr "Contenu :" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Welcome to Networking OpenDaylight Release Notes's documentation!" msgstr "" "Bienvenue dans la documentation de la note de Release de Networking " "OpenDaylight" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/newton.rst0000644000175000017500000000022300000000000025726 0ustar00jamespagejamespage00000000000000=================================== Newton Series Release Notes =================================== .. 
release-notes:: :branch: stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/ocata.rst0000644000175000017500000000022100000000000025501 0ustar00jamespagejamespage00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/pike.rst0000644000175000017500000000021700000000000025347 0ustar00jamespagejamespage00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/queens.rst0000644000175000017500000000022300000000000025714 0ustar00jamespagejamespage00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/rocky.rst0000644000175000017500000000022100000000000025541 0ustar00jamespagejamespage00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/stein.rst0000644000175000017500000000022100000000000025534 0ustar00jamespagejamespage00000000000000=================================== Stein Series Release Notes =================================== .. 
release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/train.rst0000644000175000017500000000017600000000000025540 0ustar00jamespagejamespage00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/releasenotes/source/unreleased.rst0000644000175000017500000000015300000000000026545 0ustar00jamespagejamespage00000000000000============================ Current Series Release Notes ============================ .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/requirements.txt0000644000175000017500000000170500000000000023163 0ustar00jamespagejamespage00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=4.0.0 # Apache-2.0 Babel>=2.5.3 # BSD stevedore>=1.28.0 # Apache-2.0 debtcollector>=1.19.0 # Apache-2.0 neutron-lib>=2.0.0 # Apache-2.0 websocket-client>=0.47.0 # LGPLv2+ # OpenStack CI will install the following projects from git # if they are in the required-projects list for a job: neutron>=16.0.0.0b1 # Apache-2.0 networking-l2gw>=12.0.0 # Apache-2.0 networking-sfc>=10.0.0.0b1 # Apache-2.0 networking-bgpvpn>=10.0.0b1 # Apache-2.0 # The comment below indicates this project repo is current with neutron-lib # and should receive neutron-lib consumption patches as they are released # in neutron-lib. It also implies the project will stay current with TC # and infra initiatives ensuring consumption patches can land. 
# neutron-lib-current ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.782714 networking-odl-16.0.0.0b2.dev1/roles/0000755000175000017500000000000000000000000021020 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/roles/show-odl-info/0000755000175000017500000000000000000000000023505 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/roles/show-odl-info/README.rst0000644000175000017500000000025100000000000025172 0ustar00jamespagejamespage00000000000000Prints ODL information that is useful for debugging **Role Variables** .. zuul:rolevar:: devstack_base_dir :default: /opt/stack The devstack base directory. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/roles/show-odl-info/defaults/0000755000175000017500000000000000000000000025314 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/roles/show-odl-info/defaults/main.yaml0000644000175000017500000000003600000000000027123 0ustar00jamespagejamespage00000000000000devstack_base_dir: /opt/stack ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/roles/show-odl-info/tasks/0000755000175000017500000000000000000000000024632 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/roles/show-odl-info/tasks/main.yaml0000644000175000017500000000245600000000000026451 
0ustar00jamespagejamespage00000000000000- name: Print interfaces information command: ip address become: true - name: Print link information command: ip link become: true - name: Print routing tables command: ip route become: true - name: Print a dump of ovsdb\'s database command: ovsdb-client dump become: true - name: Print overview of ovs-vswitchd\'s database command: ovs-vsctl show become: true - name: Retrieve ovs-vswitchd bridges command: ovs-vsctl list-br become: true register: ovs_bridges - include_tasks: ovs_flows.yaml with_items: "{{ ovs_bridges.stdout_lines }}" - name: Print openstack information shell: cmd: | source "{{ devstack_base_dir }}/devstack/openrc" admin admin echo "Openstack networks" openstack network list echo "Openstack ports" openstack port list echo "Openstack subnets" openstack subnet list echo "Openstack routers" openstack router list executable: /bin/bash chdir: "{{ zuul.project.src_dir }}" ignore_errors: true - name: Print ODL configuration command: curl --silent --user admin:admin "http://{{ inventory_hostname }}:8087/restconf/{{ item }}?prettyPrint=true" with_items: - config/neutron:neutron - config/opendaylight-inventory:nodes - config/elan:elan-instances - config/elan:elan-interfaces ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/roles/show-odl-info/tasks/ovs_flows.yaml0000644000175000017500000000167300000000000027546 0ustar00jamespagejamespage00000000000000- name: Print information on "{{ item }}" command: ovs-ofctl --protocols OpenFlow13 show "{{ item|quote }}" become: true - name: Print statistics for network devices associated with "{{ item }}" command: ovs-ofctl --protocols OpenFlow13 dump-ports "{{ item|quote }}" become: true - name: Print detailed information about network devices associated with "{{ item }}" command: ovs-ofctl --protocols OpenFlow13 dump-ports-desc "{{ item|quote }}" become: true - name: Print all flow entries in "{{ 
item }}"\'s tables that match flows command: ovs-ofctl --protocols OpenFlow13 dump-flows "{{ item|quote }}" become: true - name: Print group entries in "{{ item }}"\'s tables command: ovs-ofctl --protocols OpenFlow13 dump-groups "{{ item|quote }}" become: true - name: Print statistics for the specified groups in the "{{ item }}"\'s tables command: ovs-ofctl --protocols OpenFlow13 dump-group-stats "{{ item|quote }}" become: true ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.838714 networking-odl-16.0.0.0b2.dev1/setup.cfg0000644000175000017500000000466700000000000021532 0ustar00jamespagejamespage00000000000000[metadata] name = networking-odl summary = OpenStack Networking description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/networking-odl/latest/ python-requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = networking_odl data_files = etc/neutron = etc/neutron/plugins/ml2/ml2_conf_odl.ini [global] setup-hooks = pbr.hooks.setup_hook [extras] ceilometer = ceilometer>=11.0.0 [entry_points] console_scripts = neutron-odl-ovs-hostconfig = networking_odl.cmd.set_ovs_hostconfigs:main neutron-odl-analyze-journal-logs = networking_odl.cmd.analyze_journal:main neutron.ml2.mechanism_drivers = opendaylight_v2 = networking_odl.ml2.mech_driver_v2:OpenDaylightMechanismDriver neutron.service_plugins = odl-router_v2 = networking_odl.l3.l3_odl_v2:OpenDaylightL3RouterPlugin neutron.db.alembic_migrations = networking-odl = networking_odl.db.migration:alembic_migrations networking_odl.ml2.port_binding_controllers = 
legacy-port-binding = networking_odl.ml2.legacy_port_binding:LegacyPortBindingManager pseudo-agentdb-binding = networking_odl.ml2.pseudo_agentdb_binding:PseudoAgentDBBindingController oslo.config.opts = ml2_odl = networking_odl.common.config:list_opts networking_sfc.sfc.drivers = odl_v2 = networking_odl.sfc.sfc_driver_v2:OpenDaylightSFCDriverV2 networking_sfc.flowclassifier.drivers = odl_v2 = networking_odl.sfc.flowclassifier.sfc_flowclassifier_v2:OpenDaylightSFCFlowClassifierDriverV2 network.statistics.drivers = opendaylight.v2 = networking_odl.ceilometer.network.statistics.opendaylight_v2.driver:OpenDaylightDriver [build_releasenotes] build-dir = releasenotes/build source-dir = releasenotes/source all_files = 1 [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = networking_odl/locale/networking-odl.pot [compile_catalog] directory = networking_odl/locale domain = networking-odl [update_catalog] domain = networking-odl output_dir = networking_odl/locale input_file = networking_odl/locale/networking-odl.pot [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/setup.py0000644000175000017500000000200600000000000021404 0ustar00jamespagejamespage00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tempest-blacklist.txt0000644000175000017500000000421700000000000024070 0ustar00jamespagejamespage00000000000000# BEGIN: EXCLUDE SLOW TESTS .*\[.*\bslow\b.*\] # END EXCLUDE SLOW TESTS # BEGIN: EXCLUDE THINGS THAT JUST AREN'T ENABLED tempest\.scenario\.test_load_balancer tempest\.thirdparty\.boto\.test_s3 # END: EXCLUDE THINGS THAT JUST AREN'T ENABLED # BEGIN: EXCLUDE STUFF LESS RELATED TO NETWORKING, TO REDUCE GATE LOAD tempest\.api\.compute\.admin.* tempest\.api\.compute\.images.* tempest\.api\.compute\.volumes.* # END: EXCLUDE STUFF LESS RELATED TO NETWORKING, TO REDUCE GATE LOAD # BEGIN: UNSUPPORTED FEATURES # ODL legacy netvirt doesn't support ipv6 tempest\.scenario\.test_network_v6\.TestGettingAddress # END: UNSUPPORTED FEATURES # BEGIN: EXCLUSION OF NEUTRON'S TEMPEST PLUGIN # DNS domain isn't supported neutron_tempest_plugin.*dns_domain.* # Reporting router interface status isn't supported neutron_tempest_plugin.*test_router_interface_status.* # DVR is not interesting in the context of ODL neutron_tempest_plugin\..*[dD]vr.* # END: EXCLUSION OF NEUTRON'S TEMPEST PLUGIN # BEGIN: CURRENT LIST OF FAILING TESTS THAT NEED TO BE TRIAGED, HAVE BUGS FILED, AND # FIXED AS APPROPRIATE. 
# TODO(yamahata): fix bugs and remove those tests from here # BUG: https://bugs.launchpad.net/networking-odl/+bug/1642158 # legacy netvirt ignores admin-state-up state for network/port tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_update_instance_port_admin_state tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_update_router_admin_state # BUG: https://bugs.launchpad.net/networking-odl/+bug/1643033 # stateful security group: conntracking needs to be enabled tempest\.scenario\.test_network_basic_ops\.TestNetworkBasicOps\.test_hotplug_nic tempest\.scenario\.test_security_groups_basic_ops\.TestSecurityGroupsBasicOps\.test_cross_tenant_traffic tempest\.scenario\.test_security_groups_basic_ops\.TestSecurityGroupsBasicOps\.test_port_security_disable_security_group # TODO(mpeterson): investigate the following tests and act accordingly neutron_tempest_plugin\.scenario\.test_floatingip\.DefaultSnatToExternal\.test_snat_external_ip # END: CURRENT LIST OF FAILING TESTS THAT NEED TO BE TRIAGED, HAVE BUGS FILED, AND # FIXED AS APPROPRIATE. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/test-requirements.txt0000644000175000017500000000137300000000000024141 0ustar00jamespagejamespage00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 coverage>=4.5.1 # Apache-2.0 doc8>=0.8.0 # Apache-2.0 flake8-import-order>=0.17.1 # LGPLv3 python-subunit>=1.2.0 # Apache-2.0/BSD oslotest>=3.3.0 # Apache-2.0 stestr>=2.0.0 # Apache-2.0 pecan>=1.3.2 # BSD pylint==2.2.0;python_version>="3.0" # GPLv2 testresources>=2.0.1 # Apache-2.0/BSD testscenarios>=0.5.0 # Apache-2.0/BSD testtools>=2.3.0 # MIT bandit!=1.6.0,>=1.4.0 # Apache-2.0 bashate>=0.5.1 # Apache-2.0 astroid==2.1.0;python_version>="3.0" # LGPLv2.1 # To test ceilometer client ceilometer>=11.0.0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1585130284.834714 networking-odl-16.0.0.0b2.dev1/tools/0000755000175000017500000000000000000000000021034 5ustar00jamespagejamespage00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/check_bash.sh0000644000175000017500000000222600000000000023444 0ustar00jamespagejamespage00000000000000#! /bin/sh # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # The purpose of this script is to avoid casual introduction of more # bash dependency. Please consider alternatives before commiting code # which uses bash specific features. 
# Ignore comments, but include shebangs OBSERVED=$(grep -E '^([^#]|#!).*bash' tox.ini tools/* | wc -l) EXPECTED=5 if [ ${EXPECTED} -ne ${OBSERVED} ]; then echo Unexpected number of bash usages are detected. echo Please read the comment in $0 exit 1 fi exit 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/check_i18n.py0000644000175000017500000001243400000000000023326 0ustar00jamespagejamespage00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from __future__ import print_function import compiler import imp import os.path import sys def is_localized(node): """Check message wrapped by _()""" if isinstance(node.parent, compiler.ast.CallFunc): if isinstance(node.parent.node, compiler.ast.Name): if node.parent.node.name == '_': return True return False class ASTWalker(compiler.visitor.ASTVisitor): def default(self, node, *args): for child in node.getChildNodes(): child.parent = node compiler.visitor.ASTVisitor.default(self, node, *args) class Visitor(object): def __init__(self, filename, i18n_msg_predicates, msg_format_checkers, debug): self.filename = filename self.debug = debug self.error = 0 self.i18n_msg_predicates = i18n_msg_predicates self.msg_format_checkers = msg_format_checkers with open(filename) as f: self.lines = f.readlines() def visitConst(self, node): if not isinstance(node.value, str): return if is_localized(node): for (checker, msg) in self.msg_format_checkers: if checker(node): print('%s:%d %s: %s Error: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], checker.__name__, msg), file=sys.stderr) self.error = 1 return if debug: print('%s:%d %s: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], "Pass")) else: for (predicate, action, msg) in self.i18n_msg_predicates: if predicate(node): if action == 'skip': if debug: print('%s:%d %s: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], "Pass")) return elif action == 'error': print('%s:%d %s: %s Error: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], predicate.__name__, msg), file=sys.stderr) self.error = 1 return elif action == 'warn': print('%s:%d %s: %s' % (self.filename, node.lineno, self.lines[node.lineno - 1][:-1], "Warn: %s" % msg)) return print('Predicate with wrong action!', file=sys.stderr) def is_file_in_black_list(black_list, f): for f in black_list: if os.path.abspath(input_file).startswith( os.path.abspath(f)): return True return False def 
check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug): input_mod = compiler.parseFile(input_file) v = compiler.visitor.walk(input_mod, Visitor(input_file, i18n_msg_predicates, msg_format_checkers, debug), ASTWalker()) return v.error if __name__ == '__main__': input_path = sys.argv[1] cfg_path = sys.argv[2] try: cfg_mod = imp.load_source('', cfg_path) except Exception: print("Load cfg module failed", file=sys.stderr) sys.exit(1) i18n_msg_predicates = cfg_mod.i18n_msg_predicates msg_format_checkers = cfg_mod.msg_format_checkers black_list = cfg_mod.file_black_list debug = False if len(sys.argv) > 3: if sys.argv[3] == '-d': debug = True if os.path.isfile(input_path): sys.exit(check_i18n(input_path, i18n_msg_predicates, msg_format_checkers, debug)) error = 0 for dirpath, dirs, files in os.walk(input_path): for f in files: if not f.endswith('.py'): continue input_file = os.path.join(dirpath, f) if is_file_in_black_list(black_list, input_file): continue if check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug): error = 1 sys.exit(error) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/check_i18n_test_case.txt0000644000175000017500000000264500000000000025552 0ustar00jamespagejamespage00000000000000# test-case for check_i18n.py # python check_i18n.py check_i18n.txt -d # message format checking # capital checking msg = _("hello world, error") msg = _("hello world_var, error") msg = _('file_list xyz, pass') msg = _("Hello world, pass") # format specifier checking msg = _("Hello %s world %d, error") msg = _("Hello %s world, pass") msg = _("Hello %(var1)s world %(var2)s, pass") # message has been localized # is_localized msg = _("Hello world, pass") msg = _("Hello world, pass") % var LOG.debug(_('Hello world, pass')) LOG.info(_('Hello world, pass')) raise x.y.Exception(_('Hello world, pass')) raise Exception(_('Hello world, pass')) # message need 
be localized # is_log_callfunc LOG.debug('hello world, error') LOG.debug('hello world, error' % xyz) sys.append('hello world, warn') # is_log_i18n_msg_with_mod LOG.debug(_('Hello world, error') % xyz) # default warn msg = 'hello world, warn' msg = 'hello world, warn' % var # message needn't be localized # skip only one word msg = '' msg = "hello,pass" # skip dict msg = {'hello world, pass': 1} # skip list msg = ["hello world, pass"] # skip subscript msg['hello world, pass'] # skip xml marker msg = ", pass" # skip sql statement msg = "SELECT * FROM xyz WHERE hello=1, pass" msg = "select * from xyz, pass" # skip add statement msg = 'hello world' + e + 'world hello, pass' # skip doc string """ Hello world, pass """ class Msg: pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/clean.sh0000755000175000017500000000027400000000000022460 0ustar00jamespagejamespage00000000000000#!/bin/bash rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes rm -rf */*.deb rm -rf ./plugins/**/build/ ./plugins/**/dist rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/coding-checks.sh0000755000175000017500000000264700000000000024105 0ustar00jamespagejamespage00000000000000#!/bin/sh # this was stoken from Neutron with the change neutron -> networking_odl set -eu usage () { echo "Usage: $0 [OPTION]..." echo "Run Neutron's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire neutron module or just files changed in basecommit (e.g. 
HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="networking_odl" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/configure_for_func_testing.sh0000755000175000017500000002010000000000000026763 0ustar00jamespagejamespage00000000000000#!/usr/bin/env bash # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e # Control variable used to determine whether to execute this script # directly or allow the gate_hook to import. 
IS_GATE=${IS_GATE:-False} USE_CONSTRAINT_ENV=${USE_CONSTRAINT_ENV:-True} if [[ "$IS_GATE" != "True" ]] && [[ "$#" -lt 1 ]]; then >&2 echo "Usage: $0 /path/to/devstack [-i] Configure a host to run Networking ODL's functional test suite. -i Install Networking ODL's package dependencies. By default, it is assumed that devstack has already been used to deploy Networking ODL to the target host and that package dependencies need not be installed. Warning: This script relies on devstack to perform extensive modification to the underlying host. It is recommended that it be invoked only on a throw-away VM." exit 1 fi # Skip the first argument OPTIND=2 while getopts ":i" opt; do case $opt in i) INSTALL_BASE_DEPENDENCIES=True ;; esac done # Default to environment variables to permit the gate_hook to override # when sourcing. VENV=${VENV:-dsvm-functional} DEVSTACK_PATH=${DEVSTACK_PATH:-$(cd "$1" && pwd)} PROJECT_NAME=${PROJECT_NAME:-networking-odl} REPO_BASE=${GATE_DEST:-$(cd $(dirname "$0")/../.. && pwd)} INSTALL_MYSQL_ONLY=${INSTALL_MYSQL_ONLY:-False} # The gate should automatically install dependencies. INSTALL_BASE_DEPENDENCIES=${INSTALL_BASE_DEPENDENCIES:-$IS_GATE} ODL_DIR=$GATE_DEST/opendaylight if [ ! -f "$DEVSTACK_PATH/stack.sh" ]; then >&2 echo "Unable to find devstack at '$DEVSTACK_PATH'. Please verify that the specified path points to a valid devstack repo." exit 1 fi set -x function _init { # Subsequently-called devstack functions depend on the following variables. HOST_IP=127.0.0.1 FILES=$DEVSTACK_PATH/files TOP_DIR=$DEVSTACK_PATH source $DEVSTACK_PATH/inc/meta-config extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto source $DEVSTACK_PATH/stackrc # Allow the gate to override values set by stackrc. 
DEST=${GATE_DEST:-$DEST} STACK_USER=${GATE_STACK_USER:-$STACK_USER} REQUIREMENTS_DIR=$DEST/requirements } function _install_base_deps { echo_summary "Installing base dependencies" INSTALL_TESTONLY_PACKAGES=True PACKAGES=$(get_packages general) # for gethostip command if ! is_plugin_enabled networking-odl; then enable_plugin networking-odl https://opendev.org/openstack/networking-odl fi PACKAGES="$PACKAGES $(get_plugin_packages)" # Do not install 'python-' prefixed packages other than # python-dev*. Networking ODL's functional testing relies on deployment # to a tox env so there is no point in installing python # dependencies system-wide. PACKAGES=$(echo $PACKAGES | perl -pe 's|python-(?!dev)[^ ]*||g') install_package $PACKAGES } # _install_databases [install_pg] function _install_databases { local install_pg=${1:-True} echo_summary "Installing databases" # Avoid attempting to configure the db if it appears to already # have run. The setup as currently defined is not idempotent. if mysql openstack_citest > /dev/null 2>&1 < /dev/null; then echo_summary "DB config appears to be complete, skipping." return 0 fi MYSQL_PASSWORD=${MYSQL_PASSWORD:-secretmysql} DATABASE_PASSWORD=${DATABASE_PASSWORD:-secretdatabase} source $DEVSTACK_PATH/lib/database enable_service mysql initialize_database_backends install_database configure_database_mysql if [[ "$install_pg" == "True" ]]; then # acl package includes setfacl. 
install_package acl enable_service postgresql initialize_database_backends install_database configure_database_postgresql fi # Set up the 'openstack_citest' user and database in each backend tmp_dir=$(mktemp -d) trap "rm -rf $tmp_dir" EXIT cat << EOF > $tmp_dir/mysql.sql CREATE DATABASE openstack_citest; CREATE USER 'openstack_citest'@'localhost' IDENTIFIED BY 'openstack_citest'; CREATE USER 'openstack_citest' IDENTIFIED BY 'openstack_citest'; GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'@'localhost'; GRANT ALL PRIVILEGES ON *.* TO 'openstack_citest'; FLUSH PRIVILEGES; EOF /usr/bin/mysql -u root < $tmp_dir/mysql.sql if [[ "$install_pg" == "True" ]]; then cat << EOF > $tmp_dir/postgresql.sql CREATE USER openstack_citest WITH CREATEDB LOGIN PASSWORD 'openstack_citest'; CREATE DATABASE openstack_citest WITH OWNER openstack_citest; EOF # User/group postgres needs to be given access to tmp_dir setfacl -m g:postgres:rwx $tmp_dir sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql fi } function _install_infra { echo_summary "Installing infra" pip_install -U virtualenv source $DEVSTACK_PATH/lib/infra install_infra } function _install_opendaylight { echo_summary "Install OpenDaylight" # fake up necessary environment for odl to install/configure source $DEVSTACK_PATH/lib/neutron-legacy neutron_plugin_configure_common _create_neutron_conf_dir mkdir -p $NEUTRON_CONF_DIR touch $NEUTRON_CONF mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME touch /$Q_PLUGIN_CONF_FILE NETWORKING_ODL_DIR=${NETWORKING_ODL_DIR:-$REPO_BASE/networking-odl} Q_USE_PUBLIC_VETH=False ODL_DONT_WAIT_OVS_BR=True # openstack service provider isn't needed, only ODL neutron northbound # is necessary for functional test ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-neutron-logger ODL_BOOT_WAIT_URL=controller/nb/v2/neutron/networks source $NETWORKING_ODL_DIR/devstack/settings.odl local 
ODL_NEUTRON_NETWORK_WAIT_URL=controller/nb/v2/neutron/networks set +e curl -o /dev/null --fail --silent --head -u \ ${ODL_USERNAME}:${ODL_PASSWORD} \ http://${ODL_MGR_HOST}:${ODL_PORT}/${ODL_NEUTRON_NETWORK_WAIT_URL} local result=$? set -e if [ $result -eq 0 ]; then echo_summary "OpenDaylight config appears to be complete, skipping" return 0 fi enable_service odl-server source $NETWORKING_ODL_DIR/devstack/plugin.sh stack install source $NETWORKING_ODL_DIR/devstack/plugin.sh stack post-config } function _install_post_devstack { echo_summary "Performing post-devstack installation" _install_databases # networkign-odl devstack plugin requires infra _install_infra _install_opendaylight if is_ubuntu; then install_package isc-dhcp-client install_package netcat-openbsd elif is_fedora; then install_package dhclient else exit_distro_not_supported "installing dhclient package" fi } function configure_host_for_func_testing { echo_summary "Configuring host for functional testing" if [[ "$INSTALL_BASE_DEPENDENCIES" == "True" ]]; then # Installing of the following can be achieved via devstack by # installing Networking ODL, so their installation is conditional to # minimize the work to do on a devstack-configured host. _install_base_deps fi _install_post_devstack } # This function has been added because it's called by the devstack scripts # but since functional is not stacking devstack entirely this # this function is never imported. 
Thus, the creation of this no-op function function conductor_conf { : } _init if [[ "$IS_GATE" != "True" ]]; then if [[ "$INSTALL_MYSQL_ONLY" == "True" ]]; then _install_databases nopg else configure_host_for_func_testing fi fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/i18n_cfg.py0000644000175000017500000000771600000000000023017 0ustar00jamespagejamespage00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import compiler import re def is_log_callfunc(n): """LOG.xxx('hello %s' % xyz) and LOG('hello')""" if isinstance(n.parent, compiler.ast.Mod): n = n.parent if isinstance(n.parent, compiler.ast.CallFunc): if isinstance(n.parent.node, compiler.ast.Getattr): if isinstance(n.parent.node.getChildNodes()[0], compiler.ast.Name): if n.parent.node.getChildNodes()[0].name == 'LOG': return True return False def is_log_i18n_msg_with_mod(n): """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)""" if not isinstance(n.parent.parent, compiler.ast.Mod): return False n = n.parent.parent if isinstance(n.parent, compiler.ast.CallFunc): if isinstance(n.parent.node, compiler.ast.Getattr): if isinstance(n.parent.node.getChildNodes()[0], compiler.ast.Name): if n.parent.node.getChildNodes()[0].name == 'LOG': return True return False def is_wrong_i18n_format(n): """Check _('hello %s' % xyz)""" if isinstance(n.parent, compiler.ast.Mod): n = n.parent if isinstance(n.parent, compiler.ast.CallFunc): if isinstance(n.parent.node, compiler.ast.Name): if n.parent.node.name == '_': return True return False """ Used for check message need be localized or not. 
(predicate_func, action, message) """ i18n_msg_predicates = [ # Skip ['hello world', 1] (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''), # Skip {'hellow world', 1} (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''), # Skip msg['hello world'] (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''), # Skip doc string (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''), # Skip msg = "hello", in normal, message should more than one word (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''), # Skip msg = 'hello world' + vars + 'world hello' (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''), # Skip xml markers msg = "" (lambda n: len(re.compile("").findall(n.value)) > 0, 'skip', ''), # Skip sql statement (lambda n: len( re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0, 'skip', ''), # LOG.xxx() (is_log_callfunc, 'error', 'Message must be localized'), # _('hello %s' % xyz) should be _('hello %s') % xyz (is_wrong_i18n_format, 'error', ("Message format was wrong, _('hello %s' % xyz) " "should be _('hello %s') % xyz")), # default (lambda n: True, 'warn', 'Message might need localized') ] """ Used for checking message format. 
(checker_func, message) """ msg_format_checkers = [ # If message contain more than on format specifier, it should use # mapping key (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1, "The message shouldn't contain more than one format specifier"), # Check capital (lambda n: n.value.split(' ')[0].count('_') == 0 and n.value[0].isalpha() and n.value[0].islower(), "First letter must be capital"), (is_log_i18n_msg_with_mod, 'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)') ] file_black_list = ["./neutron/tests/unit", "./neutron/openstack", "./neutron/plugins/bigswitch/tests"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/install_venv.py0000644000175000017500000000440500000000000024115 0ustar00jamespagejamespage00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. 
Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ . .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. """ print(help) def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tools/with_venv.sh0000755000175000017500000000132300000000000023403 0ustar00jamespagejamespage00000000000000#!/bin/bash # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
TOOLS=`dirname $0` VENV=$TOOLS/../.venv source $VENV/bin/activate && "$@" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1585130281.0 networking-odl-16.0.0.0b2.dev1/tox.ini0000644000175000017500000001750400000000000021216 0ustar00jamespagejamespage00000000000000[tox] envlist = docs,py37,pep8 minversion = 3.1.1 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:1} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:1} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:1} OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:60} passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY OS_FAIL_ON_MISSING_DEPS OS_POST_MORTEM_DEBUGGER TRACE_FAILONLY OS_TEST_DBAPI_ADMIN_CONNECTION OS_DEBUG usedevelop = True deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = bash commands = stestr run {posargs} [testenv:dsvm] # Fake job to define environment variables shared between dsvm jobs setenv = OS_SUDO_TESTING=1 OS_FAIL_ON_MISSING_DEPS=1 OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} commands = false [testenv:functional] setenv = {[testenv]setenv} OS_TEST_PATH=./networking_odl/tests/functional OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt -r{toxinidir}/networking_odl/tests/functional/requirements.txt [testenv:dsvm-functional] setenv = {[testenv:functional]setenv} {[testenv:dsvm]setenv} deps = {[testenv:functional]deps} [testenv:pep8] deps = {[testenv]deps} -r{toxinidir}/doc/requirements.txt commands = flake8 {toxinidir}/tools/coding-checks.sh --pylint '{posargs}' doc8 doc/source devstack releasenotes/source rally-jobs neutron-db-manage --subproject networking-odl check_migration {[testenv:genconfig]commands} 
{[testenv:bashate]commands} {[testenv:capitald]commands} {[testenv:bandit]commands} whitelist_externals = bash mkdir [testenv:i18n] commands = python ./tools/check_i18n.py ./networking_odl ./tools/i18n_cfg.py [testenv:venv] deps = {[testenv]deps} -r{toxinidir}/doc/requirements.txt commands = {posargs} [testenv:cover] setenv = PYTHON=coverage run --source networking_odl --parallel-mode commands = stestr run {posargs} coverage combine coverage report --fail-under=80 --skip-covered coverage html -d cover coverage xml -o cover/coverage.xml [testenv:docs] whitelist_externals = rm deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = rm -rf doc/build sphinx-build -W -b html doc/source doc/build/html [testenv:pdf-docs] envdir = {toxworkdir}/docs deps = {[testenv:docs]deps} whitelist_externals = make commands = sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:debug] # Uses default base python setenv = {[testenv]setenv} OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:0} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:0} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:0} OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:0} commands = oslo_debug_helper -t networking_odl/tests {posargs} [hacking] import_exceptions = networking_odl._i18n local-check-factory = networking_odl.hacking.checks.factory [testenv:bandit] # B101: Use of assert detected deps = -r{toxinidir}/test-requirements.txt commands = bandit -r networking_odl -x tests -n5 -s B101 [doc8] # File extensions to check extensions = .rst # TODO(yamahata): doc8 work around. remove this when doc8 is fixed. # doc8(actually docutils) handles relative path inclusion differently from sphinx. 
# doc8 wrongly alerts invalid inclusion path with recursive relative inclusion # https://sourceforge.net/p/docutils/bugs/211/ ignore-path-errors=doc/source/devref/index.rst;D000 [flake8] # TODO(dougwig) -- uncomment this to test for remaining linkages # N530 direct neutron imports not allowed show-source = True # TODO(mkolesni): Fix I202 if you really care about import checks ignore = N530,I202 # H106: Dont put vim configuration in source files # H203: Use assertIs(Not)None to check for None # H204: Use assert(Not)Equal to check for equality # H205: Use assert(Greater|Less)(Equal) for comparison # H904: Delay string interpolations at logging calls enable-extensions=H106,H203,H204,H205,H904 exclude=./.*,dist,doc,releasenotes,*lib/python*,*egg,build,tools import-order-style = pep8 [testenv:bashate] commands = bash -c "find {toxinidir} \ -not \( -type d -name .\?\* -prune \) \ -type f \ \( \ -name \*.sh \ -or \ -path \*/devstack/\*settings\* \ -or \ -path \*/devstack/devstackgaterc \ -or \ -path \*/devstack/entry_points \ -or \ -path \*/devstack/functions \ -or \ -path \*/devstack/odl-releases/common \ -or \ -path \*/devstack/override-defaults \ \) \ # E005 file does not begin with #! or have a .sh prefix # E006 check for lines longer than 79 columns # E042 local declaration hides errors # E043 Arithmetic compound has inconsistent return semantics -print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043" whitelist_externals = bash [testenv:capitald] usedevelop = False skip_install = True deps = # Check if "Opendaylight" word is in any file # Only "OpenDaylight" (with uppercase 'D') should be used commands = bash -c "! 
grep \ --exclude-dir='.*' \ --exclude-dir='cover' \ --exclude-dir='__pycache__' \ --exclude='tox.ini' \ --exclude='ChangeLog' \ --exclude='*.py' \ --exclude='*.pyc' \ --exclude='*~' \ --recursive \ --line-number \ Opendaylight \ {toxinidir}" whitelist_externals = bash [testenv:genconfig] deps = -r{toxinidir}/requirements.txt commands = mkdir -p etc/neutron/plugins/ml2 oslo-config-generator --namespace ml2_odl --output-file etc/neutron/plugins/ml2/ml2_conf_odl.ini.sample whitelist_externals = mkdir [testenv:releasenotes] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt [testenv:dev] # run locally (not in the gate) using editable mode # https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs # note that order is important to ensure dependencies don't override commands = pip install -q -e "git+https://opendev.org/openstack/networking-l2gw#egg=networking_l2gw" pip install -q -e "git+https://opendev.org/openstack/networking-bgpvpn#egg=networking_bgpvpn" pip install -q -e "git+https://opendev.org/openstack/networking-sfc#egg=networking_sfc" pip install -q -e "git+https://opendev.org/openstack/neutron-fwaas#egg=neutron_fwaas" pip install -q -e "git+https://opendev.org/openstack/ceilometer#egg=ceilometer" pip install -q -e "git+https://opendev.org/openstack/neutron#egg=neutron" {[testenv]commands} [testenv:pep8-dev] deps = {[testenv]deps} -r{toxinidir}/doc/requirements.txt commands = {[testenv:dev]commands} {[testenv:pep8]commands}