pax_global_header00006660000000000000000000000064132324230710014507gustar00rootroot0000000000000052 comment=3b9f251db77526d907b73c6eee38905bd56bec5f networking-arista-2017.2.2/000077500000000000000000000000001323242307100154125ustar00rootroot00000000000000networking-arista-2017.2.2/.coveragerc000066400000000000000000000002151323242307100175310ustar00rootroot00000000000000[run] branch = True source = networking-arista omit = networking-arista/tests/*,networking-arista/openstack/* [report] ignore_errors = True networking-arista-2017.2.2/.gitignore000066400000000000000000000007151323242307100174050ustar00rootroot00000000000000*.py[cod] # C extensions *.so # Packages *.egg *.egg-info dist build eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 # Installer logs pip-log.txt # Unit test / coverage reports .coverage .tox nosetests.xml .testrepository .venv # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject # Complexity output/*.html output/*/index.html # Sphinx doc/build # pbr generates these AUTHORS ChangeLog # Editors *~ .*.swp .*sw? networking-arista-2017.2.2/.gitreview000066400000000000000000000001261323242307100174170ustar00rootroot00000000000000[gerrit] host=review.openstack.org port=29418 project=openstack/networking-arista.git networking-arista-2017.2.2/.mailmap000066400000000000000000000003301323242307100170270ustar00rootroot00000000000000# Format is: # # Sukhdev Kapur Shashank Hegde Andre Pech networking-arista-2017.2.2/.testr.conf000066400000000000000000000005371323242307100175050ustar00rootroot00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ OS_LOG_CAPTURE=1 \ ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list networking-arista-2017.2.2/CONTRIBUTING.rst000066400000000000000000000010441323242307100200520ustar00rootroot00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/networking-arista networking-arista-2017.2.2/HACKING.rst000066400000000000000000000002511323242307100172060ustar00rootroot00000000000000networking-arista Style Commandments =============================================== Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ networking-arista-2017.2.2/LICENSE000066400000000000000000000236371323242307100164320ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. networking-arista-2017.2.2/MANIFEST.in000066400000000000000000000001361323242307100171500ustar00rootroot00000000000000include AUTHORS include ChangeLog exclude .gitignore exclude .gitreview global-exclude *.pyc networking-arista-2017.2.2/README.rst000066400000000000000000000003551323242307100171040ustar00rootroot00000000000000=============================== networking-arista =============================== Arista Networking drivers * Free software: Apache license * Source: http://git.openstack.org/cgit/openstack/networking-arista Features -------- * TODO networking-arista-2017.2.2/babel.cfg000066400000000000000000000000211323242307100171310ustar00rootroot00000000000000[python: **.py] networking-arista-2017.2.2/devstack/000077500000000000000000000000001323242307100172165ustar00rootroot00000000000000networking-arista-2017.2.2/devstack/plugin.sh000066400000000000000000000066101323242307100210530ustar00rootroot00000000000000# -*- mode: shell-script -*- function install_lldp() { echo_summary "Installing LLDP" install_package lldpd restart_service lldpd } function install_arista_driver() { echo_summary "Installing Arista Driver" setup_develop $ARISTA_DIR } function configure_arista() { echo_summary "Configuring Neutron for Arista Driver" cp $ARISTA_ML2_CONF_SAMPLE $ARISTA_ML2_CONF_FILE iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_host $ARISTA_EAPI_HOST iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_username $ARISTA_EAPI_USERNAME iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_password $ARISTA_EAPI_PASSWORD iniset $ARISTA_ML2_CONF_FILE ml2_arista api_type $ARISTA_API_TYPE iniset $ARISTA_ML2_CONF_FILE ml2_arista region_name $ARISTA_REGION_NAME if [ -n "${ARISTA_USE_FQDN+x}" ]; then iniset $ARISTA_ML2_CONF_FILE ml2_arista use_fqdn $ARISTA_USE_FQDN fi if [ -n "${ARISTA_ML2_SYNC_INTERVAL+x}" ]; then iniset $ARISTA_ML2_CONF_FILE ml2_arista sync_interval $ARISTA_ML2_SYNC_INTERVAL fi if [ -n "${ARISTA_SEC_GROUP_SUPPORT+x}" ]; then iniset $ARISTA_ML2_CONF_FILE ml2_arista sec_group_support $ARISTA_SEC_GROUP_SUPPORT fi if [ -n "${ARISTA_SWITCH_INFO+x}" ]; then iniset $ARISTA_ML2_CONF_FILE ml2_arista switch_info $ARISTA_SWITCH_INFO fi if [ -n "${ARISTA_PRIMARY_L3_HOST+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista primary_l3_host $ARISTA_PRIMARY_L3_HOST fi if [ -n "${ARISTA_PRIMARY_L3_HOST_USERNAME+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista primary_l3_host_username $ARISTA_PRIMARY_L3_HOST_USERNAME fi if [ -n "${ARISTA_PRIMARY_L3_HOST_PASSWORD+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista primary_l3_host_password $ARISTA_PRIMARY_L3_HOST_PASSWORD fi if [ -n "${ARISTA_SECONDARY_L3_HOST+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista secondary_l3_host $ARISTA_SECONDARY_L3_HOST fi if [ -n "${ARISTA_SECONDARY_L3_HOST_USERNAME+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista secondary_l3_host_username $ARISTA_SECONDARY_L3_HOST_USERNAME fi if [ -n "${ARISTA_SECONDARY_L3_HOST_PASSWORD+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista secondary_l3_host_password $ARISTA_SECONDARY_L3_HOST_PASSWORD fi if [ -n "${ARISTA_MLAG_CONFIG+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista mlag_config $ARISTA_MLAG_CONFIG fi if [ -n "${ARISTA_USE_VRF+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista use_vrf $ARISTA_USE_VRF fi if [ -n "${ARISTA_L3_SYNC_INTERVAL+x}" ]; then iniset $ARISTA_ML2_CONF_FILE l3_arista l3_sync_interval $ARISTA_L3_SYNC_INTERVAL fi if [ -n "${ARISTA_TYPE_DRIVER_SYNC_INTERVAL+x}" ]; then iniset $ARISTA_ML2_CONF_FILE arista_type_driver sync_interval $ARISTA_TYPE_DRIVER_SYNC_INTERVAL fi neutron_server_config_add $ARISTA_ML2_CONF_FILE } if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then neutron_service_plugin_class_add "trunk" if is_service_enabled "q-agt"; then install_lldp fi elif [[ "$1" == "stack" && "$2" == "install" ]]; then install_arista_driver elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then configure_arista elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # no-op : fi if [[ "$1" == "unstack" ]]; then # no-op : fi if [[ "$1" == "clean" ]]; then # no-op : fi networking-arista-2017.2.2/devstack/settings000066400000000000000000000006701323242307100210040ustar00rootroot00000000000000if ! [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" =~ "arista" ]]; then Q_ML2_PLUGIN_MECHANISM_DRIVERS="$Q_ML2_PLUGIN_MECHANISM_DRIVERS,arista" fi ARISTA_DIR=${ARISTA_DIR:-$DEST/networking-arista} ARISTA_ML2_CONF_SAMPLE=$ARISTA_DIR/etc/ml2_conf_arista.ini ARISTA_ML2_CONF_FILE=${ARISTA_ML2_CONF_FILE:-"$NEUTRON_CONF_DIR/ml2_conf_arista.ini"} ARISTA_API_TYPE=${ARISTA_API_TYPE:-"EAPI"} ARISTA_REGION_NAME=${ARISTA_REGION_NAME:-"$REGION_NAME"} networking-arista-2017.2.2/doc/000077500000000000000000000000001323242307100161575ustar00rootroot00000000000000networking-arista-2017.2.2/doc/source/000077500000000000000000000000001323242307100174575ustar00rootroot00000000000000networking-arista-2017.2.2/doc/source/conf.py000077500000000000000000000046411323242307100207660ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'oslosphinx' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'networking-arista' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None} networking-arista-2017.2.2/doc/source/contributing.rst000066400000000000000000000001131323242307100227130ustar00rootroot00000000000000============ Contributing ============ .. include:: ../../CONTRIBUTING.rst networking-arista-2017.2.2/doc/source/index.rst000066400000000000000000000010051323242307100213140ustar00rootroot00000000000000.. networking-arista documentation master file, created by sphinx-quickstart on Tue Jul 9 22:26:36 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to networking-arista's documentation! ======================================================== Contents: .. toctree:: :maxdepth: 2 readme installation usage contributing Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` networking-arista-2017.2.2/doc/source/installation.rst000066400000000000000000000003341323242307100227120ustar00rootroot00000000000000============ Installation ============ At the command line:: $ pip install networking-arista Or, if you have virtualenvwrapper installed:: $ mkvirtualenv networking-arista $ pip install networking-arista networking-arista-2017.2.2/doc/source/readme.rst000066400000000000000000000000361323242307100214450ustar00rootroot00000000000000.. include:: ../../README.rst networking-arista-2017.2.2/doc/source/usage.rst000066400000000000000000000001371323242307100213160ustar00rootroot00000000000000======== Usage ======== To use networking-arista in a project:: import networking-arista networking-arista-2017.2.2/etc/000077500000000000000000000000001323242307100161655ustar00rootroot00000000000000networking-arista-2017.2.2/etc/ml2_conf_arista.ini000066400000000000000000000137501323242307100217360ustar00rootroot00000000000000# Defines configuration options specific for Arista ML2 Mechanism driver [ml2_arista] # (StrOpt) Comma separated list of IP addresses for all CVX instances in # the high availabilty CVX cluster. This is a required field with # a minimum of one address (if CVX is deployed in a non-redundant # (standalone) manner). If not set, all communications to Arista # EOS will fail. # # eapi_host = # Example: eapi_host = 192.168.0.1, 192.168.11.1, 192.168.22.1 # # (StrOpt) EOS command API username. This is required field. # if not set, all communications to Arista EOS will fail. # # eapi_username = # Example: eapi_username = admin # # (StrOpt) EOS command API password. This is required field. # if not set, all communications to Arista EOS will fail. # # eapi_password = # Example: eapi_password = my_password # # (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs # ("node1.domain.com") or as short names ("node1"). This is # optional. If not set, a value of "True" is assumed. # # use_fqdn = # Example: use_fqdn = True # # (IntOpt) Sync interval in seconds between Neutron plugin and EOS. # This field defines how often the synchronization is performed. # This is an optional field. If not set, a value of 30 seconds # is assumed. # # sync_interval = # Example: sync_interval = 30 # # (StrOpt) Defines Region Name that is assigned to this OpenStack Controller. # This is useful when multiple OpenStack/Neutron controllers are # managing the same Arista HW clusters. Note that this name must # match with the region name registered (or known) to keystone # service. Authentication with Keysotne is performed by EOS. # This is optional. If not set, a value of "RegionOne" is assumed. # # region_name = # Example: region_name = RegionOne # # (BoolOpt) Specifies if the Security Groups need to be deployed for baremetal # deployments. If this flag is set to "True", this means switch_info # (see below) must be defined. If this flag is not defined, it is # assumed to be False. # # sec_group_support = # Example: sec_group_support = True # # (ListOpt) This is a comma separated list of Arista switches where # security groups (i.e. ACLs) need to be applied. Each string has # three values separated by ":" in the following format. # ::,:: # This is required if sec_group_support is set to "True" # # switch_info = # Example: switch_info = 172.13.23.55:admin:admin,172.13.23.56:admin:admin # # (StrOpt) Tells the plugin to use a sepcific API interfaces to communicate # with CVX. Valid options are: # EAPI - Use EOS' extensible API. # JSON - Use EOS' JSON/REST API. # api_type = # Example: api_type = EAPI # # (ListOpt) This is a comma separated list of physical networks which are # managed by Arista switches. This list will be used in # by the Arista ML2 plugin to make the decision if it can # participate on binding or updating a port. # # managed_physnets = # Example: managed_physnets = arista_network # # (BoolOpt) Specifies whether the Arista ML2 plugin should bind ports to vxlan # fabric segments and dynamically allocate vlan segments based on # the host to connect the port to the vxlan fabric. # # manage_fabric = # Example: manage_fabric = False [l3_arista] # (StrOpt) primary host IP address. This is required field. If not set, all # communications to Arista EOS will fail. This is the host where # primary router is created. # # primary_l3_host = # Example: primary_l3_host = 192.168.10.10 # # (StrOpt) Primary host username. This is required field. # if not set, all communications to Arista EOS will fail. # # primary_l3_host_username = # Example: primary_l3_username = admin # # (StrOpt) Primary host password. This is required field. # if not set, all communications to Arista EOS will fail. # # primary_l3_host_password = # Example: primary_l3_password = my_password # # (StrOpt) IP address of the second Arista switch paired as # MLAG (Multi-chassis Link Aggregation) with the first. # This is optional field, however, if mlag_config flag is set, # then this is a required field. If not set, all # communications to Arista EOS will fail. If mlag_config is set # to False, then this field is ignored # # secondary_l3_host = # Example: secondary_l3_host = 192.168.10.20 # # (IntOpt) Connection timeout interval in seconds. This interval # defines how long an EAPI request from the driver to ' # EOS waits before timing out. If not set, a value of 10 # seconds is assumed. # # conn_timeout = # Example: conn_timeout = 10 # # (BoolOpt) Defines if Arista switches are configured in MLAG mode # If yes, all L3 configuration is pushed to both switches # automatically. If this flag is set, ensure that secondary_l3_host # is set to the second switch's IP. # This flag is Optional. If not set, a value of "False" is assumed. # # mlag_config = # Example: mlag_config = True # # (BoolOpt) Defines if the router is created in default VRF or a # a specific VRF. This is optional. # If not set, a value of "False" is assumed. # # Example: use_vrf = True # # (IntOpt) Sync interval in seconds between Neutron plugin and EOS. # This field defines how often the synchronization is performed. # This is an optional field. If not set, a value of 180 seconds # is assumed. # # l3_sync_interval = # Example: l3_sync_interval = 60 [arista_type_driver] # (IntOpt) VLAN Sync interval in seconds between the type driver and EOS. # This interval defines how often the VLAN synchronization is # performed. This is an optional field. If not set, a value of # 10 seconds is assumed. # # sync_interval = # Example: sync_interval = 10 networking-arista-2017.2.2/etc/policy.json000066400000000000000000000146521323242307100203670ustar00rootroot00000000000000{ "context_is_admin": "role:admin", "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", "admin_only": "rule:context_is_admin", "regular_user": "", "shared": "field:networks:shared=True", "shared_firewalls": "field:firewalls:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", "create_subnet": "rule:admin_or_network_owner", "get_subnet": "rule:admin_or_owner or rule:shared", "update_subnet": "rule:admin_or_network_owner", "delete_subnet": "rule:admin_or_network_owner", "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", "get_network:segments": "rule:admin_only", "get_network:provider:network_type": "rule:admin_only", "get_network:provider:physical_network": "rule:admin_only", "get_network:provider:segmentation_id": "rule:admin_only", "get_network:queue_id": "rule:admin_only", "create_network:shared": "rule:admin_only", "create_network:router:external": "rule:admin_only", "create_network:segments": "rule:admin_only", "create_network:provider:network_type": "rule:admin_only", "create_network:provider:physical_network": "rule:admin_only", "create_network:provider:segmentation_id": "rule:admin_only", "update_network": "rule:admin_or_owner", "update_network:segments": "rule:admin_only", "update_network:shared": "rule:admin_only", "update_network:provider:network_type": "rule:admin_only", "update_network:provider:physical_network": "rule:admin_only", "update_network:provider:segmentation_id": "rule:admin_only", "update_network:router:external": "rule:admin_only", "delete_network": "rule:admin_or_owner", "create_port": "", "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:binding:host_id": "rule:admin_only", "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "get_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", "get_port:binding:host_id": "rule:admin_only", "get_port:binding:profile": "rule:admin_only", "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:binding:host_id": "rule:admin_only", "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", "create_router:external_gateway_info:enable_snat": "rule:admin_only", "create_router:distributed": "rule:admin_only", "create_router:ha": "rule:admin_only", "get_router": "rule:admin_or_owner", "get_router:distributed": "rule:admin_only", "update_router:external_gateway_info:enable_snat": "rule:admin_only", "update_router:distributed": "rule:admin_only", "update_router:ha": "rule:admin_only", "delete_router": "rule:admin_or_owner", "add_router_interface": "rule:admin_or_owner", "remove_router_interface": "rule:admin_or_owner", "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "create_firewall": "", "get_firewall": "rule:admin_or_owner", "create_firewall:shared": "rule:admin_only", "get_firewall:shared": "rule:admin_only", "update_firewall": "rule:admin_or_owner", "update_firewall:shared": "rule:admin_only", "delete_firewall": "rule:admin_or_owner", "create_firewall_policy": "", "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", "create_firewall_policy:shared": "rule:admin_or_owner", "update_firewall_policy": "rule:admin_or_owner", "delete_firewall_policy": "rule:admin_or_owner", "create_firewall_rule": "", "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", "update_firewall_rule": "rule:admin_or_owner", "delete_firewall_rule": "rule:admin_or_owner", "create_qos_queue": "rule:admin_only", "get_qos_queue": "rule:admin_only", "update_agent": "rule:admin_only", "delete_agent": "rule:admin_only", "get_agent": "rule:admin_only", "create_dhcp-network": "rule:admin_only", "delete_dhcp-network": "rule:admin_only", "get_dhcp-networks": "rule:admin_only", "create_l3-router": "rule:admin_only", "delete_l3-router": "rule:admin_only", "get_l3-routers": "rule:admin_only", "get_dhcp-agents": "rule:admin_only", "get_l3-agents": "rule:admin_only", "get_loadbalancer-agent": "rule:admin_only", "get_loadbalancer-pools": "rule:admin_only", "create_floatingip": "rule:regular_user", "create_floatingip:floating_ip_address": "rule:admin_only", "update_floatingip": "rule:admin_or_owner", "delete_floatingip": "rule:admin_or_owner", "get_floatingip": "rule:admin_or_owner", "create_network_profile": "rule:admin_only", "update_network_profile": "rule:admin_only", "delete_network_profile": "rule:admin_only", "get_network_profiles": "", "get_network_profile": "", "update_policy_profiles": "rule:admin_only", "get_policy_profiles": "", "get_policy_profile": "", "create_metering_label": "rule:admin_only", "delete_metering_label": "rule:admin_only", "get_metering_label": "rule:admin_only", "create_metering_label_rule": "rule:admin_only", "delete_metering_label_rule": "rule:admin_only", "get_metering_label_rule": "rule:admin_only", "get_service_provider": "rule:regular_user", "get_lsn": "rule:admin_only", "create_lsn": "rule:admin_only" } networking-arista-2017.2.2/networking_arista/000077500000000000000000000000001323242307100211445ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/__init__.py000066400000000000000000000014541323242307100232610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext import pbr.version import six __version__ = pbr.version.VersionInfo( 'networking_arista').version_string() if six.PY2: gettext.install('networking_arista', unicode=1) else: gettext.install('networking_arista') networking-arista-2017.2.2/networking_arista/_i18n.py000066400000000000000000000025251323242307100224400ustar00rootroot00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "networking_arista" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) networking-arista-2017.2.2/networking_arista/common/000077500000000000000000000000001323242307100224345ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/common/__init__.py000066400000000000000000000000001323242307100245330ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/common/api.py000066400000000000000000000110611323242307100235560ustar00rootroot00000000000000# Copyright (c) 2017 Arista Networks, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json from oslo_log import log as logging from oslo_utils import excutils import requests from requests import exceptions as requests_exc from six.moves.urllib import parse from networking_arista._i18n import _LI, _LW from networking_arista.common import exceptions as arista_exc LOG = logging.getLogger(__name__) # EAPI error message ERR_CVX_NOT_LEADER = 'only available on cluster leader' class EAPIClient(object): def __init__(self, host, username=None, password=None, verify=False, timeout=None): self.host = host self.timeout = timeout self.url = self._make_url(host) self.session = requests.Session() self.session.headers['Content-Type'] = 'application/json' self.session.headers['Accept'] = 'application/json' self.session.verify = verify if username and password: self.session.auth = (username, password) @staticmethod def _make_url(host, scheme='https'): return parse.urlunsplit( (scheme, host, '/command-api', '', '') ) def execute(self, commands, commands_to_log=None): params = { 'timestamps': False, 'format': 'json', 'version': 1, 'cmds': commands } data = { 'id': 'Networking Arista Driver', 'method': 'runCmds', 'jsonrpc': '2.0', 'params': params } if commands_to_log: log_data = dict(data) log_data['params'] = dict(params) log_data['params']['cmds'] = commands_to_log else: log_data = data LOG.info( _LI('EAPI request %(ip)s contains %(data)s'), {'ip': self.host, 'data': json.dumps(log_data)} ) # request handling try: error = None response = self.session.post( self.url, data=json.dumps(data), timeout=self.timeout ) except requests_exc.ConnectionError: error = _LW('Error while trying to connect to %(ip)s') except requests_exc.ConnectTimeout: error = _LW('Timed out while trying to connect to %(ip)s') except requests_exc.Timeout: error = _LW('Timed out during an EAPI request to %(ip)s') except requests_exc.InvalidURL: error = _LW('Ingoring attempt to connect to invalid URL at %(ip)s') except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning( _LW('Error during processing the EAPI request %(error)s'), {'error': e} ) finally: if error: msg = error % {'ip': self.host} # stop processing since we've encountered request error LOG.warning(msg) raise arista_exc.AristaRpcError(msg=msg) # response handling try: resp_data = response.json() return resp_data['result'] except ValueError as e: LOG.info(_LI('Ignoring invalid JSON response')) except KeyError: if 'error' in resp_data and resp_data['error']['code'] == 1002: for d in resp_data['error']['data']: if not isinstance(d, dict): continue elif ERR_CVX_NOT_LEADER in d.get('errors', {})[0]: LOG.info( _LI('%(ip)s is not the CVX leader'), {'ip': self.host} ) return msg = _LI('Unexpected EAPI error') LOG.info(msg) raise arista_exc.AristaRpcError(msg=msg) except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning( _LW('Error during processing the EAPI response %(error)s'), {'error': e} ) networking-arista-2017.2.2/networking_arista/common/config.py000066400000000000000000000222351323242307100242570ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from networking_arista._i18n import _ # Arista ML2 Mechanism driver specific configuration knobs. # # Following are user configurable options for Arista ML2 Mechanism # driver. The eapi_username, eapi_password, and eapi_host are # required options. Region Name must be the same that is used by # Keystone service. This option is available to support multiple # OpenStack/Neutron controllers. ARISTA_DRIVER_OPTS = [ cfg.StrOpt('eapi_username', default='', help=_('Username for Arista EOS. This is required field. ' 'If not set, all communications to Arista EOS ' 'will fail.')), cfg.StrOpt('eapi_password', default='', secret=True, # do not expose value in the logs help=_('Password for Arista EOS. This is required field. ' 'If not set, all communications to Arista EOS ' 'will fail.')), cfg.StrOpt('eapi_host', default='', help=_('Arista EOS IP address. This is required field. ' 'If not set, all communications to Arista EOS ' 'will fail.')), cfg.BoolOpt('use_fqdn', default=True, help=_('Defines if hostnames are sent to Arista EOS as FQDNs ' '("node1.domain.com") or as short names ("node1"). ' 'This is optional. If not set, a value of "True" ' 'is assumed.')), cfg.IntOpt('sync_interval', default=30, help=_('Sync interval in seconds between Neutron plugin and ' 'EOS. This interval defines how often the ' 'synchronization is performed. This is an optional ' 'field. If not set, a value of 30 seconds is ' 'assumed.')), cfg.IntOpt('conn_timeout', default=10, help=_('Connection timeout interval in seconds. This interval ' 'defines how long an EAPI request from the driver to ' 'EOS waits before timing out. If not set, a value of 10 ' 'seconds is assumed.')), cfg.StrOpt('region_name', default='RegionOne', help=_('Defines Region Name that is assigned to this OpenStack ' 'Controller. This is useful when multiple ' 'OpenStack/Neutron controllers are managing the same ' 'Arista HW clusters. Note that this name must match ' 'with the region name registered (or known) to keystone ' 'service. Authentication with Keysotne is performed by ' 'EOS. This is optional. If not set, a value of ' '"RegionOne" is assumed.')), cfg.BoolOpt('sec_group_support', default=False, help=_('Specifies if the Security Groups needs to deployed ' 'for baremetal deployments. If this flag is set to ' 'True, this means switch_info(see below) must be ' 'defined. If this flag is not defined, it is assumed ' 'to be False')), cfg.ListOpt('switch_info', default=[], help=_('This is a comma separated list of Arista switches ' 'where security groups (i.e. ACLs) need to be ' 'applied. Each string has three values separated ' 'by : in the follow format ' '::, ...... ' 'For Example: 172.13.23.55:admin:admin, ' '172.13.23.56:admin:admin, .... ' 'This is required if sec_group_support is set to ' '"True"')), cfg.StrOpt('api_type', default='JSON', help=_('Tells the plugin to use a sepcific API interfaces ' 'to communicate with CVX. Valid options are:' 'EAPI - Use EOS\' extensible API.' 'JSON - Use EOS\' JSON/REST API.')), cfg.ListOpt('managed_physnets', default=[], help=_('This is a comma separated list of physical networks ' 'which are managed by Arista switches.' 'This list will be used by the Arista ML2 plugin' 'to make the decision if it can participate in binding' 'or updating a port.' 'For Example: ' 'managed_physnets = arista_network')), cfg.BoolOpt('manage_fabric', default=False, help=_('Specifies whether the Arista ML2 plugin should bind ' 'ports to vxlan fabric segments and dynamically ' 'allocate vlan segments based on the host to connect ' 'the port to the vxlan fabric')), ] """ Arista L3 Service Plugin specific configuration knobs. Following are user configurable options for Arista L3 plugin driver. The eapi_username, eapi_password, and eapi_host are required options. """ ARISTA_L3_PLUGIN = [ cfg.StrOpt('primary_l3_host_username', default='', help=_('Username for Arista EOS. This is required field. ' 'If not set, all communications to Arista EOS ' 'will fail')), cfg.StrOpt('primary_l3_host_password', default='', secret=True, # do not expose value in the logs help=_('Password for Arista EOS. This is required field. ' 'If not set, all communications to Arista EOS ' 'will fail')), cfg.StrOpt('primary_l3_host', default='', help=_('Arista EOS IP address. This is required field. ' 'If not set, all communications to Arista EOS ' 'will fail')), cfg.StrOpt('secondary_l3_host', default='', help=_('Arista EOS IP address for second Switch MLAGed with ' 'the first one. This an optional field, however, if ' 'mlag_config flag is set, then this is required. ' 'If not set, all communications to Arista EOS ' 'will fail')), cfg.IntOpt('conn_timeout', default=10, help=_('Connection timeout interval in seconds. This interval ' 'defines how long an EAPI request from the driver to ' 'EOS waits before timing out. If not set, a value of 10 ' 'seconds is assumed.')), cfg.BoolOpt('mlag_config', default=False, help=_('This flag is used indicate if Arista Switches are ' 'configured in MLAG mode. If yes, all L3 config ' 'is pushed to both the switches automatically. ' 'If this flag is set to True, ensure to specify IP ' 'addresses of both switches. ' 'This is optional. If not set, a value of "False" ' 'is assumed.')), cfg.BoolOpt('use_vrf', default=False, help=_('A "True" value for this flag indicates to create a ' 'router in VRF. If not set, all routers are created ' 'in default VRF. ' 'This is optional. If not set, a value of "False" ' 'is assumed.')), cfg.IntOpt('l3_sync_interval', default=180, help=_('Sync interval in seconds between L3 Service plugin ' 'and EOS. This interval defines how often the ' 'synchronization is performed. This is an optional ' 'field. If not set, a value of 180 seconds is assumed')) ] ARISTA_TYPE_DRIVER_OPTS = [ cfg.IntOpt('sync_interval', default=10, help=_('VLAN Sync interval in seconds between Neutron plugin ' 'and EOS. This interval defines how often the VLAN ' 'synchronization is performed. This is an optional ' 'field. If not set, a value of 10 seconds is ' 'assumed.')), ] cfg.CONF.register_opts(ARISTA_L3_PLUGIN, "l3_arista") cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista") cfg.CONF.register_opts(ARISTA_TYPE_DRIVER_OPTS, "arista_type_driver") networking-arista-2017.2.2/networking_arista/common/constants.py000066400000000000000000000030331323242307100250210ustar00rootroot00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from networking_arista._i18n import _ EOS_UNREACHABLE_MSG = _('Unable to reach EOS') UNABLE_TO_DELETE_PORT_MSG = _('Unable to delete port from EOS') UNABLE_TO_DELETE_DEVICE_MSG = _('Unable to delete device') # Constants INTERNAL_TENANT_ID = 'INTERNAL-TENANT-ID' MECHANISM_DRV_NAME = 'arista' # Insert a heartbeat command every 100 commands HEARTBEAT_INTERVAL = 100 # Commands dict keys CMD_SYNC_HEARTBEAT = 'SYNC_HEARTBEAT' CMD_REGION_SYNC = 'REGION_SYNC' CMD_INSTANCE = 'INSTANCE' # EAPI error messages of interest ERR_CVX_NOT_LEADER = 'only available on cluster leader' ERR_DVR_NOT_SUPPORTED = 'EOS version on CVX does not support DVR' BAREMETAL_NOT_SUPPORTED = 'EOS version on CVX dpes not support Baremetal' # Flat network constant NETWORK_TYPE_FLAT = 'flat' class InstanceType(object): BAREMETAL = 'baremetal' DHCP = 'dhcp' ROUTER = 'router' VM = 'vm' VIRTUAL_INSTANCE_TYPES = [DHCP, ROUTER, VM] BAREMETAL_INSTANCE_TYPES = [BAREMETAL] networking-arista-2017.2.2/networking_arista/common/db.py000066400000000000000000000053221323242307100233750ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa UUID_LEN = 36 STR_LEN = 255 class HasTenant(object): """Tenant mixin, add to subclasses that have a tenant.""" tenant_id = sa.Column(sa.String(db_const.PROJECT_ID_FIELD_SIZE), index=True) class AristaProvisionedNets(model_base.BASEV2, model_base.HasId, HasTenant): """Stores networks provisioned on Arista EOS. Saves the segmentation ID for each network that is provisioned on EOS. This information is used during synchronization between Neutron and EOS. """ __tablename__ = 'arista_provisioned_nets' network_id = sa.Column(sa.String(UUID_LEN)) segmentation_id = sa.Column(sa.Integer) def eos_network_representation(self, segmentation_type): return {u'networkId': self.network_id, u'segmentationTypeId': self.segmentation_id, u'segmentationType': segmentation_type, u'tenantId': self.tenant_id, u'segmentId': self.id, } class AristaProvisionedVms(model_base.BASEV2, model_base.HasId, HasTenant): """Stores VMs provisioned on Arista EOS. All VMs launched on physical hosts connected to Arista Switches are remembered """ __tablename__ = 'arista_provisioned_vms' vm_id = sa.Column(sa.String(STR_LEN)) host_id = sa.Column(sa.String(STR_LEN)) port_id = sa.Column(sa.String(UUID_LEN)) network_id = sa.Column(sa.String(UUID_LEN)) def eos_port_representation(self): return {u'portId': self.port_id, u'deviceId': self.vm_id, u'hosts': [self.host_id], u'networkId': self.network_id} class AristaProvisionedTenants(model_base.BASEV2, model_base.HasId, HasTenant): """Stores Tenants provisioned on Arista EOS. Tenants list is maintained for sync between Neutron and EOS. """ __tablename__ = 'arista_provisioned_tenants' def eos_tenant_representation(self): return {u'tenantId': self.tenant_id} networking-arista-2017.2.2/networking_arista/common/db_lib.py000066400000000000000000000547411323242307100242340ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib import constants as n_const from neutron_lib import context as nctx from neutron_lib.plugins.ml2 import api as driver_api import neutron.db.api as db from neutron.db import db_base_plugin_v2 from neutron.db.models import segment as segment_models from neutron.db import securitygroups_db as sec_db from neutron.db import segments_db from neutron.plugins.ml2 import models as ml2_models from neutron.services.trunk import models as trunk_models from networking_arista.common import db as db_models VLAN_SEGMENTATION = 'vlan' def remember_tenant(tenant_id): """Stores a tenant information in repository. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_writer_session() with session.begin(): tenant = (session.query(db_models.AristaProvisionedTenants). filter_by(tenant_id=tenant_id).first()) if not tenant: tenant = db_models.AristaProvisionedTenants(tenant_id=tenant_id) session.add(tenant) def forget_tenant(tenant_id): """Removes a tenant information from repository. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_writer_session() with session.begin(): (session.query(db_models.AristaProvisionedTenants). filter_by(tenant_id=tenant_id). delete()) def get_all_tenants(): """Returns a list of all tenants stored in repository.""" session = db.get_reader_session() with session.begin(): return session.query(db_models.AristaProvisionedTenants).all() def num_provisioned_tenants(): """Returns number of tenants stored in repository.""" session = db.get_reader_session() with session.begin(): return session.query(db_models.AristaProvisionedTenants).count() def remember_vm(vm_id, host_id, port_id, network_id, tenant_id): """Stores all relevant information about a VM in repository. :param vm_id: globally unique identifier for VM instance :param host_id: ID of the host where the VM is placed :param port_id: globally unique port ID that connects VM to network :param network_id: globally unique neutron network identifier :param tenant_id: globally unique neutron tenant identifier """ session = db.get_writer_session() with session.begin(): vm = db_models.AristaProvisionedVms( vm_id=vm_id, host_id=host_id, port_id=port_id, network_id=network_id, tenant_id=tenant_id) session.add(vm) def forget_all_ports_for_network(net_id): """Removes all ports for a given network fron repository. :param net_id: globally unique network ID """ session = db.get_writer_session() with session.begin(): (session.query(db_models.AristaProvisionedVms). filter_by(network_id=net_id).delete()) def update_port(vm_id, host_id, port_id, network_id, tenant_id): """Updates the port details in the database. :param vm_id: globally unique identifier for VM instance :param host_id: ID of the new host where the VM is placed :param port_id: globally unique port ID that connects VM to network :param network_id: globally unique neutron network identifier :param tenant_id: globally unique neutron tenant identifier """ session = db.get_writer_session() with session.begin(): port = session.query(db_models.AristaProvisionedVms).filter_by( port_id=port_id).first() if port: # Update the VM's host id port.host_id = host_id port.vm_id = vm_id port.network_id = network_id port.tenant_id = tenant_id def forget_port(port_id, host_id): """Deletes the port from the database :param port_id: globally unique port ID that connects VM to network :param host_id: host to which the port is bound to """ session = db.get_writer_session() with session.begin(): session.query(db_models.AristaProvisionedVms).filter_by( port_id=port_id, host_id=host_id).delete() def remember_network_segment(tenant_id, network_id, segmentation_id, segment_id): """Stores all relevant information about a Network in repository. :param tenant_id: globally unique neutron tenant identifier :param network_id: globally unique neutron network identifier :param segmentation_id: segmentation id that is assigned to the network :param segment_id: globally unique neutron network segment identifier """ session = db.get_writer_session() with session.begin(): net = db_models.AristaProvisionedNets( tenant_id=tenant_id, id=segment_id, network_id=network_id, segmentation_id=segmentation_id) session.add(net) def forget_network_segment(tenant_id, network_id, segment_id=None): """Deletes all relevant information about a Network from repository. :param tenant_id: globally unique neutron tenant identifier :param network_id: globally unique neutron network identifier :param segment_id: globally unique neutron network segment identifier """ filters = { 'tenant_id': tenant_id, 'network_id': network_id } if segment_id: filters['id'] = segment_id session = db.get_writer_session() with session.begin(): (session.query(db_models.AristaProvisionedNets). filter_by(**filters).delete()) def get_segmentation_id(tenant_id, network_id): """Returns Segmentation ID (VLAN) associated with a network. :param tenant_id: globally unique neutron tenant identifier :param network_id: globally unique neutron network identifier """ session = db.get_reader_session() with session.begin(): net = (session.query(db_models.AristaProvisionedNets). filter_by(tenant_id=tenant_id, network_id=network_id).first()) return net.segmentation_id if net else None def is_vm_provisioned(vm_id, host_id, port_id, network_id, tenant_id): """Checks if a VM is already known to EOS :returns: True, if yes; False otherwise. :param vm_id: globally unique identifier for VM instance :param host_id: ID of the host where the VM is placed :param port_id: globally unique port ID that connects VM to network :param network_id: globally unique neutron network identifier :param tenant_id: globally unique neutron tenant identifier """ session = db.get_reader_session() with session.begin(): num_vm = (session.query(db_models.AristaProvisionedVms). filter_by(tenant_id=tenant_id, vm_id=vm_id, port_id=port_id, network_id=network_id, host_id=host_id).count()) return num_vm > 0 def is_port_provisioned(port_id, host_id=None): """Checks if a port is already known to EOS :returns: True, if yes; False otherwise. :param port_id: globally unique port ID that connects VM to network :param host_id: host to which the port is bound to """ filters = { 'port_id': port_id } if host_id: filters['host_id'] = host_id session = db.get_reader_session() with session.begin(): num_ports = (session.query(db_models.AristaProvisionedVms). filter_by(**filters).count()) return num_ports > 0 def is_network_provisioned(tenant_id, network_id, segmentation_id=None, segment_id=None): """Checks if a networks is already known to EOS :returns: True, if yes; False otherwise. :param tenant_id: globally unique neutron tenant identifier :param network_id: globally unique neutron network identifier :param segment_id: globally unique neutron network segment identifier """ session = db.get_reader_session() with session.begin(): filters = {'tenant_id': tenant_id, 'network_id': network_id} if segmentation_id: filters['segmentation_id'] = segmentation_id if segment_id: filters['id'] = segment_id num_nets = (session.query(db_models.AristaProvisionedNets). filter_by(**filters).count()) return num_nets > 0 def is_tenant_provisioned(tenant_id): """Checks if a tenant is already known to EOS :returns: True, if yes; False otherwise. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_reader_session() with session.begin(): num_tenants = (session.query(db_models.AristaProvisionedTenants). filter_by(tenant_id=tenant_id).count()) return num_tenants > 0 def num_nets_provisioned(tenant_id): """Returns number of networks for a given tennat. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_reader_session() with session.begin(): return (session.query(db_models.AristaProvisionedNets). filter_by(tenant_id=tenant_id).count()) def num_vms_provisioned(tenant_id): """Returns number of VMs for a given tennat. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_reader_session() with session.begin(): return (session.query(db_models.AristaProvisionedVms). filter_by(tenant_id=tenant_id).count()) def get_networks(tenant_id): """Returns all networks for a given tenant in EOS-compatible format. See AristaRPCWrapper.get_network_list() for return value format. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_reader_session() with session.begin(): model = db_models.AristaProvisionedNets # hack for pep8 E711: comparison to None should be # 'if cond is not None' none = None all_nets = [] if tenant_id != 'any': all_nets = (session.query(model). filter(model.tenant_id == tenant_id, model.segmentation_id != none)) else: all_nets = (session.query(model). filter(model.segmentation_id != none)) res = dict( (net.network_id, net.eos_network_representation( VLAN_SEGMENTATION)) for net in all_nets ) return res def get_vms(tenant_id): """Returns all VMs for a given tenant in EOS-compatible format. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_reader_session() with session.begin(): model = db_models.AristaProvisionedVms # hack for pep8 E711: comparison to None should be # 'if cond is not None' none = None all_ports = (session.query(model). filter(model.tenant_id == tenant_id, model.host_id != none, model.vm_id != none, model.network_id != none, model.port_id != none)) ports = {} for port in all_ports: if port.port_id not in ports: ports[port.port_id] = port.eos_port_representation() else: ports[port.port_id]['hosts'].append(port.host_id) vm_dict = dict() def eos_vm_representation(port): return {u'vmId': port['deviceId'], u'baremetal_instance': False, u'ports': [port]} for port in ports.values(): deviceId = port['deviceId'] if deviceId in vm_dict: vm_dict[deviceId]['ports'].append(port) else: vm_dict[deviceId] = eos_vm_representation(port) return vm_dict def are_ports_attached_to_network(net_id): """Checks if a given network is used by any port, excluding dhcp port. :param net_id: globally unique network ID """ session = db.get_reader_session() with session.begin(): model = db_models.AristaProvisionedVms return session.query(model).filter_by(network_id=net_id).filter( ~model.vm_id.startswith('dhcp')).count() > 0 def get_ports(tenant_id=None): """Returns all ports of VMs in EOS-compatible format. :param tenant_id: globally unique neutron tenant identifier """ session = db.get_reader_session() with session.begin(): model = db_models.AristaProvisionedVms # hack for pep8 E711: comparison to None should be # 'if cond is not None' none = None if tenant_id: all_ports = (session.query(model). filter(model.tenant_id == tenant_id, model.host_id != none, model.vm_id != none, model.network_id != none, model.port_id != none)) else: all_ports = (session.query(model). filter(model.tenant_id != none, model.host_id != none, model.vm_id != none, model.network_id != none, model.port_id != none)) ports = {} for port in all_ports: if port.port_id not in ports: ports[port.port_id] = port.eos_port_representation() ports[port.port_id]['hosts'].append(port.host_id) return ports def get_tenants(): """Returns list of all tenants in EOS-compatible format.""" session = db.get_reader_session() with session.begin(): model = db_models.AristaProvisionedTenants all_tenants = session.query(model) res = dict( (tenant.tenant_id, tenant.eos_tenant_representation()) for tenant in all_tenants ) return res def _make_port_dict(record): """Make a dict from the BM profile DB record.""" return {'port_id': record.port_id, 'host_id': record.host, 'vnic_type': record.vnic_type, 'profile': record.profile} def get_all_baremetal_ports(): """Returns a list of all ports that belong to baremetal hosts.""" session = db.get_reader_session() with session.begin(): querry = session.query(ml2_models.PortBinding) bm_ports = querry.filter_by(vnic_type='baremetal').all() return {bm_port.port_id: _make_port_dict(bm_port) for bm_port in bm_ports} def get_all_portbindings(): """Returns a list of all ports bindings.""" session = db.get_reader_session() with session.begin(): query = session.query(ml2_models.PortBinding) ports = query.all() return {port.port_id: _make_port_dict(port) for port in ports} def get_port_binding_level(filters): """Returns entries from PortBindingLevel based on the specified filters.""" session = db.get_reader_session() with session.begin(): return (session.query(ml2_models.PortBindingLevel). filter_by(**filters). order_by(ml2_models.PortBindingLevel.level). all()) def get_network_segments_by_port_id(port_id): session = db.get_reader_session() with session.begin(): segments = (session.query(segment_models.NetworkSegment, ml2_models.PortBindingLevel). join(ml2_models.PortBindingLevel). filter_by(port_id=port_id). order_by(ml2_models.PortBindingLevel.level). all()) return [segment[0] for segment in segments] def get_trunk_port_by_subport_id(subport_id): """Returns trunk parent port based on sub port id.""" session = db.get_reader_session() with session.begin(): subport = (session.query(trunk_models.SubPort). filter_by(port_id=subport_id).first()) if subport: trunk_id = subport.trunk_id return get_trunk_port_by_trunk_id(trunk_id) def get_trunk_port_by_trunk_id(trunk_id): session = db.get_reader_session() with session.begin(): trunk_port = (session.query(trunk_models.Trunk). filter_by(id=trunk_id).first()) if trunk_port: return trunk_port.port class NeutronNets(db_base_plugin_v2.NeutronDbPluginV2, sec_db.SecurityGroupDbMixin): """Access to Neutron DB. Provides access to the Neutron Data bases for all provisioned networks as well ports. This data is used during the synchronization of DB between ML2 Mechanism Driver and Arista EOS Names of the networks and ports are not stroed in Arista repository They are pulled from Neutron DB. """ def __init__(self): self.admin_ctx = nctx.get_admin_context() def get_network_name(self, tenant_id, network_id): network = self._get_network(tenant_id, network_id) network_name = None if network: network_name = network[0]['name'] return network_name def get_all_networks_for_tenant(self, tenant_id): filters = {'tenant_id': [tenant_id]} return super(NeutronNets, self).get_networks(self.admin_ctx, filters=filters) or [] def get_all_networks(self): return super(NeutronNets, self).get_networks(self.admin_ctx) or [] def get_all_ports_for_tenant(self, tenant_id): filters = {'tenant_id': [tenant_id]} return super(NeutronNets, self).get_ports(self.admin_ctx, filters=filters) or [] def get_all_ports(self): return super(NeutronNets, self).get_ports(self.admin_ctx) or [] def get_shared_network_owner_id(self, network_id): filters = {'id': [network_id]} nets = self.get_networks(self.admin_ctx, filters=filters) or [] segments = segments_db.get_network_segments(self.admin_ctx, network_id) if not nets or not segments: return if (nets[0]['shared'] and segments[0][driver_api.NETWORK_TYPE] == n_const.TYPE_VLAN): return nets[0]['tenant_id'] def get_network_segments(self, network_id, dynamic=False, context=None): context = context if context is not None else self.admin_ctx segments = segments_db.get_network_segments(context, network_id, filter_dynamic=dynamic) if dynamic: for segment in segments: segment['is_dynamic'] = True return segments def get_all_network_segments(self, network_id, context=None): segments = self.get_network_segments(network_id, context=context) segments += self.get_network_segments(network_id, dynamic=True, context=context) return segments def get_segment_by_id(self, context, segment_id): return segments_db.get_segment_by_id(context, segment_id) def get_network_from_net_id(self, network_id, context=None): filters = {'id': [network_id]} ctxt = context if context else self.admin_ctx return super(NeutronNets, self).get_networks(ctxt, filters=filters) or [] def _get_network(self, tenant_id, network_id): filters = {'tenant_id': [tenant_id], 'id': [network_id]} return super(NeutronNets, self).get_networks(self.admin_ctx, filters=filters) or [] def get_subnet_info(self, subnet_id): return self.get_subnet(subnet_id) def get_subnet_ip_version(self, subnet_id): subnet = self.get_subnet(subnet_id) return subnet['ip_version'] if 'ip_version' in subnet else None def get_subnet_gateway_ip(self, subnet_id): subnet = self.get_subnet(subnet_id) return subnet['gateway_ip'] if 'gateway_ip' in subnet else None def get_subnet_cidr(self, subnet_id): subnet = self.get_subnet(subnet_id) return subnet['cidr'] if 'cidr' in subnet else None def get_network_id(self, subnet_id): subnet = self.get_subnet(subnet_id) return subnet['network_id'] if 'network_id' in subnet else None def get_network_id_from_port_id(self, port_id): port = self.get_port(port_id) return port['network_id'] if 'network_id' in port else None def get_subnet(self, subnet_id): return super(NeutronNets, self).get_subnet(self.admin_ctx, subnet_id) or {} def get_port(self, port_id): return super(NeutronNets, self).get_port(self.admin_ctx, port_id) or {} def get_all_security_gp_to_port_bindings(self): return super(NeutronNets, self)._get_port_security_group_bindings( self.admin_ctx) or [] def get_security_gp_to_port_bindings(self, sec_gp_id): filters = {'security_group_id': [sec_gp_id]} return super(NeutronNets, self)._get_port_security_group_bindings( self.admin_ctx, filters=filters) or [] def get_security_group(self, sec_gp_id): return super(NeutronNets, self).get_security_group(self.admin_ctx, sec_gp_id) or [] def get_security_groups(self): sgs = super(NeutronNets, self).get_security_groups(self.admin_ctx) or [] sgs_all = {} if sgs: for s in sgs: sgs_all[s['id']] = s return sgs_all def get_security_group_rule(self, sec_gpr_id): return super(NeutronNets, self).get_security_group_rule(self.admin_ctx, sec_gpr_id) or [] def validate_network_rbac_policy_change(self, resource, event, trigger, context, object_type, policy, **kwargs): return super(NeutronNets, self).validate_network_rbac_policy_change( resource, event, trigger, context, object_type, policy, kwargs) networking-arista-2017.2.2/networking_arista/common/exceptions.py000066400000000000000000000032521323242307100251710ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Exceptions used by Arista ML2 Mechanism Driver.""" from neutron_lib import exceptions from networking_arista._i18n import _ class AristaRpcError(exceptions.NeutronException): message = _('%(msg)s') class AristaConfigError(exceptions.NeutronException): message = _('%(msg)s') class AristaServicePluginRpcError(exceptions.NeutronException): message = _('%(msg)s') class AristaServicePluginConfigError(exceptions.NeutronException): message = _('%(msg)s') class AristaSecurityGroupError(exceptions.NeutronException): message = _('%(msg)s') class VlanUnavailable(exceptions.NeutronException): """An exception indicating VLAN creation failed because it's not available. A specialization of the NeutronException indicating network creation failed because a specified VLAN is unavailable on the physical network. :param vlan_id: The VLAN ID. :param physical_network: The physical network. """ message = _("Unable to create the network. " "The VLAN %(vlan_id)s on physical network " "%(physical_network)s is not available.") networking-arista-2017.2.2/networking_arista/db/000077500000000000000000000000001323242307100215315ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/README000066400000000000000000000001061323242307100224060ustar00rootroot00000000000000Alembic database migration scripts for the networking-arista package. networking-arista-2017.2.2/networking_arista/db/__init__.py000066400000000000000000000000001323242307100236300ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/000077500000000000000000000000001323242307100235225ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/README000066400000000000000000000001061323242307100243770ustar00rootroot00000000000000Alembic database migration scripts for the networking-arista package. networking-arista-2017.2.2/networking_arista/db/migration/__init__.py000066400000000000000000000000001323242307100256210ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/000077500000000000000000000000001323242307100273525ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/README000066400000000000000000000000461323242307100302320ustar00rootroot00000000000000Generic single-database configuration.networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/__init__.py000066400000000000000000000000001323242307100314510ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/env.py000066400000000000000000000066761323242307100305330ustar00rootroot00000000000000# Copyright (c) 2015 Arista Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging.config import fileConfig from alembic import context from neutron_lib.db import model_base from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event from neutron.db.migration.alembic_migrations import external from neutron.db.migration.models import head # noqa # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config neutron_config = config.neutron_config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = model_base.BASEV2.metadata MYSQL_ENGINE = None ARISTA_VERSION_TABLE = 'arista_alembic_version' def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object, name, type_, reflected, compare_to): if type_ == 'table' and name in external.TABLES: return False else: return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL or an Engine. Calls to context.execute() here emit the given string to the script output. """ set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object kwargs['version_table'] = ARISTA_VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, include_object=include_object, version_table=ARISTA_VERSION_TABLE, ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/script.py.mako000066400000000000000000000020471323242307100321610ustar00rootroot00000000000000# Copyright ${create_date.year} Arista Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions/000077500000000000000000000000001323242307100312225ustar00rootroot00000000000000296b4e0236e0_initial_db_version.py000066400000000000000000000014751323242307100371220ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions# Copyright (c) 2015 Arista Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial db version Revision ID: 296b4e0236e0 Create Date: 2015-10-23 14:37:49.594974 """ # revision identifiers, used by Alembic. revision = '296b4e0236e0' down_revision = None def upgrade(): pass networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions/__init__.py000066400000000000000000000000001323242307100333210ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions/liberty/000077500000000000000000000000001323242307100326745ustar00rootroot00000000000000contract/000077500000000000000000000000001323242307100344325ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions/liberty47036dc8697a_initial_db_version_contract.py000066400000000000000000000016241323242307100443200ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions/liberty/contract# Copyright (c) 2015 Arista Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial db version Revision ID: 47036dc8697a Create Date: 2015-10-23 14:37:49.594974 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = '47036dc8697a' down_revision = '296b4e0236e0' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass expand/000077500000000000000000000000001323242307100340745ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions/liberty1c6993ce7db0_initial_db_version_expand.py000066400000000000000000000016221323242307100435540ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/db/migration/alembic_migrations/versions/liberty/expand# Copyright (c) 2015 Arista Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial db version Revision ID: 1c6993ce7db0 Create Date: 2015-10-23 14:37:49.594974 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = '1c6993ce7db0' down_revision = '296b4e0236e0' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): pass networking-arista-2017.2.2/networking_arista/l3Plugin/000077500000000000000000000000001323242307100226415ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/l3Plugin/__init__.py000066400000000000000000000000001323242307100247400ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/l3Plugin/arista_l3_driver.py000066400000000000000000000444771323242307100264670ustar00rootroot00000000000000# Copyright 2014 Arista Networks, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import socket import struct from oslo_config import cfg from oslo_log import log as logging from networking_arista._i18n import _, _LI from networking_arista.common import api from networking_arista.common import exceptions as arista_exc LOG = logging.getLogger(__name__) cfg.CONF.import_group('l3_arista', 'networking_arista.common.config') EOS_UNREACHABLE_MSG = _('Unable to reach EOS') DEFAULT_VLAN = 1 MLAG_SWITCHES = 2 VIRTUAL_ROUTER_MAC = '00:11:22:33:44:55' IPV4_BITS = 32 IPV6_BITS = 128 # This string-format-at-a-distance confuses pylint :( # pylint: disable=too-many-format-args router_in_vrf = { 'router': {'create': ['vrf definition {0}', 'rd {1}', 'exit'], 'delete': ['no vrf definition {0}']}, 'interface': {'add': ['ip routing vrf {1}', 'vlan {0}', 'exit', 'interface vlan {0}', 'vrf forwarding {1}', 'ip address {2}'], 'remove': ['no interface vlan {0}']}} router_in_default_vrf = { 'router': {'create': [], # Place holder for now. 'delete': []}, # Place holder for now. 'interface': {'add': ['ip routing', 'vlan {0}', 'exit', 'interface vlan {0}', 'ip address {2}'], 'remove': ['no interface vlan {0}']}} router_in_default_vrf_v6 = { 'router': {'create': [], 'delete': []}, 'interface': {'add': ['ipv6 unicast-routing', 'vlan {0}', 'exit', 'interface vlan {0}', 'ipv6 enable', 'ipv6 address {2}'], 'remove': ['no interface vlan {0}']}} additional_cmds_for_mlag = { 'router': {'create': ['ip virtual-router mac-address {0}'], 'delete': []}, 'interface': {'add': ['ip virtual-router address {0}'], 'remove': []}} additional_cmds_for_mlag_v6 = { 'router': {'create': [], 'delete': []}, 'interface': {'add': ['ipv6 virtual-router address {0}'], 'remove': []}} class AristaL3Driver(object): """Wraps Arista JSON RPC. All communications between Neutron and EOS are over JSON RPC. EOS - operating system used on Arista hardware Command API - JSON RPC API provided by Arista EOS """ def __init__(self): self._servers = [] self._hosts = [] self._interfaceDict = None self._validate_config() host = cfg.CONF.l3_arista.primary_l3_host self._hosts.append(host) self._servers.append(self._make_eapi_client(host)) self._mlag_configured = cfg.CONF.l3_arista.mlag_config self._use_vrf = cfg.CONF.l3_arista.use_vrf if self._mlag_configured: host = cfg.CONF.l3_arista.secondary_l3_host self._hosts.append(host) self._servers.append(self._make_eapi_client(host)) self._additionalRouterCmdsDict = additional_cmds_for_mlag['router'] self._additionalInterfaceCmdsDict = ( additional_cmds_for_mlag['interface']) if self._use_vrf: self.routerDict = router_in_vrf['router'] self._interfaceDict = router_in_vrf['interface'] else: self.routerDict = router_in_default_vrf['router'] self._interfaceDict = router_in_default_vrf['interface'] @staticmethod def _make_eapi_client(host): return api.EAPIClient( host, username=cfg.CONF.l3_arista.primary_l3_host_username, password=cfg.CONF.l3_arista.primary_l3_host_password, verify=False, timeout=cfg.CONF.l3_arista.conn_timeout ) def _validate_config(self): if cfg.CONF.l3_arista.get('primary_l3_host') == '': msg = _('Required option primary_l3_host is not set') LOG.error(msg) raise arista_exc.AristaServicePluginConfigError(msg=msg) if cfg.CONF.l3_arista.get('mlag_config'): if cfg.CONF.l3_arista.get('use_vrf'): # This is invalid/unsupported configuration msg = _('VRFs are not supported MLAG config mode') LOG.error(msg) raise arista_exc.AristaServicePluginConfigError(msg=msg) if cfg.CONF.l3_arista.get('secondary_l3_host') == '': msg = _('Required option secondary_l3_host is not set') LOG.error(msg) raise arista_exc.AristaServicePluginConfigError(msg=msg) if cfg.CONF.l3_arista.get('primary_l3_host_username') == '': msg = _('Required option primary_l3_host_username is not set') LOG.error(msg) raise arista_exc.AristaServicePluginConfigError(msg=msg) def create_router_on_eos(self, router_name, rdm, server): """Creates a router on Arista HW Device. :param router_name: globally unique identifier for router/VRF :param rdm: A value generated by hashing router name :param server: Server endpoint on the Arista switch to be configured """ cmds = [] rd = "%s:%s" % (rdm, rdm) for c in self.routerDict['create']: cmds.append(c.format(router_name, rd)) if self._mlag_configured: mac = VIRTUAL_ROUTER_MAC for c in self._additionalRouterCmdsDict['create']: cmds.append(c.format(mac)) self._run_openstack_l3_cmds(cmds, server) def delete_router_from_eos(self, router_name, server): """Deletes a router from Arista HW Device. :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured """ cmds = [] for c in self.routerDict['delete']: cmds.append(c.format(router_name)) if self._mlag_configured: for c in self._additionalRouterCmdsDict['delete']: cmds.append(c) self._run_openstack_l3_cmds(cmds, server) def _select_dicts(self, ipv): if self._use_vrf: self._interfaceDict = router_in_vrf['interface'] else: if ipv == 6: # for IPv6 use IPv6 commmands self._interfaceDict = router_in_default_vrf_v6['interface'] self._additionalInterfaceCmdsDict = ( additional_cmds_for_mlag_v6['interface']) else: self._interfaceDict = router_in_default_vrf['interface'] self._additionalInterfaceCmdsDict = ( additional_cmds_for_mlag['interface']) def add_interface_to_router(self, segment_id, router_name, gip, router_ip, mask, server): """Adds an interface to existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param gip: Gateway IP associated with the subnet :param router_ip: IP address of the router :param mask: subnet mask to be used :param server: Server endpoint on the Arista switch to be configured """ if not segment_id: segment_id = DEFAULT_VLAN cmds = [] for c in self._interfaceDict['add']: if self._mlag_configured: # In VARP config, use router ID else, use gateway IP address. ip = router_ip else: ip = gip + '/' + mask cmds.append(c.format(segment_id, router_name, ip)) if self._mlag_configured: for c in self._additionalInterfaceCmdsDict['add']: cmds.append(c.format(gip)) self._run_openstack_l3_cmds(cmds, server) def delete_interface_from_router(self, segment_id, router_name, server): """Deletes an interface from existing HW router on Arista HW device. :param segment_id: VLAN Id associated with interface that is added :param router_name: globally unique identifier for router/VRF :param server: Server endpoint on the Arista switch to be configured """ if not segment_id: segment_id = DEFAULT_VLAN cmds = [] for c in self._interfaceDict['remove']: cmds.append(c.format(segment_id)) self._run_openstack_l3_cmds(cmds, server) def create_router(self, context, tenant_id, router): """Creates a router on Arista Switch. Deals with multiple configurations - such as Router per VRF, a router in default VRF, Virtual Router in MLAG configurations """ if router: router_name = self._arista_router_name(tenant_id, router['name']) hashed = hashlib.sha256(router_name.encode('utf-8')) rdm = str(int(hashed.hexdigest(), 16) % 65536) mlag_peer_failed = False for s in self._servers: try: self.create_router_on_eos(router_name, rdm, s) mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: # In paied switch, it is OK to fail on one switch mlag_peer_failed = True else: msg = (_('Failed to create router %s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg) def delete_router(self, context, tenant_id, router_id, router): """Deletes a router from Arista Switch.""" if router: router_name = self._arista_router_name(tenant_id, router['name']) mlag_peer_failed = False for s in self._servers: try: self.delete_router_from_eos(router_name, s) mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: # In paied switch, it is OK to fail on one switch mlag_peer_failed = True else: msg = (_('Failed to create router %s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg) def update_router(self, context, router_id, original_router, new_router): """Updates a router which is already created on Arista Switch. TODO: (Sukhdev) - to be implemented in next release. """ pass def add_router_interface(self, context, router_info): """Adds an interface to a router created on Arista HW router. This deals with both IPv6 and IPv4 configurations. """ if router_info: self._select_dicts(router_info['ip_version']) cidr = router_info['cidr'] subnet_mask = cidr.split('/')[1] router_name = self._arista_router_name(router_info['tenant_id'], router_info['name']) if self._mlag_configured: # For MLAG, we send a specific IP address as opposed to cidr # For now, we are using x.x.x.253 and x.x.x.254 as virtual IP mlag_peer_failed = False for i, server in enumerate(self._servers): # Get appropriate virtual IP address for this router router_ip = self._get_router_ip(cidr, i, router_info['ip_version']) try: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], router_ip, subnet_mask, server) mlag_peer_failed = False except Exception: if not mlag_peer_failed: mlag_peer_failed = True else: msg = (_('Failed to add interface to router ' '%s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError( msg=msg) else: for s in self._servers: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], None, subnet_mask, s) def remove_router_interface(self, context, router_info): """Removes previously configured interface from router on Arista HW. This deals with both IPv6 and IPv4 configurations. """ if router_info: router_name = self._arista_router_name(router_info['tenant_id'], router_info['name']) mlag_peer_failed = False for s in self._servers: try: self.delete_interface_from_router(router_info['seg_id'], router_name, s) if self._mlag_configured: mlag_peer_failed = False except Exception: if self._mlag_configured and not mlag_peer_failed: mlag_peer_failed = True else: msg = (_('Failed to add interface to router ' '%s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg) def _run_openstack_l3_cmds(self, commands, server): """Execute/sends a CAPI (Command API) command to EOS. In this method, list of commands is appended with prefix and postfix commands - to make is understandble by EOS. :param commands : List of command to be executed on EOS. :param server: Server endpoint on the Arista switch to be configured """ command_start = ['enable', 'configure'] command_end = ['exit'] full_command = command_start + commands + command_end LOG.info(_LI('Executing command on Arista EOS: %s'), full_command) try: # this returns array of return values for every command in # full_command list ret = server.execute(full_command) LOG.info(_LI('Results of execution on Arista EOS: %s'), ret) except Exception: msg = (_('Error occurred while trying to execute ' 'commands %(cmd)s on EOS %(host)s') % {'cmd': full_command, 'host': server}) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg) def _arista_router_name(self, tenant_id, name): """Generate an arista specific name for this router. Use a unique name so that OpenStack created routers/SVIs can be distinguishged from the user created routers/SVIs on Arista HW. """ return 'OS' + '-' + tenant_id + '-' + name def _get_binary_from_ipv4(self, ip_addr): """Converts IPv4 address to binary form.""" return struct.unpack("!L", socket.inet_pton(socket.AF_INET, ip_addr))[0] def _get_binary_from_ipv6(self, ip_addr): """Converts IPv6 address to binary form.""" hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6, ip_addr)) return (hi << 64) | lo def _get_ipv4_from_binary(self, bin_addr): """Converts binary address to Ipv4 format.""" return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr)) def _get_ipv6_from_binary(self, bin_addr): """Converts binary address to Ipv6 format.""" hi = bin_addr >> 64 lo = bin_addr & 0xFFFFFFFF return socket.inet_ntop(socket.AF_INET6, struct.pack("!QQ", hi, lo)) def _get_router_ip(self, cidr, ip_count, ip_ver): """For a given IP subnet and IP version type, generate IP for router. This method takes the network address (cidr) and selects an IP address that should be assigned to virtual router running on multiple switches. It uses upper addresses in a subnet address as IP for the router. Each instace of the router, on each switch, requires uniqe IP address. For example in IPv4 case, on a 255 subnet, it will pick X.X.X.254 as first addess, X.X.X.253 for next, and so on. """ start_ip = MLAG_SWITCHES + ip_count network_addr, prefix = cidr.split('/') if ip_ver == 4: bits = IPV4_BITS ip = self._get_binary_from_ipv4(network_addr) elif ip_ver == 6: bits = IPV6_BITS ip = self._get_binary_from_ipv6(network_addr) mask = (pow(2, bits) - 1) << (bits - int(prefix)) network_addr = ip & mask router_ip = pow(2, bits - int(prefix)) - start_ip router_ip = network_addr | router_ip if ip_ver == 4: return self._get_ipv4_from_binary(router_ip) + '/' + prefix else: return self._get_ipv6_from_binary(router_ip) + '/' + prefix networking-arista-2017.2.2/networking_arista/l3Plugin/l3_arista.py000066400000000000000000000260701323242307100251010ustar00rootroot00000000000000# Copyright 2014 Arista Networks, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import threading from neutron_lib import constants as n_const from neutron_lib import context as nctx from neutron_lib.plugins import constants as plugin_constants from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import l3_rpc from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.db import db_base_plugin_v2 from neutron.db import extraroute_db from neutron.db import l3_agentschedulers_db from neutron.db import l3_gwmode_db from neutron.plugins.ml2.driver_context import NetworkContext # noqa from networking_arista._i18n import _LE, _LI from networking_arista.common import db_lib from networking_arista.l3Plugin import arista_l3_driver LOG = logging.getLogger(__name__) class AristaL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2, extraroute_db.ExtraRoute_db_mixin, l3_gwmode_db.L3_NAT_db_mixin, l3_agentschedulers_db.L3AgentSchedulerDbMixin): """Implements L3 Router service plugin for Arista hardware. Creates routers in Arista hardware, manages them, adds/deletes interfaces to the routes. """ supported_extension_aliases = ["router", "ext-gw-mode", "extraroute"] def __init__(self, driver=None): self.driver = driver or arista_l3_driver.AristaL3Driver() self.ndb = db_lib.NeutronNets() self.setup_rpc() self.sync_timeout = cfg.CONF.l3_arista.l3_sync_interval self.sync_lock = threading.Lock() self._synchronization_thread() def setup_rpc(self): # RPC support self.topic = topics.L3PLUGIN self.conn = n_rpc.create_connection() self.agent_notifiers.update( {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) self.endpoints = [l3_rpc.L3RpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.consume_in_threads() def get_plugin_type(self): return plugin_constants.L3 def get_plugin_description(self): """Returns string description of the plugin.""" return ("Arista L3 Router Service Plugin for Arista Hardware " "based routing") def _synchronization_thread(self): with self.sync_lock: self.synchronize() self.timer = threading.Timer(self.sync_timeout, self._synchronization_thread) self.timer.start() def stop_synchronization_thread(self): if self.timer: self.timer.cancel() self.timer = None @log_helpers.log_method_call def create_router(self, context, router): """Create a new router entry in DB, and create it Arista HW.""" tenant_id = router['router']['tenant_id'] # Add router to the DB new_router = super(AristaL3ServicePlugin, self).create_router( context, router) # create router on the Arista Hw try: self.driver.create_router(context, tenant_id, new_router) return new_router except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error creating router on Arista HW router=%s "), new_router) super(AristaL3ServicePlugin, self).delete_router( context, new_router['id']) @log_helpers.log_method_call def update_router(self, context, router_id, router): """Update an existing router in DB, and update it in Arista HW.""" # Read existing router record from DB original_router = super(AristaL3ServicePlugin, self).get_router( context, router_id) # Update router DB new_router = super(AristaL3ServicePlugin, self).update_router( context, router_id, router) # Modify router on the Arista Hw try: self.driver.update_router(context, router_id, original_router, new_router) return new_router except Exception: LOG.error(_LE("Error updating router on Arista HW router=%s "), new_router) @log_helpers.log_method_call def delete_router(self, context, router_id): """Delete an existing router from Arista HW as well as from the DB.""" router = super(AristaL3ServicePlugin, self).get_router(context, router_id) tenant_id = router['tenant_id'] # Delete router on the Arista Hw try: self.driver.delete_router(context, tenant_id, router_id, router) except Exception as e: LOG.error(_LE("Error deleting router on Arista HW " "router %(r)s exception=%(e)s"), {'r': router, 'e': e}) super(AristaL3ServicePlugin, self).delete_router(context, router_id) @log_helpers.log_method_call def add_router_interface(self, context, router_id, interface_info): """Add a subnet of a network to an existing router.""" new_router = super(AristaL3ServicePlugin, self).add_router_interface( context, router_id, interface_info) # Get network info for the subnet that is being added to the router. # Check if the interface information is by port-id or subnet-id add_by_port, add_by_sub = self._validate_interface_info(interface_info) if add_by_sub: subnet = self.get_subnet(context, interface_info['subnet_id']) elif add_by_port: port = self.get_port(context, interface_info['port_id']) subnet_id = port['fixed_ips'][0]['subnet_id'] subnet = self.get_subnet(context, subnet_id) network_id = subnet['network_id'] # To create SVI's in Arista HW, the segmentation Id is required # for this network. ml2_db = NetworkContext(self, context, {'id': network_id}) seg_id = ml2_db.network_segments[0]['segmentation_id'] # Package all the info needed for Hw programming router = super(AristaL3ServicePlugin, self).get_router(context, router_id) router_info = copy.deepcopy(new_router) router_info['seg_id'] = seg_id router_info['name'] = router['name'] router_info['cidr'] = subnet['cidr'] router_info['gip'] = subnet['gateway_ip'] router_info['ip_version'] = subnet['ip_version'] try: self.driver.add_router_interface(context, router_info) return new_router except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error Adding subnet %(subnet)s to " "router %(router_id)s on Arista HW"), {'subnet': subnet, 'router_id': router_id}) super(AristaL3ServicePlugin, self).remove_router_interface( context, router_id, interface_info) @log_helpers.log_method_call def remove_router_interface(self, context, router_id, interface_info): """Remove a subnet of a network from an existing router.""" new_router = ( super(AristaL3ServicePlugin, self).remove_router_interface( context, router_id, interface_info)) # Get network information of the subnet that is being removed subnet = self.get_subnet(context, new_router['subnet_id']) network_id = subnet['network_id'] # For SVI removal from Arista HW, segmentation ID is needed ml2_db = NetworkContext(self, context, {'id': network_id}) seg_id = ml2_db.network_segments[0]['segmentation_id'] router = super(AristaL3ServicePlugin, self).get_router(context, router_id) router_info = copy.deepcopy(new_router) router_info['seg_id'] = seg_id router_info['name'] = router['name'] try: self.driver.remove_router_interface(context, router_info) return new_router except Exception as exc: LOG.error(_LE("Error removing interface %(interface)s from " "router %(router_id)s on Arista HW" "Exception =(exc)s"), {'interface': interface_info, 'router_id': router_id, 'exc': exc}) def synchronize(self): """Synchronizes Router DB from Neturon DB with EOS. Walks through the Neturon Db and ensures that all the routers created in Netuton DB match with EOS. After creating appropriate routers, it ensures to add interfaces as well. Uses idempotent properties of EOS configuration, which means same commands can be repeated. """ LOG.info(_LI('Syncing Neutron Router DB <-> EOS')) ctx = nctx.get_admin_context() routers = super(AristaL3ServicePlugin, self).get_routers(ctx) for r in routers: tenant_id = r['tenant_id'] ports = self.ndb.get_all_ports_for_tenant(tenant_id) try: self.driver.create_router(self, tenant_id, r) except Exception: continue # Figure out which interfaces are added to this router for p in ports: if p['device_id'] == r['id']: net_id = p['network_id'] subnet_id = p['fixed_ips'][0]['subnet_id'] subnet = self.ndb.get_subnet_info(subnet_id) ml2_db = NetworkContext(self, ctx, {'id': net_id}) seg_id = ml2_db.network_segments[0]['segmentation_id'] r['seg_id'] = seg_id r['cidr'] = subnet['cidr'] r['gip'] = subnet['gateway_ip'] r['ip_version'] = subnet['ip_version'] try: self.driver.add_router_interface(self, r) except Exception: LOG.error(_LE("Error Adding interface %(subnet_id)s " "to router %(router_id)s on Arista HW"), {'subnet_id': subnet_id, 'router_id': r}) networking-arista-2017.2.2/networking_arista/ml2/000077500000000000000000000000001323242307100216365ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/ml2/__init__.py000066400000000000000000000000001323242307100237350ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/ml2/arista_sec_gp.py000066400000000000000000000606351323242307100250250ustar00rootroot00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import re from oslo_config import cfg from oslo_log import log as logging from networking_arista._i18n import _, _LI from networking_arista.common import api from networking_arista.common import db_lib from networking_arista.common import exceptions as arista_exc LOG = logging.getLogger(__name__) EOS_UNREACHABLE_MSG = _('Unable to reach EOS') # Note 'None,null' means default rule - i.e. deny everything SUPPORTED_SG_PROTOCOLS = [None, 'tcp', 'udp', 'icmp'] acl_cmd = { 'acl': {'create': ['ip access-list {0}'], 'in_rule': ['permit {0} {1} any range {2} {3}'], 'out_rule': ['permit {0} any {1} range {2} {3}'], 'in_icmp_custom1': ['permit icmp {0} any {1}'], 'out_icmp_custom1': ['permit icmp any {0} {1}'], 'in_icmp_custom2': ['permit icmp {0} any {1} {2}'], 'out_icmp_custom2': ['permit icmp any {0} {1} {2}'], 'default': [], 'delete_acl': ['no ip access-list {0}'], 'del_in_icmp_custom1': ['ip access-list {0}', 'no permit icmp {1} any {2}', 'exit'], 'del_out_icmp_custom1': ['ip access-list {0}', 'no permit icmp any {1} {2}', 'exit'], 'del_in_icmp_custom2': ['ip access-list {0}', 'no permit icmp {1} any {2} {3}', 'exit'], 'del_out_icmp_custom2': ['ip access-list {0}', 'no permit icmp any {1} {2} {3}', 'exit'], 'del_in_acl_rule': ['ip access-list {0}', 'no permit {1} {2} any range {3} {4}', 'exit'], 'del_out_acl_rule': ['ip access-list {0}', 'no permit {1} any {2} range {3} {4}', 'exit']}, 'apply': {'ingress': ['interface {0}', 'ip access-group {1} in', 'exit'], 'egress': ['interface {0}', 'ip access-group {1} out', 'exit'], 'rm_ingress': ['interface {0}', 'no ip access-group {1} in', 'exit'], 'rm_egress': ['interface {0}', 'no ip access-group {1} out', 'exit']}} class AristaSecGroupSwitchDriver(object): """Wraps Arista JSON RPC. All communications between Neutron and EOS are over JSON RPC. EOS - operating system used on Arista hardware Command API - JSON RPC API provided by Arista EOS """ def __init__(self, neutron_db): self._ndb = neutron_db self._servers = [] self._hosts = {} self.sg_enabled = cfg.CONF.ml2_arista.get('sec_group_support') self._validate_config() for s in cfg.CONF.ml2_arista.switch_info: switch_ip, switch_user, switch_pass = s.split(":") if switch_pass == "''": switch_pass = '' self._hosts[switch_ip] = ( {'user': switch_user, 'password': switch_pass}) self._servers.append(self._make_eapi_client(switch_ip)) self.aclCreateDict = acl_cmd['acl'] self.aclApplyDict = acl_cmd['apply'] def _make_eapi_client(self, host): return api.EAPIClient( host, username=self._hosts[host]['user'], password=self._hosts[host]['password'], verify=False, timeout=cfg.CONF.ml2_arista.conn_timeout ) def _validate_config(self): if not self.sg_enabled: return if len(cfg.CONF.ml2_arista.get('switch_info')) < 1: msg = _('Required option - when "sec_group_support" is enabled, ' 'at least one switch must be specified ') LOG.exception(msg) raise arista_exc.AristaConfigError(msg=msg) def _get_port_for_acl(self, port_id, server): """Gets interface name for ACLs Finds the Port-Channel name if port_id is in a Port-Channel, otherwise ACLs are applied to Ethernet interface. :param port_id: Name of port from ironic db :param server: Server endpoint on the Arista switch to be configured """ all_intf_info = self._run_eos_cmds( ['show interfaces %s' % port_id], server)[0] intf_info = all_intf_info.get('interfaces', {}).get(port_id, {}) member_info = intf_info.get('interfaceMembership', '') port_group_info = re.search('Member of (?P\S+)', member_info) if port_group_info: port_id = port_group_info.group('port_group') return port_id def _create_acl_on_eos(self, in_cmds, out_cmds, protocol, cidr, from_port, to_port, direction): """Creates an ACL on Arista HW Device. :param name: Name for the ACL :param server: Server endpoint on the Arista switch to be configured """ if protocol == 'icmp': # ICMP rules require special processing if ((from_port and to_port) or (not from_port and not to_port)): rule = 'icmp_custom2' elif from_port and not to_port: rule = 'icmp_custom1' else: msg = _('Invalid ICMP rule specified') LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) rule_type = 'in' cmds = in_cmds if direction == 'egress': rule_type = 'out' cmds = out_cmds final_rule = rule_type + '_' + rule acl_dict = self.aclCreateDict[final_rule] # None port is probematic - should be replaced with 0 if not from_port: from_port = 0 if not to_port: to_port = 0 for c in acl_dict: if rule == 'icmp_custom2': cmds.append(c.format(cidr, from_port, to_port)) else: cmds.append(c.format(cidr, from_port)) return in_cmds, out_cmds else: # Non ICMP rules processing here acl_dict = self.aclCreateDict['in_rule'] cmds = in_cmds if direction == 'egress': acl_dict = self.aclCreateDict['out_rule'] cmds = out_cmds if not protocol: acl_dict = self.aclCreateDict['default'] for c in acl_dict: cmds.append(c.format(protocol, cidr, from_port, to_port)) return in_cmds, out_cmds def _delete_acl_from_eos(self, name, server): """deletes an ACL from Arista HW Device. :param name: Name for the ACL :param server: Server endpoint on the Arista switch to be configured """ cmds = [] for c in self.aclCreateDict['delete_acl']: cmds.append(c.format(name)) self._run_openstack_sg_cmds(cmds, server) def _delete_acl_rule_from_eos(self, name, protocol, cidr, from_port, to_port, direction, server): """deletes an ACL from Arista HW Device. :param name: Name for the ACL :param server: Server endpoint on the Arista switch to be configured """ cmds = [] if protocol == 'icmp': # ICMP rules require special processing if ((from_port and to_port) or (not from_port and not to_port)): rule = 'icmp_custom2' elif from_port and not to_port: rule = 'icmp_custom1' else: msg = _('Invalid ICMP rule specified') LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) rule_type = 'del_in' if direction == 'egress': rule_type = 'del_out' final_rule = rule_type + '_' + rule acl_dict = self.aclCreateDict[final_rule] # None port is probematic - should be replaced with 0 if not from_port: from_port = 0 if not to_port: to_port = 0 for c in acl_dict: if rule == 'icmp_custom2': cmds.append(c.format(name, cidr, from_port, to_port)) else: cmds.append(c.format(name, cidr, from_port)) else: acl_dict = self.aclCreateDict['del_in_acl_rule'] if direction == 'egress': acl_dict = self.aclCreateDict['del_out_acl_rule'] for c in acl_dict: cmds.append(c.format(name, protocol, cidr, from_port, to_port)) self._run_openstack_sg_cmds(cmds, server) def _apply_acl_on_eos(self, port_id, name, direction, server): """Creates an ACL on Arista HW Device. :param port_id: The port where the ACL needs to be applied :param name: Name for the ACL :param direction: must contain "ingress" or "egress" :param server: Server endpoint on the Arista switch to be configured """ cmds = [] port_id = self._get_port_for_acl(port_id, server) for c in self.aclApplyDict[direction]: cmds.append(c.format(port_id, name)) self._run_openstack_sg_cmds(cmds, server) def _remove_acl_from_eos(self, port_id, name, direction, server): """Remove an ACL from a port on Arista HW Device. :param port_id: The port where the ACL needs to be applied :param name: Name for the ACL :param direction: must contain "ingress" or "egress" :param server: Server endpoint on the Arista switch to be configured """ cmds = [] port_id = self._get_port_for_acl(port_id, server) acl_cmd = self.aclApplyDict['rm_ingress'] if direction == 'egress': acl_cmd = self.aclApplyDict['rm_egress'] for c in acl_cmd: cmds.append(c.format(port_id, name)) self._run_openstack_sg_cmds(cmds, server) def _create_acl_rule(self, in_cmds, out_cmds, sgr): """Creates an ACL on Arista Switch. For a given Security Group (ACL), it adds additional rule Deals with multiple configurations - such as multiple switches """ # Only deal with valid protocols - skip the rest if not sgr or sgr['protocol'] not in SUPPORTED_SG_PROTOCOLS: return in_cmds, out_cmds remote_ip = sgr['remote_ip_prefix'] if not remote_ip: remote_ip = 'any' min_port = sgr['port_range_min'] if not min_port: min_port = 0 max_port = sgr['port_range_max'] if not max_port and sgr['protocol'] != 'icmp': max_port = 65535 in_cmds, out_cmds = self._create_acl_on_eos(in_cmds, out_cmds, sgr['protocol'], remote_ip, min_port, max_port, sgr['direction']) return in_cmds, out_cmds def create_acl_rule(self, sgr): """Creates an ACL on Arista Switch. For a given Security Group (ACL), it adds additional rule Deals with multiple configurations - such as multiple switches """ # Do nothing if Security Groups are not enabled if not self.sg_enabled: return name = self._arista_acl_name(sgr['security_group_id'], sgr['direction']) cmds = [] for c in self.aclCreateDict['create']: cmds.append(c.format(name)) in_cmds, out_cmds = self._create_acl_rule(cmds, cmds, sgr) cmds = in_cmds if sgr['direction'] == 'egress': cmds = out_cmds cmds.append('exit') for s in self._servers: try: self._run_openstack_sg_cmds(cmds, s) except Exception: msg = (_('Failed to create ACL rule on EOS %s') % s) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def delete_acl_rule(self, sgr): """Deletes an ACL rule on Arista Switch. For a given Security Group (ACL), it adds removes a rule Deals with multiple configurations - such as multiple switches """ # Do nothing if Security Groups are not enabled if not self.sg_enabled: return # Only deal with valid protocols - skip the rest if not sgr or sgr['protocol'] not in SUPPORTED_SG_PROTOCOLS: return # Build seperate ACL for ingress and egress name = self._arista_acl_name(sgr['security_group_id'], sgr['direction']) remote_ip = sgr['remote_ip_prefix'] if not remote_ip: remote_ip = 'any' min_port = sgr['port_range_min'] if not min_port: min_port = 0 max_port = sgr['port_range_max'] if not max_port and sgr['protocol'] != 'icmp': max_port = 65535 for s in self._servers: try: self._delete_acl_rule_from_eos(name, sgr['protocol'], remote_ip, min_port, max_port, sgr['direction'], s) except Exception: msg = (_('Failed to delete ACL on EOS %s') % s) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def _create_acl_shell(self, sg_id): """Creates an ACL on Arista Switch. For a given Security Group (ACL), it adds additional rule Deals with multiple configurations - such as multiple switches """ # Build seperate ACL for ingress and egress direction = ['ingress', 'egress'] cmds = [] for d in range(len(direction)): name = self._arista_acl_name(sg_id, direction[d]) cmds.append([]) for c in self.aclCreateDict['create']: cmds[d].append(c.format(name)) return cmds[0], cmds[1] def create_acl(self, sg): """Creates an ACL on Arista Switch. Deals with multiple configurations - such as multiple switches """ # Do nothing if Security Groups are not enabled if not self.sg_enabled: return if not sg: msg = _('Invalid or Empty Security Group Specified') raise arista_exc.AristaSecurityGroupError(msg=msg) in_cmds, out_cmds = self._create_acl_shell(sg['id']) for sgr in sg['security_group_rules']: in_cmds, out_cmds = self._create_acl_rule(in_cmds, out_cmds, sgr) in_cmds.append('exit') out_cmds.append('exit') for s in self._servers: try: self._run_openstack_sg_cmds(in_cmds, s) self._run_openstack_sg_cmds(out_cmds, s) except Exception: msg = (_('Failed to create ACL on EOS %s') % s) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def delete_acl(self, sg): """Deletes an ACL from Arista Switch. Deals with multiple configurations - such as multiple switches """ # Do nothing if Security Groups are not enabled if not self.sg_enabled: return if not sg: msg = _('Invalid or Empty Security Group Specified') raise arista_exc.AristaSecurityGroupError(msg=msg) direction = ['ingress', 'egress'] for d in range(len(direction)): name = self._arista_acl_name(sg['id'], direction[d]) for s in self._servers: try: self._delete_acl_from_eos(name, s) except Exception: msg = (_('Failed to create ACL on EOS %s') % s) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def apply_acl(self, sgs, switch_id, port_id, switch_info): """Creates an ACL on Arista Switch. Applies ACLs to the baremetal ports only. The port/switch details is passed through the parameters. Deals with multiple configurations - such as multiple switches param sgs: List of Security Groups param switch_id: Switch ID of TOR where ACL needs to be applied param port_id: Port ID of port where ACL needs to be applied param switch_info: IP address of the TOR """ # Do nothing if Security Groups are not enabled if not self.sg_enabled: return # We do not support more than one security group on a port if not sgs or len(sgs) > 1: msg = (_('Only one Security Group Supported on a port %s') % sgs) raise arista_exc.AristaSecurityGroupError(msg=msg) sg = self._ndb.get_security_group(sgs[0]) # We already have ACLs on the TORs. # Here we need to find out which ACL is applicable - i.e. # Ingress ACL, egress ACL or both direction = ['ingress', 'egress'] server = self._make_eapi_client(switch_info) for d in range(len(direction)): name = self._arista_acl_name(sg['id'], direction[d]) try: self._apply_acl_on_eos(port_id, name, direction[d], server) except Exception: msg = (_('Failed to apply ACL on port %s') % port_id) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def remove_acl(self, sgs, switch_id, port_id, switch_info): """Removes an ACL from Arista Switch. Removes ACLs from the baremetal ports only. The port/switch details is passed throuhg the parameters. param sgs: List of Security Groups param switch_id: Switch ID of TOR where ACL needs to be removed param port_id: Port ID of port where ACL needs to be removed param switch_info: IP address of the TOR """ # Do nothing if Security Groups are not enabled if not self.sg_enabled: return # We do not support more than one security group on a port if not sgs or len(sgs) > 1: msg = (_('Only one Security Group Supported on a port %s') % sgs) raise arista_exc.AristaSecurityGroupError(msg=msg) sg = self._ndb.get_security_group(sgs[0]) # We already have ACLs on the TORs. # Here we need to find out which ACL is applicable - i.e. # Ingress ACL, egress ACL or both direction = [] for sgr in sg['security_group_rules']: # Only deal with valid protocols - skip the rest if not sgr or sgr['protocol'] not in SUPPORTED_SG_PROTOCOLS: continue if sgr['direction'] not in direction: direction.append(sgr['direction']) # THIS IS TOTAL HACK NOW - just for testing # Assumes the credential of all switches are same as specified # in the condig file server = self._make_eapi_client(switch_info) for d in range(len(direction)): name = self._arista_acl_name(sg['id'], direction[d]) try: self._remove_acl_from_eos(port_id, name, direction[d], server) except Exception: msg = (_('Failed to remove ACL on port %s') % port_id) LOG.exception(msg) # No need to raise exception for ACL removal # raise arista_exc.AristaSecurityGroupError(msg=msg) def _run_openstack_sg_cmds(self, commands, server): """Execute/sends a CAPI (Command API) command to EOS. In this method, list of commands is appended with prefix and postfix commands - to make is understandble by EOS. :param commands : List of command to be executed on EOS. :param server: Server endpoint on the Arista switch to be configured """ command_start = ['enable', 'configure'] command_end = ['exit'] full_command = command_start + commands + command_end return self._run_eos_cmds(full_command, server) def _run_eos_cmds(self, commands, server): """Execute/sends a CAPI (Command API) command to EOS. This method is useful for running show commands that require no prefix or postfix commands. :param commands : List of commands to be executed on EOS. :param server: Server endpoint on the Arista switch to be configured """ LOG.info(_LI('Executing command on Arista EOS: %s'), commands) try: # this returns array of return values for every command in # commands list ret = server.execute(commands) LOG.info(_LI('Results of execution on Arista EOS: %s'), ret) return ret except Exception: msg = (_('Error occurred while trying to execute ' 'commands %(cmd)s on EOS %(host)s') % {'cmd': commands, 'host': server}) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError(msg=msg) def _arista_acl_name(self, name, direction): """Generate an arista specific name for this ACL. Use a unique name so that OpenStack created ACLs can be distinguishged from the user created ACLs on Arista HW. """ in_out = 'IN' if direction == 'egress': in_out = 'OUT' return 'SG' + '-' + in_out + '-' + name def perform_sync_of_sg(self): """Perform sync of the security groups between ML2 and EOS. This is unconditional sync to ensure that all security ACLs are pushed to all the switches, in case of switch or neutron reboot """ # Do nothing if Security Groups are not enabled if not self.sg_enabled: return arista_ports = db_lib.get_ports() neutron_sgs = self._ndb.get_security_groups() sg_bindings = self._ndb.get_all_security_gp_to_port_bindings() sgs = [] sgs_dict = {} arista_port_ids = arista_ports.keys() # Get the list of Security Groups of interest to us for s in sg_bindings: if s['port_id'] in arista_port_ids: if not s['security_group_id'] in sgs: sgs_dict[s['port_id']] = ( {'security_group_id': s['security_group_id']}) sgs.append(s['security_group_id']) # Create the ACLs on Arista Switches for idx in range(len(sgs)): self.create_acl(neutron_sgs[sgs[idx]]) # Get Baremetal port profiles, if any bm_port_profiles = db_lib.get_all_baremetal_ports() if bm_port_profiles: for bm in bm_port_profiles.values(): if bm['port_id'] in sgs_dict: sg = sgs_dict[bm['port_id']]['security_group_id'] profile = json.loads(bm['profile']) link_info = profile['local_link_information'] for l in link_info: if not l: # skip all empty entries continue self.apply_acl([sg], l['switch_id'], l['port_id'], l['switch_info']) networking-arista-2017.2.2/networking_arista/ml2/arista_sync.py000066400000000000000000000330001323242307100245230ustar00rootroot00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import threading from neutron_lib import worker from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from networking_arista._i18n import _LI from networking_arista.common import constants from networking_arista.common import db_lib from networking_arista.common import exceptions as arista_exc LOG = logging.getLogger(__name__) class AristaSyncWorker(worker.BaseWorker): def __init__(self, rpc, ndb): super(AristaSyncWorker, self).__init__(worker_process_count=0) self.ndb = ndb self.rpc = rpc self.sync_service = SyncService(rpc, ndb) rpc.sync_service = self.sync_service self._loop = None def start(self): super(AristaSyncWorker, self).start() self._sync_running = True self._sync_event = threading.Event() self._cleanup_db() # Registering with EOS updates self.rpc.region_updated_time. Clear it # to force an initial sync self.rpc.clear_region_updated_time() if self._loop is None: self._loop = loopingcall.FixedIntervalLoopingCall( self.sync_service.do_synchronize ) self._loop.start(interval=cfg.CONF.ml2_arista.sync_interval) def stop(self, graceful=False): if self._loop is not None: self._loop.stop() def wait(self): if self._loop is not None: self._loop.wait() def reset(self): self.stop() self.wait() self.start() def _cleanup_db(self): """Clean up any unnecessary entries in our DB.""" LOG.info('Arista Sync: DB Cleanup') neutron_nets = self.ndb.get_all_networks() arista_db_nets = db_lib.get_networks(tenant_id='any') neutron_net_ids = set() for net in neutron_nets: neutron_net_ids.add(net['id']) # Remove networks from the Arista DB if the network does not exist in # Neutron DB for net_id in set(arista_db_nets.keys()).difference(neutron_net_ids): tenant_network = arista_db_nets[net_id] db_lib.forget_network_segment(tenant_network['tenantId'], net_id) db_lib.forget_all_ports_for_network(net_id) class SyncService(object): """Synchronization of information between Neutron and EOS Periodically (through configuration option), this service ensures that Networks and VMs configured on EOS/Arista HW are always in sync with Neutron DB. """ def __init__(self, rpc_wrapper, neutron_db): self._rpc = rpc_wrapper self._ndb = neutron_db self._force_sync = True self._region_updated_time = None def force_sync(self): """Sets the force_sync flag.""" self._force_sync = True def do_synchronize(self): """Periodically check whether EOS is in sync with ML2 driver. If ML2 database is not in sync with EOS, then compute the diff and send it down to EOS. """ # Perform sync of Security Groups unconditionally try: self._rpc.perform_sync_of_sg() except Exception as e: LOG.warning(e) # Check whether CVX is available before starting the sync. if not self._rpc.check_cvx_availability(): LOG.warning("Not syncing as CVX is unreachable") self.force_sync() return if not self._sync_required(): return LOG.info('Attempting to sync') # Send 'sync start' marker. if not self._rpc.sync_start(): LOG.info(_LI('Not starting sync, setting force')) self._force_sync = True return # Perform the actual synchronization. self.synchronize() # Send 'sync end' marker. if not self._rpc.sync_end(): LOG.info(_LI('Sync end failed, setting force')) self._force_sync = True return self._set_region_updated_time() def synchronize(self): """Sends data to EOS which differs from neutron DB.""" LOG.info(_LI('Syncing Neutron <-> EOS')) try: # Register with EOS to ensure that it has correct credentials self._rpc.register_with_eos(sync=True) self._rpc.check_supported_features() eos_tenants = self._rpc.get_tenants() except arista_exc.AristaRpcError: LOG.warning(constants.EOS_UNREACHABLE_MSG) self._force_sync = True return db_tenants = db_lib.get_tenants() # Delete tenants that are in EOS, but not in the database tenants_to_delete = frozenset(eos_tenants.keys()).difference( db_tenants.keys()) if tenants_to_delete: try: self._rpc.delete_tenant_bulk(tenants_to_delete, sync=True) except arista_exc.AristaRpcError: LOG.warning(constants.EOS_UNREACHABLE_MSG) self._force_sync = True return # None of the commands have failed till now. But if subsequent # operations fail, then force_sync is set to true self._force_sync = False # Create a dict of networks keyed by id. neutron_nets = dict( (network['id'], network) for network in self._ndb.get_all_networks() ) # Get Baremetal port switch_bindings, if any port_profiles = db_lib.get_all_portbindings() # To support shared networks, split the sync loop in two parts: # In first loop, delete unwanted VM and networks and update networks # In second loop, update VMs. This is done to ensure that networks for # all tenats are updated before VMs are updated instances_to_update = {} for tenant in db_tenants.keys(): db_nets = db_lib.get_networks(tenant) db_instances = db_lib.get_vms(tenant) eos_nets = self._get_eos_networks(eos_tenants, tenant) eos_vms, eos_bms, eos_routers = self._get_eos_vms(eos_tenants, tenant) db_nets_key_set = frozenset(db_nets.keys()) db_instances_key_set = frozenset(db_instances.keys()) eos_nets_key_set = frozenset(eos_nets.keys()) eos_vms_key_set = frozenset(eos_vms.keys()) eos_routers_key_set = frozenset(eos_routers.keys()) eos_bms_key_set = frozenset(eos_bms.keys()) # Create a candidate list by incorporating all instances eos_instances_key_set = (eos_vms_key_set | eos_routers_key_set | eos_bms_key_set) # Find the networks that are present on EOS, but not in Neutron DB nets_to_delete = eos_nets_key_set.difference(db_nets_key_set) # Find the VMs that are present on EOS, but not in Neutron DB instances_to_delete = eos_instances_key_set.difference( db_instances_key_set) vms_to_delete = [ vm for vm in eos_vms_key_set if vm in instances_to_delete] routers_to_delete = [ r for r in eos_routers_key_set if r in instances_to_delete] bms_to_delete = [ b for b in eos_bms_key_set if b in instances_to_delete] # Find the Networks that are present in Neutron DB, but not on EOS nets_to_update = db_nets_key_set.difference(eos_nets_key_set) # Find the VMs that are present in Neutron DB, but not on EOS instances_to_update[tenant] = db_instances_key_set.difference( eos_instances_key_set) try: if vms_to_delete: self._rpc.delete_vm_bulk(tenant, vms_to_delete, sync=True) if routers_to_delete: if self._rpc.bm_and_dvr_supported(): self._rpc.delete_instance_bulk( tenant, routers_to_delete, constants.InstanceType.ROUTER, sync=True) else: LOG.info(constants.ERR_DVR_NOT_SUPPORTED) if bms_to_delete: if self._rpc.bm_and_dvr_supported(): self._rpc.delete_instance_bulk( tenant, bms_to_delete, constants.InstanceType.BAREMETAL, sync=True) else: LOG.info(constants.BAREMETAL_NOT_SUPPORTED) if nets_to_delete: self._rpc.delete_network_bulk(tenant, nets_to_delete, sync=True) if nets_to_update: networks = [{ 'network_id': net_id, 'network_name': neutron_nets.get(net_id, {'name': ''})['name'], 'shared': neutron_nets.get(net_id, {'shared': False})['shared'], 'segments': self._ndb.get_all_network_segments(net_id), } for net_id in nets_to_update ] self._rpc.create_network_bulk(tenant, networks, sync=True) except arista_exc.AristaRpcError: LOG.warning(constants.EOS_UNREACHABLE_MSG) self._force_sync = True ports_of_interest = {} for port in self._ndb.get_all_ports(): ports_of_interest.update( self._port_dict_representation(port)) # Now update the VMs for tenant in instances_to_update: if not instances_to_update[tenant]: continue try: db_vms = db_lib.get_vms(tenant) if db_vms: self._rpc.create_instance_bulk(tenant, ports_of_interest, db_vms, port_profiles, sync=True) except arista_exc.AristaRpcError: LOG.warning(constants.EOS_UNREACHABLE_MSG) self._force_sync = True def _region_in_sync(self): """Checks if the region is in sync with EOS. Checks whether the timestamp stored in EOS is the same as the timestamp stored locally. """ eos_region_updated_times = self._rpc.get_region_updated_time() if eos_region_updated_times: return (self._region_updated_time and (self._region_updated_time['regionTimestamp'] == eos_region_updated_times['regionTimestamp'])) else: return False def _sync_required(self): """"Check whether the sync is required.""" try: # Get the time at which entities in the region were updated. # If the times match, then ML2 is in sync with EOS. Otherwise # perform a complete sync. if not self._force_sync and self._region_in_sync(): LOG.info(_LI('OpenStack and EOS are in sync!')) return False except arista_exc.AristaRpcError: LOG.warning(constants.EOS_UNREACHABLE_MSG) # Force an update incase of an error. self._force_sync = True return True def _set_region_updated_time(self): """Get the region updated time from EOS and store it locally.""" try: self._region_updated_time = self._rpc.get_region_updated_time() except arista_exc.AristaRpcError: # Force an update incase of an error. self._force_sync = True def _get_eos_networks(self, eos_tenants, tenant): networks = {} if eos_tenants and tenant in eos_tenants: networks = eos_tenants[tenant]['tenantNetworks'] return networks def _get_eos_vms(self, eos_tenants, tenant): vms = {} bms = {} routers = {} if eos_tenants and tenant in eos_tenants: vms = eos_tenants[tenant]['tenantVmInstances'] if 'tenantBaremetalInstances' in eos_tenants[tenant]: # Check if baremetal service is supported bms = eos_tenants[tenant]['tenantBaremetalInstances'] if 'tenantRouterInstances' in eos_tenants[tenant]: routers = eos_tenants[tenant]['tenantRouterInstances'] return vms, bms, routers def _port_dict_representation(self, port): return {port['id']: {'device_owner': port['device_owner'], 'device_id': port['device_id'], 'name': port['name'], 'id': port['id'], 'tenant_id': port['tenant_id'], 'network_id': port['network_id']}} networking-arista-2017.2.2/networking_arista/ml2/drivers/000077500000000000000000000000001323242307100233145ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/ml2/drivers/__init__.py000066400000000000000000000000001323242307100254130ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/ml2/drivers/driver_helpers.py000066400000000000000000000122051323242307100267030ustar00rootroot00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.db import api as db_api from oslo_log import log from six import moves from neutron.db.models.plugins.ml2 import vlanallocation from networking_arista._i18n import _LI from networking_arista.common.constants import EOS_UNREACHABLE_MSG from networking_arista.common import exceptions as arista_exc LOG = log.getLogger(__name__) class VlanSyncService(object): """Sync vlan assignment from CVX into the OpenStack db.""" def __init__(self, rpc_wrapper): self._rpc = rpc_wrapper self._force_sync = True self._vlan_assignment_uuid = None self._assigned_vlans = dict() def force_sync(self): self._force_sync = True def _parse_vlan_ranges(self, vlan_pool, return_as_ranges=False): vlan_ids = set() if return_as_ranges: vlan_ids = list() if not vlan_pool: return vlan_ids vlan_ranges = vlan_pool.split(',') for vlan_range in vlan_ranges: endpoints = vlan_range.split('-') if len(endpoints) == 2: vlan_min = int(endpoints[0]) vlan_max = int(endpoints[1]) if return_as_ranges: vlan_ids.append((vlan_min, vlan_max)) else: vlan_ids |= set(moves.range(vlan_min, vlan_max + 1)) elif len(endpoints) == 1: single_vlan = int(endpoints[0]) if return_as_ranges: vlan_ids.append((single_vlan, single_vlan)) else: vlan_ids.add(single_vlan) return vlan_ids def get_network_vlan_ranges(self): return self._assigned_vlans def _sync_required(self): try: if not self._force_sync and self._region_in_sync(): LOG.info(_LI('VLANs are in sync!')) return False except arista_exc.AristaRpcError: LOG.warning(EOS_UNREACHABLE_MSG) self._force_sync = True return True def _region_in_sync(self): eos_vlan_assignment_uuid = self._rpc.get_vlan_assignment_uuid() return (self._vlan_assignment_uuid and (self._vlan_assignment_uuid['uuid'] == eos_vlan_assignment_uuid['uuid'])) def _set_vlan_assignment_uuid(self): try: self._vlan_assignment_uuid = self._rpc.get_vlan_assignment_uuid() except arista_exc.AristaRpcError: self._force_sync = True def do_synchronize(self): if not self._sync_required(): return self.synchronize() self._set_vlan_assignment_uuid() def synchronize(self): LOG.info(_LI('Syncing VLANs with EOS')) try: self._rpc.register_with_eos() vlan_pool = self._rpc.get_vlan_allocation() except arista_exc.AristaRpcError: LOG.warning(EOS_UNREACHABLE_MSG) self._force_sync = True return self._assigned_vlans = { 'default': self._parse_vlan_ranges(vlan_pool['assignedVlans'], return_as_ranges=True), } assigned_vlans = ( self._parse_vlan_ranges(vlan_pool['assignedVlans'])) available_vlans = frozenset( self._parse_vlan_ranges(vlan_pool['availableVlans'])) used_vlans = frozenset( self._parse_vlan_ranges(vlan_pool['allocatedVlans'])) self._force_sync = False session = db_api.get_writer_session() with session.begin(subtransactions=True): allocs = ( session.query(vlanallocation.VlanAllocation).with_lockmode( 'update')) for alloc in allocs: if alloc.physical_network != 'default': session.delete(alloc) try: assigned_vlans.remove(alloc.vlan_id) except KeyError: session.delete(alloc) continue if alloc.allocated and alloc.vlan_id in available_vlans: alloc.update({"allocated": False}) elif not alloc.allocated and alloc.vlan_id in used_vlans: alloc.update({"allocated": True}) for vlan_id in sorted(assigned_vlans): allocated = vlan_id in used_vlans alloc = vlanallocation.VlanAllocation( physical_network='default', vlan_id=vlan_id, allocated=allocated) session.add(alloc) networking-arista-2017.2.2/networking_arista/ml2/drivers/type_arista_vlan.py000066400000000000000000000054561323242307100272440ustar00rootroot00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import threading from oslo_config import cfg from oslo_log import log from neutron.plugins.ml2.drivers import type_vlan from networking_arista._i18n import _LI from networking_arista.common import db_lib from networking_arista.common import exceptions as exc from networking_arista.ml2.drivers import driver_helpers from networking_arista.ml2.rpc.arista_eapi import AristaRPCWrapperEapi LOG = log.getLogger(__name__) cfg.CONF.import_group('arista_type_driver', 'networking_arista.common.config') class AristaVlanTypeDriver(type_vlan.VlanTypeDriver): """Manage state for VLAN networks with ML2. The VlanTypeDriver implements the 'vlan' network_type. VLAN network segments provide connectivity between VMs and other devices using any connected IEEE 802.1Q conformant physical_network segmented into virtual networks via IEEE 802.1Q headers. Up to 4094 VLAN network segments can exist on each available physical_network. """ def __init__(self): super(AristaVlanTypeDriver, self).__init__() ndb = db_lib.NeutronNets() self.rpc = AristaRPCWrapperEapi(ndb) self.sync_service = driver_helpers.VlanSyncService(self.rpc) self.network_vlan_ranges = dict() self.sync_timeout = cfg.CONF.arista_type_driver['sync_interval'] def initialize(self): self.rpc.check_supported_features() self.rpc.check_vlan_type_driver_commands() self._synchronization_thread() LOG.info(_LI("AristaVlanTypeDriver initialization complete")) def _synchronization_thread(self): self.sync_service.do_synchronize() self.network_vlan_ranges = self.sync_service.get_network_vlan_ranges() self.timer = threading.Timer(self.sync_timeout, self._synchronization_thread) self.timer.start() def allocate_fully_specified_segment(self, session, **raw_segment): alloc = session.query(self.model).filter_by(**raw_segment).first() if not alloc: raise exc.VlanUnavailable(**raw_segment) return super(AristaVlanTypeDriver, self).allocate_fully_specified_segment( session, **raw_segment) networking-arista-2017.2.2/networking_arista/ml2/mechanism_arista.py000066400000000000000000001346711323242307100255330ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json import threading from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib import constants as n_const from neutron_lib.plugins.ml2 import api as driver_api from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron.services.trunk import constants as trunk_consts from networking_arista._i18n import _, _LI, _LE from networking_arista.common import constants from networking_arista.common import db from networking_arista.common import db_lib from networking_arista.common import exceptions as arista_exc from networking_arista.ml2 import arista_sync from networking_arista.ml2.rpc.arista_eapi import AristaRPCWrapperEapi from networking_arista.ml2.rpc.arista_json import AristaRPCWrapperJSON from networking_arista.ml2 import sec_group_callback LOG = logging.getLogger(__name__) cfg.CONF.import_group('ml2_arista', 'networking_arista.common.config') def pretty_log(tag, obj): log_data = json.dumps(obj, sort_keys=True, indent=4) LOG.debug(tag) LOG.debug(log_data) class AristaDriver(driver_api.MechanismDriver): """Ml2 Mechanism driver for Arista networking hardware. Remembers all networks and VMs that are provisioned on Arista Hardware. Does not send network provisioning request if the network has already been provisioned before for the given port. """ def __init__(self, rpc=None): self.ndb = db_lib.NeutronNets() self.db_nets = db.AristaProvisionedNets() self.db_vms = db.AristaProvisionedVms() self.db_tenants = db.AristaProvisionedTenants() confg = cfg.CONF.ml2_arista self.segmentation_type = db_lib.VLAN_SEGMENTATION self.timer = None self.managed_physnets = confg['managed_physnets'] self.manage_fabric = confg['manage_fabric'] self.eos_sync_lock = threading.Lock() self.eapi = None if rpc is not None: LOG.info("Using passed in parameter for RPC") self.rpc = rpc self.eapi = rpc else: self.eapi = AristaRPCWrapperEapi(self.ndb) api_type = confg['api_type'].upper() if api_type == 'EAPI': LOG.info("Using EAPI for RPC") self.rpc = AristaRPCWrapperEapi(self.ndb) elif api_type == 'JSON': LOG.info("Using JSON for RPC") self.rpc = AristaRPCWrapperJSON(self.ndb) else: msg = "RPC mechanism %s not recognized" % api_type LOG.error(msg) raise arista_exc.AristaRpcError(msg=msg) def initialize(self): if self.rpc.check_cvx_availability(): self.rpc.register_with_eos() self.rpc.check_supported_features() self.sg_handler = sec_group_callback.AristaSecurityGroupHandler(self) registry.subscribe(self.set_subport, trunk_consts.SUBPORTS, events.AFTER_CREATE) registry.subscribe(self.unset_subport, trunk_consts.SUBPORTS, events.AFTER_DELETE) def get_workers(self): return [arista_sync.AristaSyncWorker(self.rpc, self.ndb)] def create_network_precommit(self, context): """Remember the tenant, and network information.""" network = context.current segments = context.network_segments if not self.rpc.hpb_supported(): # Hierarchical port binding is not supported by CVX, only # allow VLAN network type. if(segments and segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN): return network_id = network['id'] tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID with self.eos_sync_lock: db_lib.remember_tenant(tenant_id) for segment in segments: db_lib.remember_network_segment(tenant_id, network_id, segment.get('segmentation_id'), segment.get('id')) def create_network_postcommit(self, context): """Provision the network on the Arista Hardware.""" network = context.current network_id = network['id'] network_name = network['name'] tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID segments = context.network_segments shared_net = network['shared'] with self.eos_sync_lock: if db_lib.is_network_provisioned(tenant_id, network_id): try: network_dict = { 'network_id': network_id, 'segments': segments, 'network_name': network_name, 'shared': shared_net} self.rpc.create_network(tenant_id, network_dict) except arista_exc.AristaRpcError as err: LOG.error(_LE("create_network_postcommit: Did not create " "network %(name)s. Reason: %(err)s"), {'name': network_name, 'err': err}) else: LOG.info(_LI('Network %s is not created as it is not found in ' 'Arista DB'), network_id) def update_network_precommit(self, context): """At the moment we only support network name change Any other change in network is not supported at this time. We do not store the network names, therefore, no DB store action is performed here. """ new_network = context.current orig_network = context.original if new_network['name'] != orig_network['name']: LOG.info(_LI('Network name changed to %s'), new_network['name']) def update_network_postcommit(self, context): """At the moment we only support network name change If network name is changed, a new network create request is sent to the Arista Hardware. """ new_network = context.current orig_network = context.original if ((new_network['name'] != orig_network['name']) or (new_network['shared'] != orig_network['shared'])): network_id = new_network['id'] network_name = new_network['name'] tenant_id = (new_network['tenant_id'] or constants.INTERNAL_TENANT_ID) shared_net = new_network['shared'] with self.eos_sync_lock: if db_lib.is_network_provisioned(tenant_id, network_id): try: network_dict = { 'network_id': network_id, 'segments': context.network_segments, 'network_name': network_name, 'shared': shared_net} self.rpc.create_network(tenant_id, network_dict) except arista_exc.AristaRpcError as err: LOG.error(_LE('update_network_postcommit: Did not ' 'update network %(name)s. ' 'Reason: %(err)s'), {'name': network_name, 'err': err}) else: LOG.info(_LI('Network %s is not updated as it is not found' ' in Arista DB'), network_id) def delete_network_precommit(self, context): """Delete the network information from the DB.""" network = context.current network_id = network['id'] tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID with self.eos_sync_lock: if db_lib.is_network_provisioned(tenant_id, network_id): if db_lib.are_ports_attached_to_network(network_id): db_lib.forget_all_ports_for_network(network_id) LOG.info(_LI('Deleting all ports on network %s'), network_id) db_lib.forget_network_segment(tenant_id, network_id) def delete_network_postcommit(self, context): """Send network delete request to Arista HW.""" network = context.current segments = context.network_segments if not self.rpc.hpb_supported(): # Hierarchical port binding is not supported by CVX, only # send the request if network type is VLAN. if (segments and segments[0][driver_api.NETWORK_TYPE] != n_const.TYPE_VLAN): # If network type is not VLAN, do nothing return # No need to pass segments info when calling delete_network as # HPB is not supported. segments = [] network_id = network['id'] tenant_id = network['tenant_id'] or constants.INTERNAL_TENANT_ID with self.eos_sync_lock: # Succeed deleting network in case EOS is not accessible. # EOS state will be updated by sync thread once EOS gets # alive. try: self.rpc.delete_network(tenant_id, network_id, segments) # if necessary, delete tenant as well. self.delete_tenant(tenant_id) except arista_exc.AristaRpcError as err: LOG.error(_LE('delete_network_postcommit: Did not delete ' 'network %(network_id)s. Reason: %(err)s'), {'network_id': network_id, 'err': err}) def create_port_precommit(self, context): """Remember the information about a VM and its ports A VM information, along with the physical host information is saved. """ # Returning from here, since the update_port_precommit is performing # same operation, and also need of port binding information to decide # whether to react to a port create event which is not available when # this method is called. return def _get_physnet_from_link_info(self, port, physnet_info): binding_profile = port.get(portbindings.PROFILE) if not binding_profile: return link_info = binding_profile.get('local_link_information') if not link_info: return mac_to_hostname = physnet_info.get('mac_to_hostname', {}) for link in link_info: if link.get('switch_id') in mac_to_hostname: physnet = mac_to_hostname.get(link.get('switch_id')) return self.rpc.mlag_pairs.get(physnet, physnet) def _bind_port_to_baremetal(self, context, segment): port = context.current vnic_type = port.get('binding:vnic_type') if vnic_type != portbindings.VNIC_BAREMETAL: # We are only interested in binding baremetal ports. return binding_profile = port.get(portbindings.PROFILE) if not binding_profile: return link_info = binding_profile.get('local_link_information') if not link_info: return vif_details = { portbindings.VIF_DETAILS_VLAN: str( segment[driver_api.SEGMENTATION_ID]) } context.set_binding(segment[driver_api.ID], portbindings.VIF_TYPE_OTHER, vif_details, n_const.ACTIVE) LOG.debug("AristaDriver: bound port info- port ID %(id)s " "on network %(network)s", {'id': port['id'], 'network': context.network.current['id']}) def bind_port(self, context): """Bind port to a network segment. Provisioning request to Arista Hardware to plug a host into appropriate network is done when the port is created this simply tells the ML2 Plugin that we are binding the port """ host_id = context.host port = context.current physnet_info = {} for segment in context.segments_to_bind: physnet = segment.get(driver_api.PHYSICAL_NETWORK) if not self._is_in_managed_physnets(physnet): LOG.debug("bind_port for port %(port)s: physical_network " "%(physnet)s is not managed by Arista " "mechanism driver", {'port': port.get('id'), 'physnet': physnet}) continue # If physnet is not set, we need to look it up using hostname # and topology info if not physnet: if not physnet_info: # We only need to get physnet_info once physnet_info = self.eapi.get_physical_network(host_id) if (port.get('binding:vnic_type') == portbindings.VNIC_BAREMETAL): # Find physnet using link_information in baremetal case physnet = self._get_physnet_from_link_info(port, physnet_info) else: physnet = physnet_info.get('physnet') # If physnet was not found, we cannot bind this port if not physnet: LOG.debug("bind_port for port %(port)s: no physical_network " "found", {'port': port.get('id')}) continue if segment[driver_api.NETWORK_TYPE] == n_const.TYPE_VXLAN: # Check if CVX supports HPB if not self.rpc.hpb_supported(): LOG.debug("bind_port: HPB is not supported") return # The physical network is connected to arista switches, # allocate dynamic segmentation id to bind the port to # the network that the port belongs to. try: next_segment = context.allocate_dynamic_segment( {'id': context.network.current['id'], 'network_type': n_const.TYPE_VLAN, 'physical_network': physnet}) except Exception as exc: LOG.error(_LE("bind_port for port %(port)s: Failed to " "allocate dynamic segment for physnet " "%(physnet)s. %(exc)s"), {'port': port.get('id'), 'physnet': physnet, 'exc': exc}) return LOG.debug("bind_port for port %(port)s: " "current_segment=%(current_seg)s, " "next_segment=%(next_seg)s", {'port': port.get('id'), 'current_seg': segment, 'next_seg': next_segment}) context.continue_binding(segment['id'], [next_segment]) elif port.get('binding:vnic_type') == portbindings.VNIC_BAREMETAL: # The network_type is vlan, try binding process for baremetal. self._bind_port_to_baremetal(context, segment) def create_port_postcommit(self, context): """Plug a physical host into a network. Send provisioning request to Arista Hardware to plug a host into appropriate network. """ # Returning from here, since the update_port_postcommit is performing # same operation, and also need of port binding information to decide # whether to react to a port create event which is not available when # this method is called. return def _supported_device_owner(self, device_owner): supported_device_owner = [n_const.DEVICE_OWNER_DHCP, n_const.DEVICE_OWNER_DVR_INTERFACE] if any([device_owner in supported_device_owner, device_owner.startswith('compute') and device_owner != 'compute:probe', device_owner.startswith('baremetal'), device_owner.startswith('trunk')]): return True LOG.debug('Unsupported device owner: %s', device_owner) def _network_owner_tenant(self, context, network_id, tenant_id): tid = tenant_id if network_id and tenant_id: context = context._plugin_context network_owner = self.ndb.get_network_from_net_id(network_id, context=context) if network_owner and network_owner[0]['tenant_id'] != tenant_id: tid = network_owner[0]['tenant_id'] or tenant_id return tid def _is_in_managed_physnets(self, physnet): # Check if this is a fabric segment if not physnet: return self.manage_fabric # If managed physnet is empty, accept all. if not self.managed_physnets: return True # managed physnet is not empty, find for matching physnet return any(pn == physnet for pn in self.managed_physnets) def _bound_segments(self, context): """Check if a given port is managed by the mechanism driver. It returns bound segment dictionary, if physical network in the bound segment is included in the managed physical network list. """ if not self.managed_physnets: return [ binding_level.get(driver_api.BOUND_SEGMENT) for binding_level in (context.binding_levels or []) ] bound_segments = [] for binding_level in (context.binding_levels or []): bound_segment = binding_level.get(driver_api.BOUND_SEGMENT) if (bound_segment and self._is_in_managed_physnets( bound_segment.get(driver_api.PHYSICAL_NETWORK))): bound_segments.append(bound_segment) return bound_segments def _handle_port_migration_precommit(self, context): """Handles port migration in precommit It updates the port's new host in the DB """ orig_port = context.original orig_host = context.original_host new_host = context.host new_port = context.current port_id = orig_port['id'] if new_host and orig_host and new_host != orig_host: LOG.debug("Handling port migration for: %s " % orig_port) network_id = orig_port['network_id'] tenant_id = orig_port['tenant_id'] or constants.INTERNAL_TENANT_ID # Ensure that we use tenant Id for the network owner tenant_id = self._network_owner_tenant(context, network_id, tenant_id) device_id = new_port['device_id'] with self.eos_sync_lock: port_provisioned = db_lib.is_port_provisioned(port_id, orig_host) if port_provisioned: db_lib.update_port(device_id, new_host, port_id, network_id, tenant_id) return True def _handle_port_migration_postcommit(self, context): """Handles port migration in postcommit In case of port migration, it removes the port from the original host and also it release the segment id if no port is attached to the same segment id that the port is attached to. """ orig_port = context.original orig_host = context.original_host new_host = context.host if new_host and orig_host and new_host != orig_host: self._try_to_release_dynamic_segment(context, migration=True) # Handling migration case. # 1. The port should be unplugged from network # 2. If segment_id is provisioned and it not bound to any port it # should be removed from EOS. network_id = orig_port['network_id'] tenant_id = orig_port['tenant_id'] or constants.INTERNAL_TENANT_ID # Ensure that we use tenant Id for the network owner tenant_id = self._network_owner_tenant(context, network_id, tenant_id) for binding_level in context._original_binding_levels or []: if self._network_provisioned( tenant_id, network_id, segment_id=binding_level.segment_id): with self.eos_sync_lock: # Removing the port form original host self._delete_port(orig_port, orig_host, tenant_id) # If segment id is not bound to any port, then # remove it from EOS segment = self.ndb.get_segment_by_id( context._plugin_context, binding_level.segment_id) if not segment: try: segment_info = [{ 'id': binding_level.segment_id, 'network_id': network_id, }] LOG.debug("migration_postcommit:" "deleting segment %s", segment_info) self.rpc.delete_network_segments(tenant_id, segment_info) # Remove the segment from the provisioned # network DB. db_lib.forget_network_segment( tenant_id, network_id, binding_level.segment_id) except arista_exc.AristaRpcError: LOG.info(constants.EOS_UNREACHABLE_MSG) return True def update_port_precommit(self, context): """Update the name of a given port. At the moment we only support port name change. Any other change to port is not supported at this time. We do not store the port names, therefore, no DB store action is performed here. """ new_port = context.current orig_port = context.original if new_port['name'] != orig_port['name']: LOG.info(_LI('Port name changed to %s'), new_port['name']) device_id = new_port['device_id'] host = context.host pretty_log("update_port_precommit: new", new_port) pretty_log("update_port_precommit: orig", orig_port) if not self._supported_device_owner(new_port['device_owner']): return # Check if it is port migration case if self._handle_port_migration_precommit(context): return # Check if the port is part of managed physical network seg_info = self._bound_segments(context) if not seg_info: # Ignoring the update as the port is not managed by # arista mechanism driver. return # device_id and device_owner are set on VM boot port_id = new_port['id'] network_id = new_port['network_id'] tenant_id = new_port['tenant_id'] or constants.INTERNAL_TENANT_ID # Ensure that we use tenant Id for the network owner tenant_id = self._network_owner_tenant(context, network_id, tenant_id) for seg in seg_info: if not self._network_provisioned(tenant_id, network_id, seg[driver_api.SEGMENTATION_ID], seg[driver_api.ID]): LOG.info( _LI("Adding %s to provisioned network database"), seg) with self.eos_sync_lock: db_lib.remember_tenant(tenant_id) db_lib.remember_network_segment( tenant_id, network_id, seg[driver_api.SEGMENTATION_ID], seg[driver_api.ID]) with self.eos_sync_lock: port_down = False if(new_port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE): # We care about port status only for DVR ports because # for DVR, a single port exists on multiple hosts. If a port # is no longer needed on a host then the driver gets a # port_update notification for that with the # port status as PORT_STATUS_DOWN. port_down = context.status == n_const.PORT_STATUS_DOWN if host and not port_down: port_host_filter = None if(new_port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE): # uniquely identifies a DVR port. Other # ports are identified by just the port id port_host_filter = host port_provisioned = db_lib.is_port_provisioned( port_id, port_host_filter) if not port_provisioned: LOG.info("Remembering the port") # Create a new port in the DB db_lib.remember_tenant(tenant_id) db_lib.remember_vm(device_id, host, port_id, network_id, tenant_id) else: if(new_port['device_id'] != orig_port['device_id'] or context.host != context.original_host or new_port['network_id'] != orig_port['network_id'] or new_port['tenant_id'] != orig_port['tenant_id']): LOG.info("Updating the port") # Port exists in the DB. Update it db_lib.update_port(device_id, host, port_id, network_id, tenant_id) else: # Unbound or down port does not concern us orig_host = context.original_host LOG.info("Forgetting the port on %s" % str(orig_host)) db_lib.forget_port(port_id, orig_host) def _port_updated(self, context): """Returns true if any port parameters have changed.""" new_port = context.current orig_port = context.original return (new_port['device_id'] != orig_port['device_id'] or context.host != context.original_host or new_port['network_id'] != orig_port['network_id'] or new_port['tenant_id'] != orig_port['tenant_id']) def update_port_postcommit(self, context): """Update the name of a given port in EOS. At the moment we only support port name change Any other change to port is not supported at this time. """ port = context.current orig_port = context.original device_id = port['device_id'] device_owner = port['device_owner'] host = context.host is_vm_boot = device_id and device_owner # When delete a vm, the trunk port context has no device_owner # Keep device_owner as in original port if not device_owner and orig_port.get('trunk_details'): device_owner = orig_port['device_owner'] if not self._supported_device_owner(device_owner): return vnic_type = port['binding:vnic_type'] binding_profile = port['binding:profile'] bindings = [] if binding_profile: bindings = binding_profile.get('local_link_information', []) port_id = port['id'] port_name = port['name'] network_id = port['network_id'] tenant_id = port['tenant_id'] or constants.INTERNAL_TENANT_ID # Ensure that we use tenant Id for the network owner tenant_id = self._network_owner_tenant(context, network_id, tenant_id) sg = port['security_groups'] orig_sg = orig_port['security_groups'] pretty_log("update_port_postcommit: new", port) pretty_log("update_port_postcommit: orig", orig_port) # Check if it is port migration case if self._handle_port_migration_postcommit(context): # Return from here as port migration is already handled. return # Check if it is trunk_port deletion case seg_info = [] if not port.get('trunk_details') or host: seg_info = self._bound_segments(context) if not seg_info: LOG.debug("Ignoring the update as the port is not managed by " "Arista switches.") return with self.eos_sync_lock: hostname = self._host_name(host) port_host_filter = None if(port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE): # uniquely identifies a DVR port. Other # ports are identified by just the port id port_host_filter = host port_provisioned = db_lib.is_port_provisioned(port_id, port_host_filter) # If network does not exist under this tenant, # it may be a shared network. Get shared network owner Id net_provisioned = self._network_provisioned( tenant_id, network_id) for seg in seg_info: if not self._network_provisioned( tenant_id, network_id, segmentation_id=seg[driver_api.SEGMENTATION_ID]): net_provisioned = False segments = [] if net_provisioned and self.rpc.hpb_supported(): segments = seg_info all_segments = self.ndb.get_all_network_segments( network_id, context=context._plugin_context) try: self.rpc.create_network_segments( tenant_id, network_id, context.network.current['name'], all_segments) except arista_exc.AristaRpcError: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to create network segments")) try: orig_host = context.original_host port_down = False if(port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE or port.get('trunk_details')): # We care about port status only for DVR ports and # trunk ports port_down = context.status == n_const.PORT_STATUS_DOWN if orig_host and (port_down or host != orig_host or device_id == n_const.DEVICE_ID_RESERVED_DHCP_PORT): LOG.info("Deleting the port %s" % str(orig_port)) # The port moved to a different host or the VM # connected to the port was deleted or its in DOWN # state. So delete the old port on the old host. self._delete_port(orig_port, orig_host, tenant_id) if(port_provisioned and net_provisioned and hostname and is_vm_boot and not port_down and device_id != n_const.DEVICE_ID_RESERVED_DHCP_PORT): LOG.info(_LI("Port plugged into network")) # Plug port into the network only if it exists in the db # and is bound to a host and the port is up. trunk_details = port.get('trunk_details') self.rpc.plug_port_into_network(device_id, hostname, port_id, network_id, tenant_id, port_name, device_owner, sg, orig_sg, vnic_type, segments=segments, switch_bindings=bindings, trunk_details=trunk_details ) else: LOG.info(_LI("Port not plugged into network")) except arista_exc.AristaRpcError as err: LOG.error(_LE('update_port_postcommit: Did not update ' 'port %(port_id)s. Reason: %(err)s'), {'port_id': port_id, 'err': err}) def delete_port_precommit(self, context): """Delete information about a VM and host from the DB.""" port = context.current pretty_log("delete_port_precommit:", port) port_id = port['id'] host_id = context.host with self.eos_sync_lock: if db_lib.is_port_provisioned(port_id, host_id): db_lib.forget_port(port_id, host_id) def delete_port_postcommit(self, context): """Unplug a physical host from a network. Send provisioning request to Arista Hardware to unplug a host from appropriate network. """ port = context.current host = context.host network_id = port['network_id'] tenant_id = port['tenant_id'] or constants.INTERNAL_TENANT_ID # Ensure that we use tenant Id for the network owner tenant_id = self._network_owner_tenant(context, network_id, tenant_id) pretty_log("delete_port_postcommit:", port) # If this port is the last one using dynamic segmentation id, # and the segmentation id was allocated by this driver, it needs # to be released. self._try_to_release_dynamic_segment(context) with self.eos_sync_lock: try: self._delete_port(port, host, tenant_id) self._delete_segment(context, tenant_id) except arista_exc.AristaRpcError: # Can't do much if deleting a port failed. # Log a warning and continue. LOG.warning(constants.UNABLE_TO_DELETE_PORT_MSG) def _delete_port(self, port, host, tenant_id): """Deletes the port from EOS. param port: Port which is to be deleted param host: The host on which the port existed param tenant_id: The tenant to which the port belongs to. Some times the tenant id in the port dict is not present (as in the case of HA router). """ device_id = port['device_id'] port_id = port['id'] network_id = port['network_id'] device_owner = port['device_owner'] if not self._supported_device_owner(device_owner): return vnic_type = port['binding:vnic_type'] binding_profile = port['binding:profile'] switch_bindings = [] if binding_profile: switch_bindings = binding_profile.get('local_link_information', []) sg = port['security_groups'] if not device_id or not host: LOG.warning(constants.UNABLE_TO_DELETE_DEVICE_MSG) return try: if not self._network_provisioned(tenant_id, network_id): # If we do not have network associated with this, ignore it return hostname = self._host_name(host) trunk_details = port.get('trunk_details') self.rpc.unplug_port_from_network(device_id, device_owner, hostname, port_id, network_id, tenant_id, sg, vnic_type, switch_bindings=switch_bindings, trunk_details=trunk_details) self.rpc.remove_security_group(sg, switch_bindings) # if necessary, delete tenant as well. self.delete_tenant(tenant_id) except arista_exc.AristaRpcError: LOG.info(constants.EOS_UNREACHABLE_MSG) def _delete_segment(self, context, tenant_id): """Deletes a dynamic network segment from EOS. param context: The port context param tenant_id: The tenant which the port belongs to """ if not self.rpc.hpb_supported(): # Returning as HPB not supported by CVX return port = context.current network_id = port.get('network_id') if not context._binding_levels: return for binding_level in context._binding_levels: LOG.debug("deleting segment %s", binding_level.segment_id) if self._network_provisioned(tenant_id, network_id, segment_id=binding_level.segment_id): segment = self.ndb.get_segment_by_id( context._plugin_context, binding_level.segment_id) if not segment: # The segment is already released. Delete it from EOS LOG.debug("Deleting segment %s", binding_level.segment_id) try: segment_info = { 'id': binding_level.segment_id, 'network_id': network_id, } self.rpc.delete_network_segments(tenant_id, [segment_info]) # Remove the segment from the provisioned network DB. db_lib.forget_network_segment( tenant_id, network_id, binding_level.segment_id) except arista_exc.AristaRpcError: LOG.info(constants.EOS_UNREACHABLE_MSG) else: LOG.debug("Cannot delete segment_id %(segid)s " "segment is %(seg)s", {'segid': binding_level.segment_id, 'seg': segment}) def _try_to_release_dynamic_segment(self, context, migration=False): """Release dynamic segment allocated by the driver If this port is the last port using the segmentation id allocated by the driver, it should be released """ host = context.original_host if migration else context.host physnet_info = self.eapi.get_physical_network(host) physnet = physnet_info.get('physnet') if not physnet: return binding_levels = context.binding_levels LOG.debug("_try_release_dynamic_segment: " "binding_levels=%(bl)s", {'bl': binding_levels}) if not binding_levels: return segment_id = None bound_drivers = [] for binding_level in binding_levels: bound_segment = binding_level.get(driver_api.BOUND_SEGMENT) driver = binding_level.get(driver_api.BOUND_DRIVER) bound_drivers.append(driver) if (bound_segment and bound_segment.get('physical_network') == physnet and bound_segment.get('network_type') == n_const.TYPE_VLAN): segment_id = bound_segment.get('id') break # If the segment id is found and it is bound by this driver, and also # the segment id is not bound to any other port, release the segment. # When Arista driver participate in port binding by allocating dynamic # segment and then calling continue_binding, the driver should the # second last driver in the bound drivers list. if (segment_id and bound_drivers[-2:-1] == [constants.MECHANISM_DRV_NAME]): filters = {'segment_id': segment_id} result = db_lib.get_port_binding_level(filters) LOG.debug("Looking for entry with filters=%(filters)s " "result=%(result)s ", {'filters': filters, 'result': result}) if not result: # The requested segment_id does not exist in the port binding # database. Release the dynamic segment. context.release_dynamic_segment(segment_id) LOG.debug("Released dynamic segment %(seg)s allocated " "by %(drv)s", {'seg': segment_id, 'drv': bound_drivers[-2]}) def delete_tenant(self, tenant_id): """delete a tenant from DB. A tenant is deleted only if there is no network or VM configured configured for this tenant. """ objects_for_tenant = (db_lib.num_nets_provisioned(tenant_id) + db_lib.num_vms_provisioned(tenant_id)) if not objects_for_tenant: db_lib.forget_tenant(tenant_id) try: self.rpc.delete_tenant(tenant_id) except arista_exc.AristaRpcError: with excutils.save_and_reraise_exception(): LOG.info(constants.EOS_UNREACHABLE_MSG) def _host_name(self, hostname): fqdns_used = cfg.CONF.ml2_arista['use_fqdn'] return hostname if fqdns_used else hostname.split('.')[0] def _network_provisioned(self, tenant_id, network_id, segmentation_id=None, segment_id=None): # If network does not exist under this tenant, # it may be a shared network. return ( db_lib.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id) or self.ndb.get_shared_network_owner_id(network_id) ) def create_security_group(self, sg): try: self.rpc.create_acl(sg) except Exception: msg = (_('Failed to create ACL on EOS %s') % sg) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def delete_security_group(self, sg): try: self.rpc.delete_acl(sg) except Exception: msg = (_('Failed to create ACL on EOS %s') % sg) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def update_security_group(self, sg): try: self.rpc.create_acl(sg) except Exception: msg = (_('Failed to create ACL on EOS %s') % sg) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def create_security_group_rule(self, sgr): try: self.rpc.create_acl_rule(sgr) except Exception: msg = (_('Failed to create ACL rule on EOS %s') % sgr) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def delete_security_group_rule(self, sgr_id): if sgr_id: sgr = self.ndb.get_security_group_rule(sgr_id) if sgr: try: self.rpc.delete_acl_rule(sgr) except Exception: msg = (_('Failed to delete ACL rule on EOS %s') % sgr) LOG.exception(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def unset_subport(self, resource, event, trigger, **kwargs): payload = kwargs['payload'] trunk_id = payload.trunk_id subports = payload.subports trunk_port = db_lib.get_trunk_port_by_trunk_id(trunk_id) if trunk_port: device_id = trunk_port.device_id tenant_id = trunk_port.tenant_id host = trunk_port.port_binding.host vnic_type = trunk_port.port_binding.vnic_type profile = trunk_port.port_binding.profile if profile: profile = json.loads(profile) for subport in subports: subport_id = subport.port_id subport_current = self.ndb.get_port(subport_id) subport_current['device_id'] = device_id subport_current['binding:vnic_type'] = vnic_type subport_current['binding:profile'] = profile subport_current['device_owner'] = 'trunk:subport' self._delete_port(subport_current, host, tenant_id) else: LOG.warning('Unable to unset the subport, no trunk port found') def set_subport(self, resource, event, trigger, **kwargs): payload = kwargs['payload'] trunk_id = payload.trunk_id subports = payload.subports device_owner = 'trunk:subport' trunk_port = db_lib.get_trunk_port_by_trunk_id(trunk_id) if not trunk_port: return device_id = trunk_port.device_id tenant_id = trunk_port.tenant_id host = trunk_port.port_binding.host if not host: return hostname = self._host_name(host) vnic_type = trunk_port.port_binding.vnic_type profile = trunk_port.port_binding.profile bindings = [] if profile: profile = json.loads(profile) bindings = profile.get('local_link_information', []) for subport in subports: subport_id = subport.port_id subport_current = self.ndb.get_port(subport_id) network_id = self.ndb.get_network_id_from_port_id(subport_id) port_name = subport_current.get('name') sg = subport_current.get('security_groups') orig_sg = None segments = db_lib.get_network_segments_by_port_id(subport_id) self.rpc.plug_port_into_network(device_id, hostname, subport_id, network_id, tenant_id, port_name, device_owner, sg, orig_sg, vnic_type, segments=segments, switch_bindings=bindings) networking-arista-2017.2.2/networking_arista/ml2/rpc/000077500000000000000000000000001323242307100224225ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/ml2/rpc/__init__.py000066400000000000000000000000001323242307100245210ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/ml2/rpc/arista_eapi.py000066400000000000000000001230571323242307100252650ustar00rootroot00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json import socket from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_const from oslo_config import cfg from oslo_log import log as logging import requests import six from networking_arista._i18n import _, _LI, _LW, _LE from networking_arista.common import constants as const from networking_arista.common import db_lib from networking_arista.common import exceptions as arista_exc from networking_arista.ml2.rpc.base import AristaRPCWrapperBase LOG = logging.getLogger(__name__) class AristaRPCWrapperEapi(AristaRPCWrapperBase): def __init__(self, ndb): super(AristaRPCWrapperEapi, self).__init__(ndb) # The cli_commands dict stores the mapping between the CLI command key # and the actual CLI command. self.cli_commands = { 'timestamp': [ 'show openstack config region %s timestamp' % self.region], const.CMD_REGION_SYNC: 'region %s sync' % self.region, const.CMD_INSTANCE: None, const.CMD_SYNC_HEARTBEAT: 'sync heartbeat', 'resource-pool': [], 'features': {}, } def _send_eapi_req(self, cmds, commands_to_log=None): # This method handles all EAPI requests (using the requests library) # and returns either None or response.json()['result'] from the EAPI # request. # # Exceptions related to failures in connecting/ timeouts are caught # here and logged. Other unexpected exceptions are logged and raised request_headers = {} request_headers['Content-Type'] = 'application/json' request_headers['Accept'] = 'application/json' url = self._api_host_url(host=self._server_ip) params = {} params['timestamps'] = "false" params['format'] = "json" params['version'] = 1 params['cmds'] = cmds data = {} data['id'] = "Arista ML2 driver" data['method'] = "runCmds" data['jsonrpc'] = "2.0" data['params'] = params response = None try: # NOTE(pbourke): shallow copy data and params to remove sensitive # information before logging log_data = dict(data) log_data['params'] = dict(params) log_data['params']['cmds'] = commands_to_log or cmds msg = (_('EAPI request to %(ip)s contains %(cmd)s') % {'ip': self._server_ip, 'cmd': json.dumps(log_data)}) LOG.info(msg) response = requests.post(url, timeout=self.conn_timeout, verify=False, data=json.dumps(data)) LOG.info(_LI('EAPI response contains: %s'), response.json()) try: return response.json()['result'] except KeyError: if response.json()['error']['code'] == 1002: for data in response.json()['error']['data']: if type(data) == dict and 'errors' in data: if const.ERR_CVX_NOT_LEADER in data['errors'][0]: msg = six.text_type("%s is not the master" % ( self._server_ip)) LOG.info(msg) return None msg = "Unexpected EAPI error" LOG.info(msg) raise arista_exc.AristaRpcError(msg=msg) except requests.exceptions.ConnectionError: msg = (_('Error while trying to connect to %(ip)s') % {'ip': self._server_ip}) LOG.warning(msg) return None except requests.exceptions.ConnectTimeout: msg = (_('Timed out while trying to connect to %(ip)s') % {'ip': self._server_ip}) LOG.warning(msg) return None except requests.exceptions.Timeout: msg = (_('Timed out during an EAPI request to %(ip)s') % {'ip': self._server_ip}) LOG.warning(msg) return None except requests.exceptions.InvalidURL: msg = (_('Ignore attempt to connect to invalid URL %(ip)s') % {'ip': self._server_ip}) LOG.warning(msg) return None except ValueError: LOG.info("Ignoring invalid JSON response") return None except Exception as error: msg = six.text_type(error) LOG.warning(msg) raise def check_supported_features(self): cmd = ['show openstack instances'] try: self._run_eos_cmds(cmd) self.cli_commands[const.CMD_INSTANCE] = 'instance' except (arista_exc.AristaRpcError, Exception) as err: self.cli_commands[const.CMD_INSTANCE] = None LOG.warning(_LW("'instance' command is not available on EOS " "because of %s"), err) # Get list of supported openstack features by CVX cmd = ['show openstack features'] try: resp = self._run_eos_cmds(cmd) self.cli_commands['features'] = resp[0].get('features', {}) except (Exception, arista_exc.AristaRpcError): self.cli_commands['features'] = {} def check_vlan_type_driver_commands(self): """Checks the validity of CLI commands for Arista's VLAN type driver. This method tries to execute the commands used exclusively by the arista_vlan type driver and stores the commands if they succeed. """ cmd = ['show openstack resource-pool vlan region %s uuid' % self.region] try: self._run_eos_cmds(cmd) self.cli_commands['resource-pool'] = cmd except arista_exc.AristaRpcError: self.cli_commands['resource-pool'] = [] LOG.warning( _LW("'resource-pool' command '%s' is not available on EOS"), cmd) def _heartbeat_required(self, sync, counter=0): return (sync and self.cli_commands[const.CMD_SYNC_HEARTBEAT] and (counter % const.HEARTBEAT_INTERVAL) == 0) def get_vlan_assignment_uuid(self): """Returns the UUID for the region's vlan assignment on CVX :returns: string containing the region's vlan assignment UUID """ vlan_uuid_cmd = self.cli_commands['resource-pool'] if vlan_uuid_cmd: return self._run_eos_cmds(commands=vlan_uuid_cmd)[0] return None def get_vlan_allocation(self): """Returns the status of the region's VLAN pool in CVX :returns: dictionary containg the assigned, allocated and available VLANs for the region """ if not self.cli_commands['resource-pool']: LOG.warning(_('The version of CVX you are using does not support' 'arista VLAN type driver.')) return None cmd = ['show openstack resource-pools region %s' % self.region] command_output = self._run_eos_cmds(cmd) if command_output: regions = command_output[0]['physicalNetwork'] if self.region in regions.keys(): return regions[self.region]['vlanPool']['default'] return {'assignedVlans': '', 'availableVlans': '', 'allocatedVlans': ''} def get_tenants(self): cmds = ['show openstack config region %s' % self.region] command_output = self._run_eos_cmds(cmds) tenants = command_output[0]['tenants'] return tenants def bm_and_dvr_supported(self): return (self.cli_commands[const.CMD_INSTANCE] == 'instance') def _baremetal_support_check(self, vnic_type): # Basic error checking for baremental deployments if (vnic_type == portbindings.VNIC_BAREMETAL and not self.bm_and_dvr_supported()): msg = _("Baremetal instances are not supported in this" " release of EOS") LOG.error(msg) raise arista_exc.AristaConfigError(msg=msg) def plug_port_into_network(self, device_id, host_id, port_id, net_id, tenant_id, port_name, device_owner, sg, orig_sg, vnic_type, segments, switch_bindings=None, trunk_details=None): if device_owner == n_const.DEVICE_OWNER_DHCP: self.plug_dhcp_port_into_network(device_id, host_id, port_id, net_id, tenant_id, segments, port_name) elif (device_owner.startswith('compute') or device_owner.startswith('baremetal') or device_owner.startswith('trunk')): if vnic_type == 'baremetal': self.plug_baremetal_into_network(device_id, host_id, port_id, net_id, tenant_id, segments, port_name, device_owner, sg, orig_sg, vnic_type, switch_bindings, trunk_details) else: self.plug_host_into_network(device_id, host_id, port_id, net_id, tenant_id, segments, port_name, trunk_details) elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: self.plug_distributed_router_port_into_network(device_id, host_id, port_id, net_id, tenant_id, segments) def unplug_port_from_network(self, device_id, device_owner, hostname, port_id, network_id, tenant_id, sg, vnic_type, switch_bindings=None, trunk_details=None): if device_owner == n_const.DEVICE_OWNER_DHCP: self.unplug_dhcp_port_from_network(device_id, hostname, port_id, network_id, tenant_id) elif (device_owner.startswith('compute') or device_owner.startswith('baremetal') or device_owner.startswith('trunk')): if vnic_type == 'baremetal': self.unplug_baremetal_from_network(device_id, hostname, port_id, network_id, tenant_id, sg, vnic_type, switch_bindings, trunk_details) else: self.unplug_host_from_network(device_id, hostname, port_id, network_id, tenant_id, trunk_details) elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: self.unplug_distributed_router_port_from_network(device_id, port_id, hostname, tenant_id) def plug_host_into_network(self, vm_id, host, port_id, network_id, tenant_id, segments, port_name, trunk_details=None): cmds = ['tenant %s' % tenant_id, 'vm id %s hostid %s' % (vm_id, host)] if port_name: cmds.append('port id %s name "%s" network-id %s' % (port_id, port_name, network_id)) else: cmds.append('port id %s network-id %s' % (port_id, network_id)) cmds.extend( 'segment level %d id %s' % (level, segment['id']) for level, segment in enumerate(segments)) if trunk_details and trunk_details.get('sub_ports'): for subport in trunk_details['sub_ports']: port_id = subport['port_id'] net_id = self._ndb.get_network_id_from_port_id(port_id) filters = {'port_id': port_id} segments = db_lib.get_port_binding_level(filters) cmds.append('port id %s network-id %s' % (port_id, net_id)) cmds.extend( 'segment level %d id %s' % (s.level, s.segment_id) for s in segments ) self._run_openstack_cmds(cmds) def plug_baremetal_into_network(self, vm_id, host, port_id, network_id, tenant_id, segments, port_name, device_owner, sg=None, orig_sg=None, vnic_type=None, switch_bindings=None, trunk_details=None): # Basic error checking for baremental deployments # notice that the following method throws and exception # if an error condition exists self._baremetal_support_check(vnic_type) # For baremetal, add host to the topology if switch_bindings and vnic_type == portbindings.VNIC_BAREMETAL: cmds = ['tenant %s' % tenant_id] cmds.append('instance id %s hostid %s type baremetal' % (vm_id, host)) # This list keeps track of any ACLs that need to be rolled back # in case we hit a failure trying to apply ACLs, and we end # failing the transaction. for binding in switch_bindings: if not binding: # skip all empty entries continue if device_owner.startswith('trunk'): vlan_type = 'allowed' else: vlan_type = 'native' # Ensure that binding contains switch and port ID info if binding['switch_id'] and binding['port_id']: if port_name: cmds.append('port id %s name "%s" network-id %s ' 'type %s switch-id %s switchport %s' % (port_id, port_name, network_id, vlan_type, binding['switch_id'], binding['port_id'])) else: cmds.append('port id %s network-id %s type %s ' 'switch-id %s switchport %s' % (port_id, network_id, vlan_type, binding['switch_id'], binding['port_id'])) cmds.extend('segment level %d id %s' % (level, segment['id']) for level, segment in enumerate(segments)) if trunk_details and trunk_details.get('sub_ports'): for subport in trunk_details['sub_ports']: port_id = subport['port_id'] net_id = self._ndb.get_network_id_from_port_id( port_id) filters = {'port_id': port_id} segments = db_lib.get_port_binding_level(filters) cmds.append('port id %s network-id %s type allowed' ' switch-id %s switchport %s' % (port_id, net_id, binding['switch_id'], binding['port_id'])) cmds.extend( 'segment level %d id %s' % (s.level, s.segment_id) for s in segments ) else: msg = _('switch and port ID not specified for baremetal') LOG.error(msg) raise arista_exc.AristaConfigError(msg=msg) cmds.append('exit') self._run_openstack_cmds(cmds) if sg: self.apply_security_group(sg, switch_bindings) else: # Security group was removed. Clean up the existing security # groups. if orig_sg: self.remove_security_group(orig_sg, switch_bindings) def plug_dhcp_port_into_network(self, dhcp_id, host, port_id, network_id, tenant_id, segments, port_name): cmds = ['tenant %s' % tenant_id, 'network id %s' % network_id] if port_name: cmds.append('dhcp id %s hostid %s port-id %s name "%s"' % (dhcp_id, host, port_id, port_name)) else: cmds.append('dhcp id %s hostid %s port-id %s' % (dhcp_id, host, port_id)) cmds.extend('segment level %d id %s' % (level, segment['id']) for level, segment in enumerate(segments)) self._run_openstack_cmds(cmds) def plug_distributed_router_port_into_network(self, router_id, host, port_id, net_id, tenant_id, segments): if not self.bm_and_dvr_supported(): LOG.info(const.ERR_DVR_NOT_SUPPORTED) return cmds = ['tenant %s' % tenant_id, 'instance id %s type router' % router_id, 'port id %s network-id %s hostid %s' % (port_id, net_id, host)] cmds.extend('segment level %d id %s' % (level, segment['id']) for level, segment in enumerate(segments)) self._run_openstack_cmds(cmds) def unplug_host_from_network(self, vm_id, host, port_id, network_id, tenant_id, trunk_details=None): cmds = ['tenant %s' % tenant_id, 'vm id %s hostid %s' % (vm_id, host), ] if trunk_details and trunk_details.get('sub_ports'): cmds.extend( 'no port id %s' % subport['port_id'] for subport in trunk_details['sub_ports'] ) cmds.append('no port id %s' % port_id) self._run_openstack_cmds(cmds) def unplug_baremetal_from_network(self, vm_id, host, port_id, network_id, tenant_id, sg, vnic_type, switch_bindings=None, trunk_details=None): # Basic error checking for baremental deployments # notice that the following method throws and exception # if an error condition exists self._baremetal_support_check(vnic_type) # Following is a temporary code for native VLANs - should be removed cmds = ['tenant %s' % tenant_id] cmds.append('instance id %s hostid %s type baremetal' % (vm_id, host)) if trunk_details and trunk_details.get('sub_ports'): cmds.extend( 'no port id %s' % subport['port_id'] for subport in trunk_details['sub_ports'] ) cmds.append('no port id %s' % port_id) self._run_openstack_cmds(cmds) # SG - Remove security group rules from the port # after deleting the instance for binding in switch_bindings: if not binding: continue self.security_group_driver.remove_acl(sg, binding['switch_id'], binding['port_id'], binding['switch_info']) def unplug_dhcp_port_from_network(self, dhcp_id, host, port_id, network_id, tenant_id): cmds = ['tenant %s' % tenant_id, 'network id %s' % network_id, 'no dhcp id %s port-id %s' % (dhcp_id, port_id), ] self._run_openstack_cmds(cmds) def unplug_distributed_router_port_from_network(self, router_id, port_id, host, tenant_id): if not self.bm_and_dvr_supported(): LOG.info(const.ERR_DVR_NOT_SUPPORTED) return # When the last router port is removed, the router is deleted from EOS. cmds = ['tenant %s' % tenant_id, 'instance id %s type router' % router_id, 'no port id %s hostid %s' % (port_id, host)] self._run_openstack_cmds(cmds) def create_network_bulk(self, tenant_id, network_list, sync=False): cmds = ['tenant %s' % tenant_id] # Create a reference to function to avoid name lookups in the loop append_cmd = cmds.append for counter, network in enumerate(network_list, 1): try: append_cmd('network id %s name "%s"' % (network['network_id'], network['network_name'])) except KeyError: append_cmd('network id %s' % network['network_id']) cmds.extend( 'segment %s type %s id %d %s' % ( seg['id'] if self.hpb_supported() else 1, seg['network_type'], seg['segmentation_id'], ('dynamic' if seg.get('is_dynamic', False) else 'static' if self.hpb_supported() else '')) for seg in network['segments'] if seg['network_type'] != const.NETWORK_TYPE_FLAT ) shared_cmd = 'shared' if network['shared'] else 'no shared' append_cmd(shared_cmd) if self._heartbeat_required(sync, counter): append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) if self._heartbeat_required(sync): append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) self._run_openstack_cmds(cmds, sync=sync) def create_network_segments(self, tenant_id, network_id, network_name, segments): if segments: cmds = ['tenant %s' % tenant_id, 'network id %s name "%s"' % (network_id, network_name)] cmds.extend( 'segment %s type %s id %d %s' % ( seg['id'], seg['network_type'], seg['segmentation_id'], ('dynamic' if seg.get('is_dynamic', False) else 'static' if self.hpb_supported() else '')) for seg in segments) self._run_openstack_cmds(cmds) def delete_network_segments(self, tenant_id, segments): if not segments: return cmds = ['tenant %s' % tenant_id] for segment in segments: cmds.append('network id %s' % segment['network_id']) cmds.append('no segment %s' % segment['id']) self._run_openstack_cmds(cmds) def delete_network_bulk(self, tenant_id, network_id_list, sync=False): cmds = ['tenant %s' % tenant_id] for counter, network_id in enumerate(network_id_list, 1): cmds.append('no network id %s' % network_id) if self._heartbeat_required(sync, counter): cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) if self._heartbeat_required(sync): cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) self._run_openstack_cmds(cmds, sync=sync) def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False): cmds = ['tenant %s' % tenant_id] counter = 0 for vm_id in vm_id_list: counter += 1 cmds.append('no vm id %s' % vm_id) if self._heartbeat_required(sync, counter): cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) if self._heartbeat_required(sync): cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) self._run_openstack_cmds(cmds, sync=sync) def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type, sync=False): cmds = ['tenant %s' % tenant_id] counter = 0 for instance in instance_id_list: counter += 1 cmds.append('no instance id %s' % instance) if self._heartbeat_required(sync, counter): cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) if self._heartbeat_required(sync): cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) self._run_openstack_cmds(cmds, sync=sync) def create_instance_bulk(self, tenant_id, neutron_ports, vms, port_profiles, sync=False): cmds = ['tenant %s' % tenant_id] # Create a reference to function to avoid name lookups in the loop append_cmd = cmds.append counter = 0 for vm in vms.values(): counter += 1 for v_port in vm['ports']: port_id = v_port['portId'] if not v_port['hosts']: # Skip all the ports that have no host associsted with them continue if port_id not in neutron_ports.keys(): continue neutron_port = neutron_ports[port_id] port_name = '' if 'name' in neutron_port: port_name = 'name "%s"' % neutron_port['name'] device_owner = neutron_port['device_owner'] if port_id not in port_profiles: continue vnic_type = port_profiles[port_id]['vnic_type'] network_id = neutron_port['network_id'] segments = [] if (self.hpb_supported() and device_owner != n_const.DEVICE_OWNER_DVR_INTERFACE): filters = {'port_id': port_id, 'host': v_port['hosts'][0]} segments = db_lib.get_port_binding_level(filters) if device_owner == n_const.DEVICE_OWNER_DHCP: append_cmd('network id %s' % neutron_port['network_id']) append_cmd('dhcp id %s hostid %s port-id %s %s' % (vm['vmId'], v_port['hosts'][0], neutron_port['id'], port_name)) cmds.extend('segment level %d id %s' % ( segment.level, segment.segment_id) for segment in segments) elif (device_owner.startswith('compute') or device_owner.startswith('baremetal') or device_owner.startswith('trunk')): if vnic_type == 'baremetal': append_cmd('instance id %s hostid %s type baremetal' % (vm['vmId'], v_port['hosts'][0])) profile = port_profiles[neutron_port['id']] profile = json.loads(profile['profile']) for binding in profile['local_link_information']: if not binding or not isinstance(binding, dict): # skip all empty entries continue if device_owner.startswith('trunk'): vlan_type = 'allowed' else: vlan_type = 'native' # Ensure that profile contains local link info if binding['switch_id'] and binding['port_id']: if port_name: cmds.append('port id %s name "%s" ' 'network-id %s type %s ' 'switch-id %s switchport %s' % (port_id, port_name, network_id, vlan_type, binding['switch_id'], binding['port_id'])) else: cmds.append('port id %s network-id %s ' 'type %s ' 'switch-id %s switchport %s' % (port_id, network_id, vlan_type, binding['switch_id'], binding['port_id'])) cmds.extend('segment level %d id %s' % ( segment.level, segment.segment_id) for segment in segments) else: append_cmd('vm id %s hostid %s' % (vm['vmId'], v_port['hosts'][0])) append_cmd('port id %s %s network-id %s' % (neutron_port['id'], port_name, neutron_port['network_id'])) cmds.extend('segment level %d id %s' % ( segment.level, segment.segment_id) for segment in segments) elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: if not self.bm_and_dvr_supported(): LOG.info(const.ERR_DVR_NOT_SUPPORTED) continue append_cmd('instance id %s type router' % ( neutron_port['device_id'])) for host in v_port['hosts']: if self.hpb_supported(): filters = {'port_id': port_id, 'host': host} segments = db_lib.get_port_binding_level(filters) append_cmd('port id %s network-id %s hostid %s' % ( neutron_port['id'], neutron_port['network_id'], host)) cmds.extend('segment level %d id %s' % ( segment.level, segment.segment_id) for segment in segments) else: LOG.warning(_LW("Unknown device owner: %s"), neutron_port['device_owner']) if self._heartbeat_required(sync, counter): append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) if self._heartbeat_required(sync): append_cmd(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) self._run_openstack_cmds(cmds, sync=sync) def delete_tenant_bulk(self, tenant_list, sync=False): cmds = [] for tenant in tenant_list: cmds.append('no tenant %s' % tenant) if self._heartbeat_required(sync): cmds.append(self.cli_commands[const.CMD_SYNC_HEARTBEAT]) self._run_openstack_cmds(cmds, sync=sync) def delete_this_region(self): cmds = ['enable', 'configure', 'cvx', 'service openstack', 'no region %s' % self.region, ] self._run_eos_cmds(cmds) def register_with_eos(self, sync=False): self._run_openstack_cmds(['sync interval %d' % self.sync_interval], sync=sync) def get_region_updated_time(self): timestamp_cmd = self.cli_commands['timestamp'] if timestamp_cmd: try: return self._run_eos_cmds(commands=timestamp_cmd)[0] except IndexError: # EAPI request failed and so return none msg = "Failed to get last sync timestamp; trigger full sync" LOG.info(msg) return None def _check_sync_lock(self, client): """Check if the lock is owned by this client. :param client: Returns true only if the lock owner matches the expected client. """ cmds = ['show sync lock'] ret = self._run_openstack_cmds(cmds, sync=True) for r in ret: if 'owner' in r: lock_owner = r['owner'] LOG.info(_LI('Lock requested by: %s'), client) LOG.info(_LI('Lock owner: %s'), lock_owner) return lock_owner == client return False def sync_supported(self): return self.cli_commands[const.CMD_REGION_SYNC] def hpb_supported(self): return 'hierarchical-port-binding' in self.cli_commands['features'] def sync_start(self): try: cmds = [] if self.sync_supported(): # Locking the region during sync is supported. client_id = socket.gethostname().split('.')[0] request_id = self._get_random_name() cmds = ['sync lock %s %s' % (client_id, request_id)] self._run_openstack_cmds(cmds) # Check whether the lock was acquired. return self._check_sync_lock(client_id) else: cmds = ['sync start'] self._run_openstack_cmds(cmds) return True except arista_exc.AristaRpcError: return False def sync_end(self): try: # 'sync end' can be sent only when the region has been entered in # the sync mode self._run_openstack_cmds(['sync end'], sync=True) return True except arista_exc.AristaRpcError: return False def _run_eos_cmds(self, commands, commands_to_log=None): """Execute/sends a CAPI (Command API) command to EOS. In this method, list of commands is appended with prefix and postfix commands - to make is understandble by EOS. :param commands : List of command to be executed on EOS. :param commands_to_log : This should be set to the command that is logged. If it is None, then the commands param is logged. """ # Always figure out who is master (starting with the last known val) try: if self._get_eos_master() is None: msg = "Failed to identify CVX master" self.set_cvx_unavailable() raise arista_exc.AristaRpcError(msg=msg) except Exception: self.set_cvx_unavailable() raise self.set_cvx_available() log_cmds = commands if commands_to_log: log_cmds = commands_to_log LOG.info(_LI('Executing command on Arista EOS: %s'), log_cmds) # this returns array of return values for every command in # full_command list try: response = self._send_eapi_req(cmds=commands, commands_to_log=log_cmds) if response is None: # Reset the server as we failed communicating with it self._server_ip = None self.set_cvx_unavailable() msg = "Failed to communicate with CVX master" raise arista_exc.AristaRpcError(msg=msg) return response except arista_exc.AristaRpcError: raise def _build_command(self, cmds, sync=False): """Build full EOS's openstack CLI command. Helper method to add commands to enter and exit from openstack CLI modes. :param cmds: The openstack CLI commands that need to be executed in the openstack config mode. :param sync: This flags indicates that the region is being synced. """ region_cmd = 'region %s' % self.region if sync and self.sync_supported(): region_cmd = self.cli_commands[const.CMD_REGION_SYNC] full_command = [ 'enable', 'configure', 'cvx', 'service openstack', region_cmd, ] full_command.extend(cmds) return full_command def _run_openstack_cmds(self, commands, commands_to_log=None, sync=False): """Execute/sends a CAPI (Command API) command to EOS. In this method, list of commands is appended with prefix and postfix commands - to make is understandble by EOS. :param commands : List of command to be executed on EOS. :param commands_to_logs : This should be set to the command that is logged. If it is None, then the commands param is logged. :param sync: This flags indicates that the region is being synced. """ full_command = self._build_command(commands, sync=sync) if commands_to_log: full_log_command = self._build_command(commands_to_log, sync=sync) else: full_log_command = None return self._run_eos_cmds(full_command, full_log_command) def _get_eos_master(self): # Use guarded command to figure out if this is the master cmd = ['show openstack agent uuid'] cvx = self._get_cvx_hosts() # Identify which EOS instance is currently the master for self._server_ip in cvx: try: response = self._send_eapi_req(cmds=cmd, commands_to_log=cmd) if response is not None: return self._server_ip else: continue # Try another EOS instance except Exception: raise # Couldn't find an instance that is the leader and returning none self._server_ip = None msg = "Failed to reach the CVX master" LOG.error(msg) return None def _api_host_url(self, host=""): return ('https://%s:%s@%s/command-api' % (self._api_username(), self._api_password(), host)) def get_physical_network(self, host_id): """Returns dirctionary which contains physical topology information for a given host_id """ fqdns_used = cfg.CONF.ml2_arista['use_fqdn'] physnet = None switch_id = None mac_to_hostname = {} cmds = ['show network physical-topology neighbors', 'show network physical-topology hosts'] try: response = self._run_eos_cmds(cmds) # Get response for 'show network physical-topology neighbors' # command neighbors = response[0]['neighbors'] for neighbor in neighbors: if host_id in neighbor: switchname = neighbors[neighbor]['toPort'][0]['hostname'] physnet = switchname if fqdns_used else ( switchname.split('.')[0]) switch_id = neighbors[neighbor]['toPort'][0].get('hostid') if not switch_id: switch_id = response[1]['hosts'][switchname]['name'] break # Check if the switch is part of an MLAG pair, and lookup the # pair's physnet name if so physnet = self.mlag_pairs.get(physnet, physnet) for host in response[1]['hosts'].values(): mac_to_hostname[host['name']] = host['hostname'] res = {'physnet': physnet, 'switch_id': switch_id, 'mac_to_hostname': mac_to_hostname} LOG.debug("get_physical_network: Physical Network info for " "%(host)s is %(res)s", {'host': host_id, 'res': res}) return res except Exception as exc: LOG.error(_LE('command %(cmds)s failed with ' '%(exc)s'), {'cmds': cmds, 'exc': exc}) return {} networking-arista-2017.2.2/networking_arista/ml2/rpc/arista_json.py000066400000000000000000000754411323242307100253230ustar00rootroot00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json import socket from neutron_lib import constants as n_const from oslo_log import log as logging from oslo_utils import excutils import requests import six from networking_arista._i18n import _, _LI, _LW, _LE from networking_arista.common import constants as const from networking_arista.common import db_lib from networking_arista.common import exceptions as arista_exc from networking_arista.ml2.rpc.base import AristaRPCWrapperBase LOG = logging.getLogger(__name__) class AristaRPCWrapperJSON(AristaRPCWrapperBase): def __init__(self, ndb): super(AristaRPCWrapperJSON, self).__init__(ndb) self.current_sync_name = None def _get_url(self, host="", user="", password=""): return ('https://%s:%s@%s/openstack/api/' % (user, password, host)) def _api_host_url(self, host=""): return self._get_url(host, self._api_username(), self._api_password()) def _send_request(self, host, path, method, data=None, sanitized_data=None): request_headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Sync-ID': self.current_sync_name } url = self._api_host_url(host=host) + path # Don't log the password log_url = self._get_url(host=host, user=self._api_username(), password="*****") + path resp = None data = json.dumps(data) try: msg = (_('JSON request type: %(type)s url %(url)s data: ' '%(data)s sync_id: %(sync)s') % {'type': method, 'url': log_url, 'data': sanitized_data or data, 'sync': self.current_sync_name}) LOG.info(msg) func_lookup = { 'GET': requests.get, 'POST': requests.post, 'PUT': requests.put, 'PATCH': requests.patch, 'DELETE': requests.delete } func = func_lookup.get(method) if not func: LOG.warning(_LW('Unrecognized HTTP method %s'), method) return None resp = func(url, timeout=self.conn_timeout, verify=False, data=data, headers=request_headers) msg = (_LI('JSON response contains: %(code)s %(resp)s') % {'code': resp.status_code, 'resp': resp.json()}) LOG.info(msg) if resp.ok: return resp.json() else: raise arista_exc.AristaRpcError(msg=resp.json().get('error')) except requests.exceptions.ConnectionError: msg = (_('Error connecting to %(url)s') % {'url': url}) LOG.warning(msg) except requests.exceptions.ConnectTimeout: msg = (_('Timed out connecting to API request to %(url)s') % {'url': url}) LOG.warning(msg) except requests.exceptions.Timeout: msg = (_('Timed out during API request to %(url)s') % {'url': url}) LOG.warning(msg) except requests.exceptions.InvalidURL: msg = (_('Ignore attempt to connect to invalid URL %(url)s') % {'url': self._server_ip}) LOG.warning(msg) except ValueError: LOG.warning(_LW("Ignoring invalid JSON response: %s"), resp.text) except Exception as error: msg = six.text_type(error) LOG.warning(msg) # reraise the exception with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = True return {} if method == 'GET' else None def _check_if_cvx_leader(self, host): url = 'agent/' data = self._send_request(host, url, 'GET') return False if not data else data.get('isLeader', False) def _get_eos_master(self): cvx = self._get_cvx_hosts() for self._server_ip in cvx: if self._check_if_cvx_leader(self._server_ip): return self._server_ip return None def _send_api_request(self, path, method, data=None, sanitized_data=None): host = self._get_eos_master() if not host: msg = six.text_type("Could not find CVX leader") LOG.info(msg) self.set_cvx_unavailable() raise arista_exc.AristaRpcError(msg=msg) self.set_cvx_available() return self._send_request(host, path, method, data, sanitized_data) def _set_region_update_interval(self): path = 'region/%s' % self.region data = { 'name': self.region, 'syncInterval': self.sync_interval } self._send_api_request(path, 'PUT', [data]) def register_with_eos(self, sync=False): self.create_region(self.region) self._set_region_update_interval() def check_supported_features(self): # We don't use this function as we know the features # that are available once using this API. pass def bm_and_dvr_supported(self): return True def get_region_updated_time(self): path = 'agent/' try: data = self._send_api_request(path, 'GET') return {'regionTimestamp': data.get('uuid', '')} except arista_exc.AristaRpcError: return {'regionTimestamp': ''} def create_region(self, region): path = 'region/' data = {'name': region} return self._send_api_request(path, 'POST', [data]) def delete_region(self, region): path = 'region/' data = {'name': region} return self._send_api_request(path, 'DELETE', [data]) def delete_this_region(self): return self.delete_region(self.region) def get_region(self, name): path = 'region/%s' % name try: regions = self._send_api_request(path, 'GET') for region in regions: if region['name'] == name: return region except arista_exc.AristaRpcError: pass return None def sync_supported(self): return True def hpb_supported(self): return True def sync_start(self): try: region = self.get_region(self.region) # If the region doesn't exist, we may need to create # it in order for POSTs to the sync endpoint to succeed if not region: self.register_with_eos() return False if region and region['syncStatus'] == 'syncInProgress': LOG.info('Sync in progress, not syncing') return False req_id = self._get_random_name() data = { 'requester': socket.gethostname().split('.')[0], 'requestId': req_id } path = 'region/' + self.region + '/sync' self._send_api_request(path, 'POST', data) self.current_sync_name = req_id return True except (KeyError, arista_exc.AristaRpcError): LOG.info('Not syncing due to RPC error') return False LOG.info('Not syncing due to server syncStatus') return False def sync_end(self): LOG.info('Attempting to end sync') try: path = 'region/' + self.region + '/sync' self._send_api_request(path, 'DELETE') self.current_sync_name = None return True except arista_exc.AristaRpcError: LOG.info('Not ending sync due to RPC error') return False def get_vms_for_tenant(self, tenant): path = 'region/' + self.region + '/vm?tenantId=' + tenant return self._send_api_request(path, 'GET') def get_dhcps_for_tenant(self, tenant): path = 'region/' + self.region + '/dhcp?tenantId=' + tenant return self._send_api_request(path, 'GET') def get_baremetals_for_tenant(self, tenant): path = 'region/' + self.region + '/baremetal?tenantId=' + tenant return self._send_api_request(path, 'GET') def get_routers_for_tenant(self, tenant): path = 'region/' + self.region + '/router?tenantId=' + tenant return self._send_api_request(path, 'GET') def get_ports_for_tenant(self, tenant, pType): path = 'region/%s/port?tenantId=%s&type=%s' % (self.region, tenant, pType) return self._send_api_request(path, 'GET') def get_tenants(self): path = 'region/' + self.region + '/tenant' tenants = self._send_api_request(path, 'GET') d = {} for ten in tenants: ten['tenantId'] = ten.pop('id') nets = self.get_networks(ten['tenantId']) netDict = {} try: for net in nets: net['networkId'] = net.pop('id') net['networkName'] = net.pop('name') netDict[net['networkId']] = net except Exception as exc: LOG.error(_LE('Failed to get tenant network %(net)s. ' 'Reason: %(exc)s'), {'net': net, 'exc': exc}) ten['tenantNetworks'] = netDict vms = self.get_vms_for_tenant(ten['tenantId']) vmDict = dict((v['id'], v) for v in vms) ten['tenantVmInstances'] = vmDict routers = self.get_routers_for_tenant(ten['tenantId']) routerDict = dict((r['id'], r) for r in routers) ten['tenantRouterInstances'] = routerDict bms = self.get_baremetals_for_tenant(ten['tenantId']) bmDict = dict((b['id'], b) for b in bms) ten['tenantBaremetalInstances'] = bmDict d[ten['tenantId']] = ten return d def delete_tenant_bulk(self, tenant_list, sync=False): path = 'region/' + self.region + '/tenant' data = [{'id': t} for t in tenant_list] return self._send_api_request(path, 'DELETE', data) def get_networks(self, tenant): path = 'region/' + self.region + '/network?tenantId=' + tenant return self._send_api_request(path, 'GET') def create_network_bulk(self, tenant_id, network_list, sync=False): self._create_tenant_if_needed(tenant_id) networks = [] segments = [] for net in network_list: n = { 'id': net['network_id'], 'tenantId': tenant_id, 'shared': net['shared'], } if net.get('network_name'): n['name'] = net['network_name'] if net.get('segmentation_id'): n['segId'] = net['segmentation_id'] for segment in net['segments']: if segment['network_type'] == const.NETWORK_TYPE_FLAT: continue segmentType = 'static' if segment.get('is_dynamic', False): segmentType = 'dynamic' segments.append({ 'id': segment['id'], 'networkId': net['network_id'], 'type': segment['network_type'], 'segmentationId': segment['segmentation_id'], 'segmentType': segmentType, }) networks.append(n) if networks: path = 'region/' + self.region + '/network' self._send_api_request(path, 'POST', networks) if segments: path = 'region/' + self.region + '/segment' self._send_api_request(path, 'POST', segments) def create_network_segments(self, tenant_id, network_id, network_name, segments): segment_data = [] for segment in segments: segmentType = 'static' if segment.get('is_dynamic', False): segmentType = 'dynamic' segment_data.append({ 'id': segment['id'], 'networkId': network_id, 'type': segment['network_type'], 'segmentationId': segment['segmentation_id'], 'segmentType': segmentType, }) path = 'region/' + self.region + '/segment' self._send_api_request(path, 'POST', segment_data) def delete_network_segments(self, tenant_id, segments): segment_data = [] for segment in segments: segment_data.append({ 'id': segment['id'], }) path = 'region/' + self.region + '/segment' self._send_api_request(path, 'DELETE', segment_data) def delete_network_bulk(self, tenant_id, network_id_list, sync=False): path = 'region/' + self.region + '/network' data = [{'id': n, 'tenantId': tenant_id} for n in network_id_list] return self._send_api_request(path, 'DELETE', data) def _create_instance_data(self, vm_id, host_id): return { 'id': vm_id, 'hostId': host_id } def _create_port_data(self, port_id, tenant_id, network_id, instance_id, name, instance_type, hosts, device_owner=None): vlan_type = 'allowed' if instance_type in const.InstanceType.BAREMETAL_INSTANCE_TYPES: vlan_type = 'native' if device_owner and device_owner.startswith('trunk'): vlan_type = 'allowed' return { 'id': port_id, 'tenantId': tenant_id, 'networkId': network_id, 'instanceId': instance_id, 'name': name, 'instanceType': instance_type, 'vlanType': vlan_type, 'hosts': hosts or [] } def _create_tenant_if_needed(self, tenant_id): tenResponse = self.get_tenant(tenant_id) if tenResponse is None: self.create_tenant_bulk([tenant_id]) def get_tenant(self, tenant_id): path = 'region/' + self.region + '/tenant?tenantId=' + tenant_id tenants = self._send_api_request(path, 'GET') if tenants: try: return tenants[0] except KeyError: return None return None def create_tenant_bulk(self, tenant_ids): path = 'region/' + self.region + '/tenant' data = [{'id': tid} for tid in tenant_ids] return self._send_api_request(path, 'POST', data) def create_instance_bulk(self, tenant_id, neutron_ports, vms, port_profiles, sync=False): self._create_tenant_if_needed(tenant_id) vmInst = {} dhcpInst = {} baremetalInst = {} routerInst = {} portInst = [] networkSegments = {} portBindings = {} for vm in vms.values(): for v_port in vm['ports']: port_id = v_port['portId'] if not v_port['hosts']: # Skip all the ports that have no host associsted with them continue if port_id not in neutron_ports.keys(): continue neutron_port = neutron_ports[port_id] inst_id = vm['vmId'] inst_host = vm['ports'][0]['hosts'][0] instance = self._create_instance_data(inst_id, inst_host) device_owner = neutron_port['device_owner'] if port_id not in port_profiles: continue vnic_type = port_profiles[port_id]['vnic_type'] if device_owner == n_const.DEVICE_OWNER_DHCP: instance_type = const.InstanceType.DHCP if inst_id not in dhcpInst: dhcpInst[inst_id] = instance elif (device_owner.startswith('compute') or device_owner.startswith('baremetal') or device_owner.startswith('trunk')): if vnic_type == 'baremetal': instance_type = const.InstanceType.BAREMETAL if inst_id not in baremetalInst: baremetalInst[inst_id] = instance else: instance_type = const.InstanceType.VM if inst_id not in vmInst: vmInst[inst_id] = instance elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: instance_type = const.InstanceType.ROUTER if inst_id not in routerInst: routerInst[inst_id] = instance else: LOG.warning(_LW("Unknown device owner: %s"), neutron_port['device_owner']) continue network_id = neutron_port['network_id'] if port_id not in networkSegments: networkSegments[port_id] = ( db_lib.get_network_segments_by_port_id(port_id)) port = self._create_port_data(port_id, tenant_id, network_id, inst_id, neutron_port.get('name'), instance_type, v_port['hosts'], device_owner) portInst.append(port) if instance_type in const.InstanceType.VIRTUAL_INSTANCE_TYPES: portBinding = self._get_host_bindings( port_id, inst_host, network_id, networkSegments[port_id]) elif (instance_type in const.InstanceType.BAREMETAL_INSTANCE_TYPES): switch_profile = json.loads(port_profiles[ port_id]['profile']) portBinding = self._get_switch_bindings( port_id, inst_host, network_id, switch_profile['local_link_information'], networkSegments[port_id]) if port_id not in portBindings: portBindings[port_id] = portBinding else: portBindings[port_id] += portBinding # create instances first if vmInst: path = 'region/' + self.region + '/vm?tenantId=' + tenant_id self._send_api_request(path, 'POST', list(vmInst.values())) if dhcpInst: path = 'region/' + self.region + '/dhcp?tenantId=' + tenant_id self._send_api_request(path, 'POST', list(dhcpInst.values())) if baremetalInst: path = 'region/' + self.region + '/baremetal?tenantId=' + tenant_id self._send_api_request(path, 'POST', list(baremetalInst.values())) if routerInst: path = 'region/' + self.region + '/router?tenantId=' + tenant_id self._send_api_request(path, 'POST', list(routerInst.values())) # now create ports for the instances path = 'region/' + self.region + '/port' self._send_api_request(path, 'POST', portInst) # TODO(shashank): Optimize this for port_id, bindings in portBindings.items(): url = 'region/' + self.region + '/port/' + port_id + '/binding' self._send_api_request(url, 'POST', bindings) def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type, sync=False): path = 'region/%(region)s/%(type)s' % { 'region': self.region, 'type': instance_type} data = [{'id': i} for i in instance_id_list] return self._send_api_request(path, 'DELETE', data) def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False): self.delete_instance_bulk(tenant_id, vm_id_list, const.InstanceType.VM) def delete_dhcp_bulk(self, tenant_id, dhcp_id_list): self.delete_instance_bulk(tenant_id, dhcp_id_list, const.InstanceType.DHCP) def delete_port(self, port_id, instance_id, instance_type, device_owner=None): path = ('region/%s/port?portId=%s&id=%s&type=%s' % (self.region, port_id, instance_id, instance_type)) port = self._create_port_data(port_id, None, None, instance_id, None, instance_type, None, device_owner) return self._send_api_request(path, 'DELETE', [port]) def get_instance_ports(self, instance_id, instance_type): path = ('region/%s/port?id=%s&type=%s' % (self.region, instance_id, instance_type)) return self._send_api_request(path, 'GET') def plug_port_into_network(self, device_id, host_id, port_id, net_id, tenant_id, port_name, device_owner, sg, orig_sg, vnic_type, segments, switch_bindings=None, trunk_details=None): device_type = '' if device_owner == n_const.DEVICE_OWNER_DHCP: device_type = const.InstanceType.DHCP elif (device_owner.startswith('compute') or device_owner.startswith('baremetal') or device_owner.startswith('trunk')): if vnic_type == 'baremetal': device_type = const.InstanceType.BAREMETAL else: device_type = const.InstanceType.VM elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: device_type = const.InstanceType.ROUTER else: LOG.info(_LI('Unsupported device owner: %s'), device_owner) return self._create_tenant_if_needed(tenant_id) instance = self._create_instance_data(device_id, host_id) port = self._create_port_data(port_id, tenant_id, net_id, device_id, port_name, device_type, [host_id], device_owner) url = 'region/%(region)s/%(device_type)s?tenantId=%(tenant_id)s' % { 'region': self.region, 'device_type': device_type, 'tenant_id': tenant_id, } self._send_api_request(url, 'POST', [instance]) self._send_api_request('region/' + self.region + '/port', 'POST', [port]) if trunk_details and trunk_details.get('sub_ports'): for subport in trunk_details['sub_ports']: subport_id = subport['port_id'] subport_net_id = self._ndb.get_network_id_from_port_id( subport_id) subport_name = 'name_%s' % subport_id sub_device_owner = 'trunk:subport' port = self._create_port_data(subport_id, tenant_id, subport_net_id, device_id, subport_name, device_type, [host_id], sub_device_owner) self._send_api_request('region/' + self.region + '/port', 'POST', [port]) if device_type in const.InstanceType.VIRTUAL_INSTANCE_TYPES: self.bind_port_to_host(port_id, host_id, net_id, segments) if trunk_details and trunk_details.get('sub_ports'): for subport in trunk_details['sub_ports']: subport_id = subport['port_id'] subport_net_id = self._ndb.get_network_id_from_port_id( subport_id) sub_segments = db_lib.get_network_segments_by_port_id( subport_id) self.bind_port_to_host(subport_id, host_id, subport_net_id, sub_segments) elif device_type in const.InstanceType.BAREMETAL_INSTANCE_TYPES: self.bind_port_to_switch_interface(port_id, host_id, net_id, switch_bindings, segments) if trunk_details and trunk_details.get('sub_ports'): for subport in trunk_details['sub_ports']: subport_id = subport['port_id'] subport_net_id = self._ndb.get_network_id_from_port_id( subport_id) sub_segments = db_lib.get_network_segments_by_port_id( subport_id) self.bind_port_to_switch_interface(subport_id, host_id, subport_net_id, switch_bindings, sub_segments) if sg: self.apply_security_group(sg, switch_bindings) else: # Security group was removed. Clean up the existing security # groups. if orig_sg: self.remove_security_group(orig_sg, switch_bindings) def unplug_port_from_network(self, device_id, device_owner, hostname, port_id, network_id, tenant_id, sg, vnic_type, switch_bindings=None, trunk_details=None): device_type = '' if device_owner == n_const.DEVICE_OWNER_DHCP: device_type = const.InstanceType.DHCP elif (device_owner.startswith('compute') or device_owner.startswith('baremetal') or device_owner.startswith('trunk')): if vnic_type == 'baremetal': device_type = const.InstanceType.BAREMETAL else: device_type = const.InstanceType.VM elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: device_type = const.InstanceType.ROUTER else: LOG.info(_LI('Unsupported device owner: %s'), device_owner) return if device_type in const.InstanceType.VIRTUAL_INSTANCE_TYPES: if trunk_details and trunk_details.get('sub_ports'): for subport in trunk_details['sub_ports']: subport_id = subport['port_id'] subport_device_owner = 'trunk:subport' self.unbind_port_from_host(subport_id, hostname) self.delete_port(subport_id, device_id, device_type, subport_device_owner) self.unbind_port_from_host(port_id, hostname) elif device_type in const.InstanceType.BAREMETAL_INSTANCE_TYPES: if trunk_details and trunk_details.get('sub_ports'): for subport in trunk_details['sub_ports']: subport_id = subport['port_id'] subport_device_owner = 'trunk:subport' self.unbind_port_from_switch_interface(subport_id, hostname, switch_bindings) self.delete_port(subport_id, device_id, device_type, subport_device_owner) self.unbind_port_from_switch_interface(port_id, hostname, switch_bindings) self.delete_port(port_id, device_id, device_type, device_owner) port = self.get_instance_ports(device_id, device_type) if not port: # If the last port attached to an instance is deleted, cleanup the # instance. instances = [device_id] self.delete_instance_bulk(tenant_id, instances, device_type) def _get_segment_list(self, network_id, segments): if not network_id or not segments: return [] return [{'id': s['id'], 'type': s['network_type'], 'segmentationId': s['segmentation_id'], 'networkId': network_id, 'segment_type': 'dynamic' if s.get('is_dynamic', False) else 'static', } for s in segments] def _get_host_bindings(self, port_id, host, network_id, segments): return [{'portId': port_id, 'hostBinding': [{ 'host': host, 'segment': self._get_segment_list(network_id, segments), }] }] def bind_port_to_host(self, port_id, host, network_id, segments): url = 'region/' + self.region + '/port/' + port_id + '/binding' bindings = self._get_host_bindings(port_id, host, network_id, segments) self._send_api_request(url, 'POST', bindings) def unbind_port_from_host(self, port_id, host): url = 'region/' + self.region + '/port/' + port_id + '/binding' binding = {'portId': port_id, 'hostBinding': [{ 'host': host, }]} self._send_api_request(url, 'DELETE', [binding]) def _get_switch_bindings(self, port_id, host, network_id, switch_bindings, segments): bindings = [] for binding in switch_bindings: if not binding: continue switch = binding['switch_id'] interface = binding['port_id'] bindings.append({'portId': port_id, 'switchBinding': [{ 'host': host, 'switch': switch, 'interface': interface, 'segment': self._get_segment_list( network_id, segments), }]}) return bindings def bind_port_to_switch_interface(self, port_id, host, network_id, switch_bindings, segments): if not switch_bindings: return url = 'region/' + self.region + '/port/' + port_id + '/binding' bindings = self._get_switch_bindings(port_id, host, network_id, switch_bindings, segments) self._send_api_request(url, 'POST', bindings) def unbind_port_from_switch_interface(self, port_id, host, switch_bindings): url = 'region/' + self.region + '/port/' + port_id + '/binding' bindings = self._get_switch_bindings(port_id, host, None, switch_bindings, None) self._send_api_request(url, 'DELETE', bindings) networking-arista-2017.2.2/networking_arista/ml2/rpc/base.py000066400000000000000000000432541323242307100237160ustar00rootroot00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import base64 import os from neutron_lib.db import api as db_api from oslo_config import cfg from oslo_log import log as logging import six from neutron.db.models.plugins.ml2 import vlanallocation from networking_arista._i18n import _, _LW from networking_arista.common import exceptions as arista_exc from networking_arista.ml2 import arista_sec_gp LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class AristaRPCWrapperBase(object): """Wraps Arista JSON RPC. All communications between Neutron and EOS are over JSON RPC. EOS - operating system used on Arista hardware Command API - JSON RPC API provided by Arista EOS """ def __init__(self, neutron_db): self._ndb = neutron_db self._validate_config() self._server_ip = None self.region = cfg.CONF.ml2_arista.region_name self.sync_interval = cfg.CONF.ml2_arista.sync_interval self.conn_timeout = cfg.CONF.ml2_arista.conn_timeout self.eapi_hosts = cfg.CONF.ml2_arista.eapi_host.split(',') self.security_group_driver = arista_sec_gp.AristaSecGroupSwitchDriver( self._ndb) # We denote mlag_pair physnets as peer1_peer2 in the physnet name, the # following builds a mapping of peer name to physnet name for use # during port binding self.mlag_pairs = {} session = db_api.get_reader_session() with session.begin(): physnets = session.query( vlanallocation.VlanAllocation.physical_network ).distinct().all() for (physnet,) in physnets: if '_' in physnet: peers = physnet.split('_') self.mlag_pairs[peers[0]] = physnet self.mlag_pairs[peers[1]] = physnet # Indication of CVX availabililty in the driver. self._cvx_available = True # Reference to SyncService object which is set in AristaDriver self.sync_service = None def _validate_config(self): if cfg.CONF.ml2_arista.get('eapi_host') == '': msg = _('Required option eapi_host is not set') LOG.error(msg) raise arista_exc.AristaConfigError(msg=msg) if cfg.CONF.ml2_arista.get('eapi_username') == '': msg = _('Required option eapi_username is not set') LOG.error(msg) raise arista_exc.AristaConfigError(msg=msg) def _api_username(self): return cfg.CONF.ml2_arista.eapi_username def _api_password(self): return cfg.CONF.ml2_arista.eapi_password def _get_random_name(self, length=10): """Returns a base64 encoded name.""" result = base64.b64encode(os.urandom(10)).translate(None, b'=+/') return result if six.PY2 else result.decode('utf-8') def _get_cvx_hosts(self): cvx = [] if self._server_ip: # If we know the master's IP, let's start with that cvx.append(self._server_ip) for h in self.eapi_hosts: if h.strip() not in cvx: cvx.append(h.strip()) return cvx def set_cvx_unavailable(self): self._cvx_available = False if self.sync_service: self.sync_service.force_sync() def set_cvx_available(self): self._cvx_available = True def cvx_available(self): return self._cvx_available def check_cvx_availability(self): try: if self._get_eos_master(): self.set_cvx_available() return True except Exception as exc: LOG.warning(_LW('%s when getting CVX master'), exc) self.set_cvx_unavailable() return False def delete_tenant(self, tenant_id): """Deletes a given tenant and all its networks and VMs from EOS. :param tenant_id: globally unique neutron tenant identifier """ self.delete_tenant_bulk([tenant_id]) def clear_region_updated_time(self): # TODO(shashank): Remove this once the call is removed from the ML2 # driver. pass def create_network(self, tenant_id, network): """Creates a single network on Arista hardware :param tenant_id: globally unique neutron tenant identifier :param network: dict containing network_id, network_name and segmentation_id """ self.create_network_bulk(tenant_id, [network]) def delete_network(self, tenant_id, network_id, network_segments): """Deletes a specified network for a given tenant :param tenant_id: globally unique neutron tenant identifier :param network_id: globally unique neutron network identifier :param network_segments: segments associated with the network """ segments_info = [] segments_info.extend({'id': segment['id'], 'network_id': network_id} for segment in network_segments) self.delete_network_segments(tenant_id, segments_info) self.delete_network_bulk(tenant_id, [network_id]) def delete_vm(self, tenant_id, vm_id): """Deletes a VM from EOS for a given tenant :param tenant_id : globally unique neutron tenant identifier :param vm_id : id of a VM that needs to be deleted. """ self.delete_vm_bulk(tenant_id, [vm_id]) @abc.abstractmethod def plug_port_into_network(self, device_id, host_id, port_id, net_id, tenant_id, port_name, device_owner, sg, orig_sg, vnic_type, segments=None, switch_bindings=None, trunk_details=None): """Generic routine plug a port of a VM instace into network. :param device_id: globally unique identifier for the device :param host: ID of the host where the port is placed :param port_id: globally unique port ID that connects port to network :param network_id: globally unique neutron network identifier :param tenant_id: globally unique neutron tenant identifier :param port_name: Name of the port - for display purposes :param device_owner: Device owner - e.g. compute or network:dhcp :param sg: current security group for the port :param orig_sg: original security group for the port :param vnic_type: VNIC type for the port :param segments: list of network segments the port is bound to :param switch_bindings: List of switch_bindings :param trunk_details: List of subports of a trunk port """ @abc.abstractmethod def unplug_port_from_network(self, device_id, device_owner, hostname, port_id, network_id, tenant_id, sg, vnic_type, switch_bindings=None, trunk_details=None): """Removes a port from the device :param device_id: globally unique identifier for the device :param host: ID of the host where the device is placed :param port_id: globally unique port ID that connects device to network :param network_id: globally unique neutron network identifier :param tenant_id: globally unique neutron tenant identifier :param trunk_details: List of subports of a trunk port """ def _clean_acls(self, sg, failed_switch, switches_to_clean): """This is a helper function to clean up ACLs on switches. This called from within an exception - when apply_acl fails. Therefore, ensure that exception is raised after the cleanup is done. :param sg: Security Group to be removed :param failed_switch: IP of the switch where ACL failed :param switches_to_clean: List of switches containing link info """ if not switches_to_clean: # This means the no switch needs cleaning - so, simply raise the # the exception and bail out msg = (_("Failed to apply ACL %(sg)s on switch %(switch)s") % {'sg': sg, 'switch': failed_switch}) LOG.error(msg) for s in switches_to_clean: try: # Port is being updated to remove security groups self.security_group_driver.remove_acl(sg, s['switch_id'], s['port_id'], s['switch_info']) except Exception: msg = (_("Failed to remove ACL %(sg)s on switch %(switch)%") % {'sg': sg, 'switch': s['switch_info']}) LOG.warning(msg) raise arista_exc.AristaSecurityGroupError(msg=msg) def create_acl(self, sg): """Creates an ACL on Arista Switch. Deals with multiple configurations - such as multiple switches """ self.security_group_driver.create_acl(sg) def delete_acl(self, sg): """Deletes an ACL from Arista Switch. Deals with multiple configurations - such as multiple switches """ self.security_group_driver.delete_acl(sg) def create_acl_rule(self, sgr): """Creates an ACL on Arista Switch. For a given Security Group (ACL), it adds additional rule Deals with multiple configurations - such as multiple switches """ self.security_group_driver.create_acl_rule(sgr) def delete_acl_rule(self, sgr): """Deletes an ACL rule on Arista Switch. For a given Security Group (ACL), it removes a rule Deals with multiple configurations - such as multiple switches """ self.security_group_driver.delete_acl_rule(sgr) def perform_sync_of_sg(self): """Perform sync of the security groups between ML2 and EOS. This is unconditional sync to ensure that all security ACLs are pushed to all the switches, in case of switch or neutron reboot """ self.security_group_driver.perform_sync_of_sg() @abc.abstractmethod def sync_supported(self): """Whether the EOS version supports sync. Returns True if sync is supported, false otherwise. """ @abc.abstractmethod def bm_and_dvr_supported(self): """Whether EOS supports Ironic and DVR. Returns True if supported, false otherwise. """ @abc.abstractmethod def register_with_eos(self, sync=False): """This is the registration request with EOS. This the initial handshake between Neutron and EOS. critical end-point information is registered with EOS. :param sync: This flags indicates that the region is being synced. """ @abc.abstractmethod def check_supported_features(self): """Checks whether the CLI commands are valid. This method tries to execute the commands on EOS and if it succeedes the command is stored. """ @abc.abstractmethod def get_region_updated_time(self): """Return the timestamp of the last update. This method returns the time at which any entities in the region were updated. """ @abc.abstractmethod def delete_this_region(self): """Deleted the region data from EOS.""" @abc.abstractmethod def sync_start(self): """Let EOS know that a sync in being initiated.""" @abc.abstractmethod def sync_end(self): """Let EOS know that sync is complete.""" @abc.abstractmethod def get_tenants(self): """Returns dict of all tenants known by EOS. :returns: dictionary containing the networks per tenant and VMs allocated per tenant """ @abc.abstractmethod def delete_tenant_bulk(self, tenant_list, sync=False): """Sends a bulk request to delete the tenants. :param tenant_list: list of globaly unique neutron tenant ids which need to be deleted. :param sync: This flags indicates that the region is being synced. """ @abc.abstractmethod def create_network_bulk(self, tenant_id, network_list, sync=False): """Creates a network on Arista Hardware :param tenant_id: globally unique neutron tenant identifier :param network_list: list of dicts containing network_id, network_name and segmentation_id :param sync: This flags indicates that the region is being synced. """ @abc.abstractmethod def create_network_segments(self, tenant_id, network_id, network_name, segments): """Creates a network on Arista Hardware Note: This method is not used at the moment. create_network() is used instead. This will be used once the support for multiple segments is added in Neutron. :param tenant_id: globally unique neutron tenant identifier :param network_id: globally unique neutron network identifier :param network_name: Network name - for display purposes :param segments: List of segments in a given network """ @abc.abstractmethod def delete_network_bulk(self, tenant_id, network_id_list, sync=False): """Deletes the network ids specified for a tenant :param tenant_id: globally unique neutron tenant identifier :param network_id_list: list of globally unique neutron network identifiers :param sync: This flags indicates that the region is being synced. """ @abc.abstractmethod def delete_network_segments(self, tenant_id, network_segments): """Deletes the network segments :param network_segments: List of network segments to be delted. """ @abc.abstractmethod def create_instance_bulk(self, tenant_id, neutron_ports, vms, port_profiles, sync=False): """Sends a bulk request to create ports. :param tenant_id: globaly unique neutron tenant identifier :param neutron_ports: list of ports that need to be created. :param vms: list of vms to which the ports will be attached to. :param sync: This flags indicates that the region is being synced. """ @abc.abstractmethod def delete_instance_bulk(self, tenant_id, instance_id_list, instance_type, sync=False): """Deletes instances from EOS for a given tenant :param tenant_id : globally unique neutron tenant identifier :param instance_id_list : ids of instances that needs to be deleted. :param instance_type: The type of the instance which is being deleted. :param sync: This flags indicates that the region is being synced. """ @abc.abstractmethod def delete_vm_bulk(self, tenant_id, vm_id_list, sync=False): """Deletes VMs from EOS for a given tenant :param tenant_id : globally unique neutron tenant identifier :param vm_id_list : ids of VMs that needs to be deleted. :param sync: This flags indicates that the region is being synced. """ @abc.abstractmethod def hpb_supported(self): """Whether hierarchical port binding (HPB) is supported by CVX. Returns True if HPB is supported, False otherwise. """ def apply_security_group(self, security_group, switch_bindings): """Applies ACLs on switch interface. Translates neutron security group to switch ACL and applies the ACLs on all the switch interfaces defined in the switch_bindings. :param security_group: Neutron security group :param switch_bindings: Switch link information """ switches_with_acl = [] for binding in switch_bindings: try: self.security_group_driver.apply_acl(security_group, binding['switch_id'], binding['port_id'], binding['switch_info']) switches_with_acl.append(binding) except Exception: message = _LW('Unable to apply security group on %s') % ( binding['switch_id']) LOG.warning(message) self._clean_acls(security_group, binding['switch_id'], switches_with_acl) def remove_security_group(self, security_group, switch_bindings): """Removes ACLs from switch interface Translates neutron security group to switch ACL and removes the ACLs from all the switch interfaces defined in the switch_bindings. :param security_group: Neutron security group :param switch_bindings: Switch link information """ for binding in switch_bindings: try: self.security_group_driver.remove_acl(security_group, binding['switch_id'], binding['port_id'], binding['switch_info']) except Exception: message = _LW('Unable to remove security group from %s') % ( binding['switch_id']) LOG.warning(message) networking-arista-2017.2.2/networking_arista/ml2/sec_group_callback.py000066400000000000000000000113421323242307100260130ustar00rootroot00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from networking_arista._i18n import _LE LOG = logging.getLogger(__name__) class AristaSecurityGroupHandler(object): """Security Group Handler for Arista networking hardware. Registers for the notification of security group updates. Once a notification is recieved, it takes appropriate actions by updating Arista hardware appropriately. """ def __init__(self, client): self.client = client self.subscribe() @log_helpers.log_method_call def create_security_group(self, resource, event, trigger, **kwargs): sg = kwargs.get('security_group') try: self.client.create_security_group(sg) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to create a security group %(sg_id)s " "in Arista Driver: %(err)s"), {"sg_id": sg["id"], "err": e}) try: self.client.delete_security_group(sg) except Exception: LOG.exception(_LE("Failed to delete security group %s"), sg['id']) @log_helpers.log_method_call def delete_security_group(self, resource, event, trigger, **kwargs): sg = kwargs.get('security_group') try: self.client.delete_security_group(sg) except Exception as e: LOG.error(_LE("Failed to delete security group %(sg_id)s " "in Arista Driver: %(err)s"), {"sg_id": sg["id"], "err": e}) @log_helpers.log_method_call def update_security_group(self, resource, event, trigger, **kwargs): sg = kwargs.get('security_group') try: self.client.update_security_group(sg) except Exception as e: LOG.error(_LE("Failed to update security group %(sg_id)s " "in Arista Driver: %(err)s"), {"sg_id": sg["id"], "err": e}) @log_helpers.log_method_call def create_security_group_rule(self, resource, event, trigger, **kwargs): sgr = kwargs.get('security_group_rule') try: self.client.create_security_group_rule(sgr) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Failed to create a security group %(sgr_id)s " "rule in Arista Driver: %(err)s"), {"sgr_id": sgr["id"], "err": e}) try: self.client.delete_security_group_rule(sgr) except Exception: LOG.exception(_LE("Failed to delete security group " "rule %s"), sgr['id']) @log_helpers.log_method_call def delete_security_group_rule(self, resource, event, trigger, **kwargs): sgr_id = kwargs.get('security_group_rule_id') try: self.client.delete_security_group_rule(sgr_id) except Exception as e: LOG.error(_LE("Failed to delete security group %(sgr_id)s " "rule in Arista Driver: %(err)s"), {"sgr_id": sgr_id, "err": e}) def subscribe(self): # Subscribe to the events related to security groups and rules registry.subscribe( self.create_security_group, resources.SECURITY_GROUP, events.AFTER_CREATE) registry.subscribe( self.update_security_group, resources.SECURITY_GROUP, events.AFTER_UPDATE) registry.subscribe( self.delete_security_group, resources.SECURITY_GROUP, events.BEFORE_DELETE) registry.subscribe( self.create_security_group_rule, resources.SECURITY_GROUP_RULE, events.AFTER_CREATE) registry.subscribe( self.delete_security_group_rule, resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE) networking-arista-2017.2.2/networking_arista/tests/000077500000000000000000000000001323242307100223065ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/__init__.py000066400000000000000000000000001323242307100244050ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/base.py000066400000000000000000000014321323242307100235720ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class TestCase(base.BaseTestCase): """Test case base class for all unit tests.""" networking-arista-2017.2.2/networking_arista/tests/test_networking_arista.py000066400000000000000000000014501323242307100274510ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ test_networking_arista ---------------------------------- Tests for `networking_arista` module. """ from networking_arista.tests import base class TestNetworking_arista(base.TestCase): def test_something(self): pass networking-arista-2017.2.2/networking_arista/tests/unit/000077500000000000000000000000001323242307100232655ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/__init__.py000066400000000000000000000012671323242307100254040ustar00rootroot00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg cfg.CONF.use_stderr = False networking-arista-2017.2.2/networking_arista/tests/unit/common/000077500000000000000000000000001323242307100245555ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/common/__init__.py000066400000000000000000000000001323242307100266540ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/common/test_api.py000066400000000000000000000202551323242307100267430ustar00rootroot00000000000000# Copyright (c) 2017 Arista Networks, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import requests from requests import exceptions as requests_exc import testtools from networking_arista.common import api class TestEAPIClientInit(testtools.TestCase): def test_basic_init(self): host_ip = '10.20.30.40' client = api.EAPIClient(host_ip) self.assertEqual(client.host, host_ip) self.assertEqual(client.url, 'https://10.20.30.40/command-api') self.assertDictContainsSubset( {'Content-Type': 'application/json', 'Accept': 'application/json'}, client.session.headers ) def test_init_enable_verify(self): client = api.EAPIClient('10.0.0.1', verify=True) self.assertTrue(client.session.verify) def test_init_auth(self): client = api.EAPIClient('10.0.0.1', username='user', password='pass') self.assertEqual(client.session.auth, ('user', 'pass')) def test_init_timeout(self): client = api.EAPIClient('10.0.0.1', timeout=99) self.assertEqual(client.timeout, 99) def test_make_url(self): url = api.EAPIClient._make_url('1.2.3.4') self.assertEqual(url, 'https://1.2.3.4/command-api') def test_make_url_http(self): url = api.EAPIClient._make_url('5.6.7.8', 'http') self.assertEqual(url, 'http://5.6.7.8/command-api') class TestEAPIClientExecute(testtools.TestCase): def setUp(self): super(TestEAPIClientExecute, self).setUp() mock.patch('requests.Session.post').start() self.mock_log = mock.patch.object(api, 'LOG').start() self.mock_json_dumps = mock.patch.object(api.json, 'dumps').start() self.addCleanup(mock.patch.stopall) self.client = api.EAPIClient('10.0.0.1', timeout=99) def _test_execute_helper(self, commands, commands_to_log=None): expected_data = { 'id': 'Networking Arista Driver', 'method': 'runCmds', 'jsonrpc': '2.0', 'params': { 'timestamps': False, 'format': 'json', 'version': 1, 'cmds': commands } } self.client.session.post.assert_called_once_with( 'https://10.0.0.1/command-api', data=self.mock_json_dumps.return_value, timeout=99 ) self.mock_log.info.assert_has_calls( [ mock.call( mock.ANY, { 'ip': '10.0.0.1', 'data': self.mock_json_dumps.return_value } ) ] ) log_data = dict(expected_data) log_data['params'] = dict(expected_data['params']) log_data['params']['cmds'] = commands_to_log or commands self.mock_json_dumps.assert_has_calls( [ mock.call(log_data), mock.call(expected_data) ] ) def test_command_prep(self): commands = ['enable'] self.client.execute(commands) self._test_execute_helper(commands) def test_commands_to_log(self): commands = ['config', 'secret'] commands_to_log = ['config', '******'] self.client.execute(commands, commands_to_log) self._test_execute_helper(commands, commands_to_log) def _test_execute_error_helper(self, raise_exception, expected_exception, warning_has_params=False): commands = ['config'] self.client.session.post.side_effect = raise_exception self.assertRaises( expected_exception, self.client.execute, commands ) self._test_execute_helper(commands) if warning_has_params: args = (mock.ANY, mock.ANY) else: args = (mock.ANY,) self.mock_log.warning.assert_called_once_with(*args) def test_request_connection_error(self): self._test_execute_error_helper( requests_exc.ConnectionError, api.arista_exc.AristaRpcError ) def test_request_connect_timeout(self): self._test_execute_error_helper( requests_exc.ConnectTimeout, api.arista_exc.AristaRpcError ) def test_request_timeout(self): self._test_execute_error_helper( requests_exc.Timeout, api.arista_exc.AristaRpcError ) def test_request_connect_InvalidURL(self): self._test_execute_error_helper( requests_exc.InvalidURL, api.arista_exc.AristaRpcError ) def test_request_other_exception(self): class OtherException(Exception): pass self._test_execute_error_helper( OtherException, OtherException, warning_has_params=True ) def _test_response_helper(self, response_data): mock_response = mock.MagicMock(requests.Response) mock_response.json.return_value = response_data self.client.session.post.return_value = mock_response def test_response_success(self): mock_response = mock.MagicMock(requests.Response) mock_response.json.return_value = {'result': mock.sentinel} self.client.session.post.return_value = mock_response retval = self.client.execute(['enable']) self.assertEqual(retval, mock.sentinel) def test_response_json_error(self): mock_response = mock.MagicMock(requests.Response) mock_response.json.side_effect = ValueError self.client.session.post.return_value = mock_response retval = self.client.execute(['enable']) self.assertIsNone(retval) self.mock_log.info.assert_has_calls([mock.call(mock.ANY)]) def _test_response_format_error_helper(self, bad_response): mock_response = mock.MagicMock(requests.Response) mock_response.json.return_value = bad_response self.client.session.post.return_value = mock_response self.assertRaises( api.arista_exc.AristaRpcError, self.client.execute, ['enable'] ) self.mock_log.info.assert_has_calls([mock.call(mock.ANY)]) def test_response_format_error(self): self._test_response_format_error_helper({}) def test_response_unknown_error_code(self): self._test_response_format_error_helper( {'error': {'code': 999}} ) def test_response_known_error_code(self): self._test_response_format_error_helper( {'error': {'code': 1002, 'data': []}} ) def test_response_known_error_code_data_is_not_dict(self): self._test_response_format_error_helper( {'error': {'code': 1002, 'data': ['some text']}} ) def test_response_not_cvx_leader(self): mock_response = mock.MagicMock(requests.Response) mock_response.json.return_value = { 'error': { 'code': 1002, 'data': [{'errors': [api.ERR_CVX_NOT_LEADER]}] } } self.client.session.post.return_value = mock_response retval = self.client.execute(['enable']) self.assertIsNone(retval) def test_response_other_exception(self): class OtherException(Exception): pass mock_response = mock.MagicMock(requests.Response) mock_response.json.return_value = 'text' self.client.session.post.return_value = mock_response self.assertRaises( TypeError, self.client.execute, ['enable'] ) self.mock_log.warning.assert_has_calls( [ mock.call(mock.ANY, {'error': mock.ANY}) ] ) networking-arista-2017.2.2/networking_arista/tests/unit/l3Plugin/000077500000000000000000000000001323242307100247625ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/l3Plugin/__init__.py000066400000000000000000000012671323242307100271010ustar00rootroot00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg cfg.CONF.use_stderr = False networking-arista-2017.2.2/networking_arista/tests/unit/l3Plugin/test_arista_l3_driver.py000066400000000000000000000411731323242307100316350ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from neutron.tests import base from networking_arista.l3Plugin import arista_l3_driver as arista def setup_arista_config(value='', vrf=False, mlag=False): cfg.CONF.set_override('primary_l3_host', value, "l3_arista") cfg.CONF.set_override('primary_l3_host_username', value, "l3_arista") if vrf: cfg.CONF.set_override('use_vrf', vrf, "l3_arista") if mlag: cfg.CONF.set_override('secondary_l3_host', value, "l3_arista") cfg.CONF.set_override('mlag_config', mlag, "l3_arista") class AristaL3DriverTestCasesDefaultVrf(base.BaseTestCase): """Test cases to test the RPC between Arista Driver and EOS. Tests all methods used to send commands between Arista L3 Driver and EOS to program routing functions in Default VRF. """ def setUp(self): super(AristaL3DriverTestCasesDefaultVrf, self).setUp() setup_arista_config('value') self.drv = arista.AristaL3Driver() self.drv._servers = [] self.drv._servers.append(mock.MagicMock()) def test_no_exception_on_correct_configuration(self): self.assertIsNotNone(self.drv) def test_create_router_on_eos(self): router_name = 'test-router-1' route_domain = '123:123' self.drv.create_router_on_eos(router_name, route_domain, self.drv._servers[0]) cmds = ['enable', 'configure', 'exit'] self.drv._servers[0].execute.assert_called_once_with(cmds) def test_delete_router_from_eos(self): router_name = 'test-router-1' self.drv.delete_router_from_eos(router_name, self.drv._servers[0]) cmds = ['enable', 'configure', 'exit'] self.drv._servers[0].execute.assert_called_once_with(cmds) def test_add_interface_to_router_on_eos(self): router_name = 'test-router-1' segment_id = '123' router_ip = '10.10.10.10' gw_ip = '10.10.10.1' mask = '255.255.255.0' self.drv.add_interface_to_router(segment_id, router_name, gw_ip, router_ip, mask, self.drv._servers[0]) cmds = ['enable', 'configure', 'ip routing', 'vlan %s' % segment_id, 'exit', 'interface vlan %s' % segment_id, 'ip address %s/%s' % (gw_ip, mask), 'exit'] self.drv._servers[0].execute.assert_called_once_with(cmds) def test_delete_interface_from_router_on_eos(self): router_name = 'test-router-1' segment_id = '123' self.drv.delete_interface_from_router(segment_id, router_name, self.drv._servers[0]) cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id, 'exit'] self.drv._servers[0].execute.assert_called_once_with(cmds) class AristaL3DriverTestCasesUsingVRFs(base.BaseTestCase): """Test cases to test the RPC between Arista Driver and EOS. Tests all methods used to send commands between Arista L3 Driver and EOS to program routing functions using multiple VRFs. Note that the configuration commands are different when VRFs are used. """ def setUp(self): super(AristaL3DriverTestCasesUsingVRFs, self).setUp() setup_arista_config('value', vrf=True) self.drv = arista.AristaL3Driver() self.drv._servers = [] self.drv._servers.append(mock.MagicMock()) def test_no_exception_on_correct_configuration(self): self.assertIsNotNone(self.drv) def test_create_router_on_eos(self): max_vrfs = 5 routers = ['testRouter-%s' % n for n in range(max_vrfs)] domains = ['10%s' % n for n in range(max_vrfs)] for (r, d) in zip(routers, domains): self.drv.create_router_on_eos(r, d, self.drv._servers[0]) cmds = ['enable', 'configure', 'vrf definition %s' % r, 'rd %(rd)s:%(rd)s' % {'rd': d}, 'exit', 'exit'] self.drv._servers[0].execute.assert_called_with(cmds) def test_delete_router_from_eos(self): max_vrfs = 5 routers = ['testRouter-%s' % n for n in range(max_vrfs)] for r in routers: self.drv.delete_router_from_eos(r, self.drv._servers[0]) cmds = ['enable', 'configure', 'no vrf definition %s' % r, 'exit'] self.drv._servers[0].execute.assert_called_with(cmds) def test_add_interface_to_router_on_eos(self): router_name = 'test-router-1' segment_id = '123' router_ip = '10.10.10.10' gw_ip = '10.10.10.1' mask = '255.255.255.0' self.drv.add_interface_to_router(segment_id, router_name, gw_ip, router_ip, mask, self.drv._servers[0]) cmds = ['enable', 'configure', 'ip routing vrf %s' % router_name, 'vlan %s' % segment_id, 'exit', 'interface vlan %s' % segment_id, 'vrf forwarding %s' % router_name, 'ip address %s/%s' % (gw_ip, mask), 'exit'] self.drv._servers[0].execute.assert_called_once_with(cmds) def test_delete_interface_from_router_on_eos(self): router_name = 'test-router-1' segment_id = '123' self.drv.delete_interface_from_router(segment_id, router_name, self.drv._servers[0]) cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id, 'exit'] self.drv._servers[0].execute.assert_called_once_with(cmds) class AristaL3DriverTestCasesMlagConfig(base.BaseTestCase): """Test cases to test the RPC between Arista Driver and EOS. Tests all methods used to send commands between Arista L3 Driver and EOS to program routing functions in Default VRF using MLAG configuration. MLAG configuration means that the commands will be sent to both primary and secondary Arista Switches. """ def setUp(self): super(AristaL3DriverTestCasesMlagConfig, self).setUp() setup_arista_config('value', mlag=True) self.drv = arista.AristaL3Driver() self.drv._servers = [] self.drv._servers.append(mock.MagicMock()) self.drv._servers.append(mock.MagicMock()) def test_no_exception_on_correct_configuration(self): self.assertIsNotNone(self.drv) def test_create_router_on_eos(self): router_name = 'test-router-1' route_domain = '123:123' router_mac = '00:11:22:33:44:55' for s in self.drv._servers: self.drv.create_router_on_eos(router_name, route_domain, s) cmds = ['enable', 'configure', 'ip virtual-router mac-address %s' % router_mac, 'exit'] s.execute.assert_called_with(cmds) def test_delete_router_from_eos(self): router_name = 'test-router-1' for s in self.drv._servers: self.drv.delete_router_from_eos(router_name, s) cmds = ['enable', 'configure', 'exit'] s.execute.assert_called_once_with(cmds) def test_add_interface_to_router_on_eos(self): router_name = 'test-router-1' segment_id = '123' router_ip = '10.10.10.10' gw_ip = '10.10.10.1' mask = '255.255.255.0' for s in self.drv._servers: self.drv.add_interface_to_router(segment_id, router_name, gw_ip, router_ip, mask, s) cmds = ['enable', 'configure', 'ip routing', 'vlan %s' % segment_id, 'exit', 'interface vlan %s' % segment_id, 'ip address %s' % router_ip, 'ip virtual-router address %s' % gw_ip, 'exit'] s.execute.assert_called_once_with(cmds) def test_delete_interface_from_router_on_eos(self): router_name = 'test-router-1' segment_id = '123' for s in self.drv._servers: self.drv.delete_interface_from_router(segment_id, router_name, s) cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id, 'exit'] s.execute.assert_called_once_with(cmds) class AristaL3DriverTestCases_v4(base.BaseTestCase): """Test cases to test the RPC between Arista Driver and EOS. Tests all methods used to send commands between Arista L3 Driver and EOS to program routing functions in Default VRF using IPv4. """ def setUp(self): super(AristaL3DriverTestCases_v4, self).setUp() setup_arista_config('value') self.drv = arista.AristaL3Driver() self.drv._servers = [] self.drv._servers.append(mock.MagicMock()) def test_no_exception_on_correct_configuration(self): self.assertIsNotNone(self.drv) def test_add_v4_interface_to_router(self): gateway_ip = '10.10.10.1' cidrs = ['10.10.10.0/24', '10.11.11.0/24'] # Add couple of IPv4 subnets to router for cidr in cidrs: router = {'name': 'test-router-1', 'tenant_id': 'ten-a', 'seg_id': '123', 'cidr': "%s" % cidr, 'gip': "%s" % gateway_ip, 'ip_version': 4} self.assertFalse(self.drv.add_router_interface(None, router)) def test_delete_v4_interface_from_router(self): gateway_ip = '10.10.10.1' cidrs = ['10.10.10.0/24', '10.11.11.0/24'] # remove couple of IPv4 subnets from router for cidr in cidrs: router = {'name': 'test-router-1', 'tenant_id': 'ten-a', 'seg_id': '123', 'cidr': "%s" % cidr, 'gip': "%s" % gateway_ip, 'ip_version': 4} self.assertFalse(self.drv.remove_router_interface(None, router)) class AristaL3DriverTestCases_v6(base.BaseTestCase): """Test cases to test the RPC between Arista Driver and EOS. Tests all methods used to send commands between Arista L3 Driver and EOS to program routing functions in Default VRF using IPv6. """ def setUp(self): super(AristaL3DriverTestCases_v6, self).setUp() setup_arista_config('value') self.drv = arista.AristaL3Driver() self.drv._servers = [] self.drv._servers.append(mock.MagicMock()) def test_no_exception_on_correct_configuration(self): self.assertIsNotNone(self.drv) def test_add_v6_interface_to_router(self): gateway_ip = '3FFE::1' cidrs = ['3FFE::/16', '2001::/16'] # Add couple of IPv6 subnets to router for cidr in cidrs: router = {'name': 'test-router-1', 'tenant_id': 'ten-a', 'seg_id': '123', 'cidr': "%s" % cidr, 'gip': "%s" % gateway_ip, 'ip_version': 6} self.assertFalse(self.drv.add_router_interface(None, router)) def test_delete_v6_interface_from_router(self): gateway_ip = '3FFE::1' cidrs = ['3FFE::/16', '2001::/16'] # remove couple of IPv6 subnets from router for cidr in cidrs: router = {'name': 'test-router-1', 'tenant_id': 'ten-a', 'seg_id': '123', 'cidr': "%s" % cidr, 'gip': "%s" % gateway_ip, 'ip_version': 6} self.assertFalse(self.drv.remove_router_interface(None, router)) class AristaL3DriverTestCases_MLAG_v6(base.BaseTestCase): """Test cases to test the RPC between Arista Driver and EOS. Tests all methods used to send commands between Arista L3 Driver and EOS to program routing functions in Default VRF on MLAG'ed switches using IPv6. """ def setUp(self): super(AristaL3DriverTestCases_MLAG_v6, self).setUp() setup_arista_config('value', mlag=True) self.drv = arista.AristaL3Driver() self.drv._servers = [] self.drv._servers.append(mock.MagicMock()) self.drv._servers.append(mock.MagicMock()) def test_no_exception_on_correct_configuration(self): self.assertIsNotNone(self.drv) def test_add_v6_interface_to_router(self): gateway_ip = '3FFE::1' cidrs = ['3FFE::/16', '2001::/16'] # Add couple of IPv6 subnets to router for cidr in cidrs: router = {'name': 'test-router-1', 'tenant_id': 'ten-a', 'seg_id': '123', 'cidr': "%s" % cidr, 'gip': "%s" % gateway_ip, 'ip_version': 6} self.assertFalse(self.drv.add_router_interface(None, router)) def test_delete_v6_interface_from_router(self): gateway_ip = '3FFE::1' cidrs = ['3FFE::/16', '2001::/16'] # remove couple of IPv6 subnets from router for cidr in cidrs: router = {'name': 'test-router-1', 'tenant_id': 'ten-a', 'seg_id': '123', 'cidr': "%s" % cidr, 'gip': "%s" % gateway_ip, 'ip_version': 6} self.assertFalse(self.drv.remove_router_interface(None, router)) class AristaL3DriverTestCasesMlag_one_switch_failed(base.BaseTestCase): """Test cases to test with non redundant hardare in redundancy mode. In the following test cases, the driver is configured in MLAG (redundancy mode) but, one of the switches is mocked to throw exceptoin to mimic failure of the switch. Ensure that the the operation does not fail when one of the switches fails. """ def setUp(self): super(AristaL3DriverTestCasesMlag_one_switch_failed, self).setUp() setup_arista_config('value', mlag=True) self.drv = arista.AristaL3Driver() self.drv._servers = [] self.drv._servers.append(mock.MagicMock()) self.drv._servers.append(mock.MagicMock()) def test_create_router_when_one_switch_fails(self): router = {} router['name'] = 'test-router-1' tenant = '123' # Make one of the switches throw an exception - i.e. fail self.drv._servers[0].execute = mock.Mock(side_effect=Exception) with mock.patch.object(arista.LOG, 'exception') as log_exception: self.drv.create_router(None, tenant, router) log_exception.assert_called_once_with(mock.ANY) def test_delete_router_when_one_switch_fails(self): router = {} router['name'] = 'test-router-1' tenant = '123' router_id = '345' # Make one of the switches throw an exception - i.e. fail self.drv._servers[1].execute = mock.Mock(side_effect=Exception) with mock.patch.object(arista.LOG, 'exception') as log_exception: self.drv.delete_router(None, tenant, router_id, router) log_exception.assert_called_once_with(mock.ANY) def test_add_router_interface_when_one_switch_fails(self): router = {} router['name'] = 'test-router-1' router['tenant_id'] = 'ten-1' router['seg_id'] = '100' router['ip_version'] = 4 router['cidr'] = '10.10.10.0/24' router['gip'] = '10.10.10.1' # Make one of the switches throw an exception - i.e. fail self.drv._servers[1].execute = mock.Mock(side_effect=Exception) with mock.patch.object(arista.LOG, 'exception') as log_exception: self.drv.add_router_interface(None, router) log_exception.assert_called_once_with(mock.ANY) def test_remove_router_interface_when_one_switch_fails(self): router = {} router['name'] = 'test-router-1' router['tenant_id'] = 'ten-1' router['seg_id'] = '100' router['ip_version'] = 4 router['cidr'] = '10.10.10.0/24' router['gip'] = '10.10.10.1' # Make one of the switches throw an exception - i.e. fail self.drv._servers[0].execute = mock.Mock(side_effect=Exception) with mock.patch.object(arista.LOG, 'exception') as log_exception: self.drv.remove_router_interface(None, router) log_exception.assert_called_once_with(mock.ANY) networking-arista-2017.2.2/networking_arista/tests/unit/ml2/000077500000000000000000000000001323242307100237575ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/ml2/__init__.py000066400000000000000000000012671323242307100260760ustar00rootroot00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg cfg.CONF.use_stderr = False networking-arista-2017.2.2/networking_arista/tests/unit/ml2/drivers/000077500000000000000000000000001323242307100254355ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/ml2/drivers/test_arista_type_driver.py000066400000000000000000000075641323242307100327610ustar00rootroot00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import mock from mock import patch from neutron_lib.db import api as db_api from oslo_config import cfg from neutron.db.models.plugins.ml2 import vlanallocation from neutron.tests.unit import testlib_api from networking_arista.ml2.drivers.driver_helpers import VlanSyncService from networking_arista.ml2.drivers.type_arista_vlan import AristaVlanTypeDriver import networking_arista.tests.unit.ml2.utils as utils EAPI_SEND_FUNC = ('networking_arista.ml2.rpc.arista_eapi.AristaRPCWrapperEapi' '._send_eapi_req') class AristaTypeDriverTest(testlib_api.SqlTestCase): def setUp(self): super(AristaTypeDriverTest, self).setUp() utils.setup_arista_wrapper_config(cfg) @patch(EAPI_SEND_FUNC) def test_initialize_type_driver(self, mock_send_eapi_req): type_driver = AristaVlanTypeDriver() type_driver.sync_service._force_sync = False type_driver.sync_service._vlan_assignment_uuid = {'uuid': 1} type_driver.sync_service._rpc = mock.MagicMock() rpc = type_driver.sync_service._rpc rpc.get_vlan_assignment_uuid.return_value = {'uuid': 1} type_driver.initialize() cmds = ['show openstack agent uuid', 'show openstack instances', 'show openstack agent uuid', 'show openstack features'] calls = [mock.call(cmds=[cmd], commands_to_log=[cmd]) for cmd in cmds] mock_send_eapi_req.assert_has_calls(calls) type_driver.timer.cancel() class VlanSyncServiceTest(testlib_api.SqlTestCase): """Test that VLANs are synchronized between EOS and Neutron.""" def _ensure_in_db(self, assigned, allocated, available): session = db_api.get_reader_session() with session.begin(): vlans = session.query(vlanallocation.VlanAllocation).all() for vlan in vlans: self.assertIn(vlan.vlan_id, assigned) if vlan.vlan_id in available: self.assertFalse(vlan.allocated) elif vlan.vlan_id in allocated: self.assertTrue(vlan.allocated) def test_synchronization_test(self): rpc = mock.MagicMock() rpc.get_vlan_allocation.return_value = { 'assignedVlans': '1-10,21-30', 'availableVlans': '1-5,21,23,25,27,29', 'allocatedVlans': '6-10,22,24,26,28,30' } assigned = list(itertools.chain(range(1, 11), range(21, 31))) available = [1, 2, 3, 4, 5, 21, 23, 25, 27, 29] allocated = list(set(assigned) - set(available)) sync_service = VlanSyncService(rpc) sync_service.synchronize() self._ensure_in_db(assigned, allocated, available) # Call synchronize again which returns different data rpc.get_vlan_allocation.return_value = { 'assignedVlans': '51-60,71-80', 'availableVlans': '51-55,71,73,75,77,79', 'allocatedVlans': '56-60,72,74,76,78,80' } assigned = list(itertools.chain(range(51, 61), range(71, 81))) available = [51, 52, 53, 54, 55, 71, 73, 75, 77, 79] allocated = list(set(assigned) - set(available)) sync_service = VlanSyncService(rpc) sync_service.synchronize() self._ensure_in_db(assigned, allocated, available) networking-arista-2017.2.2/networking_arista/tests/unit/ml2/rpc/000077500000000000000000000000001323242307100245435ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/ml2/rpc/__init__.py000066400000000000000000000000001323242307100266420ustar00rootroot00000000000000networking-arista-2017.2.2/networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py000066400000000000000000001276321323242307100330540ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from mock import patch from neutron_lib import constants as n_const from oslo_config import cfg import six from neutron.tests import base from neutron.tests.unit import testlib_api from networking_arista.common import constants from networking_arista.common import db_lib from networking_arista.common import exceptions as arista_exc from networking_arista.ml2.rpc import arista_eapi from networking_arista.tests.unit.ml2.test_arista_mechanism_driver import \ FakePortBindingLevel import networking_arista.tests.unit.ml2.utils as utils EAPI_SEND_FUNC = ('networking_arista.ml2.rpc.arista_eapi.AristaRPCWrapperEapi' '._send_eapi_req') EAPI_DB_LIB_MODULE = 'networking_arista.ml2.rpc.arista_eapi.db_lib' def setup_valid_config(): utils.setup_arista_wrapper_config(cfg) class PositiveRPCWrapperValidConfigTestCase(testlib_api.SqlTestCase): """Test cases to test the RPC between Arista Driver and EOS. Tests all methods used to send commands between Arista Driver and EOS """ def setUp(self): super(PositiveRPCWrapperValidConfigTestCase, self).setUp() setup_valid_config() ndb = db_lib.NeutronNets() self.drv = arista_eapi.AristaRPCWrapperEapi(ndb) self.drv._server_ip = "10.11.12.13" self.region = 'RegionOne' def _get_exit_mode_cmds(self, modes): return ['exit'] * len(modes) def _verify_send_eapi_request_calls(self, mock_send_eapi_req, cmds, commands_to_log=None): calls = [] calls.extend( mock.call(cmds=cmd, commands_to_log=log_cmd) for cmd, log_cmd in six.moves.zip(cmds, commands_to_log or cmds)) mock_send_eapi_req.assert_has_calls(calls) def test_no_exception_on_correct_configuration(self): self.assertIsNotNone(self.drv) @patch(EAPI_SEND_FUNC) def test_plug_host_into_network(self, mock_send_eapi_req): tenant_id = 'ten-1' vm_id = 'vm-1' port_id = 123 network_id = 'net-id' host = 'host' port_name = '123-port' segment_id = 'segment_id_1' segments = [{'network_type': 'vlan', 'physical_network': 'default', 'segmentation_id': 1234, 'id': segment_id}] self.drv.plug_host_into_network(vm_id, host, port_id, network_id, tenant_id, segments, port_name) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'vm id vm-1 hostid host', 'port id 123 name "123-port" network-id net-id', ] for level, segment in enumerate(segments): cmd2.append('segment level %s id %s' % (level, segment['id'])) self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_plug_dhcp_port_into_network(self, mock_send_eapi_req): tenant_id = 'ten-1' vm_id = 'vm-1' port_id = 123 network_id = 'net-id' host = 'host' port_name = '123-port' segments = [] self.drv.plug_port_into_network(vm_id, host, port_id, network_id, tenant_id, port_name, n_const.DEVICE_OWNER_DHCP, None, None, None, segments) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'network id net-id', 'dhcp id vm-1 hostid host port-id 123 name "123-port"', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_plug_baremetal_into_network(self, mock_send_eapi_req): tenant_id = 'ten-1' network_id = 'net-id-1' bm_id = 'bm-1' port_id = 'p1' host = 'host' port_name = 'name_p1' device_owner = 'compute:None' segments = [{'segmentation_id': 1001, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] switch_bindings = {'local_link_information': [ {'port_id': 'Eth1', 'switch_id': 'switch-id-1', 'switch_info': 'switch-1'}]} bindings = switch_bindings['local_link_information'] self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True) self.drv.plug_baremetal_into_network(bm_id, host, port_id, network_id, tenant_id, segments, port_name, device_owner, None, None, 'baremetal', bindings) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'instance id bm-1 hostid host type baremetal', 'port id p1 name "name_p1" network-id net-id-1 ' 'type native switch-id switch-id-1 switchport Eth1', ] for level, segment in enumerate(segments): cmd2.append('segment level %s id %s' % (level, segment['id'])) cmd2.append('exit') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_unplug_baremetal_from_network(self, mock_send_eapi_req): tenant_id = 'ten-1' network_id = 'net-id-1' bm_id = 'bm-1' port_id = 111 host = 'host' switch_bindings = {'local_link_information': [ {'port_id': 'Eth1', 'switch_id': 'switch-id-1', 'switch_info': 'switch-1'}]} bindings = switch_bindings['local_link_information'] self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True) self.drv.unplug_baremetal_from_network(bm_id, host, port_id, network_id, tenant_id, None, 'baremetal', bindings) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'instance id bm-1 hostid host type baremetal', 'no port id 111', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_unplug_host_from_network(self, mock_send_eapi_req): tenant_id = 'ten-1' vm_id = 'vm-1' port_id = 123 network_id = 'net-id' host = 'host' self.drv.unplug_host_from_network(vm_id, host, port_id, network_id, tenant_id) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'vm id vm-1 hostid host', 'no port id 123', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_unplug_dhcp_port_from_network(self, mock_send_eapi_req): tenant_id = 'ten-1' vm_id = 'vm-1' port_id = 123 network_id = 'net-id' host = 'host' self.drv.unplug_dhcp_port_from_network(vm_id, host, port_id, network_id, tenant_id) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'network id net-id', 'no dhcp id vm-1 port-id 123', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_create_network(self, mock_send_eapi_req): tenant_id = 'ten-1' self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} network = { 'network_id': 'net-id', 'network_name': 'net-name', 'segments': [{'segmentation_id': 123, 'physical_network': 'default', 'network_type': 'vlan', 'id': 'segment_id_1'}], 'shared': False, } self.drv.create_network(tenant_id, network) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'network id net-id name "net-name"', ] for seg in network['segments']: is_dynamic = seg.get('is_dynamic', False) cmd2.append('segment %s type %s id %d %s' % (seg['id'], seg['network_type'], seg['segmentation_id'], 'dynamic' if is_dynamic else 'static')) cmd2.append('no shared') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_create_shared_network(self, mock_send_eapi_req): tenant_id = 'ten-1' segment_id = 'abcd-cccc' segmentation_id = 123 network_type = 'vlan' segments = [{'segmentation_id': segmentation_id, 'id': segment_id, 'network_type': network_type}] network = { 'network_id': 'net-id', 'network_name': 'net-name', 'segments': segments, 'shared': True} self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} self.drv.create_network(tenant_id, network) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'network id net-id name "net-name"', 'segment %s type %s id %d %s' % (segment_id, network_type, segmentation_id, 'static'), 'shared', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_create_network_bulk(self, mock_send_eapi_req): tenant_id = 'ten-2' num_networks = 10 network_type = 'vlan' segment_id = 'abcd-eeee-%s' self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} networks = [{ 'network_id': 'net-id-%d' % net_id, 'network_name': 'net-name-%d' % net_id, 'segments': [{'segmentation_id': net_id, 'network_type': 'vlan', 'id': segment_id % net_id}], 'shared': True, } for net_id in range(1, num_networks) ] self.drv.create_network_bulk(tenant_id, networks) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-2'] for net_id in range(1, num_networks): cmd2.append('network id net-id-%d name "net-name-%d"' % (net_id, net_id)) cmd2.append('segment %s type %s id %d %s' % ( segment_id % net_id, network_type, net_id, 'static')) cmd2.append('shared') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_network(self, mock_send_eapi_req): tenant_id = 'ten-1' network_id = 'net-id' segments = [{'segmentation_id': 101, 'physical_network': 'default', 'id': 'segment_id_1', 'network_type': 'vlan'}] self.drv.delete_network(tenant_id, network_id, segments) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'network id net-id', 'no segment segment_id_1', ] cmd3 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'no network id net-id', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2, cmd1, cmd3]) @patch(EAPI_SEND_FUNC) def test_delete_network_bulk(self, mock_send_eapi_req): tenant_id = 'ten-2' num_networks = 10 networks = ['net-id-%d' % net_id for net_id in range(1, num_networks)] self.drv.delete_network_bulk(tenant_id, networks) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-2'] for net_id in range(1, num_networks): cmd2.append('no network id net-id-%d' % net_id) self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_vm(self, mock_send_eapi_req): tenant_id = 'ten-1' vm_id = 'vm-id' self.drv.delete_vm(tenant_id, vm_id) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'no vm id vm-id', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_vm_bulk(self, mock_send_eapi_req): tenant_id = 'ten-2' num_vms = 10 vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms)] self.drv.delete_vm_bulk(tenant_id, vm_ids) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-2'] for vm_id in range(1, num_vms): cmd2.append('no vm id vm-id-%d' % vm_id) self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) @patch(EAPI_DB_LIB_MODULE) def test_hpb_create_port_bulk(self, mock_db_lib, mock_send_eapi_req): tenant_id = 'ten-3' num_devices = 10 num_ports_per_device = 2 device_owners = [n_const.DEVICE_OWNER_DHCP, 'compute', n_const.DEVICE_OWNER_DVR_INTERFACE] port_list = [] devices = {} for device_id in range(1, num_devices): dev_id = 'dev-id-%d' % device_id devices[dev_id] = {'vmId': dev_id, 'baremetal_instance': False, 'ports': []} for port_id in range(1, num_ports_per_device): pid = 'port-id-%d-%d' % (device_id, port_id) port = { 'device_id': 'dev-id-%d' % device_id, 'hosts': ['host_%d' % device_id], 'portId': pid, 'device_owner': device_owners[(device_id + port_id) % 3], 'network_id': 'network-id-%d' % port_id, 'name': 'port-%d-%d' % (device_id, port_id), 'tenant_id': tenant_id, 'segments': [FakePortBindingLevel(pid, 0, 'vendor-0', 5000 + port_id), FakePortBindingLevel(pid, 1, 'vendor-1', 500 + port_id)] } port_list.append(port) devices[dev_id]['ports'].append(port) create_ports = {} port_profiles = {} for port in port_list: create_ports.update(utils.port_dict_representation(port)) port_profiles[port['portId']] = {'vnic_type': 'normal'} self.drv.cli_commands[constants.CMD_INSTANCE] = 'instance' self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} mock_db_lib.get_port_binding_level.side_effect = ( lambda x: create_ports.get(x['port_id']).get('segments')) self.drv.create_instance_bulk(tenant_id, create_ports, devices, port_profiles=port_profiles) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-3'] for device in devices.values(): for v_port in device['ports']: port_id = v_port['portId'] port = create_ports[port_id] host = v_port['hosts'][0] device_owner = port['device_owner'] port_name = port['name'] network_id = port['network_id'] device_id = port['device_id'] if device_owner == 'network:dhcp': cmd2.append('network id %s' % network_id) cmd2.append('dhcp id %s hostid %s port-id %s name "%s"' % ( device_id, host, port_id, port_name)) elif device_owner == 'compute': cmd2.append('vm id %s hostid %s' % (device_id, host)) cmd2.append('port id %s name "%s" network-id %s' % ( port_id, port_name, network_id)) elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: cmd2.append('instance id %s type router' % device_id) cmd2.append('port id %s network-id %s hostid %s' % ( port_id, network_id, host)) if self.drv.hpb_supported(): cmd2.extend('segment level %d id %s' % ( segment.level, segment.segment_id) for segment in v_port.get('segments')) self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_create_port_bulk(self, mock_send_eapi_req): tenant_id = 'ten-3' num_devices = 10 num_ports_per_device = 2 device_count = 0 devices = {} for device_id in range(1, num_devices): device_count += 1 dev_id = 'dev-id-%d' % device_id devices[dev_id] = {'vmId': dev_id, 'baremetal_instance': False, 'ports': [] } for port_id in range(1, num_ports_per_device): port_id = 'port-id-%d-%d' % (device_id, port_id) port = { 'device_id': 'dev-id-%d' % device_id, 'hosts': ['host_%d' % (device_count)], 'portId': port_id } devices[dev_id]['ports'].append(port) device_owners = [n_const.DEVICE_OWNER_DHCP, 'compute', n_const.DEVICE_OWNER_DVR_INTERFACE] port_list = [] net_count = 1 for device_id in range(1, num_devices): for port_id in range(1, num_ports_per_device): port = { 'portId': 'port-id-%d-%d' % (device_id, port_id), 'device_id': 'dev-id-%d' % device_id, 'device_owner': device_owners[(device_id + port_id) % 3], 'network_id': 'network-id-%d' % net_count, 'name': 'port-%d-%d' % (device_id, port_id), 'tenant_id': tenant_id } port_list.append(port) net_count += 1 create_ports = {} port_profiles = {} for port in port_list: create_ports.update(utils.port_dict_representation(port)) port_profiles[port['portId']] = {'vnic_type': 'normal'} self.drv.cli_commands[constants.CMD_INSTANCE] = 'instance' self.drv.create_instance_bulk(tenant_id, create_ports, devices, port_profiles=port_profiles) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-3'] for device in devices.values(): for v_port in device['ports']: port_id = v_port['portId'] port = create_ports[port_id] host = v_port['hosts'][0] device_owner = port['device_owner'] port_name = port['name'] network_id = port['network_id'] device_id = port['device_id'] if device_owner == 'network:dhcp': cmd2.append('network id %s' % network_id) cmd2.append('dhcp id %s hostid %s port-id %s name "%s"' % ( device_id, host, port_id, port_name)) elif device_owner == 'compute': cmd2.append('vm id %s hostid %s' % (device_id, host)) cmd2.append('port id %s name "%s" network-id %s' % ( port_id, port_name, network_id)) elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: cmd2.append('instance id %s type router' % device_id) cmd2.append('port id %s network-id %s hostid %s' % ( port_id, network_id, host)) net_count += 1 self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_tenant(self, mock_send_eapi_req): tenant_id = 'ten-1' self.drv.delete_tenant(tenant_id) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'no tenant ten-1', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_tenant_bulk(self, mock_send_eapi_req): num_tenants = 10 tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants)] self.drv.delete_tenant_bulk(tenant_list) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne'] for ten_id in range(1, num_tenants): cmd2.append('no tenant ten-%d' % ten_id) self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) def test_get_network_info_returns_none_when_no_such_net(self): expected = [] self.drv.get_tenants = mock.MagicMock() self.drv.get_tenants.return_value = [] net_info = self.drv.get_tenants() self.drv.get_tenants.assert_called_once_with() self.assertEqual(net_info, expected, ('Network info must be "None"' 'for unknown network')) def test_get_network_info_returns_info_for_available_net(self): valid_network_id = '12345' valid_net_info = {'network_id': valid_network_id, 'some_info': 'net info'} known_nets = valid_net_info self.drv.get_tenants = mock.MagicMock() self.drv.get_tenants.return_value = known_nets net_info = self.drv.get_tenants() self.assertEqual(net_info, valid_net_info, ('Must return network info for a valid net')) @patch(EAPI_SEND_FUNC) def test_check_supported_features(self, mock_send_eapi_req): self.drv._get_random_name = mock.MagicMock() self.drv._get_random_name.return_value = 'RegionOne' self.drv.check_supported_features() get_eos_master_cmd = ['show openstack agent uuid'] instance_command = ['show openstack instances'] cmds = [get_eos_master_cmd, instance_command] calls = [] calls.extend(mock.call(cmds=cmd, commands_to_log=cmd) for cmd in cmds) mock_send_eapi_req.assert_has_calls(calls) @patch(EAPI_SEND_FUNC) def test_register_with_eos(self, mock_send_eapi_req): self.drv.register_with_eos() cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region %s' % self.region, 'sync interval %d' % cfg.CONF.ml2_arista.sync_interval, ] self._verify_send_eapi_request_calls( mock_send_eapi_req, [cmd1, cmd2], commands_to_log=[cmd1, cmd2]) def _enable_sync_cmds(self): self.drv.cli_commands[ constants.CMD_REGION_SYNC] = 'region RegionOne sync' self.drv.cli_commands[constants.CMD_SYNC_HEARTBEAT] = 'sync heartbeat' self.drv.cli_commands['baremetal'] = '' @patch(EAPI_SEND_FUNC) def test_create_network_bulk_during_sync(self, mock_send_eapi_req): self._enable_sync_cmds() self.drv.cli_commands['features'] = {'hierarchical-port-binding': 1} tenant_id = 'ten-10' num_networks = 101 segments = [{'segmentation_id': 101, 'physical_network': 'default', 'id': 'segment_id_1', 'network_type': 'vlan'}] networks = [{ 'network_id': 'net-id-%d' % net_id, 'network_name': 'net-name-%d' % net_id, 'segments': segments, 'shared': True, } for net_id in range(1, num_networks + 1) ] self.drv.create_network_bulk(tenant_id, networks, sync=True) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne sync', 'tenant ten-10'] # Send 100 create network commands for net_id in range(1, 101): cmd2.append('network id net-id-%d name "net-name-%d"' % (net_id, net_id)) for seg in segments: is_dynamic = seg.get('is_dynamic', False) cmd2.append('segment %s type %s id %d %s' % (seg['id'], seg['network_type'], seg['segmentation_id'], 'dynamic' if is_dynamic else 'static')) cmd2.append('shared') # Send heartbeat cmd2.append('sync heartbeat') # Send the remaining network cmd2.append('network id net-id-101 name "net-name-101"') for seg in segments: is_dynamic = seg.get('is_dynamic', False) cmd2.append('segment %s type %s id %d %s' % (seg['id'], seg['network_type'], seg['segmentation_id'], 'dynamic' if is_dynamic else 'static')) cmd2.append('shared') # Send the final heartbeat cmd2.append('sync heartbeat') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_network_bulk_during_sync(self, mock_send_eapi_req): self._enable_sync_cmds() tenant_id = 'ten-10' num_networks = 101 networks = ['nid-%d' % net_id for net_id in range(1, num_networks + 1)] self.drv.delete_network_bulk(tenant_id, networks, sync=True) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne sync', 'tenant ten-10'] # Send 100 create network commands for net_id in range(1, 101): cmd2.append('no network id nid-%d' % (net_id)) # Send heartbeat cmd2.append('sync heartbeat') # Send the remaining network cmd2.append('no network id nid-101') # Send the final heartbeat cmd2.append('sync heartbeat') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_vm_bulk_during_sync(self, mock_send_eapi_req): self._enable_sync_cmds() tenant_id = 'ten-2' num_vms = 101 vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms + 1)] self.drv.delete_vm_bulk(tenant_id, vm_ids, sync=True) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne sync', 'tenant ten-2'] for vm_id in range(1, 101): cmd2.append('no vm id vm-id-%d' % vm_id) # Send heartbeat cmd2.append('sync heartbeat') # Send the remaining vm cmd2.append('no vm id vm-id-101') # Send the final heartbeat cmd2.append('sync heartbeat') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_create_port_bulk_during_sync(self, mock_send_eapi_req): self._enable_sync_cmds() tenant_id = 'ten-3' num_devices = 101 num_ports_per_device = 2 device_count = 0 devices = {} for device_id in range(1, num_devices): device_count += 1 dev_id = 'dev-id-%d' % device_id devices[dev_id] = {'vmId': dev_id, 'baremetal_instance': False, 'ports': [] } for port_id in range(1, num_ports_per_device + 1): port_id = 'port-id-%d-%d' % (device_id, port_id) port = { 'device_id': 'dev-id-%d' % device_id, 'hosts': ['host_%d' % (device_count)], 'portId': port_id } devices[dev_id]['ports'].append(port) device_owners = [n_const.DEVICE_OWNER_DHCP, 'compute', n_const.DEVICE_OWNER_DVR_INTERFACE] port_list = [] net_count = 1 for device_id in range(1, num_devices): for port_id in range(1, num_ports_per_device + 1): port = { 'portId': 'port-id-%d-%d' % (device_id, port_id), 'device_id': 'dev-id-%d' % device_id, 'device_owner': device_owners[(device_id + port_id) % 3], 'network_id': 'network-id-%d' % net_count, 'name': 'port-%d-%d' % (device_id, port_id), 'tenant_id': tenant_id } port_list.append(port) net_count += 1 create_ports = {} port_profiles = {} for port in port_list: create_ports.update(utils.port_dict_representation(port)) port_profiles[port['portId']] = {'vnic_type': 'normal'} self.drv.cli_commands[constants.CMD_INSTANCE] = 'instance' self.drv.create_instance_bulk(tenant_id, create_ports, devices, port_profiles=port_profiles, sync=True) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne sync', 'tenant ten-3'] for count, device in enumerate(devices.values(), 1): for v_port in device['ports']: port_id = v_port['portId'] port = create_ports[port_id] host = v_port['hosts'][0] vm_id = device['vmId'] port_name = port['name'] network_id = port['network_id'] device_owner = port['device_owner'] device_id = port['device_id'] if device_owner == n_const.DEVICE_OWNER_DHCP: cmd2.append('network id %s' % network_id) cmd2.append('dhcp id %s hostid %s port-id %s name "%s"' % ( vm_id, host, port_id, port_name)) elif device_owner == 'compute': cmd2.append('vm id %s hostid %s' % (vm_id, host)) cmd2.append('port id %s name "%s" network-id %s' % ( port_id, port_name, network_id)) elif device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE: cmd2.append('instance id %s type router' % device_id) cmd2.append('port id %s network-id %s hostid %s' % ( port_id, network_id, host)) if count == (num_devices - 1): # Send heartbeat cmd2.append('sync heartbeat') # Send the final heartbeat cmd2.append('sync heartbeat') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_delete_tenant_bulk_during_sync(self, mock_send_eapi_req): self._enable_sync_cmds() num_tenants = 101 tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants + 1)] self.drv.delete_tenant_bulk(tenant_list, sync=True) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne sync'] for ten_id in range(1, num_tenants + 1): cmd2.append('no tenant ten-%d' % ten_id) cmd2.append('sync heartbeat') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) class AristaRPCWrapperInvalidConfigTestCase(base.BaseTestCase): """Negative test cases to test the Arista Driver configuration.""" def setUp(self): super(AristaRPCWrapperInvalidConfigTestCase, self).setUp() self.setup_invalid_config() # Invalid config, required options not set def setup_invalid_config(self): utils.setup_arista_wrapper_config(cfg, host='', user='') def test_raises_exception_on_wrong_configuration(self): ndb = db_lib.NeutronNets() self.assertRaises(arista_exc.AristaConfigError, arista_eapi.AristaRPCWrapperEapi, ndb) class NegativeRPCWrapperTestCase(testlib_api.SqlTestCase): """Negative test cases to test the RPC between Arista Driver and EOS.""" def setUp(self): super(NegativeRPCWrapperTestCase, self).setUp() setup_valid_config() def test_exception_is_raised_on_json_server_error(self): ndb = db_lib.NeutronNets() drv = arista_eapi.AristaRPCWrapperEapi(ndb) drv._send_api_request = mock.MagicMock( side_effect=Exception('server error') ) with mock.patch.object(arista_eapi.LOG, 'error') as log_err: self.assertRaises(arista_exc.AristaRpcError, drv.get_tenants) log_err.assert_called_once_with(mock.ANY) class RPCWrapperEapiValidConfigTrunkTestCase(testlib_api.SqlTestCase): """Test cases to test plug trunk port into network.""" def setUp(self): super(RPCWrapperEapiValidConfigTrunkTestCase, self).setUp() setup_valid_config() ndb = mock.MagicMock() self.drv = arista_eapi.AristaRPCWrapperEapi(ndb) self.drv._server_ip = "10.11.12.13" self.region = 'RegionOne' arista_eapi.db_lib = mock.MagicMock() @patch(EAPI_SEND_FUNC) def test_plug_host_into_network(self, mock_send_eapi_req): tenant_id = 'ten-1' network_id = 'net-id-1' vm_id = 'vm-1' port_id = 111 host = 'host' port_name = '111-port' sub_segment_id = 'sub_segment_id_1' sub_segmentation_id = 1002 sub_network_id = 'subnet-id' subport_id = 222 segment_id = 'segment_id_1' segments = [{'network_type': 'vlan', 'physical_network': 'default', 'segmentation_id': 1234, 'id': segment_id}] binding_level = FakePortBindingLevel(subport_id, 0, 'vendor-1', sub_segment_id) subport_segments = [binding_level] trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': subport_id, 'segmentation_id': sub_segmentation_id, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} self.drv._ndb.get_network_id_from_port_id.return_value = sub_network_id arista_eapi.db_lib.get_port_binding_level.return_value = \ subport_segments self.drv.plug_host_into_network(vm_id, host, port_id, network_id, tenant_id, segments, port_name, trunk_details=trunk_details) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-1', 'vm id vm-1 hostid host', 'port id 111 name "111-port" network-id net-id-1', ] for level, segment in enumerate(segments): cmd2.append('segment level %s id %s' % (level, segment['id'])) cmd2.append('port id 222 network-id subnet-id') for segment in subport_segments: cmd2.append('segment level %s id %s' % (segment.level, segment.segment_id)) self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_plug_baremetal_into_network(self, mock_send_eapi_req): tenant_id = 'ten-2' network_id = 'net-id-1' bm_id = 'bm-1' port_id = 'p1' host = 'host' port_name = 'name_p1' device_owner = 'compute:None' subport_id = 222 sub_segment_id = 'sub_segment_id_1' segments = [{'segmentation_id': 1001, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] subport_net_id = 'net-id-2' binding_level = FakePortBindingLevel(subport_id, 0, 'vendor-1', sub_segment_id) subport_segments = [binding_level] trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': 'p2', 'segmentation_id': 1002, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} switch_bindings = {'local_link_information': [ {'port_id': 'Eth1', 'switch_id': 'switch-id-1', 'switch_info': 'switch-1'}]} bindings = switch_bindings['local_link_information'] self.drv._ndb.get_network_id_from_port_id.return_value = subport_net_id arista_eapi.db_lib.get_port_binding_level.return_value = \ subport_segments self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True) self.drv.plug_baremetal_into_network(bm_id, host, port_id, network_id, tenant_id, segments, port_name, device_owner, None, None, 'baremetal', bindings, trunk_details) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-2', 'instance id bm-1 hostid host type baremetal', 'port id p1 name "name_p1" network-id net-id-1 ' 'type native switch-id switch-id-1 switchport Eth1', ] for level, segment in enumerate(segments): cmd2.append('segment level %s id %s' % (level, segment['id'])) cmd2.append('port id p2 network-id net-id-2 ' 'type allowed switch-id switch-id-1 switchport Eth1', ) for segment in subport_segments: cmd2.append('segment level %s id %s' % (segment.level, segment.segment_id)) cmd2.append('exit') self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_unplug_host_from_network(self, mock_send_eapi_req): tenant_id = 'ten-2' network_id = 'net-id-1' vm_id = 'vm-2' port_id = 111 host = 'host' subport_id = 222 trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': subport_id, 'segmentation_id': 123, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} self.drv.unplug_host_from_network(vm_id, host, port_id, network_id, tenant_id, trunk_details) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-2', 'vm id vm-2 hostid host', 'no port id 222', 'no port id 111', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) @patch(EAPI_SEND_FUNC) def test_unplug_baremetal_from_network(self, mock_send_eapi_req): tenant_id = 'ten-2' network_id = 'net-id-1' bm_id = 'bm-2' port_id = 111 host = 'host' subport_id = 222 trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': subport_id, 'segmentation_id': 123, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} switch_bindings = {'local_link_information': [ {'port_id': 'Eth1', 'switch_id': 'switch-id-1', 'switch_info': 'switch-1'}]} bindings = switch_bindings['local_link_information'] self.drv.bm_and_dvr_supported = mock.MagicMock(return_value=True) self.drv.unplug_baremetal_from_network(bm_id, host, port_id, network_id, tenant_id, None, 'baremetal', bindings, trunk_details) cmd1 = ['show openstack agent uuid'] cmd2 = ['enable', 'configure', 'cvx', 'service openstack', 'region RegionOne', 'tenant ten-2', 'instance id bm-2 hostid host type baremetal', 'no port id 222', 'no port id 111', ] self._verify_send_eapi_request_calls(mock_send_eapi_req, [cmd1, cmd2]) def _verify_send_eapi_request_calls(self, mock_send_eapi_req, cmds, commands_to_log=None): calls = [] calls.extend( mock.call(cmds=cmd, commands_to_log=log_cmd) for cmd, log_cmd in six.moves.zip(cmds, commands_to_log or cmds)) mock_send_eapi_req.assert_has_calls(calls) networking-arista-2017.2.2/networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py000066400000000000000000001555701323242307100331110ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import operator import requests import socket import mock from mock import patch from neutron_lib import constants as n_const from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_utils import importutils from neutron.tests.unit import testlib_api from networking_arista.common import db_lib from networking_arista.ml2.rpc import arista_json import networking_arista.tests.unit.ml2.utils as utils BASE_RPC = "networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON." JSON_SEND_FUNC = BASE_RPC + "_send_api_request" RAND_FUNC = BASE_RPC + "_get_random_name" DB_LIB_MODULE = 'networking_arista.ml2.rpc.arista_json.db_lib' def setup_valid_config(): utils.setup_arista_wrapper_config(cfg) class _UnorderedDictList(list): def __init__(self, iterable='', sort_key=None): super(_UnorderedDictList, self).__init__(iterable) try: (self[0] or {})[sort_key] self.sort_key = sort_key except (IndexError, KeyError): self.sort_key = None def __eq__(self, other): if isinstance(other, list) and self.sort_key: key = operator.itemgetter(self.sort_key) return sorted(self, key=key) == sorted(other, key=key) else: return super(_UnorderedDictList, self).__eq__(other) class TestAristaJSONRPCWrapper(testlib_api.SqlTestCase): def setUp(self): super(TestAristaJSONRPCWrapper, self).setUp() plugin_klass = importutils.import_class( "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") directory.add_plugin(plugin_constants.CORE, plugin_klass()) setup_valid_config() ndb = db_lib.NeutronNets() self.drv = arista_json.AristaRPCWrapperJSON(ndb) self.drv._server_ip = "10.11.12.13" self.region = 'RegionOne' def _verify_send_api_request_call(self, mock_send_api_req, calls, unordered_dict_list=False): if unordered_dict_list: wrapper = functools.partial(_UnorderedDictList, sort_key='id') else: wrapper = lambda x: x expected_calls = [ mock.call(c[0], c[1], *(wrapper(d) for d in c[2:])) for c in calls ] mock_send_api_req.assert_has_calls(expected_calls, any_order=True) @patch(JSON_SEND_FUNC) def test_register_with_eos(self, mock_send_api_req): self.drv.register_with_eos() calls = [ ('region/RegionOne', 'PUT', [{'name': 'RegionOne', 'syncInterval': 10}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) def _get_random_name(self): return 'thisWillBeRandomInProd' @patch(JSON_SEND_FUNC) @patch(RAND_FUNC, _get_random_name) def test_sync_start(self, mock_send_api_req): mock_send_api_req.side_effect = [ [{'name': 'RegionOne', 'syncStatus': ''}], [{}], [{'syncStatus': 'syncInProgress', 'requestId': self._get_random_name()}] ] assert self.drv.sync_start() calls = [ ('region/RegionOne/sync', 'POST', {'requester': socket.gethostname().split('.')[0], 'requestId': self._get_random_name()}) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch('requests.Response') def test_sync_start_exception(self, mock_response): mock_response.ok.return_value = False self.assertFalse(self.drv.sync_start()) @patch(JSON_SEND_FUNC) def test_sync_start_no_region(self, mock_send_api_req): mock_send_api_req.return_value = {} self.assertFalse(self.drv.sync_start()) calls = [ ('region/RegionOne', 'GET'), ('region/', 'POST', [{'name': 'RegionOne'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch(RAND_FUNC, _get_random_name) def test_sync_end(self, mock_send_api_req): mock_send_api_req.return_value = [{'requester': self._get_random_name()}] self.drv.current_sync_name = self._get_random_name() self.assertTrue(self.drv.sync_end()) calls = [ ('region/RegionOne/sync', 'DELETE') ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_create_region(self, mock_send_api_req): self.drv.create_region('foo') calls = [('region/', 'POST', [{'name': 'foo'}])] self._verify_send_api_request_call(mock_send_api_req, calls) @patch('requests.Response') def test_get_region_exception(self, mock_response): mock_response.ok.return_value = False self.assertIsNone(self.drv.get_region('foo')) @patch(JSON_SEND_FUNC) def test_delete_region(self, mock_send_api_req): self.drv.delete_region('foo') calls = [('region/', 'DELETE', [{'name': 'foo'}])] self._verify_send_api_request_call(mock_send_api_req, calls) @patch('requests.Response') def test_get_region__updated_exception(self, mock_response): mock_response.ok.return_value = False self.assertEqual(self.drv.get_region_updated_time(), {'regionTimestamp': ''}) @patch(JSON_SEND_FUNC) def test_get_tenants(self, mock_send_api_req): self.drv.get_tenants() calls = [('region/RegionOne/tenant', 'GET')] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_delete_tenant_bulk(self, mock_send_api_req): self.drv.delete_tenant_bulk(['t1', 't2']) calls = [('region/RegionOne/tenant', 'DELETE', [{'id': 't1'}, {'id': 't2'}])] self._verify_send_api_request_call(mock_send_api_req, calls) def _createNetworkData(self, tenant_id, network_id, shared=False, seg_id=100, network_type='vlan'): return { 'network_id': network_id, 'tenantId': tenant_id, 'shared': shared, 'segments': [{'segmentation_id': seg_id, 'physical_network': 'default', 'id': 'segment_id_1', 'is_dynamic': False, 'network_type': network_type}], } @patch(JSON_SEND_FUNC) def test_create_network_bulk(self, mock_send_api_req): n = [] n.append(self._createNetworkData('t1', 'net1', seg_id=100)) n.append(self._createNetworkData('t1', 'net2', seg_id=200)) n.append(self._createNetworkData('t1', 'net3', network_type='flat')) self.drv.create_network_bulk('t1', n) calls = [ ('region/RegionOne/network', 'POST', [{'id': 'net1', 'tenantId': 't1', 'shared': False}, {'id': 'net2', 'tenantId': 't1', 'shared': False}, {'id': 'net3', 'tenantId': 't1', 'shared': False}]), ('region/RegionOne/segment', 'POST', [{'id': 'segment_id_1', 'networkId': 'net1', 'type': 'vlan', 'segmentationId': 100, 'segmentType': 'static'}, {'id': 'segment_id_1', 'networkId': 'net2', 'type': 'vlan', 'segmentationId': 200, 'segmentType': 'static'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls, True) @patch(JSON_SEND_FUNC) def test_delete_network_bulk(self, mock_send_api_req): self.drv.delete_network_bulk('t1', ['net1', 'net2']) calls = [ ('region/RegionOne/network', 'DELETE', [{'id': 'net1', 'tenantId': 't1'}, {'id': 'net2', 'tenantId': 't1'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls, True) @patch(JSON_SEND_FUNC) def test_create_network_segments(self, mock_send_api_req): segments = [{'segmentation_id': 101, 'physical_network': 'default', 'id': 'segment_id_1', 'is_dynamic': False, 'network_type': 'vlan'}, {'segmentation_id': 102, 'physical_network': 'default', 'id': 'segment_id_2', 'is_dynamic': True, 'network_type': 'vlan'}] self.drv.create_network_segments('t1', 'n1', 'net1', segments) calls = [ ('region/RegionOne/segment', 'POST', [{'id': 'segment_id_1', 'networkId': 'n1', 'type': 'vlan', 'segmentationId': 101, 'segmentType': 'static'}, {'id': 'segment_id_2', 'networkId': 'n1', 'type': 'vlan', 'segmentationId': 102, 'segmentType': 'dynamic'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls, True) @patch(JSON_SEND_FUNC) def test_delete_network_segments(self, mock_send_api_req): segments = [{'segmentation_id': 101, 'physical_network': 'default', 'id': 'segment_id_1', 'is_dynamic': False, 'network_type': 'vlan'}, {'segmentation_id': 102, 'physical_network': 'default', 'id': 'segment_id_2', 'is_dynamic': True, 'network_type': 'vlan'}] self.drv.delete_network_segments('t1', segments) calls = [ ('region/RegionOne/segment', 'DELETE', [{'id': 'segment_id_1'}, {'id': 'segment_id_2'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch(DB_LIB_MODULE) def test_create_instance_bulk(self, mock_db_lib, mock_send_api_req): tenant_id = 'ten-3' num_devices = 8 num_ports_per_device = 2 device_count = 0 devices = {} for device_id in range(0, num_devices): dev_id = 'dev-id-%d' % device_id devices[dev_id] = {'vmId': dev_id, 'baremetal_instance': False, 'ports': [] } for port_id in range(0, num_ports_per_device): port_id = 'port-id-%d-%d' % (device_id, port_id) port = { 'device_id': 'dev-id-%d' % device_id, 'hosts': ['host_%d' % (device_count)], 'portId': port_id } devices[dev_id]['ports'].append(port) device_count += 1 device_owners = [n_const.DEVICE_OWNER_DHCP, 'compute', 'baremetal', n_const.DEVICE_OWNER_DVR_INTERFACE] port_list = [] net_count = 0 for device_id in range(0, num_devices): for port_id in range(0, num_ports_per_device): port = { 'portId': 'port-id-%d-%d' % (device_id, port_id), 'device_id': 'dev-id-%d' % device_id, 'device_owner': device_owners[device_id % 4], 'network_id': 'network-id-%d' % net_count, 'name': 'port-%d-%d' % (device_id, port_id), 'tenant_id': tenant_id, 'segments': [{ 'network_id': 'network-id-%d' % net_count, 'segment_type': 'static', 'segmentation_id': (5000 + net_count), 'is_dynamic': False, 'network_type': 'vxlan', 'id': 'segment-id-%d' % (5000 + net_count)}, {'network_id': 'network-id-%d' % net_count, 'segment_type': 'dynamic', 'segmentation_id': (500 + net_count), 'is_dynamic': True, 'network_type': 'vlan', 'id': 'segment-id-%d' % (500 + net_count)}], } port_list.append(port) net_count += 1 create_ports = {} for port in port_list: create_ports.update(utils.port_dict_representation(port)) port_network_segments = {} for port in port_list: port_network_segments[port['portId']] = port['segments'] profiles = {} for port in port_list: profiles[port['portId']] = {'vnic_type': 'normal'} if port['device_owner'] == 'baremetal': profiles[port['portId']] = { 'vnic_type': 'baremetal', 'profile': '{"local_link_information":' '[{"switch_id": "switch01", "port_id": "Ethernet1"}]}'} mock_db_lib.get_network_segments_by_port_id.side_effect = ( port_network_segments.get) self.drv.create_instance_bulk(tenant_id, create_ports, devices, profiles) calls = [ ('region/RegionOne/tenant?tenantId=ten-3', 'GET'), ('region/RegionOne/dhcp?tenantId=ten-3', 'POST', [{'id': 'dev-id-0', 'hostId': 'host_0'}, {'id': 'dev-id-4', 'hostId': 'host_4'}]), ('region/RegionOne/vm?tenantId=ten-3', 'POST', [{'id': 'dev-id-1', 'hostId': 'host_1'}, {'id': 'dev-id-5', 'hostId': 'host_5'}]), ('region/RegionOne/baremetal?tenantId=ten-3', 'POST', [{'id': 'dev-id-2', 'hostId': 'host_2'}, {'id': 'dev-id-6', 'hostId': 'host_6'}]), ('region/RegionOne/router?tenantId=ten-3', 'POST', [{'id': 'dev-id-3', 'hostId': 'host_3'}, {'id': 'dev-id-7', 'hostId': 'host_7'}]), ('region/RegionOne/port', 'POST', [{'networkId': 'network-id-0', 'id': 'port-id-0-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-0', 'name': 'port-0-0', 'hosts': ['host_0'], 'instanceType': 'dhcp', 'vlanType': 'allowed'}, {'networkId': 'network-id-1', 'id': 'port-id-0-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-0', 'name': 'port-0-1', 'hosts': ['host_0'], 'instanceType': 'dhcp', 'vlanType': 'allowed'}, {'networkId': 'network-id-2', 'id': 'port-id-1-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-1', 'name': 'port-1-0', 'hosts': ['host_1'], 'instanceType': 'vm', 'vlanType': 'allowed'}, {'networkId': 'network-id-3', 'id': 'port-id-1-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-1', 'name': 'port-1-1', 'hosts': ['host_1'], 'instanceType': 'vm', 'vlanType': 'allowed'}, {'networkId': 'network-id-4', 'id': 'port-id-2-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-2', 'name': 'port-2-0', 'hosts': ['host_2'], 'instanceType': 'baremetal', 'vlanType': 'native'}, {'networkId': 'network-id-5', 'id': 'port-id-2-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-2', 'name': 'port-2-1', 'hosts': ['host_2'], 'instanceType': 'baremetal', 'vlanType': 'native'}, {'networkId': 'network-id-6', 'id': 'port-id-3-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-3', 'name': 'port-3-0', 'hosts': ['host_3'], 'instanceType': 'router', 'vlanType': 'allowed'}, {'networkId': 'network-id-7', 'id': 'port-id-3-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-3', 'name': 'port-3-1', 'hosts': ['host_3'], 'instanceType': 'router', 'vlanType': 'allowed'}, {'networkId': 'network-id-8', 'id': 'port-id-4-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-4', 'name': 'port-4-0', 'hosts': ['host_4'], 'instanceType': 'dhcp', 'vlanType': 'allowed'}, {'networkId': 'network-id-9', 'id': 'port-id-4-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-4', 'name': 'port-4-1', 'hosts': ['host_4'], 'instanceType': 'dhcp', 'vlanType': 'allowed'}, {'networkId': 'network-id-10', 'id': 'port-id-5-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-5', 'name': 'port-5-0', 'hosts': ['host_5'], 'instanceType': 'vm', 'vlanType': 'allowed'}, {'networkId': 'network-id-11', 'id': 'port-id-5-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-5', 'name': 'port-5-1', 'hosts': ['host_5'], 'instanceType': 'vm', 'vlanType': 'allowed'}, {'networkId': 'network-id-12', 'id': 'port-id-6-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-6', 'name': 'port-6-0', 'hosts': ['host_6'], 'instanceType': 'baremetal', 'vlanType': 'native'}, {'networkId': 'network-id-13', 'id': 'port-id-6-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-6', 'name': 'port-6-1', 'hosts': ['host_6'], 'instanceType': 'baremetal', 'vlanType': 'native'}, {'networkId': 'network-id-14', 'id': 'port-id-7-0', 'tenantId': 'ten-3', 'instanceId': 'dev-id-7', 'name': 'port-7-0', 'hosts': ['host_7'], 'instanceType': 'router', 'vlanType': 'allowed'}, {'networkId': 'network-id-15', 'id': 'port-id-7-1', 'tenantId': 'ten-3', 'instanceId': 'dev-id-7', 'name': 'port-7-1', 'hosts': ['host_7'], 'instanceType': 'router', 'vlanType': 'allowed'}]), ('region/RegionOne/port/port-id-0-0/binding', 'POST', [{'portId': 'port-id-0-0', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-0', 'segment_type': 'static', 'segmentationId': 5000, 'type': 'vxlan', 'id': 'segment-id-5000'}, {'networkId': 'network-id-0', 'segment_type': 'dynamic', 'segmentationId': 500, 'type': 'vlan', 'id': 'segment-id-500'}], 'host': 'host_0'}]}]), ('region/RegionOne/port/port-id-0-1/binding', 'POST', [{'portId': 'port-id-0-1', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-1', 'segment_type': 'static', 'segmentationId': 5001, 'type': 'vxlan', 'id': 'segment-id-5001'}, {'networkId': 'network-id-1', 'segment_type': 'dynamic', 'segmentationId': 501, 'type': 'vlan', 'id': 'segment-id-501'}], 'host': 'host_0'}]}]), ('region/RegionOne/port/port-id-1-0/binding', 'POST', [{'portId': 'port-id-1-0', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-2', 'segment_type': 'static', 'segmentationId': 5002, 'type': 'vxlan', 'id': 'segment-id-5002'}, {'networkId': 'network-id-2', 'segment_type': 'dynamic', 'segmentationId': 502, 'type': 'vlan', 'id': 'segment-id-502'}], 'host': 'host_1'}]}]), ('region/RegionOne/port/port-id-1-1/binding', 'POST', [{'portId': 'port-id-1-1', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-3', 'segment_type': 'static', 'segmentationId': 5003, 'type': 'vxlan', 'id': 'segment-id-5003'}, {'networkId': 'network-id-3', 'segment_type': 'dynamic', 'segmentationId': 503, 'type': 'vlan', 'id': 'segment-id-503'}], 'host': 'host_1'}]}]), ('region/RegionOne/port/port-id-2-0/binding', 'POST', [{'portId': 'port-id-2-0', 'switchBinding': [{ 'interface': u'Ethernet1', 'host': 'host_2', 'segment': [{'networkId': 'network-id-4', 'segment_type': 'static', 'segmentationId': 5004, 'type': 'vxlan', 'id': 'segment-id-5004'}, {'networkId': 'network-id-4', 'segment_type': 'dynamic', 'segmentationId': 504, 'type': 'vlan', 'id': 'segment-id-504'}], 'switch': u'switch01'}]}]), ('region/RegionOne/port/port-id-2-1/binding', 'POST', [{'portId': 'port-id-2-1', 'switchBinding': [ {'interface': u'Ethernet1', 'host': 'host_2', 'segment': [{'networkId': 'network-id-5', 'segment_type': 'static', 'segmentationId': 5005, 'type': 'vxlan', 'id': 'segment-id-5005'}, {'networkId': 'network-id-5', 'segment_type': 'dynamic', 'segmentationId': 505, 'type': 'vlan', 'id': 'segment-id-505'}], 'switch': u'switch01'}]}]), ('region/RegionOne/port/port-id-3-0/binding', 'POST', [{'portId': 'port-id-3-0', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-6', 'segment_type': 'static', 'segmentationId': 5006, 'type': 'vxlan', 'id': 'segment-id-5006'}, {'networkId': 'network-id-6', 'segment_type': 'dynamic', 'segmentationId': 506, 'type': 'vlan', 'id': 'segment-id-506'}], 'host': 'host_3'}]}]), ('region/RegionOne/port/port-id-3-1/binding', 'POST', [{'portId': 'port-id-3-1', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-7', 'segment_type': 'static', 'segmentationId': 5007, 'type': 'vxlan', 'id': 'segment-id-5007'}, {'networkId': 'network-id-7', 'segment_type': 'dynamic', 'segmentationId': 507, 'type': 'vlan', 'id': 'segment-id-507'}], 'host': 'host_3'}]}]), ('region/RegionOne/port/port-id-4-0/binding', 'POST', [{'portId': 'port-id-4-0', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-8', 'segment_type': 'static', 'segmentationId': 5008, 'type': 'vxlan', 'id': 'segment-id-5008'}, {'networkId': 'network-id-8', 'segment_type': 'dynamic', 'segmentationId': 508, 'type': 'vlan', 'id': 'segment-id-508'}], 'host': 'host_4'}]}]), ('region/RegionOne/port/port-id-4-1/binding', 'POST', [{'portId': 'port-id-4-1', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-9', 'segment_type': 'static', 'segmentationId': 5009, 'type': 'vxlan', 'id': 'segment-id-5009'}, {'networkId': 'network-id-9', 'segment_type': 'dynamic', 'segmentationId': 509, 'type': 'vlan', 'id': 'segment-id-509'}], 'host': 'host_4'}]}]), ('region/RegionOne/port/port-id-5-0/binding', 'POST', [{'portId': 'port-id-5-0', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-10', 'segment_type': 'static', 'segmentationId': 5010, 'type': 'vxlan', 'id': 'segment-id-5010'}, {'networkId': 'network-id-10', 'segment_type': 'dynamic', 'segmentationId': 510, 'type': 'vlan', 'id': 'segment-id-510'}], 'host': 'host_5'}]}]), ('region/RegionOne/port/port-id-5-1/binding', 'POST', [{'portId': 'port-id-5-1', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-11', 'segment_type': 'static', 'segmentationId': 5011, 'type': 'vxlan', 'id': 'segment-id-5011'}, {'networkId': 'network-id-11', 'segment_type': 'dynamic', 'segmentationId': 511, 'type': 'vlan', 'id': 'segment-id-511'}], 'host': 'host_5'}]}]), ('region/RegionOne/port/port-id-6-0/binding', 'POST', [{'portId': 'port-id-6-0', 'switchBinding': [{ 'interface': u'Ethernet1', 'host': 'host_6', 'segment': [{'networkId': 'network-id-12', 'segment_type': 'static', 'segmentationId': 5012, 'type': 'vxlan', 'id': 'segment-id-5012'}, {'networkId': 'network-id-12', 'segment_type': 'dynamic', 'segmentationId': 512, 'type': 'vlan', 'id': 'segment-id-512'}], 'switch': u'switch01'}]}]), ('region/RegionOne/port/port-id-6-1/binding', 'POST', [{'portId': 'port-id-6-1', 'switchBinding': [{ 'interface': u'Ethernet1', 'host': 'host_6', 'segment': [{'networkId': 'network-id-13', 'segment_type': 'static', 'segmentationId': 5013, 'type': 'vxlan', 'id': 'segment-id-5013'}, {'networkId': 'network-id-13', 'segment_type': 'dynamic', 'segmentationId': 513, 'type': 'vlan', 'id': 'segment-id-513'}], 'switch': u'switch01'}]}]), ('region/RegionOne/port/port-id-7-0/binding', 'POST', [{'portId': 'port-id-7-0', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-14', 'segment_type': 'static', 'segmentationId': 5014, 'type': 'vxlan', 'id': 'segment-id-5014'}, {'networkId': 'network-id-14', 'segment_type': 'dynamic', 'segmentationId': 514, 'type': 'vlan', 'id': 'segment-id-514'}], 'host': 'host_7'}]}]), ('region/RegionOne/port/port-id-7-1/binding', 'POST', [{'portId': 'port-id-7-1', 'hostBinding': [{ 'segment': [{'networkId': 'network-id-15', 'segment_type': 'static', 'segmentationId': 5015, 'type': 'vxlan', 'id': 'segment-id-5015'}, {'networkId': 'network-id-15', 'segment_type': 'dynamic', 'segmentationId': 515, 'type': 'vlan', 'id': 'segment-id-515'}], 'host': 'host_7'}]}]), ] self._verify_send_api_request_call(mock_send_api_req, calls, True) @patch(JSON_SEND_FUNC) def test_delete_vm_bulk(self, mock_send_api_req): self.drv.delete_vm_bulk('t1', ['vm1', 'vm2']) calls = [ ('region/RegionOne/vm', 'DELETE', [{'id': 'vm1'}, {'id': 'vm2'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_delete_dhcp_bulk(self, mock_send_api_req): self.drv.delete_dhcp_bulk('t1', ['dhcp1', 'dhcp2']) calls = [ ('region/RegionOne/dhcp', 'DELETE', [{'id': 'dhcp1'}, {'id': 'dhcp2'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_delete_port(self, mock_send_api_req): self.drv.delete_port('p1', 'inst1', 'vm') self.drv.delete_port('p2', 'inst2', 'dhcp') calls = [ ('region/RegionOne/port?portId=p1&id=inst1&type=vm', 'DELETE', [{'hosts': [], 'id': 'p1', 'tenantId': None, 'networkId': None, 'instanceId': 'inst1', 'name': None, 'instanceType': 'vm', 'vlanType': 'allowed'}]), ('region/RegionOne/port?portId=p2&id=inst2&type=dhcp', 'DELETE', [{'hosts': [], 'id': 'p2', 'tenantId': None, 'networkId': None, 'instanceId': 'inst2', 'name': None, 'instanceType': 'dhcp', 'vlanType': 'allowed'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_get_port(self, mock_send_api_req): self.drv.get_instance_ports('inst1', 'vm') calls = [ ('region/RegionOne/port?id=inst1&type=vm', 'GET') ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_plug_virtual_port_into_network(self, mock_send_api_req): segments = [{'segmentation_id': 101, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] self.drv.plug_port_into_network('vm1', 'h1', 'p1', 'n1', 't1', 'port1', 'compute', None, None, None, segments) calls = [ ('region/RegionOne/vm?tenantId=t1', 'POST', [{'id': 'vm1', 'hostId': 'h1'}]), ('region/RegionOne/port', 'POST', [{'id': 'p1', 'hosts': ['h1'], 'tenantId': 't1', 'networkId': 'n1', 'instanceId': 'vm1', 'name': 'port1', 'instanceType': 'vm', 'vlanType': 'allowed'}]), ('region/RegionOne/port/p1/binding', 'POST', [{'portId': 'p1', 'hostBinding': [{'host': 'h1', 'segment': [{ 'id': 'segment_id_1', 'type': 'vlan', 'segmentationId': 101, 'networkId': 'n1', 'segment_type': 'static'}]}]}]), ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.' 'get_instance_ports') def test_unplug_virtual_port_from_network(self, mock_get_instance_ports, mock_send_api_req): mock_get_instance_ports.return_value = [] self.drv.unplug_port_from_network('vm1', 'compute', 'h1', 'p1', 'n1', 't1', None, None) port = self.drv._create_port_data('p1', None, None, 'vm1', None, 'vm', None) calls = [ ('region/RegionOne/port/p1/binding', 'DELETE', [{'portId': 'p1', 'hostBinding': [{'host': 'h1'}]}]), ('region/RegionOne/port?portId=p1&id=vm1&type=vm', 'DELETE', [port]), ('region/RegionOne/vm', 'DELETE', [{'id': 'vm1'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_plug_baremetal_port_into_network(self, mock_send_api_req): segments = [{'segmentation_id': 101, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] sg = {'id': 'security-group-1'} switch_bindings = [{'switch_id': 'switch01', 'port_id': 'Ethernet1', 'switch_info': 'switch01'}] self.drv.plug_port_into_network('bm1', 'h1', 'p1', 'n1', 't1', 'port1', 'baremetal', sg, None, 'baremetal', segments, switch_bindings=switch_bindings) calls = [ ('region/RegionOne/baremetal?tenantId=t1', 'POST', [{'id': 'bm1', 'hostId': 'h1'}]), ('region/RegionOne/port', 'POST', [{'id': 'p1', 'hosts': ['h1'], 'tenantId': 't1', 'networkId': 'n1', 'instanceId': 'bm1', 'name': 'port1', 'instanceType': 'baremetal', 'vlanType': 'native'}]), ('region/RegionOne/port/p1/binding', 'POST', [{'portId': 'p1', 'switchBinding': [{'host': 'h1', 'switch': 'switch01', 'interface': 'Ethernet1', 'segment': [{ 'id': 'segment_id_1', 'type': 'vlan', 'segmentationId': 101, 'networkId': 'n1', 'segment_type': 'static'}]}]}]), ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.' 'get_instance_ports') def test_unplug_baremetal_port_from_network(self, mock_get_instance_ports, mock_send_api_req): mock_get_instance_ports.return_value = [] switch_bindings = [{'switch_id': 'switch01', 'port_id': 'Ethernet1'}] self.drv.unplug_port_from_network('bm1', 'baremetal', 'h1', 'p1', 'n1', 't1', None, 'baremetal', switch_bindings) port = self.drv._create_port_data('p1', None, None, 'bm1', None, 'baremetal', None) calls = [ ('region/RegionOne/port/p1/binding', 'DELETE', [{'portId': 'p1', 'switchBinding': [{'host': 'h1', 'switch': 'switch01', 'segment': [], 'interface': 'Ethernet1'}]}]), ('region/RegionOne/port?portId=p1&id=bm1&type=baremetal', 'DELETE', [port]), ('region/RegionOne/baremetal', 'DELETE', [{'id': 'bm1'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_plug_dhcp_port_into_network(self, mock_send_api_req): segments = [{'segmentation_id': 101, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] self.drv.plug_port_into_network('vm1', 'h1', 'p1', 'n1', 't1', 'port1', n_const.DEVICE_OWNER_DHCP, None, None, None, segments) calls = [ ('region/RegionOne/dhcp?tenantId=t1', 'POST', [{'id': 'vm1', 'hostId': 'h1'}]), ('region/RegionOne/port', 'POST', [{'id': 'p1', 'hosts': ['h1'], 'tenantId': 't1', 'networkId': 'n1', 'instanceId': 'vm1', 'name': 'port1', 'instanceType': 'dhcp', 'vlanType': 'allowed'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.' 'get_instance_ports') def test_unplug_dhcp_port_from_network(self, mock_get_instance_ports, mock_send_api_req): mock_get_instance_ports.return_value = [] self.drv.unplug_port_from_network('dhcp1', n_const.DEVICE_OWNER_DHCP, 'h1', 'p1', 'n1', 't1', None, None) calls = [ ('region/RegionOne/port?portId=p1&id=dhcp1&type=dhcp', 'DELETE', [{'id': 'p1', 'hosts': [], 'tenantId': None, 'networkId': None, 'instanceId': 'dhcp1', 'name': None, 'instanceType': 'dhcp', 'vlanType': 'allowed'}]), ('region/RegionOne/dhcp', 'DELETE', [{'id': 'dhcp1'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) def test_plug_router_port_into_network(self, mock_send_api_req): segments = [{'segmentation_id': 101, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] self.drv.plug_port_into_network('router1', 'h1', 'p1', 'n1', 't1', 'port1', n_const.DEVICE_OWNER_DVR_INTERFACE, None, None, None, segments) calls = [ ('region/RegionOne/router?tenantId=t1', 'POST', [{'id': 'router1', 'hostId': 'h1'}]), ('region/RegionOne/port', 'POST', [{'id': 'p1', 'hosts': ['h1'], 'tenantId': 't1', 'networkId': 'n1', 'instanceId': 'router1', 'name': 'port1', 'instanceType': 'router', 'vlanType': 'allowed'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.' 'get_instance_ports') def test_unplug_router_port_from_network(self, mock_get_instance_ports, mock_send_api_req): mock_get_instance_ports.return_value = [] self.drv.unplug_port_from_network('router1', n_const.DEVICE_OWNER_DVR_INTERFACE, 'h1', 'p1', 'n1', 't1', None, None) calls = [ ('region/RegionOne/port?portId=p1&id=router1&type=router', 'DELETE', [{'id': 'p1', 'hosts': [], 'tenantId': None, 'networkId': None, 'instanceId': 'router1', 'name': None, 'instanceType': 'router', 'vlanType': 'allowed'}]), ('region/RegionOne/router', 'DELETE', [{'id': 'router1'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch('requests.get') @patch(BASE_RPC + '_get_eos_master') def test_get_value_error(self, mock_get_eos_master, mock_requests_get): mock_get_eos_master.return_value = 'fake_master' mock_requests_get.return_value = requests.Response() self.assertIsNotNone(self.drv.get_vms_for_tenant('')) self.assertIsNotNone(self.drv.get_dhcps_for_tenant('')) self.assertIsNotNone(self.drv.get_baremetals_for_tenant('')) self.assertIsNotNone(self.drv.get_routers_for_tenant('')) self.assertIsNotNone(self.drv.get_ports_for_tenant('', 'vm')) self.assertIsNotNone(self.drv.get_tenants()) self.assertIsNotNone(self.drv.get_networks('')) self.assertIsNotNone(self.drv.get_instance_ports('', 'vm')) @patch(BASE_RPC + '_get_eos_master') def test_get_exception(self, mock_get_eos_master): mock_get_eos_master.return_value = 'fake_master' self.assertIsNotNone(self.drv.get_vms_for_tenant('')) self.assertIsNotNone(self.drv.get_dhcps_for_tenant('')) self.assertIsNotNone(self.drv.get_baremetals_for_tenant('')) self.assertIsNotNone(self.drv.get_routers_for_tenant('')) self.assertIsNotNone(self.drv.get_ports_for_tenant('', 'vm')) self.assertIsNotNone(self.drv.get_tenants()) self.assertIsNotNone(self.drv.get_networks('')) self.assertIsNotNone(self.drv.get_instance_ports('', 'vm')) class RPCWrapperJSONValidConfigTrunkTestCase(testlib_api.SqlTestCase): """Test cases to test plug trunk port into network. """ def setUp(self): super(RPCWrapperJSONValidConfigTrunkTestCase, self).setUp() setup_valid_config() ndb = mock.MagicMock() self.drv = arista_json.AristaRPCWrapperJSON(ndb) self.drv._server_ip = "10.11.12.13" self.region = 'RegionOne' @patch(JSON_SEND_FUNC) @patch(DB_LIB_MODULE) def test_plug_virtual_trunk_port_into_network(self, mock_db_lib, mock_send_api_req): # vm tenant_id = 'ten-1' network_id = 'net-id-1' vm_id = 'vm-1' port_id = 'p1' host = 'host' port_name = 'name_p1' subport_net_id = 'net-id-2' segments = [{'segmentation_id': 1001, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] subport_segments = [{'id': 'sub_segment_id_1', 'segmentation_id': 1002, 'network_type': 'vlan', 'is_dynamic': False}] trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': 'p2', 'segmentation_id': 1002, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} self.drv._ndb.get_network_id_from_port_id.return_value = subport_net_id mock_db_lib.get_network_segments_by_port_id.return_value = ( subport_segments) self.drv.plug_port_into_network(vm_id, host, port_id, network_id, tenant_id, port_name, 'compute', None, None, None, segments, trunk_details=trunk_details) calls = [ ('region/RegionOne/vm?tenantId=ten-1', 'POST', [{'id': 'vm-1', 'hostId': 'host'}]), ('region/RegionOne/port', 'POST', [{'id': 'p1', 'hosts': ['host'], 'tenantId': 'ten-1', 'networkId': 'net-id-1', 'instanceId': 'vm-1', 'name': 'name_p1', 'instanceType': 'vm', 'vlanType': 'allowed'}]), ('region/RegionOne/port', 'POST', [{'id': 'p2', 'hosts': ['host'], 'tenantId': 'ten-1', 'networkId': 'net-id-2', 'instanceId': 'vm-1', 'name': 'name_p2', 'instanceType': 'vm', 'vlanType': 'allowed'}]), ('region/RegionOne/port/p1/binding', 'POST', [{'portId': 'p1', 'hostBinding': [{'host': 'host', 'segment': [{ 'id': 'segment_id_1', 'type': 'vlan', 'segmentationId': 1001, 'networkId': 'net-id-1', 'segment_type': 'static'}]}]}]), ('region/RegionOne/port/p2/binding', 'POST', [{'portId': 'p2', 'hostBinding': [{'host': 'host', 'segment': [{ 'id': 'sub_segment_id_1', 'type': 'vlan', 'segmentationId': 1002, 'networkId': 'net-id-2', 'segment_type': 'static'}]}]}]), ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch(DB_LIB_MODULE) def test_plug_baremetal_trunk_port_into_network(self, mock_db_lib, mock_send_api_req): # baremetal tenant_id = 'ten-2' network_id = 'net-id-1' bm_id = 'bm-1' port_id = 'p1' host = 'host' port_name = 'name_p1' sg = {'id': 'security-group-1'} segments = [{'segmentation_id': 1111, 'id': 'segment_id_1', 'network_type': 'vlan', 'is_dynamic': False}] subport_net_id = 'net-id-2' subport_segments = [{'id': 'sub_segment_id_1', 'segmentation_id': 1112, 'network_type': 'vlan', 'is_dynamic': False}] trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': 'p2', 'segmentation_id': 1112, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} switch_bindings = {'local_link_information': [ {'port_id': 'Eth1', 'switch_id': 'switch-id-1', 'switch_info': 'switch-1'}]} bindings = switch_bindings['local_link_information'] self.drv._ndb.get_network_id_from_port_id.return_value = subport_net_id mock_db_lib.get_network_segments_by_port_id.return_value = ( subport_segments) self.drv.plug_port_into_network(bm_id, host, port_id, network_id, tenant_id, port_name, 'baremetal', sg, None, 'baremetal', segments, bindings, trunk_details=trunk_details) calls = [ ('region/RegionOne/baremetal?tenantId=ten-2', 'POST', [{'id': 'bm-1', 'hostId': 'host'}]), ('region/RegionOne/port', 'POST', [{'id': 'p1', 'hosts': ['host'], 'tenantId': 'ten-2', 'networkId': 'net-id-1', 'instanceId': 'bm-1', 'name': 'name_p1', 'instanceType': 'baremetal', 'vlanType': 'native'}]), ('region/RegionOne/port', 'POST', [{'id': 'p2', 'hosts': ['host'], 'tenantId': 'ten-2', 'networkId': 'net-id-2', 'instanceId': 'bm-1', 'name': 'name_p2', 'instanceType': 'baremetal', 'vlanType': 'allowed'}]), ('region/RegionOne/port/p1/binding', 'POST', [{'portId': 'p1', 'switchBinding': [ {'host': 'host', 'switch': 'switch-id-1', 'interface': 'Eth1', 'segment': [{'id': 'segment_id_1', 'type': 'vlan', 'segmentationId': 1111, 'networkId': 'net-id-1', 'segment_type': 'static'}]}]}]), ('region/RegionOne/port/p2/binding', 'POST', [{'portId': 'p2', 'switchBinding': [{'host': 'host', 'switch': 'switch-id-1', 'interface': 'Eth1', 'segment': [{'id': 'sub_segment_id_1', 'type': 'vlan', 'segmentationId': 1112, 'networkId': 'net-id-2', 'segment_type': 'static'}]}]}]), ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.' 'get_instance_ports') def test_unplug_virtual_trunk_port_from_network(self, mock_get_instance_ports, mock_send_api_req): # trunk port trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': 'subport', 'segmentation_id': 1001, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} mock_get_instance_ports.return_value = [] self.drv.unplug_port_from_network('vm1', 'compute', 'h1', 'trunk_port', 'n1', 't1', None, None, trunk_details=trunk_details) subport = self.drv._create_port_data('subport', None, None, 'vm1', None, 'vm', None) trunk_port = self.drv._create_port_data('trunk_port', None, None, 'vm1', None, 'vm', None) calls = [ ('region/RegionOne/port/subport/binding', 'DELETE', [{'portId': 'subport', 'hostBinding': [{'host': 'h1'}]}]), ('region/RegionOne/port?portId=subport&id=vm1&type=vm', 'DELETE', [subport]), ('region/RegionOne/port/trunk_port/binding', 'DELETE', [{'portId': 'trunk_port', 'hostBinding': [{'host': 'h1'}]}]), ('region/RegionOne/port?portId=trunk_port&id=vm1&type=vm', 'DELETE', [trunk_port]), ('region/RegionOne/vm', 'DELETE', [{'id': 'vm1'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) @patch(JSON_SEND_FUNC) @patch('networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON.' 'get_instance_ports') def test_unplug_baremetal_trunk_port_from_network(self, mock_get_instance_ports, mock_send_api_req): # trunk port trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': 'subport', 'segmentation_id': 1001, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} mock_get_instance_ports.return_value = [] switch_bindings = [{'switch_id': 'switch01', 'port_id': 'Ethernet1'}] self.drv.unplug_port_from_network('bm1', 'baremetal', 'h1', 'p1', 'n1', 't1', None, 'baremetal', switch_bindings, trunk_details) subport = self.drv._create_port_data('subport', None, None, 'bm1', None, 'baremetal', None, 'trunk:subport') trunk_port = self.drv._create_port_data('p1', None, None, 'bm1', None, 'baremetal', None) calls = [ ('region/RegionOne/port/subport/binding', 'DELETE', [{'portId': 'subport', 'switchBinding': [{'host': 'h1', 'switch': 'switch01', 'segment': [], 'interface': 'Ethernet1'}]}]), ('region/RegionOne/port?portId=subport&id=bm1&type=baremetal', 'DELETE', [subport]), ('region/RegionOne/port/p1/binding', 'DELETE', [{'portId': 'p1', 'switchBinding': [{'host': 'h1', 'switch': 'switch01', 'segment': [], 'interface': 'Ethernet1'}]}]), ('region/RegionOne/port?portId=p1&id=bm1&type=baremetal', 'DELETE', [trunk_port]), ('region/RegionOne/baremetal', 'DELETE', [{'id': 'bm1'}]) ] self._verify_send_api_request_call(mock_send_api_req, calls) def _verify_send_api_request_call(self, mock_send_api_req, calls, unordered_dict_list=False): if unordered_dict_list: wrapper = functools.partial(_UnorderedDictList, sort_key='id') else: wrapper = lambda x: x expected_calls = [ mock.call(c[0], c[1], *(wrapper(d) for d in c[2:])) for c in calls ] mock_send_api_req.assert_has_calls(expected_calls, any_order=True) networking-arista-2017.2.2/networking_arista/tests/unit/ml2/test_arista_mechanism_driver.py000066400000000000000000000470341323242307100322620ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib.api.definitions import portbindings from neutron_lib.db import api as db_api from neutron_lib.plugins.ml2 import api as driver_api from oslo_config import cfg from neutron.tests.unit import testlib_api from networking_arista.common import db_lib from networking_arista.ml2 import mechanism_arista import networking_arista.tests.unit.ml2.utils as utils def setup_valid_config(): utils.setup_arista_wrapper_config(cfg) class AristaProvisionedVlansStorageTestCase(testlib_api.SqlTestCase): """Test storing and retriving functionality of Arista mechanism driver. Tests all methods of this class by invoking them separately as well as a group. """ def test_tenant_is_remembered(self): tenant_id = 'test' db_lib.remember_tenant(tenant_id) net_provisioned = db_lib.is_tenant_provisioned(tenant_id) self.assertTrue(net_provisioned, 'Tenant must be provisioned') def test_tenant_is_removed(self): tenant_id = 'test' db_lib.remember_tenant(tenant_id) db_lib.forget_tenant(tenant_id) net_provisioned = db_lib.is_tenant_provisioned(tenant_id) self.assertFalse(net_provisioned, 'The Tenant should be deleted') def test_network_is_remembered(self): tenant_id = 'test' network_id = '123' segmentation_id = 456 segment_id = 'segment_id_%s' % segmentation_id db_lib.remember_network_segment(tenant_id, network_id, segmentation_id, segment_id) net_provisioned = db_lib.is_network_provisioned(tenant_id, network_id) self.assertTrue(net_provisioned, 'Network must be provisioned') def test_network_is_removed(self): tenant_id = 'test' network_id = '123' segment_id = 'segment_id_1' db_lib.remember_network_segment(tenant_id, network_id, '123', segment_id) db_lib.forget_network_segment(tenant_id, network_id) net_provisioned = db_lib.is_network_provisioned(tenant_id, network_id) self.assertFalse(net_provisioned, 'The network should be deleted') def test_vm_is_remembered(self): vm_id = 'VM-1' tenant_id = 'test' network_id = '123' port_id = 456 host_id = 'ubuntu1' db_lib.remember_vm(vm_id, host_id, port_id, network_id, tenant_id) vm_provisioned = db_lib.is_vm_provisioned(vm_id, host_id, port_id, network_id, tenant_id) self.assertTrue(vm_provisioned, 'VM must be provisioned') def test_vm_is_removed(self): vm_id = 'VM-1' tenant_id = 'test' network_id = '123' port_id = 456 host_id = 'ubuntu1' db_lib.remember_vm(vm_id, host_id, port_id, network_id, tenant_id) db_lib.forget_port(port_id, host_id) vm_provisioned = db_lib.is_vm_provisioned(vm_id, host_id, port_id, network_id, tenant_id) self.assertFalse(vm_provisioned, 'The vm should be deleted') def test_remembers_multiple_networks(self): tenant_id = 'test' expected_num_nets = 100 segment_id = 'segment_%s' nets = ['id%s' % n for n in range(expected_num_nets)] for net_id in nets: db_lib.remember_network_segment(tenant_id, net_id, 123, segment_id % net_id) num_nets_provisioned = db_lib.num_nets_provisioned(tenant_id) self.assertEqual(expected_num_nets, num_nets_provisioned, 'There should be %d nets, not %d' % (expected_num_nets, num_nets_provisioned)) def test_removes_all_networks(self): tenant_id = 'test' num_nets = 100 old_nets = db_lib.num_nets_provisioned(tenant_id) nets = ['id_%s' % n for n in range(num_nets)] segment_id = 'segment_%s' for net_id in nets: db_lib.remember_network_segment(tenant_id, net_id, 123, segment_id % net_id) for net_id in nets: db_lib.forget_network_segment(tenant_id, net_id) num_nets_provisioned = db_lib.num_nets_provisioned(tenant_id) expected = old_nets self.assertEqual(expected, num_nets_provisioned, 'There should be %d nets, not %d' % (expected, num_nets_provisioned)) def test_remembers_multiple_tenants(self): expected_num_tenants = 100 tenants = ['id%s' % n for n in range(expected_num_tenants)] for tenant_id in tenants: db_lib.remember_tenant(tenant_id) num_tenants_provisioned = db_lib.num_provisioned_tenants() self.assertEqual(expected_num_tenants, num_tenants_provisioned, 'There should be %d tenants, not %d' % (expected_num_tenants, num_tenants_provisioned)) def test_removes_multiple_tenants(self): num_tenants = 100 tenants = ['id%s' % n for n in range(num_tenants)] for tenant_id in tenants: db_lib.remember_tenant(tenant_id) for tenant_id in tenants: db_lib.forget_tenant(tenant_id) num_tenants_provisioned = db_lib.num_provisioned_tenants() expected = 0 self.assertEqual(expected, num_tenants_provisioned, 'There should be %d tenants, not %d' % (expected, num_tenants_provisioned)) def test_num_vm_is_valid(self): tenant_id = 'test' network_id = '123' port_id_base = 'port-id' host_id = 'ubuntu1' vm_to_remember = ['vm1', 'vm2', 'vm3'] vm_to_forget = ['vm2', 'vm1'] for vm in vm_to_remember: port_id = port_id_base + vm db_lib.remember_vm(vm, host_id, port_id, network_id, tenant_id) for vm in vm_to_forget: port_id = port_id_base + vm db_lib.forget_port(port_id, host_id) num_vms = len(db_lib.get_vms(tenant_id)) expected = len(vm_to_remember) - len(vm_to_forget) self.assertEqual(expected, num_vms, 'There should be %d records, ' 'got %d records' % (expected, num_vms)) # clean up afterwards db_lib.forget_port(port_id, host_id) def test_get_network_list_returns_eos_compatible_data(self): tenant = u'test-1' segm_type = 'vlan' network_id = u'123' network2_id = u'1234' vlan_id = 123 vlan2_id = 1234 segment_id1 = '11111-%s' % vlan_id segment_id2 = '11111-%s' % vlan2_id expected_eos_net_list = {network_id: {u'networkId': network_id, u'segmentationTypeId': vlan_id, u'tenantId': tenant, u'segmentId': segment_id1, u'segmentationType': segm_type}, network2_id: {u'networkId': network2_id, u'tenantId': tenant, u'segmentId': segment_id2, u'segmentationTypeId': vlan2_id, u'segmentationType': segm_type}} db_lib.remember_network_segment(tenant, network_id, vlan_id, segment_id1) db_lib.remember_network_segment(tenant, network2_id, vlan2_id, segment_id2) net_list = db_lib.get_networks(tenant) self.assertEqual(net_list, expected_eos_net_list, ('%s != %s' % (net_list, expected_eos_net_list))) class RealNetStorageAristaDriverTestCase(testlib_api.SqlTestCase): """Main test cases for Arista Mechanism driver. Tests all mechanism driver APIs supported by Arista Driver. It invokes all the APIs as they would be invoked in real world scenarios and verifies the functionality. """ def setUp(self): super(RealNetStorageAristaDriverTestCase, self).setUp() setup_valid_config() self.fake_rpc = mock.MagicMock() self.drv = mechanism_arista.AristaDriver(self.fake_rpc) def tearDown(self): super(RealNetStorageAristaDriverTestCase, self).tearDown() def test_create_and_delete_network(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 network_context = self._get_network_context(tenant_id, network_id, segmentation_id) self.drv.create_network_precommit(network_context) net_provisioned = db_lib.is_network_provisioned(tenant_id, network_id) self.assertTrue(net_provisioned, 'The network should be created') expected_num_nets = 1 num_nets_provisioned = db_lib.num_nets_provisioned(tenant_id) self.assertEqual(expected_num_nets, num_nets_provisioned, 'There should be %d nets, not %d' % (expected_num_nets, num_nets_provisioned)) # Now test the delete network self.drv.delete_network_precommit(network_context) net_provisioned = db_lib.is_network_provisioned(tenant_id, network_id) self.assertFalse(net_provisioned, 'The network should be created') expected_num_nets = 0 num_nets_provisioned = db_lib.num_nets_provisioned(tenant_id) self.assertEqual(expected_num_nets, num_nets_provisioned, 'There should be %d nets, not %d' % (expected_num_nets, num_nets_provisioned)) def test_create_and_delete_multiple_networks(self): tenant_id = 'ten-1' expected_num_nets = 100 segmentation_id = 1001 nets = ['id%s' % n for n in range(expected_num_nets)] for net_id in nets: network_context = self._get_network_context(tenant_id, net_id, segmentation_id) self.drv.create_network_precommit(network_context) num_nets_provisioned = db_lib.num_nets_provisioned(tenant_id) self.assertEqual(expected_num_nets, num_nets_provisioned, 'There should be %d nets, not %d' % (expected_num_nets, num_nets_provisioned)) # Now test the delete networks for net_id in nets: network_context = self._get_network_context(tenant_id, net_id, segmentation_id) self.drv.delete_network_precommit(network_context) num_nets_provisioned = db_lib.num_nets_provisioned(tenant_id) expected_num_nets = 0 self.assertEqual(expected_num_nets, num_nets_provisioned, 'There should be %d nets, not %d' % (expected_num_nets, num_nets_provisioned)) def test_create_and_delete_ports(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 vms = ['vm1', 'vm2', 'vm3'] network_context = self._get_network_context(tenant_id, network_id, segmentation_id) self.drv.create_network_precommit(network_context) for vm_id in vms: port_id = '%s_%s' % (vm_id, 101) port_context = self._get_port_context(port_id, tenant_id, network_id, vm_id, network_context) self.drv.update_port_precommit(port_context) vm_list = db_lib.get_vms(tenant_id) provisioned_vms = len(vm_list) expected_vms = len(vms) self.assertEqual(expected_vms, provisioned_vms, 'There should be %d ' 'hosts, not %d' % (expected_vms, provisioned_vms)) # Now test the delete ports for vm_id in vms: port_id = '%s_%s' % (vm_id, 101) port_context = self._get_port_context(port_id, tenant_id, network_id, vm_id, network_context) self.drv.delete_port_precommit(port_context) vm_list = db_lib.get_vms(tenant_id) provisioned_vms = len(vm_list) expected_vms = 0 self.assertEqual(expected_vms, provisioned_vms, 'There should be %d ' 'VMs, not %d' % (expected_vms, provisioned_vms)) def test_cleanup_on_start(self): """Ensures that the driver cleans up the arista database on startup.""" ndb = db_lib.NeutronNets() # create a shared session session = db_api.get_writer_session() # Create some networks in neutron db n1_context = self._get_network_context('t1', 'n1', 10, session) ndb.create_network(n1_context, {'network': n1_context.current}) n2_context = self._get_network_context('t2', 'n2', 20, session) ndb.create_network(n2_context, {'network': n2_context.current}) n3_context = self._get_network_context('', 'ha-network', 100, session) ndb.create_network(n3_context, {'network': n3_context.current}) # Objects were created in different sessions, but Neutron no longer # implicitly flushes subtransactions. session.flush() # Create some networks in Arista db db_lib.remember_network_segment('t1', 'n1', 10, 'segment_id_10') db_lib.remember_network_segment('t2', 'n2', 20, 'segment_id_20') db_lib.remember_network_segment('admin', 'ha-network', 100, 'segment_id_100') db_lib.remember_network_segment('t3', 'n3', 30, 'segment_id_30') # Initialize the driver which should clean up the extra networks self.drv.initialize() worker = self.drv.get_workers()[0] with mock.patch.object(worker.sync_service, 'do_synchronize') as ds: worker.start() adb_networks = db_lib.get_networks(tenant_id='any') # 'n3' should now be deleted from the Arista DB self.assertEqual( set(('n1', 'n2', 'ha-network')), set(adb_networks.keys()) ) ds.assert_called_once_with() def _get_network_context(self, tenant_id, net_id, seg_id, session=None): network = {'id': net_id, 'tenant_id': tenant_id, 'name': net_id, 'admin_state_up': True, 'shared': False, } network_segments = [{'segmentation_id': seg_id, 'id': 'segment_%s' % net_id, 'network_type': 'vlan'}] return FakeNetworkContext(network, network_segments, network, session) def _get_port_context(self, port_id, tenant_id, net_id, vm_id, network): port = {'device_id': vm_id, 'device_owner': 'compute', 'binding:host_id': 'ubuntu1', 'binding:vnic_type': 'normal', 'tenant_id': tenant_id, 'id': port_id, 'network_id': net_id, 'name': '', 'status': 'ACTIVE', } binding_levels = [] for level, segment in enumerate(network.network_segments): binding_levels.append(FakePortBindingLevel(port['id'], level, 'vendor-1', segment['id'])) return FakePortContext(port, port, network, port['status'], binding_levels) class FakeNetworkContext(object): """To generate network context for testing purposes only.""" def __init__(self, network, segments=None, original_network=None, session=None): self._network = network self._original_network = original_network self._segments = segments self.is_admin = False self.tenant_id = network['tenant_id'] self.session = session or db_api.get_reader_session() @property def current(self): return self._network @property def original(self): return self._original_network @property def network_segments(self): return self._segments class FakePluginContext(object): """Plugin context for testing purposes only.""" def __init__(self, tenant_id): self.tenant_id = tenant_id self.session = mock.MagicMock() class FakePortContext(object): """To generate port context for testing purposes only.""" def __init__(self, port, original_port, network, status, binding_levels): self._plugin_context = None self._port = port self._original_port = original_port self._network_context = network self._status = status self._binding_levels = binding_levels @property def current(self): return self._port @property def original(self): return self._original_port @property def network(self): return self._network_context @property def host(self): return self._port.get(portbindings.HOST_ID) @property def original_host(self): return self._original_port.get(portbindings.HOST_ID) @property def status(self): return self._status @property def original_status(self): if self._original_port: return self._original_port['status'] @property def binding_levels(self): if self._binding_levels: return [{ driver_api.BOUND_DRIVER: level.driver, driver_api.BOUND_SEGMENT: self._expand_segment(level.segment_id) } for level in self._binding_levels] @property def bottom_bound_segment(self): if self._binding_levels: return self._expand_segment(self._binding_levels[-1].segment_id) def _expand_segment(self, segment_id): for segment in self._network_context.network_segments: if segment[driver_api.ID] == segment_id: return segment class FakePortBindingLevel(object): """Port binding object for testing purposes only.""" def __init__(self, port_id, level, driver, segment_id): self.port_id = port_id self.level = level self.driver = driver self.segment_id = segment_id networking-arista-2017.2.2/networking_arista/tests/unit/ml2/test_arista_sync.py000066400000000000000000000400721323242307100277120ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_utils import importutils from neutron.tests.unit import testlib_api from networking_arista.common import db_lib from networking_arista.ml2 import arista_sync class SyncServiceTest(testlib_api.SqlTestCase): """Test cases for the sync service.""" def setUp(self): super(SyncServiceTest, self).setUp() plugin_klass = importutils.import_class( "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") directory.add_plugin(plugin_constants.CORE, plugin_klass()) self.rpc = mock.MagicMock() ndb = db_lib.NeutronNets() self.sync_service = arista_sync.SyncService(self.rpc, ndb) self.sync_service._force_sync = False def test_region_in_sync(self): """Tests whether the region_in_sync() behaves as expected.""" region_updated_time = { 'regionName': 'RegionOne', 'regionTimestamp': '12345' } self.rpc.get_region_updated_time.return_value = region_updated_time self.sync_service._region_updated_time = None assert not self.sync_service._region_in_sync() self.sync_service._region_updated_time = region_updated_time assert self.sync_service._region_in_sync() def test_synchronize_required(self): """Tests whether synchronize() sends the right commands. This test verifies a scenario when the sync is required. """ region_updated_time = { 'regionName': 'RegionOne', 'regionTimestamp': '12345' } self.rpc.get_region_updated_time.return_value = region_updated_time self.sync_service._region_updated_time = { 'regionName': 'RegionOne', 'regionTimestamp': '0', } tenant_id = 'tenant-1' network_id = 'net-1' segmentation_id = 42 segment_id = 'segment_id_1' db_lib.remember_tenant(tenant_id) db_lib.remember_network_segment(tenant_id, network_id, segmentation_id, segment_id) self.rpc.get_tenants.return_value = {} self.rpc.sync_start.return_value = True self.rpc.sync_end.return_value = True self.rpc.check_cvx_availability.return_value = True self.rpc._baremetal_supported.return_value = False self.rpc.get_all_baremetal_hosts.return_value = {} self.sync_service.do_synchronize() expected_calls = [ mock.call.perform_sync_of_sg(), mock.call.check_cvx_availability(), mock.call.get_region_updated_time(), mock.call.sync_start(), mock.call.register_with_eos(sync=True), mock.call.check_supported_features(), mock.call.get_tenants(), mock.call.create_network_bulk( tenant_id, [{'network_id': network_id, 'segments': [], 'network_name': '', 'shared': False}], sync=True), mock.call.sync_end(), mock.call.get_region_updated_time() ] self.assertTrue(self.rpc.mock_calls == expected_calls, "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) db_lib.forget_network_segment(tenant_id, network_id) db_lib.forget_tenant(tenant_id) def test_sync_start_failure(self): """Tests that we force another sync when sync_start fails. The failure could be because a region does not exist or because another controller has the sync lock. """ self.sync_service.synchronize = mock.MagicMock() region_updated_time = { 'regionName': 'RegionOne', 'regionTimestamp': '424242' } self.rpc.get_region_updated_time.return_value = region_updated_time self.rpc.check_cvx_availability.return_value = True self.rpc.sync_start.return_value = False self.sync_service.do_synchronize() self.assertFalse(self.sync_service.synchronize.called) self.assertTrue(self.sync_service._force_sync) def test_synchronize_not_required(self): """Tests whether synchronize() sends the right commands. This test verifies a scenario when the sync is not required. """ region_updated_time = { 'regionName': 'RegionOne', 'regionTimestamp': '424242' } self.rpc.get_region_updated_time.return_value = region_updated_time self.rpc.check_cvx_availability.return_value = True self.sync_service._region_updated_time = { 'regionName': 'RegionOne', 'regionTimestamp': '424242', } self.rpc.sync_start.return_value = True self.rpc.sync_end.return_value = True self.sync_service.do_synchronize() # If the timestamps do match, then the sync should not be executed. expected_calls = [ mock.call.perform_sync_of_sg(), mock.call.check_cvx_availability(), mock.call.get_region_updated_time(), ] self.assertTrue(self.rpc.mock_calls[:4] == expected_calls, "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) def test_synchronize_one_network(self): """Test to ensure that only the required resources are sent to EOS.""" # Store two tenants in a db and a single tenant in EOS. # The sync should send details of the second tenant to EOS tenant_1_id = 'tenant-1' tenant_1_net_1_id = 'ten-1-net-1' tenant_1_net_1_seg_id = 11 db_lib.remember_tenant(tenant_1_id) db_lib.remember_network_segment(tenant_1_id, tenant_1_net_1_id, tenant_1_net_1_seg_id, 'segment_id_11') tenant_2_id = 'tenant-2' tenant_2_net_1_id = 'ten-2-net-1' tenant_2_net_1_seg_id = 21 db_lib.remember_tenant(tenant_2_id) db_lib.remember_network_segment(tenant_2_id, tenant_2_net_1_id, tenant_2_net_1_seg_id, 'segment_id_21') self.rpc.get_tenants.return_value = { tenant_1_id: { 'tenantVmInstances': {}, 'tenantBaremetalInstances': {}, 'tenantNetworks': { tenant_1_net_1_id: { 'networkId': tenant_1_net_1_id, 'shared': False, 'networkName': 'Net1', 'segmenationType': 'vlan', 'segmentationTypeId': tenant_1_net_1_seg_id, } } } } self.rpc.sync_start.return_value = True self.rpc.sync_end.return_value = True self.rpc.check_cvx_availability.return_value = True self.rpc.get_region_updated_time.return_value = {'regionTimestamp': 1} self.rpc._baremetal_supported.return_value = False self.rpc.get_all_baremetal_hosts.return_value = {} self.sync_service.do_synchronize() expected_calls = [ mock.call.perform_sync_of_sg(), mock.call.check_cvx_availability(), mock.call.get_region_updated_time(), mock.call.sync_start(), mock.call.register_with_eos(sync=True), mock.call.check_supported_features(), mock.call.get_tenants(), mock.call.create_network_bulk( tenant_2_id, [{'network_id': tenant_2_net_1_id, 'segments': [], 'network_name': '', 'shared': False}], sync=True), mock.call.sync_end(), mock.call.get_region_updated_time() ] self.rpc.assert_has_calls(expected_calls) db_lib.forget_network_segment(tenant_1_id, tenant_1_net_1_id) db_lib.forget_network_segment(tenant_2_id, tenant_2_net_1_id) db_lib.forget_tenant(tenant_1_id) db_lib.forget_tenant(tenant_2_id) def test_synchronize_all_networks(self): """Test to ensure that only the required resources are sent to EOS.""" # Store two tenants in a db and none on EOS. # The sync should send details of all tenants to EOS tenant_1_id = u'tenant-1' tenant_1_net_1_id = u'ten-1-net-1' tenant_1_net_1_seg_id = 11 db_lib.remember_tenant(tenant_1_id) db_lib.remember_network_segment(tenant_1_id, tenant_1_net_1_id, tenant_1_net_1_seg_id, 'segment_id_11') tenant_2_id = u'tenant-2' tenant_2_net_1_id = u'ten-2-net-1' tenant_2_net_1_seg_id = 21 db_lib.remember_tenant(tenant_2_id) db_lib.remember_network_segment(tenant_2_id, tenant_2_net_1_id, tenant_2_net_1_seg_id, 'segment_id_21') self.rpc.get_tenants.return_value = {} self.rpc.sync_start.return_value = True self.rpc.sync_end.return_value = True self.rpc.check_cvx_availability.return_value = True self.rpc.get_region_updated_time.return_value = {'regionTimestamp': 1} self.rpc._baremetal_supported.return_value = False self.rpc.get_all_baremetal_hosts.return_value = {} self.sync_service.do_synchronize() expected_calls = [ mock.call.perform_sync_of_sg(), mock.call.check_cvx_availability(), mock.call.get_region_updated_time(), mock.call.sync_start(), mock.call.register_with_eos(sync=True), mock.call.check_supported_features(), mock.call.get_tenants(), mock.call.create_network_bulk( tenant_1_id, [{'network_id': tenant_1_net_1_id, 'segments': [], 'network_name': '', 'shared': False}], sync=True), mock.call.create_network_bulk( tenant_2_id, [{'network_id': tenant_2_net_1_id, 'segments': [], 'network_name': '', 'shared': False}], sync=True), mock.call.sync_end(), mock.call.get_region_updated_time() ] # The create_network_bulk() can be called in different order. So split # it up. The first part checks if the initial set of methods are # invoked. idx = expected_calls.index(mock.call.get_tenants()) + 1 self.assertTrue(self.rpc.mock_calls[:idx] == expected_calls[:idx], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) # Check if tenant 1 networks are created. It must be one of the two # methods. self.assertTrue(self.rpc.mock_calls[idx] in expected_calls[idx:idx + 2], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) # Check if tenant 2 networks are created. It must be one of the two # methods. self.assertTrue(self.rpc.mock_calls[idx + 1] in expected_calls[idx:idx + 2], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) # Check if the sync end methods are invoked. self.assertTrue(self.rpc.mock_calls[idx + 2:] == expected_calls[idx + 2:], "Seen: %s\nExpected: %s" % ( self.rpc.mock_calls, expected_calls, ) ) db_lib.forget_network_segment(tenant_1_id, tenant_1_net_1_id) db_lib.forget_network_segment(tenant_2_id, tenant_2_net_1_id) db_lib.forget_tenant(tenant_1_id) db_lib.forget_tenant(tenant_2_id) def test_synchronize_shared_network_ports(self): """Test to ensure that shared network ports are synchronized. This is to ensure that ports whose tenant id does not match the network tenant id are still sync'd. The test stores a network and 2 ports in the neutron db. The sync should send details of the ports to EOS. """ host_id = 'host1' network_id = 'net-1' network_seg_id = 11 tenant_1_id = 'tenant-1' db_lib.remember_tenant(tenant_1_id) db_lib.remember_network_segment(tenant_1_id, network_id, network_seg_id, 'segment_id_11') port_1_id = 'port-1' device_1_id = 'vm-1' db_lib.remember_vm(device_1_id, host_id, port_1_id, network_id, tenant_1_id) port_2_id = 'port-2' device_2_id = 'vm-2' # Shared network ports are stored under the network owner tenant db_lib.remember_vm(device_2_id, host_id, port_2_id, network_id, tenant_1_id) self.rpc.get_tenants.return_value = { tenant_1_id: { 'tenantVmInstances': {}, 'tenantBaremetalInstances': {}, 'tenantNetworks': { network_id: { 'networkId': network_id, 'shared': True, 'networkName': '', 'segmenationType': 'vlan', 'segmentationTypeId': network_seg_id, } } } } self.rpc.sync_start.return_value = True self.rpc.sync_end.return_value = True self.rpc.check_cvx_availability.return_value = True self.rpc.get_region_updated_time.return_value = {'regionTimestamp': 1} self.rpc._baremetal_supported.return_value = False self.rpc.get_all_baremetal_hosts.return_value = {} self.sync_service.do_synchronize() expected_calls = [ mock.call.perform_sync_of_sg(), mock.call.check_cvx_availability(), mock.call.get_region_updated_time(), mock.call.sync_start(), mock.call.register_with_eos(sync=True), mock.call.check_supported_features(), mock.call.get_tenants(), mock.call.create_instance_bulk( tenant_1_id, {}, {'vm-1': {'ports': [{'deviceId': 'vm-1', 'portId': 'port-1', 'networkId': 'net-1', 'hosts': ['host1']}], 'vmId': 'vm-1', 'baremetal_instance': False}, 'vm-2': {'ports': [{'deviceId': 'vm-2', 'portId': 'port-2', 'networkId': 'net-1', 'hosts': ['host1']}], 'vmId': 'vm-2', 'baremetal_instance': False}}, {}, sync=True), mock.call.sync_end(), mock.call.get_region_updated_time() ] self.rpc.assert_has_calls(expected_calls) networking-arista-2017.2.2/networking_arista/tests/unit/ml2/test_mechanism_arista.py000066400000000000000000002310671323242307100307100ustar00rootroot00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib.api.definitions import portbindings from neutron_lib import constants as n_const from neutron_lib.plugins.ml2 import api as driver_api from neutron.db import models_v2 from neutron.plugins.ml2 import models as port_models from neutron.services.trunk import callbacks from neutron.services.trunk import models as trunk_models from neutron.tests.unit import testlib_api from networking_arista.ml2 import mechanism_arista INTERNAL_TENANT_ID = 'INTERNAL-TENANT-ID' class AristaDriverTestCase(testlib_api.SqlTestCase): """Main test cases for Arista Mechanism driver. Tests all mechanism driver APIs supported by Arista Driver. It invokes all the APIs as they would be invoked in real world scenarios and verifies the functionality. """ def setUp(self): super(AristaDriverTestCase, self).setUp() self.fake_rpc = mock.MagicMock() mechanism_arista.db_lib = self.fake_rpc self.drv = mechanism_arista.AristaDriver(self.fake_rpc) self.drv.ndb = mock.MagicMock() def tearDown(self): super(AristaDriverTestCase, self).tearDown() def test_create_network_precommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 self.drv.rpc.hpb_supported.return_value = True network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) self.drv.create_network_precommit(network_context) segment_id = network_context.network_segments[0]['id'] expected_calls = [ mock.call.hpb_supported(), mock.call.remember_tenant(tenant_id), mock.call.remember_network_segment(tenant_id, network_id, segmentation_id, segment_id) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) network_context.current['tenant_id'] = '' self.drv.create_network_precommit(network_context) segment_id = network_context.network_segments[0]['id'] expected_calls += [ mock.call.hpb_supported(), mock.call.remember_tenant(INTERNAL_TENANT_ID), mock.call.remember_network_segment(INTERNAL_TENANT_ID, network_id, segmentation_id, segment_id) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_create_network_postcommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) mechanism_arista.db_lib.is_network_provisioned.return_value = True network = network_context.current segments = network_context.network_segments net_dict = { 'network_id': network['id'], 'segments': segments, 'network_name': network['name'], 'shared': network['shared']} self.drv.create_network_postcommit(network_context) expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id), mock.call.create_network(tenant_id, net_dict), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) network_context.current['tenant_id'] = '' mechanism_arista.db_lib.is_network_provisioned.return_value = True network = network_context.current segments = network_context.network_segments net_dict = { 'network_id': network['id'], 'segments': segments, 'network_name': network['name'], 'shared': network['shared']} self.drv.create_network_postcommit(network_context) expected_calls += [ mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id), mock.call.create_network(INTERNAL_TENANT_ID, net_dict), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_delete_network_precommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 mechanism_arista.db_lib.are_ports_attached_to_network.return_value = ( False) self.drv.delete_network_precommit(network_context) expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id), mock.call.are_ports_attached_to_network(network_id), mock.call.forget_network_segment(tenant_id, network_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) network_context.current['tenant_id'] = '' mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 mechanism_arista.db_lib.are_ports_attached_to_network.return_value = ( False) self.drv.delete_network_precommit(network_context) expected_calls += [ mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id), mock.call.are_ports_attached_to_network(network_id), mock.call.forget_network_segment(INTERNAL_TENANT_ID, network_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_delete_network_precommit_with_ports(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 mechanism_arista.db_lib.are_ports_attached_to_network.return_value = ( True) try: self.drv.delete_network_precommit(network_context) except Exception: # exception is expeted in this case - as network is not # deleted in this case and exception is raised pass expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id), mock.call.are_ports_attached_to_network(network_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_delete_network_postcommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 self.drv.rpc.hpb_supported.return_value = True network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 self.drv.delete_network_postcommit(network_context) expected_calls = [ mock.call.hpb_supported(), mock.call.delete_network(tenant_id, network_id, network_context.network_segments), mock.call.num_nets_provisioned(tenant_id), mock.call.num_vms_provisioned(tenant_id), mock.call.forget_tenant(tenant_id), mock.call.delete_tenant(tenant_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) network_context.current['tenant_id'] = '' self.drv.delete_network_postcommit(network_context) expected_calls += [ mock.call.hpb_supported(), mock.call.delete_network(INTERNAL_TENANT_ID, network_id, network_context.network_segments), mock.call.num_nets_provisioned(INTERNAL_TENANT_ID), mock.call.num_vms_provisioned(INTERNAL_TENANT_ID), mock.call.forget_tenant(INTERNAL_TENANT_ID), mock.call.delete_tenant(INTERNAL_TENANT_ID), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def _test_create_port_precommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 vm_id = 'vm1' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) mechanism_arista.db_lib.is_network_provisioned.return_value = True network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] host_id = port_context.current['binding:host_id'] port_id = port_context.current['id'] self.drv.create_port_precommit(port_context) expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id, None), mock.call.remember_tenant(tenant_id), mock.call.remember_vm(vm_id, host_id, port_id, network_id, tenant_id) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 vm_id = 'vm2' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) port_context.current['tenant_id'] = '' mechanism_arista.db_lib.is_network_provisioned.return_value = True network = {'tenant_id': ''} self.drv.ndb.get_network_from_net_id.return_value = [network] host_id = port_context.current['binding:host_id'] port_id = port_context.current['id'] self.drv.create_port_precommit(port_context) expected_calls += [ mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id, None), mock.call.remember_tenant(INTERNAL_TENANT_ID), mock.call.remember_vm(vm_id, host_id, port_id, network_id, INTERNAL_TENANT_ID) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def _test_create_port_postcommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 vm_id = 'vm1' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1 network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] port = port_context.current device_id = port['device_id'] device_owner = port['device_owner'] host_id = port['binding:host_id'] port_id = port['id'] port_name = port['name'] profile = port['binding:profile'] self.drv.create_port_postcommit(port_context) expected_calls = [ mock.call.NeutronNets(), mock.call.is_port_provisioned(port_id), mock.call.is_network_provisioned(tenant_id, network_id, None), mock.call.plug_port_into_network(device_id, host_id, port_id, network_id, tenant_id, port_name, device_owner, None, [], None, switch_bindings=profile) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 vm_id = 'vm2' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) port_context.current['tenant_id'] = '' mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1 network = {'tenant_id': ''} self.drv.ndb.get_network_from_net_id.return_value = [network] port = port_context.current device_id = port['device_id'] device_owner = port['device_owner'] host_id = port['binding:host_id'] port_id = port['id'] port_name = port['name'] self.drv.create_port_postcommit(port_context) expected_calls += [ mock.call.is_port_provisioned(port_id), mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id, None), mock.call.plug_port_into_network(device_id, host_id, port_id, network_id, INTERNAL_TENANT_ID, port_name, device_owner, None, [], None, switch_bindings=profile) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # Now test the delete ports def test_delete_port_precommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 vm_id = 'vm1' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 self.drv.delete_port_precommit(port_context) port_id = port_context.current['id'] expected_calls = [ mock.call.is_port_provisioned(port_id, port_context.host), mock.call.forget_port(port_id, port_context.host), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 vm_id = 'vm2' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) port_context.current['tenant_id'] = '' mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 self.drv.delete_port_precommit(port_context) port_id = port_context.current['id'] expected_calls += [ mock.call.is_port_provisioned(port_id, port_context.host), mock.call.forget_port(port_id, port_context.host), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_delete_port_postcommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 vm_id = 'vm1' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 mechanism_arista.db_lib.is_network_provisioned.return_value = True port = port_context.current device_id = port['device_id'] host_id = port['binding:host_id'] port_id = port['id'] vnic_type = port['binding:vnic_type'] profile = port['binding:profile'] network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] physnet = dict(physnet='default') self.fake_rpc.get_physical_network.return_value = physnet self.drv.rpc.hpb_supported.return_value = True self.drv.delete_port_postcommit(port_context) expected_calls = [ mock.call.NeutronNets(), mock.call.get_physical_network(host_id), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.unplug_port_from_network(device_id, 'compute', host_id, port_id, network_id, tenant_id, None, vnic_type, switch_bindings=profile, trunk_details=None), mock.call.remove_security_group(None, profile), mock.call.num_nets_provisioned(tenant_id), mock.call.num_vms_provisioned(tenant_id), mock.call.forget_tenant(tenant_id), mock.call.delete_tenant(tenant_id), mock.call.hpb_supported(), ] for binding_level in port_context.binding_levels: expected_calls.append(mock.call.is_network_provisioned(tenant_id, network_id, None, binding_level['bound_segment']['id'])) mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 vm_id = 'vm2' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) port_context.current['tenant_id'] = '' port = port_context.current device_id = port['device_id'] host_id = port['binding:host_id'] port_id = port['id'] network = {'tenant_id': ''} self.drv.ndb.get_network_from_net_id.return_value = [network] physnet = dict(physnet='default') self.fake_rpc.get_physical_network.return_value = physnet self.drv.delete_port_postcommit(port_context) expected_calls += [ mock.call.get_physical_network(host_id), mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id, None, None), mock.call.unplug_port_from_network(device_id, 'compute', host_id, port_id, network_id, INTERNAL_TENANT_ID, None, vnic_type, switch_bindings=profile, trunk_details=None), mock.call.remove_security_group(None, profile), mock.call.num_nets_provisioned(INTERNAL_TENANT_ID), mock.call.num_vms_provisioned(INTERNAL_TENANT_ID), mock.call.forget_tenant(INTERNAL_TENANT_ID), mock.call.delete_tenant(INTERNAL_TENANT_ID), mock.call.hpb_supported(), ] for binding_level in port_context.binding_levels: expected_calls.append(mock.call.is_network_provisioned( INTERNAL_TENANT_ID, network_id, None, binding_level['bound_segment']['id'])) mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_delete_trunk_port_postcommit(self): # trunk port tenant_id = 'ten-3' network_id = 'net3-id' segmentation_id = 1003 vm_id = 'vm3' trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': 'subport_id', 'segmentation_id': 123, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 mechanism_arista.db_lib.is_network_provisioned.return_value = True port = port_context.current port['trunk_details'] = trunk_details device_id = port['device_id'] host_id = port['binding:host_id'] port_id = port['id'] vnic_type = port['binding:vnic_type'] profile = port['binding:profile'] network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] physnet = dict(physnet='default') self.fake_rpc.get_physical_network.return_value = physnet self.drv.rpc.hpb_supported.return_value = True self.drv.delete_port_postcommit(port_context) expected_calls = [ mock.call.NeutronNets(), mock.call.get_physical_network(host_id), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.unplug_port_from_network(device_id, 'compute', host_id, port_id, network_id, tenant_id, None, vnic_type, switch_bindings=profile, trunk_details=trunk_details), mock.call.remove_security_group(None, profile), mock.call.num_nets_provisioned(tenant_id), mock.call.num_vms_provisioned(tenant_id), mock.call.forget_tenant(tenant_id), mock.call.delete_tenant(tenant_id), mock.call.hpb_supported(), ] for binding_level in port_context.binding_levels: expected_calls.append(mock.call.is_network_provisioned( tenant_id, network_id, None, binding_level['bound_segment']['id'])) mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_set_subport(self): tenant_id = 'ten-4' network_id = 'net4-id' vm_id = 'vm4' trunk_id = 111 host_id = 'host1' port_id = 'p1' vnic_type = 'allowed' profile = [] sg = ['security-groups'] orig_sg = None trunk_port = models_v2.Port( tenant_id=tenant_id, network_id='net-trunk', device_id=vm_id, device_owner='compute:None') trunk_port.port_binding = port_models.PortBinding() trunk_port.port_binding.vnic_type = vnic_type trunk_port.port_binding.host = host_id trunk_port.port_binding.profile = profile mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \ trunk_port mechanism_arista.db_lib.is_network_provisioned.return_value = True resource = 'SubPort' event = 'AFTER_CREATE' trigger = 'AristaDriver' sp = dict(models_v2.Port( id=port_id, device_owner='trunk:subport', network_id=network_id, name='subport')) sp['security_groups'] = ['security-groups'] subport = trunk_models.SubPort() subport.port_id = port_id payload = callbacks.TrunkPayload(None, trunk_id, subports=[subport]) segments = [{'segmentation_id': 12, 'physical_network': 'default', 'id': 'segment_id', 'network_type': 'vlan'}] bindings = [] self.drv.ndb.get_port.return_value = sp self.drv.ndb.get_network_id_from_port_id.return_value = network_id mechanism_arista.db_lib.get_network_segments_by_port_id.return_value = \ segments self.drv.set_subport(resource, event, trigger, payload=payload) expected_calls = [ mock.call.NeutronNets(), mock.call.get_trunk_port_by_trunk_id(trunk_id), mock.call.get_network_segments_by_port_id('p1'), mock.call.plug_port_into_network(vm_id, host_id, port_id, network_id, tenant_id, 'subport', 'trunk:subport', sg, orig_sg, vnic_type, segments=segments, switch_bindings=bindings), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_unset_subport(self): tenant_id = 'ten-4' network_id = 'net4-id' vm_id = 'vm4' trunk_id = 111 device_owner = 'trunk:subport' host_id = 'host1' port_id = 'p1' vnic_type = 'allowed' profile = [] trunk_port = models_v2.Port( tenant_id=tenant_id, network_id=network_id, device_id=vm_id, device_owner='compute:None') trunk_port.port_binding = port_models.PortBinding() trunk_port.port_binding.vnic_type = vnic_type trunk_port.port_binding.host = host_id trunk_port.port_binding.profile = profile mechanism_arista.db_lib.num_nets_provisioned.return_value = 0 mechanism_arista.db_lib.num_vms_provisioned.return_value = 0 mechanism_arista.db_lib.get_trunk_port_by_trunk_id.return_value = \ trunk_port mechanism_arista.db_lib.is_network_provisioned.return_value = True resource = 'SubPort' event = 'AFTER_DELETE' trigger = 'AristaDriver' subport = trunk_models.SubPort() subport.port_id = port_id payload = callbacks.TrunkPayload(None, trunk_id, subports=[trunk_models.SubPort()]) sp = models_v2.Port( id=port_id, device_owner='trunk:subport', network_id=network_id ) self.drv.ndb.get_port.return_value = sp self.drv.unset_subport(resource, event, trigger, payload=payload) expected_calls = [ mock.call.NeutronNets(), mock.call.get_trunk_port_by_trunk_id(trunk_id), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.unplug_port_from_network(vm_id, device_owner, host_id, port_id, network_id, tenant_id, [], vnic_type, switch_bindings=profile, trunk_details=None), mock.call.remove_security_group([], profile), mock.call.num_nets_provisioned(tenant_id), mock.call.num_vms_provisioned(tenant_id), mock.call.forget_tenant(tenant_id), mock.call.delete_tenant(tenant_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_update_port_precommit(self): # Test the case where the port was not provisioned previsouly # If port is not provisioned, we should bail out mechanism_arista.db_lib.is_port_provisioned.return_value = False mechanism_arista.db_lib.is_network_provisioned.return_value = False tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 vm_id = 'vm1' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) host_id = port_context.current['binding:host_id'] port_id = port_context.current['id'] network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] # Make sure the port is not found mechanism_arista.db_lib.is_port_provisioned.return_value = False self.drv.update_port_precommit(port_context) segment_id = network_context.network_segments[-1]['id'] expected_calls = [ mock.call.NeutronNets(), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, None), mock.call.remember_tenant(tenant_id), mock.call.remember_vm(vm_id, host_id, port_id, network_id, tenant_id) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # Test the case where the port was provisioned, but it was not on # correct network. We should bail out in this case as well tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 vm_id = 'vm2' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) host_id = port_context.current['binding:host_id'] port_id = port_context.current['id'] # Force the check to return port found, but, network was not found mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = False network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] try: self.drv.update_port_precommit(port_context) except Exception: # This shoud raise an exception as this is not permitted # operation pass segment_id = network_context.network_segments[-1]['id'] expected_calls += [ mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, None), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If the tenant id is not specified, then the port should be created # with internal tenant id. tenant_id = 'ten-3' network_id = 'net3-id' segmentation_id = 1003 vm_id = 'vm3' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) # Port does not contain a tenant port_context.current['tenant_id'] = None host_id = port_context.current['binding:host_id'] port_id = port_context.current['id'] # Force the check to return port and network were found mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True network = {'tenant_id': None} self.drv.ndb.get_network_from_net_id.return_value = [network] self.drv.update_port_precommit(port_context) segment_id = network_context.network_segments[-1]['id'] expected_calls += [ mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, None), mock.call.update_port(vm_id, host_id, port_id, network_id, INTERNAL_TENANT_ID) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) router_id = 'r1' # DVR ports # does not exist. It should be added to the DB owner = n_const.DEVICE_OWNER_DVR_INTERFACE port_context = self._get_port_context(tenant_id, network_id, router_id, network_context, device_owner=owner) mechanism_arista.db_lib.is_port_provisioned.return_value = False self.drv.update_port_precommit(port_context) expected_calls += [ mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, host_id), mock.call.remember_tenant(tenant_id), mock.call.remember_vm(router_id, host_id, port_id, network_id, tenant_id) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # Unbind the port. It should be removed from the DB port_context._port['binding:host_id'] = None self.drv.update_port_precommit(port_context) expected_calls += [ mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.forget_port(port_id, 'ubuntu1'), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_update_port_postcommit(self): tenant_id = 'ten-1' network_id = 'net1-id' segmentation_id = 1001 vm_id = 'vm1' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) segments = network_context.network_segments port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1 mechanism_arista.db_lib.num_nets_provisioned.return_value = 1 mechanism_arista.db_lib.num_vms_provisioned.return_value = 1 self.drv.ndb.get_all_network_segments.return_value = segments mechanism_arista.db_lib.hpb_supported.return_value = True network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] port = port_context.current device_id = port['device_id'] device_owner = port['device_owner'] host_id = port['binding:host_id'] orig_host_id = port_context.original_host port_id = port['id'] port_name = port['name'] vnic_type = port['binding:vnic_type'] profile = port['binding:profile'] network_name = network_context.current['name'] self.drv.rpc.hpb_supported.return_value = True self.drv.update_port_postcommit(port_context) expected_calls = [ mock.call.NeutronNets(), mock.call.is_port_provisioned(port_id, None), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.create_network_segments(tenant_id, network_id, network_name, segments), mock.call.plug_port_into_network(device_id, host_id, port_id, network_id, tenant_id, port_name, device_owner, None, None, vnic_type, segments=segments, switch_bindings=profile, trunk_details=None) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # If there is no tenant id associated with the network, then the # network should be created under the tenant id in the context. tenant_id = 'ten-2' network_id = 'net2-id' segmentation_id = 1002 vm_id = 'vm2' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) segments = network_context.network_segments port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) port_context.current['tenant_id'] = '' mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1 mechanism_arista.db_lib.num_nets_provisioned.return_value = 1 mechanism_arista.db_lib.num_vms_provisioned.return_value = 1 self.drv.ndb.get_all_network_segments.return_value = segments network = {'tenant_id': ''} self.drv.ndb.get_network_from_net_id.return_value = [network] port = port_context.current device_id = port['device_id'] device_owner = port['device_owner'] host_id = port['binding:host_id'] orig_host_id = port_context.original_host port_id = port['id'] port_name = port['name'] network_name = network_context.current['name'] self.drv.update_port_postcommit(port_context) expected_calls += [ mock.call.is_port_provisioned(port_id, None), mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id, None, None), mock.call.is_network_provisioned(INTERNAL_TENANT_ID, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.create_network_segments(INTERNAL_TENANT_ID, network_id, network_name, segments), mock.call.plug_port_into_network(device_id, host_id, port_id, network_id, INTERNAL_TENANT_ID, port_name, device_owner, None, None, vnic_type, segments=segments, switch_bindings=profile, trunk_details=None) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # DVR ports tenant_id = 'ten-3' network_id = 'net3-id' segmentation_id = 1003 router_id = 'r1' network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) segments = network_context.network_segments network_name = network_context.current['name'] owner = n_const.DEVICE_OWNER_DVR_INTERFACE port_context = self._get_port_context(tenant_id, network_id, router_id, network_context, device_owner=owner) mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1 mechanism_arista.db_lib.num_nets_provisioned.return_value = 1 mechanism_arista.db_lib.num_vms_provisioned.return_value = 1 self.drv.ndb.get_all_network_segments.return_value = segments # New DVR port - context.original_host is not set and status is ACTIVE # port should be plugged into the network port = port_context.current device_id = port['device_id'] device_owner = port['device_owner'] host_id = port['binding:host_id'] orig_host_id = 'ubuntu1' port_id = port['id'] port_name = port['name'] vnic_type = port['binding:vnic_type'] profile = port['binding:profile'] self.drv.update_port_postcommit(port_context) expected_calls += [ mock.call.is_port_provisioned(port_id, port_context.host), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.create_network_segments(tenant_id, network_id, network_name, segments), mock.call.plug_port_into_network(device_id, host_id, port_id, network_id, tenant_id, port_name, device_owner, None, None, vnic_type, segments=segments, switch_bindings=profile, trunk_details=None) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # Delete DVR port - context.original is set and the status is DOWN. # port should be deleted port_context._status = n_const.PORT_STATUS_DOWN self.drv.update_port_postcommit(port_context) expected_calls += [ mock.call.is_port_provisioned(port_id, port_context.host), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.create_network_segments(tenant_id, network_id, network_name, segments), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.unplug_port_from_network(device_id, owner, orig_host_id, port_id, network_id, tenant_id, None, vnic_type, switch_bindings=profile, trunk_details=None), mock.call.remove_security_group(None, profile), mock.call.num_nets_provisioned(tenant_id), mock.call.num_vms_provisioned(tenant_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_update_trunk_port_postcommit(self): # trunk port tenant_id = 'ten-4' network_id = 'net4-id' segmentation_id = 1004 vm_id = 'vm4' trunk_details = {'sub_ports': [{'mac_address': 'mac_address', 'port_id': 'subport_id', 'segmentation_id': 123, 'segmentation_type': 'vlan'}], 'trunk_id': 'trunk_id'} network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) segments = network_context.network_segments port_context = self._get_port_context(tenant_id, network_id, vm_id, network_context) mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1 mechanism_arista.db_lib.num_nets_provisioned.return_value = 1 mechanism_arista.db_lib.num_vms_provisioned.return_value = 1 self.drv.ndb.get_all_network_segments.return_value = segments mechanism_arista.db_lib.hpb_supported.return_value = True network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] port = port_context.current port['trunk_details'] = trunk_details device_id = port['device_id'] device_owner = port['device_owner'] host_id = port['binding:host_id'] port_id = port['id'] port_name = port['name'] vnic_type = port['binding:vnic_type'] profile = port['binding:profile'] network_name = network_context.current['name'] self.drv.update_port_postcommit(port_context) expected_calls = [ mock.call.NeutronNets(), mock.call.is_port_provisioned(port_id, None), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.create_network_segments(tenant_id, network_id, network_name, segments), mock.call.plug_port_into_network(device_id, host_id, port_id, network_id, tenant_id, port_name, device_owner, None, None, vnic_type, segments=segments, switch_bindings=profile, trunk_details=trunk_details) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_update_port_precommit_dhcp_reserved_port(self): '''Test to ensure the dhcp port migration is handled correctly. Whenever a DHCP agent dies, the port is attached to a dummy device identified by DEVICE_ID_RESERVED_DHCP_PORT. Once the dhcp agent is respawned, the port is reattached to the newly created DHCP instance. This deletes the old dhcp port from the old host and creates the port on the new host. The dhcp port transitions from (Active ) to (Active ) to (Down ) to (Down ) to (Build ) to (Active ) When the port is updated to (Active ), the port needs to be removed from old host and when the port is updated to (Down ), it should be created on the new host. Removal and creation should take place in two updates because when the port is updated to (Down ), the original port would have the device id set to 'reserved_dhcp_port' and so it can't be removed from CVX at that point. ''' tenant_id = 't1' network_id = 'n1' old_device_id = 'old_device_id' new_device_id = 'new_device_id' reserved_device = n_const.DEVICE_ID_RESERVED_DHCP_PORT old_host = 'ubuntu1' new_host = 'ubuntu2' port_id = 101 segmentation_id = 1000 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) segment_id = network_context.network_segments[-1]['id'] # (Active ) to # (Active ) context = self._get_port_context( tenant_id, network_id, old_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP) context.current['device_id'] = reserved_device network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.reset_mock() self.drv.update_port_precommit(context) expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, None), mock.call.update_port(reserved_device, old_host, port_id, network_id, tenant_id) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Active ) to # (Down ) context = self._get_port_context( tenant_id, network_id, reserved_device, network_context, device_owner=n_const.DEVICE_OWNER_DHCP) context.current['device_id'] = new_device_id context.current['binding:host_id'] = new_host context.current['status'] = 'DOWN' mechanism_arista.db_lib.reset_mock() self.drv.update_port_precommit(context) expected_calls = [ mock.call.is_port_provisioned(port_id, old_host), mock.call.update_port(new_device_id, new_host, port_id, network_id, tenant_id) ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Down ) to # (Down ) to context = self._get_port_context( tenant_id, network_id, new_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP, status='DOWN') mechanism_arista.db_lib.reset_mock() self.drv.update_port_precommit(context) expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, None), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Down ) to # (Build ) to context = self._get_port_context( tenant_id, network_id, new_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP, status='DOWN') context.current['status'] = 'BUILD' mechanism_arista.db_lib.reset_mock() self.drv.update_port_precommit(context) expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, None), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Build ) to # (Active ) context = self._get_port_context( tenant_id, network_id, new_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP, status='BUILD') context.current['status'] = 'ACTIVE' mechanism_arista.db_lib.reset_mock() self.drv.update_port_precommit(context) expected_calls = [ mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, segment_id), mock.call.is_port_provisioned(port_id, None), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def test_update_port_postcommit_dhcp_reserved_port(self): '''Test to ensure the dhcp port migration is handled correctly. Whenever a DHCP agent dies, the port is attached to a dummy device identified by DEVICE_ID_RESERVED_DHCP_PORT. Once the dhcp agent is respawned, the port is reattached to the newly created DHCP instance. This deletes the old dhcp port from the old host and creates the port on the new host. The dhcp port transitions from (Active ) to (Active ) to (Down ) to (Down ) to (Build ) to (Active ) When the port is updated to (Active ), the port needs to be removed from old host and when the port is updated to (Down ), it should be created on the new host. Removal and creation should take place in two updates because when the port is updated to (Down ), the original port would have the device id set to 'reserved_dhcp_port' and so it can't be removed from CVX at that point. ''' tenant_id = 't1' network_id = 'n1' old_device_id = 'old_device_id' new_device_id = 'new_device_id' reserved_device = n_const.DEVICE_ID_RESERVED_DHCP_PORT old_host = 'ubuntu1' new_host = 'ubuntu2' port_id = 101 segmentation_id = 1000 network_context = self._get_network_context(tenant_id, network_id, segmentation_id, False) segments = network_context.network_segments # (Active ) to # (Active ) context = self._get_port_context( tenant_id, network_id, old_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP) context.current['device_id'] = reserved_device vnic_type = context.current['binding:vnic_type'] profile = context.current['binding:profile'] port_name = context.current['name'] network = {'tenant_id': tenant_id} self.drv.ndb.get_network_from_net_id.return_value = [network] mechanism_arista.db_lib.is_port_provisioned.return_value = True mechanism_arista.db_lib.is_network_provisioned.return_value = True mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1 mechanism_arista.db_lib.num_nets_provisioned.return_value = 1 mechanism_arista.db_lib.num_vms_provisioned.return_value = 1 self.drv.rpc.hpb_supported.return_value = False self.drv.ndb.get_network_segments.return_value = segments mechanism_arista.db_lib.reset_mock() self.drv.update_port_postcommit(context) expected_calls = [ mock.call.is_port_provisioned(port_id, None), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.unplug_port_from_network(old_device_id, n_const.DEVICE_OWNER_DHCP, old_host, port_id, network_id, tenant_id, None, vnic_type, switch_bindings=profile, trunk_details=None), mock.call.remove_security_group(None, profile), mock.call.num_nets_provisioned(tenant_id), mock.call.num_vms_provisioned(tenant_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Active ) to # (Down ) context = self._get_port_context( tenant_id, network_id, reserved_device, network_context, device_owner=n_const.DEVICE_OWNER_DHCP) context.current['device_id'] = new_device_id context.current['binding:host_id'] = new_host context.current['status'] = 'DOWN' physnet = dict(physnet='default') self.fake_rpc.get_physical_network.return_value = physnet context._original_binding_levels = context._binding_levels mechanism_arista.db_lib.reset_mock() self.drv.update_port_postcommit(context) expected_calls = [] expected_calls.extend( mock.call.is_network_provisioned(tenant_id, network_id, None, binding_level.segment_id) for binding_level in context._original_binding_levels) expected_calls += [ mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.unplug_port_from_network(reserved_device, n_const.DEVICE_OWNER_DHCP, old_host, port_id, network_id, tenant_id, None, vnic_type, switch_bindings=profile, trunk_details=None), mock.call.remove_security_group(None, profile), mock.call.num_nets_provisioned(tenant_id), mock.call.num_vms_provisioned(tenant_id), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Down ) to # (Down ) to context = self._get_port_context( tenant_id, network_id, new_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP, status='DOWN') context.current['binding:host_id'] = new_host context.original['binding:host_id'] = new_host mechanism_arista.db_lib.reset_mock() self.drv.update_port_postcommit(context) expected_calls = [ mock.call.is_port_provisioned(port_id, None), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.plug_port_into_network(new_device_id, new_host, port_id, network_id, tenant_id, port_name, n_const.DEVICE_OWNER_DHCP, None, None, vnic_type, segments=[], switch_bindings=profile, trunk_details=None), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Down ) to # (Build ) to context = self._get_port_context( tenant_id, network_id, new_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP, status='DOWN') context.current['binding:host_id'] = new_host context.original['binding:host_id'] = new_host context.current['status'] = 'BUILD' mechanism_arista.db_lib.reset_mock() self.drv.update_port_postcommit(context) expected_calls = [ mock.call.is_port_provisioned(port_id, None), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), mock.call.plug_port_into_network(new_device_id, new_host, port_id, network_id, tenant_id, port_name, n_const.DEVICE_OWNER_DHCP, None, None, vnic_type, segments=[], switch_bindings=profile, trunk_details=None), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) # (Build ) to # (Active ) context = self._get_port_context( tenant_id, network_id, new_device_id, network_context, device_owner=n_const.DEVICE_OWNER_DHCP, status='BUILD') context.current['binding:host_id'] = new_host context.original['binding:host_id'] = new_host context.current['status'] = 'ACTIVE' mechanism_arista.db_lib.reset_mock() self.drv.update_port_postcommit(context) expected_calls = [ mock.call.is_port_provisioned(port_id, None), mock.call.is_network_provisioned(tenant_id, network_id, None, None), mock.call.is_network_provisioned(tenant_id, network_id, segmentation_id, None), mock.call.hpb_supported(), ] mechanism_arista.db_lib.assert_has_calls(expected_calls) def _get_network_context(self, tenant_id, net_id, segmentation_id, shared): network = {'id': net_id, 'tenant_id': tenant_id, 'name': 'test-net', 'shared': shared} network_segments = [{'segmentation_id': segmentation_id, 'physical_network': u'default', 'id': 'segment-id-for-%s' % segmentation_id, 'network_type': 'vlan'}] return FakeNetworkContext(tenant_id, network, network_segments, network) def _get_port_context(self, tenant_id, net_id, device_id, network, device_owner='compute', status='ACTIVE'): port = {'device_id': device_id, 'device_owner': device_owner, 'binding:host_id': 'ubuntu1', 'name': 'test-port', 'tenant_id': tenant_id, 'id': 101, 'network_id': net_id, 'binding:vnic_type': None, 'binding:profile': [], 'security_groups': None, 'status': 'ACTIVE', } orig_port = {'device_id': device_id, 'device_owner': device_owner, 'binding:host_id': 'ubuntu1', 'name': 'test-port', 'tenant_id': tenant_id, 'id': 101, 'network_id': net_id, 'binding:vnic_type': None, 'binding:profile': [], 'security_groups': None, 'status': 'ACTIVE', } binding_levels = [] for level, segment in enumerate(network.network_segments): binding_levels.append(FakePortBindingLevel(port['id'], level, 'vendor-1', segment['id'])) return FakePortContext(port, dict(orig_port), network, status, binding_levels) def test_supported_device_owner(self): device_owner_list = [n_const.DEVICE_OWNER_DHCP, n_const.DEVICE_OWNER_DVR_INTERFACE, 'compute:*', 'baremetal:*', 'trunk:*'] for device_owner in device_owner_list: self.assertTrue(self.drv._supported_device_owner(device_owner), 'device_owner:%s should be a ' 'supported device owner' % device_owner) device_owner = 'compute:probe' self.assertFalse(self.drv._supported_device_owner(device_owner), 'device_owner:%s is not a ' 'supported device owner' % device_owner) class fake_keystone_info_class(object): """To generate fake Keystone Authentication token information Arista Driver expects Keystone auth info. This fake information is for testing only """ auth_uri = 'abc://host:35357/v3/' identity_uri = 'abc://host:5000' admin_user = 'neutron' admin_password = 'fun' admin_tenant_name = 'tenant_name' class FakeNetworkContext(object): """To generate network context for testing purposes only.""" def __init__(self, tenant_id, network, segments=None, original_network=None): self._network = network self._original_network = original_network self._segments = segments self._plugin_context = FakePluginContext(tenant_id) @property def current(self): return self._network @property def original(self): return self._original_network @property def network_segments(self): return self._segments class FakePortContext(object): """To generate port context for testing purposes only.""" def __init__(self, port, original_port, network, status, binding_levels): self._plugin_context = FakePluginContext('test') self._port = port self._original_port = original_port self._network_context = network self._status = status self._binding_levels = binding_levels self._original_binding_levels = [] @property def current(self): return self._port @property def original(self): return self._original_port @property def network(self): return self._network_context @property def host(self): return self._port.get(portbindings.HOST_ID) @property def original_host(self): return self._original_port.get(portbindings.HOST_ID) @property def status(self): return self._status @property def original_status(self): if self._original_port: return self._original_port['status'] @property def binding_levels(self): if self._binding_levels: return [{ driver_api.BOUND_DRIVER: level.driver, driver_api.BOUND_SEGMENT: self._expand_segment(level.segment_id) } for level in self._binding_levels] @property def bottom_bound_segment(self): if self._binding_levels: return self._expand_segment(self._binding_levels[-1].segment_id) def _expand_segment(self, segment_id): for segment in self._network_context.network_segments: if segment[driver_api.ID] == segment_id: return segment class FakePluginContext(object): """Plugin context for testing purposes only.""" def __init__(self, tenant_id): self.tenant_id = tenant_id self.session = mock.MagicMock() class FakePortBindingLevel(object): """Port binding object for testing purposes only.""" def __init__(self, port_id, level, driver, segment_id): self.port_id = port_id self.level = level self.driver = driver self.segment_id = segment_id networking-arista-2017.2.2/networking_arista/tests/unit/ml2/utils.py000066400000000000000000000027401323242307100254740ustar00rootroot00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. def setup_arista_wrapper_config(cfg, host='host', user='user'): cfg.CONF.set_override('eapi_host', host, "ml2_arista") cfg.CONF.set_override('eapi_username', user, "ml2_arista") cfg.CONF.set_override('sync_interval', 10, "ml2_arista") cfg.CONF.set_override('conn_timeout', 20, "ml2_arista") cfg.CONF.set_override('switch_info', ['switch1:user:pass'], "ml2_arista") cfg.CONF.set_override('sec_group_support', False, "ml2_arista") def port_dict_representation(port): return {port['portId']: {'device_owner': port['device_owner'], 'device_id': port['device_id'], 'name': port['name'], 'id': port['portId'], 'tenant_id': port['tenant_id'], 'network_id': port['network_id'], 'segments': port.get('segments', [])}} networking-arista-2017.2.2/requirements.txt000066400000000000000000000011101323242307100206670ustar00rootroot00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr!=2.1.0,>=2.0.0 # Apache-2.0 alembic>=0.8.10 # MIT neutron-lib>=1.9.0 # Apache-2.0 oslo.i18n!=3.15.2,>=2.1.0 # Apache-2.0 oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0 oslo.log>=3.22.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 oslo.utils>=3.20.0 # Apache-2.0 requests>=2.14.2 # Apache-2.0 six>=1.9.0 # MIT SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT networking-arista-2017.2.2/setup.cfg000066400000000000000000000034761323242307100172450ustar00rootroot00000000000000[metadata] name = networking_arista summary = Arista Networking drivers description-file = README.rst author = Arista Networks author-email = openstack-dev@arista.com home-page = https://github.com/openstack/networking-arista/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = networking_arista data_files = /etc/neutron/plugins/ml2 = etc/ml2_conf_arista.ini [global] setup-hooks = pbr.hooks.setup_hook [entry_points] neutron.ml2.mechanism_drivers = arista = networking_arista.ml2.mechanism_arista:AristaDriver arista_ml2 = networking_arista.ml2.mechanism_arista:AristaDriver neutron.service_plugins = arista_l3 = networking_arista.l3Plugin.l3_arista:AristaL3ServicePlugin neutron.db.alembic_migrations = networking-arista = networking_arista.db.migration:alembic_migrations neutron.ml2.type_drivers = arista_vlan = networking_arista.ml2.drivers.type_arista_vlan:AristaVlanTypeDriver [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = networking_arista/locale domain = networking-arista [update_catalog] domain = networking-arista output_dir = networking_arista/locale input_file = networking_arista/locale/networking-arista.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = networking_arista/locale/networking-arista.pot [wheel] universal = 1 networking-arista-2017.2.2/setup.py000066400000000000000000000020061323242307100171220ustar00rootroot00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) networking-arista-2017.2.2/test-requirements.txt000066400000000000000000000010621323242307100216520ustar00rootroot00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 mock>=2.0 # BSD python-subunit>=0.0.18 # Apache-2.0/BSD sphinx>=1.6.2 # BSD oslosphinx>=4.7.0 # Apache-2.0 oslotest>=1.10.0 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testtools>=1.4.0 # MIT testresources>=0.2.4 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD networking-arista-2017.2.2/tox.ini000066400000000000000000000021461323242307100167300ustar00rootroot00000000000000[tox] envlist = py27,py35,pep8 minversion = 1.6 skipsdist = True [testenv] usedevelop = True install_command = pip install -c {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -r requirements.txt -U {opts} {packages} setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning deps = -r{toxinidir}/test-requirements.txt -egit+https://git.openstack.org/openstack/neutron.git#egg=neutron whitelist_externals = sh commands = python setup.py testr --slowest --testr-args='{posargs}' [testenv:pep8] commands = flake8 neutron-db-manage --subproject networking-arista check_migration [testenv:venv] commands = {posargs} [testenv:cover] commands = python setup.py testr --coverage --testr-args='{posargs}' [testenv:docs] commands = python setup.py build_sphinx [flake8] # H803 skipped on purpose per list discussion. # E123, E125 skipped as they are invalid PEP-8. show-source = True ignore = E123,E125,H803 builtins = _ exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build [hacking] import_exceptions = networking_arista._i18n