networking-mlnx-15.0.2/0000755000413600001450000000000013575645772015072 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/devstack/0000755000413600001450000000000013575645771016675 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/devstack/lib/0000755000413600001450000000000013575645771017443 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/devstack/lib/eswitchd0000644000413600001450000000346713575645041021200 0ustar lennybmtl00000000000000#!/bin/bash -x # # lib/eswitchd # Functions to control the configuration and operation of the eswitchd service # # Dependencies: # # - ``functions`` file # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - # ``stack.sh`` calls the entry points in this order: # # - is_eswitchd_enabled # - install_eswitchd_mlnx # - configure_eswitchd_mlnx # - init_eswitchd_mlnx # - start_eswitchd_mlnx # - stop_eswitchd_mlnx # - cleanup_eswitchd_mlnx # Save trace setting MY_XTRACE=$(set +o | grep xtrace) set +o xtrace ESWITCHD_DIR=$DEST/neutron_ml2_mlnx ESWITCHD_BIN_DIR=$(get_python_exec_prefix) ESWITCHD_CONF_DIR=/etc/neutron/plugins/ml2 ESWITCHD_CONF_FILE=$ESWITCHD_CONF_DIR/eswitchd.conf function configure_eswitchd { # setting up configuration sudo install -d -o $STACK_USER -m 755 $ESWITCHD_CONF_DIR sudo cp -r $ESWITCHD_DIR/$ESWITCHD_CONF_FILE $ESWITCHD_CONF_DIR sudo cp -r $ESWITCHD_DIR/etc/neutron/rootwrap.d/eswitchd.filters /etc/neutron/rootwrap.d/ # configure nova rootwarp Q_NOVA_RR_CONF_FILE=$NOVA_CONF_DIR/rootwrap.conf sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_NOVA_RR_CONF_FILE if [[ -n "$PHYSICAL_INTERFACE_MAPPINGS" ]]; then iniset /$ESWITCHD_CONF_FILE DAEMON fabrics "$PHYSICAL_INTERFACE_MAPPINGS" else iniset /$ESWITCHD_CONF_FILE DAEMON fabrics "${PHYSICAL_NETWORK}:${PHYSICAL_INTERFACE}" fi } function init_eswitchd { : } function start_eswitchd { run_process eswitchd "${ESWITCHD_BIN_DIR}/eswitchd --config-file $ESWITCHD_CONF_FILE" } function stop_eswitchd { stop_process eswitchd } function 
check_eswitchd { : } function cleanup_eswitch { sudo rm -rf ${ESWITCHD_CONF_FILE} } # Restore trace $MY_XTRACE networking-mlnx-15.0.2/devstack/lib/neutron_ml2_mlnx0000644000413600001450000000760613575645041022667 0ustar lennybmtl00000000000000#!/bin/bash # # lib/neutron_ml2_mlnx # Functions to control the configuration and operation of the neutron ml2 mlnx service # # Dependencies: # # - ``functions`` file # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - # ``stack.sh`` calls the entry points in this order: # # - is_neutron_ml2_mlnx_enabled # - install_neutron_ml2_mlnx # - configure_neutron_ml2_mlnx # - init_neutron_ml2_mlnx # - start_neutron_ml2_mlnx # - stop_neutron_ml2_mlnx # - cleanup_neutron_ml2_mlnx # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- # # Set up default directories MLNX_AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-mlnx-agent" source ${DEST}/neutron_ml2_mlnx/devstack/lib/eswitchd # Entry Points # ------------ # cleanup_neutron_ml2_mlnx() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron_ml2_mlnx { # kill instances (nova) # delete image files (glance) # This function intentionally left blank : } # configure_neutron_ml2_mlnx() - Set config files, create data dirs, etc function configure_neutron_ml2_mlnx { if is_service_enabled mlnx-agt; then if [[ -z "$PHYSICAL_INTERFACE_MAPPINGS" ]] && [[ -n "$PHYSICAL_NETWORK" ]] && [[ -n "$PHYSICAL_INTERFACE" ]]; then PHYSICAL_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$PHYSICAL_INTERFACE fi if [[ -n "$PHYSICAL_INTERFACE_MAPPINGS" ]]; then iniset /$Q_PLUGIN_CONF_FILE eswitch physical_interface_mappings $PHYSICAL_INTERFACE_MAPPINGS iniset /$Q_PLUGIN_CONF_FILE sdn bind_normal_ports_physnets $PHYSICAL_NETWORK iniset /$Q_PLUGIN_CONF_FILE sdn bind_normal_ports True iniset /$Q_PLUGIN_CONF_FILE sdn sync_enabled False fi iniset /$Q_PLUGIN_CONF_FILE securitygroup noop fi if is_service_enabled eswitchd; 
then configure_eswitchd fi if is_service_enabled q-dhcp; then iniset /$Q_DHCP_CONF_FILE DEFAULT dhcp_broadcast_reply True iniset /$Q_DHCP_CONF_FILE DEFAULT interface_driver multi iniset /$Q_DHCP_CONF_FILE DEFAULT multi_interface_driver_mappings $PHYSICAL_NETWORK:ipoib iniset /$Q_DHCP_CONF_FILE DEFAULT ipoib_physical_interface $PHYSICAL_INTERFACE fi if is_service_enabled q-l3; then iniset /$Q_L3_CONF_FILE DEFAULT interface_driver multi iniset /$Q_L3_CONF_FILE DEFAULT multi_interface_driver_mappings $PHYSICAL_NETWORK:ipoib iniset /$Q_L3_CONF_FILE DEFAULT ipoib_physical_interface $PHYSICAL_INTERFACE fi } # init_neutron_ml2_mlnx() - Initialize databases, etc. function init_neutron_ml2_mlnx { # clean up from previous (possibly aborted) runs # create required data files : } # install_neutron_ml2_mlnx() - Collect source and prepare function install_neutron_ml2_mlnx { setup_develop $DEST/neutron_ml2_mlnx } # start_neutron_ml2_mlnx() - Start running processes, including screen function start_neutron_ml2_mlnx { # The quoted command must be a single command and not include an # shell metacharacters, redirections or shell builtins. 
if is_service_enabled eswitchd; then start_eswitchd fi if is_service_enabled mlnx-agt; then run_process mlnx-agt "$MLNX_AGENT_BINARY --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini" fi sleep 10 } # stop_neutron_ml2_mlnx() - Stop running processes (non-screen) function stop_neutron_ml2_mlnx { if is_service_enabled eswitchd; then stop_eswitchd cleanup_eswitch fi if is_service_enabled mlnx-agt; then stop_process mlnx-agt fi } # Restore xtrace $XTRACE # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: networking-mlnx-15.0.2/devstack/README.rst0000644000413600001450000000212713566516767020370 0ustar lennybmtl00000000000000================== MLNX agent enable ================== 1) Download DevStack 2) Add this as an external repository:: enable_plugin neutron_ml2_mlnx git://github.com/openstack/networking-mlnx 3) update Q_ML2_PLUGIN_MECHANISM_DRIVERS with mlnx_infiniband mech driver:: Q_ML2_PLUGIN_MECHANISM_DRIVERS=mlnx_infiniband,openvswitch 4) enable switchd mlnx-agt and mlnx_dnsmasq services:: enable_service mlnx-agt eswitchd mlnx_dnsmasq 5) run ``stack.sh`` ========================================== SDN Mechanism Driver Enabling in Devstack ========================================== 1) Download DevStack 2) Add this external repository: enable_plugin neutron_ml2_mlnx git://github.com/openstack/networking-mlnx 3) Add SDN plugin to mechanism drivers plugins list: Q_ML2_PLUGIN_MECHANISM_DRIVERS=mlnx_sdn_assist,openvswitch 4) Add SDN mandatory configurations, for example:: [[post-config|/etc/neutron/plugins/ml2/ml2_conf.ini]] [sdn] url = http:///neo domain = cloudx username = admin password = admin 5) run ``stack.sh`` networking-mlnx-15.0.2/devstack/plugin.sh0000755000413600001450000000177713566516767020550 0ustar lennybmtl00000000000000# plugin.sh - DevStack extras script to install mlnx_infiniband MD source ${DEST}/neutron_ml2_mlnx/devstack/lib/neutron_ml2_mlnx if [[ "$1" == "stack" && "$2" 
== "pre-install" ]]; then # Set up system services # no-op : elif [[ "$1" == "stack" && "$2" == "install" ]]; then # Perform installation of service source echo_summary "Installing MLNX Ml2 MD" install_neutron_ml2_mlnx elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # Configure after the other layer 1 and 2 services have been configured echo_summary "Configuring MLNX Ml2 MD" configure_neutron_ml2_mlnx elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # Initialize and start the template service ##init_template start_neutron_ml2_mlnx fi if [[ "$1" == "unstack" ]]; then # Shut down template services stop_neutron_ml2_mlnx fi if [[ "$1" == "clean" ]]; then # Remove state and transient data # Remember clean.sh first calls unstack.sh # no-op cleanup_neutron_ml2_mlnx fi networking-mlnx-15.0.2/doc/0000755000413600001450000000000013575645771015636 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/doc/source/0000755000413600001450000000000013575645771017136 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/doc/source/conf.py0000644000413600001450000000464513566516767020450 0ustar lennybmtl00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'oslosphinx' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'networking-mlnx' copyright = u'2015, Mellanox Technologies, Ltd' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None} networking-mlnx-15.0.2/doc/source/contributing.rst0000644000413600001450000000011213566516767022373 0ustar lennybmtl00000000000000============ Contributing ============ .. include:: ../../CONTRIBUTING.rstnetworking-mlnx-15.0.2/doc/source/index.rst0000644000413600001450000000100013566516767020770 0ustar lennybmtl00000000000000.. networking-mlnx documentation master file, created by sphinx-quickstart on Tue Jan 25 22:26:36 2015. 
You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to networking-mlnx's documentation! ======================================================== Contents: .. toctree:: :maxdepth: 2 readme installation usage contributing Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`networking-mlnx-15.0.2/doc/source/installation.rst0000644000413600001450000000032613566516767022374 0ustar lennybmtl00000000000000============ Installation ============ At the command line:: $ pip install networking-mlnx Or, if you have virtualenvwrapper installed:: $ mkvirtualenv networking-mlnx $ pip install networking-mlnx networking-mlnx-15.0.2/doc/source/readme.rst0000644000413600001450000000003613566516767021126 0ustar lennybmtl00000000000000.. include:: ../../README.rst networking-mlnx-15.0.2/doc/source/usage.rst0000644000413600001450000000013313566516767020773 0ustar lennybmtl00000000000000======== Usage ======== To use networking-mlnx in a project:: import networking_mlnx networking-mlnx-15.0.2/etc/0000755000413600001450000000000013575645771015644 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/etc/neutron/0000755000413600001450000000000013575645771017336 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/etc/neutron/plugins/0000755000413600001450000000000013575645771021017 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/etc/neutron/plugins/ml2/0000755000413600001450000000000013575645771021511 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/etc/neutron/plugins/ml2/eswitchd.conf0000755000413600001450000000143513566516767024202 0ustar lennybmtl00000000000000[DEFAULT] # Print more verbose output (set logging level to INFO instead of default WARNING level). # verbose = False # Print debugging output (set logging level to DEBUG instead of default WARNING level). 
# debug = False # log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s # log_date_format = %Y-%m-%d %H:%M:%S # use_syslog -> syslog # log_file and log_dir -> log_dir/log_file # (not log_file) and log_dir -> log_dir/{binary_name}.log # use_stderr -> stderr # (not user_stderr) and (not log_file) -> stdout # publish_errors -> notification system # use_syslog = False # syslog_log_facility = LOG_USER # use_stderr = True # log_file = # log_dir = [DAEMON] fabrics='default:ib0' networking-mlnx-15.0.2/etc/neutron/plugins/ml2/ml2_conf_sdn.ini0000644000413600001450000000357313566516767024567 0ustar lennybmtl00000000000000# Configuration for the SDN Mechanism Driver [sdn] # (StrOpt) mandatory param: SDN REST URL # If this is not set then no HTTP requests will be made. # Example: url = http://10.209.25.201/neo/ # url = # (StrOpt) mandatory param: Cloud domain name in SDN provider # This is an optional parameter, default value is cloudx # Example: domain = cloudx # domain = # (StrOpt) mandatory param: Username for HTTP basic authentication # to SDN Provider. # Example: username = admin # username = # (StrOpt) mandatory param: Password for HTTP basic authentication # to SDN Provider. # Example: password = admin # password = # (IntOpt) Timeout in seconds to wait for SDN Provider HTTP request completion. # This is an optional parameter, default value is 10 seconds. # Example: timeout = 15 # timeout = # (IntOpt) Timeout in seconds for the driver thread to fire off # another thread run through the journal database. # # sync_timeout = 10 # Example: sync_timeout = 10 # (IntOpt) Number of times to retry a journal transaction before # marking it 'failed'. To disable retry count value should be -1 # # retry_count = -1 # Example: retry_count = 5 # (IntOpt) Journal maintenance operations interval in seconds. # # maintenance_interval = 300 # Example: maintenance_interval = 30 # (IntOpt) Time to keep completed rows in seconds. 
# Completed rows retention will be checked every maintenance_interval by the # cleanup thread. # To disable completed rows deletion value should be -1 # # completed_rows_retention = 600 # Example: completed_rows_retention = 30 # (IntOpt) Timeout in seconds to wait before marking a processing # row back to pending state. # # processing_timeout = 100 # Example: maintenance_interval = 200 # (ListOpt) Comma-separated list of # that it will send notification. * means all physical_networks # # physical_networks = * # Example: physical_networks = datacenter1, datacenter3networking-mlnx-15.0.2/etc/neutron/plugins/mlnx/0000755000413600001450000000000013575645771021775 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/etc/neutron/plugins/mlnx/mlnx_conf.ini0000644000413600001450000000173513566516767024471 0ustar lennybmtl00000000000000[eswitch] # (ListOpt) Comma-separated list of # : tuples mapping physical # network names to the agent's node-specific physical network # interfaces to be used for flat and VLAN networks. All physical # networks listed in network_vlan_ranges on the server should have # mappings to appropriate interfaces on each agent. # # physical_interface_mappings = # Example: physical_interface_mappings = default:eth2 # (StrOpt) Eswitch daemon end point connection url # daemon_endpoint = 'tcp://127.0.0.1:60001' # The number of milliseconds the agent will wait for # response on request to daemon # request_timeout = 3000 # The number of retries the agent will send request # to daemon before giving up # retries = 3 # The backoff rate multiplier for waiting period between retries # on request to daemon, i.e. 
value of 2 will double # the request timeout each retry # backoff_rate = 2 [agent] # Agent's polling interval in seconds # polling_interval = 2 networking-mlnx-15.0.2/etc/neutron/rootwrap.d/0000755000413600001450000000000013575645771021435 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/etc/neutron/rootwrap.d/eswitchd.filters0000755000413600001450000000025213575645041024631 0ustar lennybmtl00000000000000# eswitchd-rootwrap command filters for network nodes # This file should be owned by (and only-writeable by) the root user [Filters] ebrctl: CommandFilter, ebrctl, root networking-mlnx-15.0.2/etc/policy.json0000644000413600001450000001465213566516767020050 0ustar lennybmtl00000000000000{ "context_is_admin": "role:admin", "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s", "admin_only": "rule:context_is_admin", "regular_user": "", "shared": "field:networks:shared=True", "shared_firewalls": "field:firewalls:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", "create_subnet": "rule:admin_or_network_owner", "get_subnet": "rule:admin_or_owner or rule:shared", "update_subnet": "rule:admin_or_network_owner", "delete_subnet": "rule:admin_or_network_owner", "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", "get_network:segments": "rule:admin_only", "get_network:provider:network_type": "rule:admin_only", "get_network:provider:physical_network": "rule:admin_only", "get_network:provider:segmentation_id": "rule:admin_only", "get_network:queue_id": "rule:admin_only", "create_network:shared": "rule:admin_only", "create_network:router:external": "rule:admin_only", "create_network:segments": "rule:admin_only", "create_network:provider:network_type": "rule:admin_only", 
"create_network:provider:physical_network": "rule:admin_only", "create_network:provider:segmentation_id": "rule:admin_only", "update_network": "rule:admin_or_owner", "update_network:segments": "rule:admin_only", "update_network:shared": "rule:admin_only", "update_network:provider:network_type": "rule:admin_only", "update_network:provider:physical_network": "rule:admin_only", "update_network:provider:segmentation_id": "rule:admin_only", "update_network:router:external": "rule:admin_only", "delete_network": "rule:admin_or_owner", "create_port": "", "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "create_port:binding:host_id": "rule:admin_only", "create_port:binding:profile": "rule:admin_only", "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "get_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_port:queue_id": "rule:admin_only", "get_port:binding:vif_type": "rule:admin_only", "get_port:binding:vif_details": "rule:admin_only", "get_port:binding:host_id": "rule:admin_only", "get_port:binding:profile": "rule:admin_only", "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "update_port:binding:host_id": "rule:admin_only", "update_port:binding:profile": "rule:admin_only", "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc", "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_router:ha": "rule:admin_only", "create_router": "rule:regular_user", "create_router:external_gateway_info:enable_snat": "rule:admin_only", "create_router:distributed": "rule:admin_only", 
"create_router:ha": "rule:admin_only", "get_router": "rule:admin_or_owner", "get_router:distributed": "rule:admin_only", "update_router:external_gateway_info:enable_snat": "rule:admin_only", "update_router:distributed": "rule:admin_only", "update_router:ha": "rule:admin_only", "delete_router": "rule:admin_or_owner", "add_router_interface": "rule:admin_or_owner", "remove_router_interface": "rule:admin_or_owner", "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only", "create_firewall": "", "get_firewall": "rule:admin_or_owner", "create_firewall:shared": "rule:admin_only", "get_firewall:shared": "rule:admin_only", "update_firewall": "rule:admin_or_owner", "update_firewall:shared": "rule:admin_only", "delete_firewall": "rule:admin_or_owner", "create_firewall_policy": "", "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls", "create_firewall_policy:shared": "rule:admin_or_owner", "update_firewall_policy": "rule:admin_or_owner", "delete_firewall_policy": "rule:admin_or_owner", "create_firewall_rule": "", "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls", "update_firewall_rule": "rule:admin_or_owner", "delete_firewall_rule": "rule:admin_or_owner", "create_qos_queue": "rule:admin_only", "get_qos_queue": "rule:admin_only", "update_agent": "rule:admin_only", "delete_agent": "rule:admin_only", "get_agent": "rule:admin_only", "create_dhcp-network": "rule:admin_only", "delete_dhcp-network": "rule:admin_only", "get_dhcp-networks": "rule:admin_only", "create_l3-router": "rule:admin_only", "delete_l3-router": "rule:admin_only", "get_l3-routers": "rule:admin_only", "get_dhcp-agents": "rule:admin_only", "get_l3-agents": "rule:admin_only", "get_loadbalancer-agent": "rule:admin_only", "get_loadbalancer-pools": "rule:admin_only", "create_floatingip": "rule:regular_user", "create_floatingip:floating_ip_address": "rule:admin_only", "update_floatingip": 
"rule:admin_or_owner", "delete_floatingip": "rule:admin_or_owner", "get_floatingip": "rule:admin_or_owner", "create_network_profile": "rule:admin_only", "update_network_profile": "rule:admin_only", "delete_network_profile": "rule:admin_only", "get_network_profiles": "", "get_network_profile": "", "update_policy_profiles": "rule:admin_only", "get_policy_profiles": "", "get_policy_profile": "", "create_metering_label": "rule:admin_only", "delete_metering_label": "rule:admin_only", "get_metering_label": "rule:admin_only", "create_metering_label_rule": "rule:admin_only", "delete_metering_label_rule": "rule:admin_only", "get_metering_label_rule": "rule:admin_only", "get_service_provider": "rule:regular_user", "get_lsn": "rule:admin_only", "create_lsn": "rule:admin_only" } networking-mlnx-15.0.2/networking_mlnx/0000755000413600001450000000000013575645771020316 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/cmd/0000755000413600001450000000000013575645771021061 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/cmd/eventlet/0000755000413600001450000000000013575645771022707 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/cmd/eventlet/agents/0000755000413600001450000000000013575645771024170 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/cmd/eventlet/agents/__init__.py0000644000413600001450000000000013575645017026260 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/cmd/eventlet/agents/mlnx_agent.py0000644000413600001450000000132113575645017026664 0ustar lennybmtl00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from networking_mlnx.plugins.ml2.drivers.mlnx.agent \ import mlnx_eswitch_neutron_agent def main(): mlnx_eswitch_neutron_agent.main() networking-mlnx-15.0.2/networking_mlnx/cmd/eventlet/__init__.py0000644000413600001450000000114613575645017025013 0ustar lennybmtl00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import eventlet eventlet.monkey_patch() networking-mlnx-15.0.2/networking_mlnx/cmd/__init__.py0000644000413600001450000000000013575645017023151 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/0000755000413600001450000000000013575645771020703 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/0000755000413600001450000000000013575645772022675 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/0000755000413600001450000000000013575645772026525 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/0000755000413600001450000000000013575645772030375 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/0000755000413600001450000000000013575645771031706 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/contract/0000755000413600001450000000000013575645772033524 5ustar lennybmtl00000000000000././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/contract/dfd1a1f22c4180_initial.pynetworking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/contract/dfd10000644000413600001450000000166413566516767034275 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """start networking-mlnx contract branch Revision ID: dfd1a1f22c4180 Create Date: 2016-07-24 12:34:56.789098 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = 'dfd1a1f22c4180' down_revision = 'start_networking_mlnx' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/0000755000413600001450000000000013575645772033166 5ustar lennybmtl00000000000000././@LongLink0000000000000000000000000000021600000000000011214 Lustar 00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/5d5e04ea01d5_sdn_journal_change_data_to_text.pynetworking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/5d5e040000644000413600001450000000322613566516767034023 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """sdn_journal change data to text Revision ID: 5d5e04ea01d5 Create Date: 2016-08-16 06:01:54.795542 """ from alembic import op import sqlalchemy as sa from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const # revision identifiers, used by Alembic. 
revision = '5d5e04ea01d5' down_revision = 'd02c04effb34' def upgrade(): op.alter_column('sdn_journal', 'data', existing_type=sa.PickleType(), type_=sa.Text, existing_nullable=True) op.alter_column('sdn_journal', 'state', existing_type=sa.Enum( sdn_const.PENDING, sdn_const.FAILED, sdn_const.PROCESSING, sdn_const.COMPLETED, name='state'), type_=sa.Enum( sdn_const.PENDING, sdn_const.FAILED, sdn_const.PROCESSING, sdn_const.MONITORING, sdn_const.COMPLETED, name='state'), existing_nullable=True) ././@LongLink0000000000000000000000000000017000000000000011213 Lustar 00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/65b6db113427b9_initial.pynetworking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/65b6db0000644000413600001450000000166013566516767034105 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """start networking-mlnx expand branch Revision ID: 65b6db113427b9 Create Date: 2016-07-24 12:34:56.789098 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '65b6db113427b9' down_revision = 'start_networking_mlnx' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): pass ././@LongLink0000000000000000000000000000020400000000000011211 Lustar 00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/9f30890cfbd1_adding_sdn_journal_db.pynetworking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/9f30890000644000413600001450000000357613566516767033767 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """adding sdn journal db Revision ID: 9f30890cfbd1 Create Date: 2016-08-07 10:57:15.895551 """ from alembic import op import sqlalchemy as sa from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const # revision identifiers, used by Alembic. 
revision = '9f30890cfbd1' down_revision = '65b6db113427b9' def upgrade(): op.create_table( 'sdn_journal', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('object_type', sa.String(length=36), nullable=False), sa.Column('object_uuid', sa.String(length=36), nullable=False), sa.Column('operation', sa.String(length=36), nullable=False), sa.Column('data', sa.PickleType(), nullable=True), sa.Column('job_id', sa.String(length=36), nullable=True), sa.Column('state', sa.Enum(sdn_const.PENDING, sdn_const.FAILED, sdn_const.PROCESSING, sdn_const.COMPLETED, name='state'), nullable=False, default=sdn_const.PENDING), sa.Column('retry_count', sa.Integer, default=0), sa.Column('created_at', sa.DateTime, default=sa.func.now()), sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(), onupdate=sa.func.now()) ) ././@LongLink0000000000000000000000000000021000000000000011206 Lustar 00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/d02c04effb34_adding_sdn_maintenance_db.pynetworking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/d02c040000644000413600001450000000332113566516767034005 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""adding sdn maintenance db Revision ID: d02c04effb34 Create Date: 2016-08-08 10:26:22.393410 """ from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const # revision identifiers, used by Alembic. revision = 'd02c04effb34' down_revision = '9f30890cfbd1' def upgrade(): maint_table = op.create_table( 'sdn_maintenance', sa.Column('id', sa.String(length=36), nullable=False), sa.Column('state', sa.Enum(sdn_const.PENDING, sdn_const.PROCESSING, name='state'), nullable=False), sa.Column('processing_operation', sa.String(70)), sa.Column('lock_updated', sa.TIMESTAMP, nullable=False, server_default=sa.func.now(), onupdate=sa.func.now()) ) # Insert the only row here that is used to synchronize the lock between # different Neutron processes. op.bulk_insert(maint_table, [{'id': uuidutils.generate_uuid(), 'state': sdn_const.PENDING}]) networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000644000413600001450000000001713566516767032315 0ustar lennybmtl00000000000000dfd1a1f22c4180 networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/EXPAND_HEAD0000644000413600001450000000001513575645041032041 0ustar lennybmtl000000000000005d5e04ea01d5 ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/start_networking_mlnx.pynetworking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/versions/start_networking_mln0000644000413600001450000000153013566516767034572 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """start networking-mlnx chain Revision ID: start_networking_mlnx Create Date: 2016-07-24 12:34:56.789098 """ # revision identifiers, used by Alembic. revision = 'start_networking_mlnx' down_revision = None def upgrade(): pass networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/__init__.py0000644000413600001450000000000013566516767030625 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/env.py0000644000413600001450000000545013575645017027663 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from logging import config as logging_config from alembic import context from neutron.db.migration.alembic_migrations import external from neutron_lib.db import model_base from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event from networking_mlnx.db.migration.models import head # noqa MYSQL_ENGINE = None ALEMBIC_VERSION_TABLE = 'mlnx_alembic_version' config = context.config neutron_config = config.neutron_config logging_config.fileConfig(config.config_file_name) target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object, name, type_, reflected, compare_to): if type_ == 'table' and name in external.TABLES: return False return True def run_migrations_offline(): set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object kwargs['version_table'] = ALEMBIC_VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, include_object=include_object, version_table=ALEMBIC_VERSION_TABLE ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() 
networking-mlnx-15.0.2/networking_mlnx/db/migration/alembic_migrations/script.py.mako0000644000413600001450000000200113566516767031323 0ustar lennybmtl00000000000000# Copyright ${create_date.year} Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} % endif def upgrade(): ${upgrades if upgrades else "pass"} networking-mlnx-15.0.2/networking_mlnx/db/migration/models/0000755000413600001450000000000013575645772024160 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/models/__init__.py0000644000413600001450000000000013566516767026260 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/migration/models/head.py0000644000413600001450000000155713566516767025444 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db.migration.models import head from networking_mlnx.db.models import sdn_journal_db # noqa from networking_mlnx.db.models import sdn_maintenance_db # noqa def get_metadata(): return head.model_base.BASEV2.metadata networking-mlnx-15.0.2/networking_mlnx/db/migration/__init__.py0000644000413600001450000000000013566516767024775 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/models/0000755000413600001450000000000013575645772022167 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/models/__init__.py0000644000413600001450000000000013566516767024267 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/models/sdn_journal_db.py0000644000413600001450000000316213575645041025513 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import model_base import sqlalchemy as sa from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const class SdnJournal(model_base.BASEV2, model_base.HasId): __tablename__ = 'sdn_journal' object_type = sa.Column(sa.String(36), nullable=False) object_uuid = sa.Column(sa.String(36), nullable=False) operation = sa.Column(sa.String(36), nullable=False) data = sa.Column(sa.Text, nullable=True) job_id = sa.Column(sa.String(36), nullable=True) state = sa.Column(sa.Enum(sdn_const.PENDING, sdn_const.FAILED, sdn_const.PROCESSING, sdn_const.MONITORING, sdn_const.COMPLETED), nullable=False, default=sdn_const.PENDING) retry_count = sa.Column(sa.Integer, default=0) created_at = sa.Column(sa.DateTime, server_default=sa.func.now()) last_retried = sa.Column(sa.TIMESTAMP, server_default=sa.func.now(), onupdate=sa.func.now()) networking-mlnx-15.0.2/networking_mlnx/db/models/sdn_maintenance_db.py0000755000413600001450000000227513566516767026346 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import model_base import sqlalchemy as sa from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const class SdnMaintenance(model_base.BASEV2, model_base.HasId): __tablename__ = 'sdn_maintenance' state = sa.Column(sa.Enum(sdn_const.PENDING, sdn_const.PROCESSING), nullable=False) processing_operation = sa.Column(sa.String(70)) lock_updated = sa.Column(sa.TIMESTAMP, nullable=False, server_default=sa.func.now(), onupdate=sa.func.now()) networking-mlnx-15.0.2/networking_mlnx/db/__init__.py0000644000413600001450000000000013566516767023004 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/db/db.py0000644000413600001450000001655113575645041021640 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from neutron_lib.db import api as db_api from oslo_db import api as oslo_db_api from oslo_serialization import jsonutils from sqlalchemy import asc from sqlalchemy import func from sqlalchemy import or_ from networking_mlnx.db.models import sdn_journal_db from networking_mlnx.db.models import sdn_maintenance_db from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const def check_for_pending_or_processing_ops(session, object_uuid, operation=None): q = session.query(sdn_journal_db.SdnJournal).filter( or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING, sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING), sdn_journal_db.SdnJournal.object_uuid == object_uuid) if operation: if isinstance(operation, (list, tuple)): q = q.filter(sdn_journal_db.SdnJournal.operation.in_(operation)) else: q = q.filter(sdn_journal_db.SdnJournal.operation == operation) return session.query(q.exists()).scalar() def check_for_pending_delete_ops_with_parent(session, object_type, parent_id): rows = session.query(sdn_journal_db.SdnJournal).filter( or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING, sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING), sdn_journal_db.SdnJournal.object_type == object_type, sdn_journal_db.SdnJournal.operation == sdn_const.DELETE ).all() for row in rows: if parent_id in row.data: return True return False def check_for_older_ops(session, row): q = session.query(sdn_journal_db.SdnJournal).filter( or_(sdn_journal_db.SdnJournal.state == sdn_const.PENDING, sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING), sdn_journal_db.SdnJournal.object_uuid == row.object_uuid, sdn_journal_db.SdnJournal.created_at < row.created_at, sdn_journal_db.SdnJournal.id != row.id) return session.query(q.exists()).scalar() def get_all_db_rows(session): return session.query(sdn_journal_db.SdnJournal).all() def get_all_db_rows_by_state(session, state): return session.query(sdn_journal_db.SdnJournal).filter_by( state=state).all() # Retry 
deadlock exception for Galera DB. # If two (or more) different threads call this method at the same time, they # might both succeed in changing the same row to pending, but at least one # of them will get a deadlock from Galera and will have to retry the operation. @db_api.retry_db_errors def get_oldest_pending_db_row_with_lock(session): with session.begin(): row = session.query(sdn_journal_db.SdnJournal).filter_by( state=sdn_const.PENDING).order_by( asc(sdn_journal_db.SdnJournal.last_retried)).with_for_update( ).first() if row: update_db_row_state(session, row, sdn_const.PROCESSING) return row @db_api.retry_db_errors def get_all_monitoring_db_row_by_oldest(session): with session.begin(): rows = session.query(sdn_journal_db.SdnJournal).filter_by( state=sdn_const.MONITORING).order_by( asc(sdn_journal_db.SdnJournal.last_retried)).all() return rows @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES) def update_db_row_state(session, row, state): row.state = state session.merge(row) session.flush() @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES) def update_db_row_job_id(session, row, job_id): row.job_id = job_id session.merge(row) session.flush() def update_pending_db_row_retry(session, row, retry_count): if row.retry_count >= retry_count and retry_count != -1: update_db_row_state(session, row, sdn_const.FAILED) else: row.retry_count += 1 update_db_row_state(session, row, sdn_const.PENDING) # This function is currently not used. # Deleted resources are marked as 'deleted' in the database. 
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES) def delete_row(session, row=None, row_id=None): if row_id: row = session.query(sdn_journal_db.SdnJournal).filter_by( id=row_id).one() if row: session.delete(row) session.flush() @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES) def create_pending_row(session, object_type, object_uuid, operation, data): data = jsonutils.dumps(data) row = sdn_journal_db.SdnJournal(object_type=object_type, object_uuid=object_uuid, operation=operation, data=data, created_at=func.now(), state=sdn_const.PENDING) session.add(row) # Keep session flush for unit tests. NOOP for L2/L3 events since calls are # made inside database session transaction with subtransactions=True. session.flush() @db_api.retry_db_errors def _update_maintenance_state(session, expected_state, state): with session.begin(): row = session.query(sdn_maintenance_db.SdnMaintenance).filter_by( state=expected_state).with_for_update().one_or_none() if row is None: return False row.state = state return True def lock_maintenance(session): return _update_maintenance_state(session, sdn_const.PENDING, sdn_const.PROCESSING) def unlock_maintenance(session): return _update_maintenance_state(session, sdn_const.PROCESSING, sdn_const.PENDING) def update_maintenance_operation(session, operation=None): """Update the current maintenance operation details. The function assumes the lock is held, so it mustn't be run outside of a locked context. 
""" op_text = None if operation: op_text = operation.__name__ with session.begin(): row = session.query(sdn_maintenance_db.SdnMaintenance).one_or_none() row.processing_operation = op_text def delete_rows_by_state_and_time(session, state, time_delta): with session.begin(): now = session.execute(func.now()).scalar() session.query(sdn_journal_db.SdnJournal).filter( sdn_journal_db.SdnJournal.state == state, sdn_journal_db.SdnJournal.last_retried < now - time_delta).delete( synchronize_session=False) session.expire_all() def reset_processing_rows(session, max_timedelta): with session.begin(): now = session.execute(func.now()).scalar() max_timedelta = datetime.timedelta(seconds=max_timedelta) rows = session.query(sdn_journal_db.SdnJournal).filter( sdn_journal_db.SdnJournal.last_retried < now - max_timedelta, sdn_journal_db.SdnJournal.state == sdn_const.PROCESSING, ).update({'state': sdn_const.PENDING}) return rows networking-mlnx-15.0.2/networking_mlnx/eswitchd/0000755000413600001450000000000013575645772022131 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/eswitchd/cli/0000755000413600001450000000000013575645772022700 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/eswitchd/cli/__init__.py0000755000413600001450000000000013566516767025003 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/eswitchd/cli/conn_utils.py0000755000413600001450000000707013575645017025426 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from networking_mlnx.eswitchd.common import constants from oslo_serialization import jsonutils import zmq from networking_mlnx.eswitchd.cli import exceptions from networking_mlnx.eswitchd.utils.helper_utils import set_conn_url REQUEST_TIMEOUT = 50000 class ConnUtil(object): def __init__(self): transport = constants.SOCKET_OS_TRANSPORT port = constants.SOCKET_OS_PORT addr = constants.SOCKET_OS_ADDR self.conn_url = set_conn_url(transport, addr, port) def send_msg(self, msg): context = zmq.Context() socket = context.socket(zmq.REQ) socket.setsockopt(zmq.LINGER, 0) socket.connect(self.conn_url) try: socket.send_string(msg) poller = zmq.Poller() poller.register(socket, zmq.POLLIN) conn = dict(poller.poll(REQUEST_TIMEOUT)) if conn: if conn.get(socket) == zmq.POLLIN: response_msg = socket.recv_string(zmq.NOBLOCK) response = self.parse_response_msg(response_msg) return response else: print('no result received') finally: socket.close() context.term() def parse_response_msg(self, recv_msg): msg = jsonutils.loads(recv_msg) error_msg = " " if msg['status'] == 'OK': if 'response' in msg: return msg['response'] return elif msg['status'] == 'FAIL': error_msg = "Action %s failed: %s" % (msg['action'], msg['reason']) else: error_msg = "Unknown operation status %s" % msg['status'] raise exceptions.MlxException(error_msg) def plug_nic(self, vnic_mac, device_id, fabric, vif_type, dev_name): msg = jsonutils.dumps({'action': 'plug_nic', 'vnic_mac': vnic_mac, 'device_id': device_id, 'fabric': fabric, 'vnic_type': vif_type, 'dev_name': dev_name}) recv_msg = self.send_msg(msg) try: dev = recv_msg['dev'] except Exception: error_msg = "Failed to plug_nic %s on %s" % (vnic_mac, fabric) raise exceptions.MlxException(error_msg) return dev def deallocate_nic(self, vnic_mac, fabric): msg = jsonutils.dumps({'action': 'delete_port', 'fabric': fabric, 'vnic_mac': vnic_mac}) recv_msg = 
self.send_msg(msg) try: dev = recv_msg['dev'] except Exception: error_msg = "Failed to deallocate %s on %s" % (vnic_mac, fabric) raise exceptions.MlxException(error_msg) return dev def get_tables(self, fabric): msg = jsonutils.dumps({'action': 'get_eswitch_tables', 'fabric': fabric}) recv_msg = self.send_msg(msg) tables = recv_msg['tables'] return tables networking-mlnx-15.0.2/networking_mlnx/eswitchd/cli/ebr_dbg.py0000755000413600001450000000443513575645017024637 0ustar lennybmtl00000000000000#!/usr/bin/python # Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import sys from networking_mlnx.eswitchd.cli import conn_utils from networking_mlnx.eswitchd.cli import exceptions action = sys.argv[1] client = conn_utils.ConnUtil() def pprint_table(out, table): """Prints out a table of data, padded for alignment @param out: Output stream (file-like object) @param table: The table to print. A list of lists. Each row must have the same number of columns. 
""" def get_max_width(table, index): """Get the maximum width of the given column index""" return max([len(str(row[index])) for row in table]) col_paddings = [] for i in range(len(table[0])): col_paddings.append(get_max_width(table, i)) for row in table: # left col print(row[0].ljust(col_paddings[0] + 1), file=out) # rest of the cols for i in range(1, len(row)): col = str(row[i]).rjust(col_paddings[i] + 2) print(col, file=out) print(file=out) def main(): if action == 'get-tables': fabric = sys.argv[2] try: result = client.get_tables(fabric) for fabric, tables in result.items(): print("FABRIC = %s" % fabric) print("========================") for table, data in tables.items(): print("TABLE: %s" % table) pprint_table(sys.stdout, data) print("========================") except exceptions.MlxException as e: sys.stderr.write("Error in get-tables command") sys.stderr.write(e.message) sys.exit(1) sys.exit(0) if __name__ == '__main__': main() networking-mlnx-15.0.2/networking_mlnx/eswitchd/cli/ebrctl.py0000755000413600001450000000573213575645041024524 0ustar lennybmtl00000000000000#!/usr/bin/python # Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import sys from networking_mlnx.eswitchd.cli import conn_utils from networking_mlnx.eswitchd.cli import exceptions client = conn_utils.ConnUtil() def parse(): """Main method that manages supported CLI commands. 
The actions that are supported throught the CLI are: write-sys, del-port, allocate-port and add-port Each action is matched with method that should handle it e.g. write-sys action is matched with write_sys method """ parser = argparse.ArgumentParser(prog='ebrctl') parser.add_argument('action', action='store_true') parent_parser = argparse.ArgumentParser(add_help=False) parent_parser.add_argument('vnic_mac') parent_parser.add_argument('device_id') parent_parser.add_argument('fabric') parent_parser.add_argument('vnic_type') subparsers = parser.add_subparsers() parser_add_port = subparsers.add_parser('add-port', parents=[parent_parser]) parser_add_port.add_argument('dev_name') parser_add_port.set_defaults(func=add_port) parser_del_port = subparsers.add_parser('del-port') parser_del_port.set_defaults(func=del_port) parser_del_port.add_argument('fabric') parser_del_port.add_argument('vnic_mac') parser_write_sys = subparsers.add_parser('write-sys') parser_write_sys.set_defaults(func=write_sys) parser_write_sys.add_argument('path') parser_write_sys.add_argument('value') args = parser.parse_args() args.func(args) def add_port(args): try: dev = client.plug_nic(args.vnic_mac, args.device_id, args.fabric, args.vnic_type, args.dev_name) except exceptions.MlxException as e: sys.stderr.write("Error in add-port command") sys.stderr.write(e.message) sys.exit(1) sys.stdout.write(dev) sys.exit(0) def del_port(args): try: client.deallocate_nic(args.vnic_mac, args.fabric) except exceptions.MlxException as e: sys.stderr.write("Error in del-port command") sys.stderr.write(e.message) sys.exit(1) sys.exit(0) def write_sys(args): try: fd = open(args.path, 'w') fd.write(args.value) fd.close() except Exception as e: sys.stderr.write("Error in write-sys command") sys.stderr.write(e.message) sys.exit(1) sys.exit(0) def main(): parse() networking-mlnx-15.0.2/networking_mlnx/eswitchd/cli/exceptions.py0000755000413600001450000000226213575645017025430 0ustar lennybmtl00000000000000# Copyright 2013 
Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. _FATAL_EXCEPTION_FORMAT_ERRORS = False class BaseException(Exception): message = "An unknown exception occurred" def __init__(self, **kwargs): try: self._error_string = self.message % kwargs except Exception as e: if _FATAL_EXCEPTION_FORMAT_ERRORS: raise e self._error_string = self.message def __str__(self): return self._error_string class MlxException(BaseException): def __init__(self, message=None): self.message = message def __str__(self): return 'MlxException: %s' % self.message networking-mlnx-15.0.2/networking_mlnx/eswitchd/common/0000755000413600001450000000000013575645772023421 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/eswitchd/common/__init__.py0000644000413600001450000000000013566516767025521 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/eswitchd/common/config.py0000644000413600001450000000367213575645041025235 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import sys from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) DEFAULT_INTERFACE_MAPPINGS = [] mlx_daemon_opts = [ cfg.StrOpt('socket_os_transport', default="tcp"), cfg.StrOpt('socket_os_port', default="60001"), cfg.StrOpt('socket_os_addr', default="0.0.0.0"), cfg.ListOpt('fabrics', default=DEFAULT_INTERFACE_MAPPINGS, help=("List of :")), cfg.IntOpt('default_timeout', default=5000, help=('Default timeout waiting for messages')), cfg.IntOpt('max_polling_count', default=5, help=('Daemon will do sync after max_polling_count ' '* default_timeout')), cfg.StrOpt('rootwrap_conf', default='/etc/neutron/rootwrap.conf', help=('rootwrap configuration file')) ] cfg.CONF.register_opts(mlx_daemon_opts, "DAEMON") logging.register_options(cfg.CONF) def init(args, **kwargs): cfg.CONF(args=args, project='eswitchd', **kwargs) def setup_logging(): """Sets up the logging options for a log with supplied name.""" logging.setup(cfg.CONF, 'eswitchd') LOG.info("Logging enabled!") LOG.info("%(prog)s Started!", {'prog': sys.argv[0]}) LOG.debug("command line: %s", " ".join(sys.argv)) networking-mlnx-15.0.2/networking_mlnx/eswitchd/common/constants.py0000644000413600001450000000347213575645041026002 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
VENDOR = '0x15b3' VIF_TYPE_HOSTDEV = 'ib_hostdev' VPORT_STATE_ATTACHED = 'attached' VPORT_STATE_PENDING = 'pending' VPORT_STATE_UNPLUGGED = 'unplugged' UNTAGGED_VLAN_ID = 4095 INVALID_MAC = '00:00:00:00:00:00' # MLNX4 MLNX4_ADMIN_GUID_PATH = "/sys/class/infiniband/%s/iov/ports/%s/admin_guids/%s" MLNX4_GUID_INDEX_PATH = "/sys/class/infiniband/%s/iov/%s/ports/%s/gid_idx/0" MLNX4_PKEY_INDEX_PATH = "/sys/class/infiniband/%s/iov/%s/ports/%s/pkey_idx/%s" # MLNX5 MLNX5_GUID_NODE_PATH = ('/sys/class/infiniband/%(module)s/device/sriov/' '%(vf_num)s/node') MLNX5_GUID_PORT_PATH = ('/sys/class/infiniband/%(module)s/device/sriov/' '%(vf_num)s/port') MLNX5_GUID_POLICY_PATH = ('/sys/class/infiniband/%(module)s/device/sriov/' '%(vf_num)s/policy') UNBIND_PATH = '/sys/bus/pci/drivers/mlx5_core/unbind' BIND_PATH = '/sys/bus/pci/drivers/mlx5_core/bind' MLNX4_INVALID_GUID = 'ffffffffffffffff' MLNX5_INVALID_GUID = 'ff:ff:ff:ff:ff:ff:ff:ff' CONN_URL = '%(transport)s://%(addr)s:%(port)s' MLNX4_DRIVER_TYPE = 'mlx4_core' MLNX5_DRIVER_TYPE = 'mlx5_core' MLNX4_DEVICE_TYPE = 'MLNX4' MLNX5_DEVICE_TYPE = 'MLNX5' SOCKET_OS_PORT = '60001' SOCKET_OS_TRANSPORT = 'tcp' SOCKET_OS_ADDR = '0.0.0.0' networking-mlnx-15.0.2/networking_mlnx/eswitchd/common/exceptions.py0000644000413600001450000000137613575645041026150 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
class MlxException(Exception):
    """Base exception for eswitchd errors.

    Carries an optional human-readable message, exposed both via the
    ``message`` attribute and through ``str()``.
    """

    def __init__(self, message=None):
        # Fix: also initialize the Exception base class so that
        # ``exc.args`` is populated and the exception pickles/repr's
        # correctly; the original stored the message only on ``self``.
        super(MlxException, self).__init__(message)
        self.message = message

    def __str__(self):
        return 'MlxException: %s' % self.message
from oslo_log import log as logging
import six

LOG = logging.getLogger(__name__)


class DeviceDB(object):
    """In-memory registry mapping fabric -> PF -> PF/VF details."""

    def __init__(self):
        # {fabric: {pf: {'vfs': {...}, 'pf_device_type': ...,
        #                'hca_port': ..., 'pf_mlx_dev': ...}}}
        self.device_db = {}

    def add_fabric(self, fabric, pf, hca_port, pf_mlx_dev):
        """Register PF ``pf`` under ``fabric`` with empty VF info."""
        pf_details = {'vfs': {},
                      'pf_device_type': None,
                      'hca_port': hca_port,
                      'pf_mlx_dev': pf_mlx_dev}
        if self.device_db.get(fabric) is None:
            self.device_db[fabric] = {pf: pf_details}
        else:
            self.device_db[fabric][pf] = pf_details

    def get_fabric_details(self, fabric, pf=None):
        """Return details for all PFs of ``fabric``, or one PF's details.

        Raises KeyError for unknown fabric/pf.
        """
        if pf is None:
            return self.device_db[fabric]
        return self.device_db[fabric][pf]

    def set_fabric_devices(self, fabric, pf, vfs):
        """Attach VF map to a PF and derive the PF device type.

        All VFs of a PF share one device type, so any entry is sampled.
        Fix: use builtin ``next(iter(...))`` instead of
        ``six.next(six.itervalues(...))`` — equivalent on both Python 2
        and 3 without the six indirection.
        """
        self.device_db[fabric][pf]['vfs'] = vfs
        vf = next(iter(vfs.values()))
        self.device_db[fabric][pf]['pf_device_type'] = vf['vf_device_type']

    def get_dev_fabric(self, dev):
        """Return the fabric owning VF ``dev``, or None if unknown."""
        for fabric in self.device_db:
            for pf in self.device_db[fabric]:
                if dev in self.device_db[fabric][pf]['vfs']:
                    return fabric
from oslo_log import log as logging

from networking_mlnx.eswitchd.common import constants

LOG = logging.getLogger(__name__)


class eSwitchDB(object):
    """Per-PF state database.

    ``port_table`` maps VF device name -> port state; ``port_policy``
    maps vnic MAC -> {vlan, dev, device_id}.
    """

    def __init__(self, pf, vfs):
        self.port_table = {}
        self.port_policy = {}
        self.vfs = vfs
        self.pf = pf

    def create_port(self, port_name, port_type):
        """Register an empty port entry for a VF."""
        self.port_table.update({port_name: {'type': port_type,
                                            'vnic': None,
                                            'state': None,
                                            'alias': None,
                                            'device_id': None}})

    def plug_nic(self, port_name):
        """Mark a port as attached to a running instance."""
        self.port_table[port_name]['state'] = constants.VPORT_STATE_ATTACHED
        # Fix: the original passed self.port_table as a positional arg
        # with no format placeholder, which raises a logging formatting
        # error at runtime; use lazy %s interpolation instead.
        LOG.info("port table: %s", self.port_table)

    def get_port_state(self, dev):
        """Return the state of ``dev``, or None if the port is unknown."""
        state = None
        dev = self.port_table.get(dev)
        if dev:
            state = dev.get('state')
        return state

    def get_attached_vnics(self):
        """Return {mac: {'mac': mac, 'device_id': id}} for attached ports."""
        vnics = {}
        for port in self.port_table.values():
            vnic_mac = port['vnic']
            state = port['state']
            if vnic_mac and state == constants.VPORT_STATE_ATTACHED:
                device_id = self.port_policy[vnic_mac]['device_id']
                vnics[vnic_mac] = {'mac': vnic_mac,
                                   'device_id': device_id}
        return vnics

    def get_port_policy_matrix(self):
        """Return port_policy as a header row + data rows (for display)."""
        table_matrix = [['VNIC_MAC', 'VLAN', 'DEV', 'DEVICE_ID']]
        for vnic_mac, port_policy in self.port_policy.items():
            table_matrix.append([vnic_mac, port_policy['vlan'],
                                 port_policy['dev'],
                                 port_policy['device_id']])
        return table_matrix

    def get_port_table(self):
        return self.port_table

    def get_port_table_matrix(self):
        """Return port_table as a header row + data rows (for display)."""
        table_matrix = [['PORT_NAME', 'TYPE', 'VNIC', 'STATE', 'ALIAS',
                         'DEVICE_ID']]
        for port_name, port_data in self.port_table.items():
            table_matrix.append([port_name, port_data['type'],
                                 port_data['vnic'], port_data['state'],
                                 port_data['alias'],
                                 port_data['device_id']])
        return table_matrix

    def create_vnic(self, vnic_mac):
        """Create an empty policy entry for a MAC if not present."""
        if not self.vnic_exists(vnic_mac):
            self.port_policy.update({vnic_mac: {'vlan': None,
                                                'dev': None,
                                                'device_id': None,
                                                'port_id': None}})

    def get_dev_for_vnic(self, vnic_mac):
        """Return the VF device backing ``vnic_mac``, or None."""
        dev = None
        if vnic_mac in self.port_policy:
            if 'dev' in self.port_policy[vnic_mac]:
                dev = self.port_policy[vnic_mac]['dev']
        return dev

    def vnic_exists(self, vnic_mac):
        return vnic_mac in self.port_policy

    def attach_vnic(self, port_name, device_id, vnic_mac, dev_name=None):
        """Bind a vnic MAC to a VF port in PENDING state.

        Returns True when a new policy binding was made, False when the
        MAC was already bound or is the INVALID placeholder MAC.
        """
        self.port_table[port_name]['vnic'] = vnic_mac
        self.port_table[port_name]['alias'] = dev_name
        self.port_table[port_name]['state'] = constants.VPORT_STATE_PENDING
        self.port_table[port_name]['device_id'] = device_id
        dev = self.get_dev_for_vnic(vnic_mac)
        if not dev and vnic_mac != constants.INVALID_MAC:
            if vnic_mac in self.port_policy:
                vnic_mac_entry = self.port_policy[vnic_mac]
                vnic_mac_entry['dev'] = port_name
                vnic_mac_entry['device_id'] = device_id
                vnic_mac_entry.setdefault('vlan', None)
            else:
                self.port_policy.update({vnic_mac: {'vlan': None,
                                                    'dev': port_name,
                                                    'device_id': device_id,
                                                    }})
            return True
        return False

    def detach_vnic(self, vnic_mac):
        """Unbind a MAC from its VF; returns the VF name, or None."""
        dev = self.get_dev_for_vnic(vnic_mac)
        if dev:
            self.port_table[dev]['vnic'] = None
            self.port_table[dev]['alias'] = None
            self.port_table[dev]['state'] = constants.VPORT_STATE_UNPLUGGED
            self.port_table[dev]['device_id'] = None
        return dev

    def port_release(self, vnic_mac):
        """Drop the policy entry for a MAC; returns it (with its port
        type) or None when nothing was bound."""
        try:
            dev = self.get_dev_for_vnic(vnic_mac)
            vnic = self.port_policy.pop(vnic_mac)
            self.port_table[dev]['state'] = None
            vnic['type'] = self.port_table[vnic['dev']]['type']
            return vnic
        except KeyError:
            return
        except IndexError:
            return

    def set_vlan(self, vnic_mac, vlan):
        """Record the VLAN for a MAC, creating the entry if needed."""
        if not self.vnic_exists(vnic_mac):
            self.create_vnic(vnic_mac)
        self.port_policy[vnic_mac]['vlan'] = vlan
# in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging

LOG = logging.getLogger(__name__)


def get_root_helper():
    # Escalate through neutron-rootwrap using the filter configuration
    # pointed at by [DAEMON] rootwrap_conf.
    root_helper = 'sudo neutron-rootwrap %s' % cfg.CONF.DAEMON.rootwrap_conf
    return root_helper


def execute(*cmd, **kwargs):
    # Run a command via oslo processutils. Unless the caller supplied
    # its own root_helper, force run_as_root with the rootwrap helper.
    if kwargs.get('root_helper') is None:
        kwargs['run_as_root'] = True
        kwargs['root_helper'] = get_root_helper()
    return processutils.execute(*cmd, **kwargs)
from networking_mlnx.eswitchd.common import constants


def set_conn_url(transport, addr, port):
    """Return connection string for using in ZMQ connect."""
    return constants.CONN_URL % {'transport': transport,
                                 'port': port,
                                 'addr': addr}


import glob
import os
import re

from oslo_log import log as logging

from networking_mlnx._i18n import _LE

LOG = logging.getLogger(__name__)


class pciUtils(object):
    """sysfs helpers for discovering Mellanox PF/VF devices."""

    ETH_PATH = "/sys/class/net/%(interface)s"
    ETH_DEV = ETH_PATH + "/device"
    ETH_DRIVER = ETH_DEV + "/driver"
    ETH_PORT = ETH_PATH + "/dev_id"
    INFINIBAND_PATH = 'device/infiniband'
    VENDOR_PATH = ETH_DEV + '/vendor'
    # Fix: restore the named group that the extraction stripped; the
    # original text r'virtfn(?P\d+)' is a re.error at import time, and
    # get_vfs_info() reads result.group('vf_num').
    _VIRTFN_RE = re.compile(r'virtfn(?P<vf_num>\d+)')
    VFS_PATH = ETH_DEV + "/virtfn*"

    def get_vfs_info(self, pf):
        """Return {vf_pci_addr: {'vf_num': n, 'vf_device_type': t}}
        for every virtfn* link under the PF's device directory."""
        vfs_info = {}
        try:
            dev_path = self.ETH_DEV % {'interface': pf}
            dev_info = os.listdir(dev_path)
            for dev_filename in dev_info:
                result = self._VIRTFN_RE.match(dev_filename)
                if result and result.group('vf_num'):
                    dev_file = os.path.join(dev_path, dev_filename)
                    # virtfnN is a symlink like ./0000:03:00.1
                    vf_pci = os.readlink(dev_file).strip("./")
                    vf_num = result.group('vf_num')
                    vf_device_type = self.get_pf_device_type(pf)
                    vfs_info[vf_pci] = {'vf_num': vf_num,
                                        'vf_device_type': vf_device_type}
        except Exception:
            LOG.error("PCI device %s not found", pf)
        return vfs_info

    def get_dev_attr(self, attr_path):
        """Read the first line of a sysfs attribute; None on IOError."""
        # Fix: use a context manager so the fd is always closed (the
        # original leaked the file object).
        try:
            with open(attr_path) as fd:
                return fd.readline().strip()
        except IOError:
            return

    def verify_vendor_pf(self, pf, vendor_id=constants.VENDOR):
        """True iff the PF's PCI vendor matches ``vendor_id``."""
        vendor_path = pciUtils.VENDOR_PATH % {'interface': pf}
        return self.get_dev_attr(vendor_path) == vendor_id

    def get_pf_device_type(self, pf):
        """Map the PF's bound kernel driver to MLNX4/MLNX5, or None
        if the driver link cannot be read."""
        device_type = None
        try:
            driver_type = os.readlink(self.ETH_DRIVER % {'interface': pf})
            driver_type = os.path.basename(driver_type)
            if driver_type == constants.MLNX4_DRIVER_TYPE:
                device_type = constants.MLNX4_DEVICE_TYPE
            elif driver_type == constants.MLNX5_DRIVER_TYPE:
                device_type = constants.MLNX5_DEVICE_TYPE
            else:
                # Fix: the original passed driver_type as a second
                # Exception argument, so the message never got the
                # driver name substituted; format it explicitly.
                raise Exception(
                    _LE('driver type %s is not supported') % driver_type)
        except IOError:
            pass
        return device_type

    def is_sriov_pf(self, pf):
        """True iff the PF exposes at least one virtfn* entry."""
        # Fix: return an actual bool instead of True/None (truthiness
        # is unchanged for existing callers).
        vfs_path = pciUtils.VFS_PATH % {'interface': pf}
        return bool(glob.glob(vfs_path))

    def get_pf_mlx_dev(self, pf):
        """Return the IB device name (e.g. mlx5_0) backing the PF."""
        dev_path = (
            os.path.join(pciUtils.ETH_PATH % {'interface': pf},
                         pciUtils.INFINIBAND_PATH))
        dev_info = os.listdir(dev_path)
        return dev_info.pop()

    def get_guid_index(self, pf_mlx_dev, dev, hca_port):
        """Read the MLNX4 GUID index for a VF from sysfs."""
        path = constants.MLNX4_GUID_INDEX_PATH % (pf_mlx_dev, dev, hca_port)
        with open(path) as fd:
            guid_index = fd.readline().strip()
        return guid_index

    def get_eth_port(self, dev):
        """Return the 1-based HCA port of a net device, or None."""
        port_path = pciUtils.ETH_PORT % {'interface': dev}
        try:
            with open(port_path) as f:
                dev_id = int(f.read(), 0)
                return dev_id + 1
        except IOError:
            return

    def get_vfs_macs_ib(self, fabric_details):
        """Collect VF GUID-derived MACs for every PF of a fabric,
        dispatching on the PF device generation."""
        macs_map = {}
        for pf_fabric_details in fabric_details.values():
            if (pf_fabric_details['pf_device_type'] ==
                    constants.MLNX4_DEVICE_TYPE):
                macs_map.update(self.get_vfs_macs_ib_mlnx4(pf_fabric_details))
            elif (pf_fabric_details['pf_device_type'] ==
                    constants.MLNX5_DEVICE_TYPE):
                macs_map.update(self.get_vfs_macs_ib_mlnx5(pf_fabric_details))
        return macs_map

    def get_vfs_macs_ib_mlnx4(self, fabric_details):
        """Return {vf_index: mac} from MLNX4 admin_guids entries."""
        hca_port = fabric_details['hca_port']
        pf_mlx_dev = fabric_details['pf_mlx_dev']
        macs_map = {}
        guids_path = constants.MLNX4_ADMIN_GUID_PATH % (pf_mlx_dev,
                                                        hca_port,
                                                        '[1-9]*')
        paths = glob.glob(guids_path)
        for path in paths:
            vf_index = path.split('/')[-1]
            with open(path) as f:
                guid = f.readline().strip()
            if guid == constants.MLNX4_INVALID_GUID:
                mac = constants.INVALID_MAC
            else:
                # MAC = first 3 + last 3 bytes of the 8-byte GUID.
                head = guid[:6]
                tail = guid[-6:]
                mac = ":".join(re.findall('..?', head + tail))
            macs_map[str(int(vf_index))] = mac
        return macs_map

    def get_vfs_macs_ib_mlnx5(self, fabric_details):
        """Return {vf_num: mac} from MLNX5 per-VF node GUID files."""
        vfs = fabric_details['vfs']
        macs_map = {}
        for vf in vfs.values():
            vf_num = vf['vf_num']
            pf_mlx_dev = fabric_details['pf_mlx_dev']
            guid_path = (
                constants.MLNX5_GUID_NODE_PATH % {'module': pf_mlx_dev,
                                                  'vf_num': vf_num})
            with open(guid_path) as f:
                guid = f.readline().strip()
            # Drop the middle two bytes of the colon-separated GUID to
            # obtain the 6-byte MAC.
            head = guid[:8]
            tail = guid[-9:]
            mac = head + tail
            macs_map[vf_num] = mac
        return macs_map

    def get_device_address(self, hostdev):
        """Build a PCI address string from a libvirt <hostdev> element
        (attributes carry 0x-prefixed values)."""
        domain = hostdev.attrib['domain'][2:]
        bus = hostdev.attrib['bus'][2:]
        slot = hostdev.attrib['slot'][2:]
        function = hostdev.attrib['function'][2:]
        # NOTE(review): '%2s' (width) differs from the other '%.Ns'
        # (precision) specifiers; kept as-is to preserve behavior.
        dev = "%.4s:%.2s:%2s.%.1s" % (domain, bus, slot, function)
        return dev
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import zmq

from networking_mlnx._i18n import _
from networking_mlnx.eswitchd.common import config
from networking_mlnx.eswitchd.common import constants
from networking_mlnx.eswitchd.eswitch_handler import eSwitchHandler
from networking_mlnx.eswitchd import msg_handler as message
from networking_mlnx.eswitchd.utils.helper_utils import set_conn_url

LOG = logging.getLogger(__name__)


class MlxEswitchDaemon(object):
    """ZMQ REP server loop: receives JSON requests from the agent and
    dispatches them to the eSwitchHandler via MessageDispatch."""

    def __init__(self, ):
        self.max_polling_count = cfg.CONF.DAEMON.max_polling_count
        self.default_timeout = cfg.CONF.DAEMON.default_timeout
        fabrics = self._parse_physical_mapping()
        self.eswitch_handler = eSwitchHandler(fabrics)
        self.dispatcher = message.MessageDispatch(self.eswitch_handler)

    def start(self):
        # Bind the REP socket; the request loop is daemon_loop().
        self._init_connections()

    def _parse_physical_mapping(self):
        """Parse [DAEMON] fabrics entries of the form fabric:pf into a
        list of (fabric, pf) tuples; abort on malformed entries."""
        fabrics = []
        fabrics_config = cfg.CONF.DAEMON.fabrics
        for entry in fabrics_config:
            if ':' in entry:
                try:
                    fabric, pf = entry.split(':')
                    fabrics.append((fabric, pf))
                except ValueError:
                    # More than one ':' in the entry.
                    LOG.error("Invalid fabric: "
                              "'%(entry)s' - "
                              "Service terminated!",
                              locals())
                    raise
            else:
                LOG.error("Cannot parse Fabric Mappings")
                raise Exception(_("Cannot parse Fabric Mappings"))
        return fabrics

    def _init_connections(self):
        # Listen address/port come from constants (module defaults),
        # not from the registered config options.
        context = zmq.Context()
        self.socket_os = context.socket(zmq.REP)
        os_transport = constants.SOCKET_OS_TRANSPORT
        os_port = constants.SOCKET_OS_PORT
        os_addr = constants.SOCKET_OS_ADDR
        self.conn_os_url = set_conn_url(os_transport, os_addr, os_port)
        self.socket_os.bind(self.conn_os_url)
        self.poller = zmq.Poller()
        self.poller.register(self.socket_os, zmq.POLLIN)

    def _handle_msg(self):
        """Receive one JSON request, dispatch it, and send the reply.

        On a handler exception the exception text is returned to the
        client instead of a JSON document (REP sockets must always
        answer before the next recv).
        """
        data = None
        msg = self.socket_os.recv()
        sender = self.socket_os
        if msg:
            data = jsonutils.loads(msg)
            msg = None
        if data:
            try:
                result = self.dispatcher.handle_msg(data)
                msg = jsonutils.dumps(result)
            except Exception as e:
                LOG.exception("Exception during message handling - %s",
                              e)
                msg = str(e)
        sender.send_string(msg)

    def daemon_loop(self):
        """Serve requests forever; the periodic device resync that the
        polling counter was meant to trigger is currently disabled."""
        LOG.info("Daemon Started!")
        polling_counter = 0
        while True:
            self._handle_msg()
            if polling_counter == self.max_polling_count:
                LOG.debug("Resync devices")
                # self.eswitch_handler.sync_devices()
                polling_counter = 0
            else:
                polling_counter += 1


def main():
    # Parse config and set up logging before touching any hardware.
    config.init(sys.argv[1:])
    config.setup_logging()
    try:
        daemon = MlxEswitchDaemon()
        daemon.start()
    except Exception as e:
        LOG.exception("Failed to start EswitchDaemon "
                      "- Daemon terminated! %s", e)
        sys.exit(1)
    daemon.daemon_loop()


if __name__ == '__main__':
    main()
import glob
import sys

from oslo_log import log as logging

from networking_mlnx.eswitchd.common import constants
from networking_mlnx.eswitchd.db import eswitch_db
from networking_mlnx.eswitchd.resource_mngr import ResourceManager
from networking_mlnx.eswitchd.utils import command_utils
from networking_mlnx.eswitchd.utils import pci_utils

LOG = logging.getLogger(__name__)

INVALID_PKEY = 'none'
DEFAULT_PKEY_IDX = '0'
PARTIAL_PKEY_IDX = '1'
# MSB of a pkey is the membership bit; the low 15 bits are the key.
DEFAULT_MASK = 0x7fff
DEFAULT_PKEY = '0xffff'


class eSwitchHandler(object):
    """Orchestrates per-fabric eSwitchDB state and the sysfs/ebrctl
    operations needed to (un)assign VFs to instances."""

    def __init__(self, fabrics=None):
        self.eswitches = {}
        self.pci_utils = pci_utils.pciUtils()
        self.rm = ResourceManager()
        self.devices = set()
        if fabrics:
            self.add_fabrics(fabrics)

    def add_fabrics(self, fabrics):
        """Validate each (fabric, pf) pair and build its eSwitchDB;
        terminates the process on an unusable PF."""
        for fabric, pf in fabrics:
            verify_vendor_pf = (
                self.pci_utils.verify_vendor_pf(pf, constants.VENDOR))
            if (not verify_vendor_pf or
                    not self.pci_utils.is_sriov_pf(pf)):
                LOG.error("PF %s must have Mellanox Vendor ID "
                          ",SR-IOV and driver module "
                          "enabled. Terminating!", pf)
                sys.exit(1)
            if self.eswitches.get(fabric) is None:
                self.eswitches[fabric] = []
            vfs = self.pci_utils.get_vfs_info(pf)
            self.eswitches[fabric].append(
                eswitch_db.eSwitchDB(pf=pf, vfs=vfs))
            self._add_fabric(fabric, pf)
        self.sync_devices()

    def sync_devices(self):
        """Diff currently-attached devices against the last scan and
        update the eswitch DBs accordingly."""
        devices, vm_ids = self.rm.scan_attached_devices()
        # Fix: removed dead 'added_devs = {}' / 'removed_devs = {}'
        # initializers that were immediately overwritten with sets.
        added_devs = set(devices) - self.devices
        removed_devs = self.devices - set(devices)
        self._treat_added_devices(added_devs, vm_ids)
        self._treat_removed_devices(removed_devs)
        self.devices = set(devices)

    def _add_fabric(self, fabric, pf):
        """Register the PF with the resource manager, bring its link
        up, and create port entries for all of its VFs."""
        self.rm.add_fabric(fabric, pf)
        self._config_port_up(pf)
        pf_fabric_details = self.rm.get_fabric_details(fabric, pf)
        eswitches = self._get_eswitches_for_fabric(fabric)
        eswitch = None
        for esw in eswitches:
            if esw.pf == pf:
                eswitch = esw
                break
        for vf in pf_fabric_details['vfs']:
            eswitch.create_port(vf, constants.VIF_TYPE_HOSTDEV)

    def _treat_added_devices(self, devices, vm_ids):
        # Each element is a (dev, mac, fabric) tuple.
        for device in devices:
            dev, mac, fabric = device
            if fabric:
                for eswitch in self.eswitches[fabric]:
                    if dev in eswitch.vfs:
                        eswitch.attach_vnic(
                            port_name=dev, device_id=vm_ids[dev],
                            vnic_mac=mac)
                        if eswitch.vnic_exists(mac):
                            eswitch.plug_nic(port_name=dev)
                        break
            else:
                LOG.info("No Fabric defined for device %s", dev)

    def _treat_removed_devices(self, devices):
        # NOTE(review): unpacks 2-tuples while _treat_added_devices
        # unpacks 3-tuples from the same scan result -- confirm the
        # shape returned by ResourceManager.scan_attached_devices().
        for dev, mac in devices:
            fabric = self.rm.get_fabric_for_dev(dev)
            if fabric:
                for eswitch in self.eswitches[fabric]:
                    if dev in eswitch.vfs:
                        eswitch.detach_vnic(vnic_mac=mac)
            else:
                LOG.info("No Fabric defined for device %s", dev)

    def get_vnics(self, fabrics):
        """Return the union of attached vnics over the given fabrics."""
        vnics = {}
        for fabric in fabrics:
            eswitches = self._get_eswitches_for_fabric(fabric)
            if eswitches:
                for eswitch in eswitches:
                    vnics_for_eswitch = eswitch.get_attached_vnics()
                    vnics.update(vnics_for_eswitch)
            else:
                LOG.error("No eSwitch found for Fabric %s", fabric)
                continue
        LOG.info("vnics are %s", vnics)
        return vnics

    def plug_nic(self, fabric, device_id, vnic_mac, pci_slot):
        """Bind a MAC to the VF at pci_slot and program its GUID."""
        eswitch = self._get_eswitch_for_fabric_and_pci(fabric, pci_slot)
        if eswitch:
            eswitch.port_table[pci_slot]['vnic'] = vnic_mac
            eswitch.port_policy.update(
                {vnic_mac: {'vlan': None, 'dev': pci_slot,
                            'device_id': device_id}})
            self._config_vf_mac_address(fabric, pci_slot, vnic_mac)
            eswitch.plug_nic(pci_slot)
        else:
            LOG.error("No eSwitch found for Fabric %s", fabric)
        return pci_slot

    def delete_port(self, fabric, vnic_mac):
        """Detach a vnic and invalidate its VF GUID; returns the VF."""
        dev = None
        eswitches = self._get_eswitches_for_fabric(fabric)
        for eswitch in eswitches:
            if eswitch and vnic_mac in eswitch.get_attached_vnics():
                dev = eswitch.detach_vnic(vnic_mac)
                if dev:
                    self._config_vf_mac_address(fabric, dev)
                    break
        if dev is None:
            LOG.warning("No eSwitch found for Fabric %s", fabric)
        return dev

    def port_release(self, fabric, vnic_mac):
        """Release a port: reset its VLAN to untagged when the VF is
        already unplugged, then drop its policy entry."""
        ret = None
        dev = None
        eswitches = self._get_eswitches_for_fabric(fabric)
        for eswitch in eswitches:
            if eswitch and vnic_mac in eswitch.get_attached_vnics():
                dev = eswitch.get_dev_for_vnic(vnic_mac)
                if (dev is not None and
                        eswitch.get_port_state(dev) ==
                        constants.VPORT_STATE_UNPLUGGED):
                    ret = self.set_vlan(
                        fabric, vnic_mac, constants.UNTAGGED_VLAN_ID)
                self.port_down(fabric, vnic_mac)
                eswitch.port_release(vnic_mac)
        return ret

    def port_up(self, fabric, vnic_mac):
        # No-op for IB ports; only logs when the MAC is unknown.
        dev = None
        eswitches = self._get_eswitches_for_fabric(fabric)
        for eswitch in eswitches:
            if eswitch and vnic_mac in eswitch.get_attached_vnics():
                dev = eswitch.get_dev_for_vnic(vnic_mac)
                break
        if not dev:
            LOG.info("No device for MAC %s", vnic_mac)

    def port_down(self, fabric, vnic_mac):
        # IB ports cannot be administratively downed; log and return.
        dev = None
        eswitches = self._get_eswitches_for_fabric(fabric)
        for eswitch in eswitches:
            if eswitch and vnic_mac in eswitch.get_attached_vnics():
                dev = eswitch.get_dev_for_vnic(vnic_mac)
                if dev:
                    # (sic) message kept byte-for-byte
                    LOG.info("IB port for MAC %s doen't support "
                             "port down", vnic_mac)
                    break
        if dev is None:
            LOG.info("No device for MAC %s", vnic_mac)

    def set_vlan(self, fabric, vnic_mac, vlan):
        """Record and program the VLAN (pkey) for a vnic; returns True
        on success, False on failure."""
        eswitches = self._get_eswitches_for_fabric(fabric)
        for eswitch in eswitches:
            if eswitch and vnic_mac in eswitch.get_attached_vnics():
                eswitch.set_vlan(vnic_mac, vlan)
                dev = eswitch.get_dev_for_vnic(vnic_mac)
                state = eswitch.get_port_state(dev)
                if dev:
                    if state in (constants.VPORT_STATE_ATTACHED,
                                 constants.VPORT_STATE_UNPLUGGED):
                        if eswitch.get_port_table()[dev]['alias']:
                            dev = eswitch.get_port_table()[dev]['alias']
                        try:
                            self._config_vlan_ib(fabric, dev, vlan)
                            return True
                        except RuntimeError:
                            LOG.error('Set VLAN operation failed')
        return False

    def get_eswitch_tables(self, fabrics):
        """Return display matrices of port table/policy per fabric."""
        tables = {}
        for fabric in fabrics:
            eswitches = self._get_eswitches_for_fabric(fabric)
            # Fix: _get_eswitches_for_fabric returns None for an
            # unknown fabric; len(None) would raise TypeError.
            if not eswitches:
                LOG.info("Get eswitch tables: No eswitch %s", fabric)
                continue
            for eswitch in eswitches:
                if eswitch:
                    tables[fabric] = {
                        'port_table': eswitch.get_port_table_matrix(),
                        'port_policy': eswitch.get_port_policy_matrix()
                    }
        return tables

    def _get_eswitches_for_fabric(self, fabric):
        """Return the eSwitchDB list for a fabric, or None."""
        return self.eswitches.get(fabric)

    def _get_eswitch_for_fabric_and_pci(self, fabric, pci_slot):
        """Return the eSwitchDB owning pci_slot, or None."""
        eswitches = self._get_eswitches_for_fabric(fabric)
        for eswitch in eswitches:
            if pci_slot in eswitch.vfs:
                return eswitch

    def _config_vf_pkey(self, ppkey_idx, pkey_idx, pf_mlx_dev, vf_pci_id,
                        hca_port):
        path = constants.MLNX4_PKEY_INDEX_PATH % (pf_mlx_dev, vf_pci_id,
                                                  hca_port, pkey_idx)
        cmd = ['ebrctl', 'write-sys', path, ppkey_idx]
        command_utils.execute(*cmd)

    def _get_guid_idx(self, pf_mlx_dev, dev, hca_port):
        path = constants.MLNX4_GUID_INDEX_PATH % (pf_mlx_dev, dev,
                                                  hca_port)
        with open(path) as fd:
            idx = fd.readline().strip()
        return idx

    def _get_guid_from_mac(self, mac, device_type):
        """Expand a 6-byte MAC to the 8-byte GUID format of the given
        device generation; None MAC maps to the invalid-GUID value."""
        guid = None
        if device_type == constants.MLNX4_DEVICE_TYPE:
            if mac is None:
                guid = constants.MLNX4_INVALID_GUID
            else:
                mac = mac.replace(':', '')
                prefix = mac[:6]
                suffix = mac[6:]
                guid = prefix + '0000' + suffix
        elif (device_type == constants.MLNX5_DEVICE_TYPE):
            if mac is None:
                guid = constants.MLNX5_INVALID_GUID
            else:
                prefix = mac[:9]
                suffix = mac[9:]
                guid = prefix + '00:00:' + suffix
        return guid

    def _config_vf_mac_address(self, fabric, dev, vnic_mac=None):
        """Program (or invalidate, when vnic_mac is None) the GUID of
        VF ``dev`` according to its device generation."""
        pf_fabric_details = self._get_pf_fabric(fabric, dev)
        vf_device_type = pf_fabric_details['vfs'][dev]['vf_device_type']
        vguid = self._get_guid_from_mac(vnic_mac, vf_device_type)
        if vf_device_type == constants.MLNX4_DEVICE_TYPE:
            self._config_vf_mac_address_mlnx4(vguid, dev,
                                              pf_fabric_details)
        elif (vf_device_type == constants.MLNX5_DEVICE_TYPE):
            self._config_vf_mac_address_mlnx5(vguid, dev,
                                              pf_fabric_details)
        else:
            LOG.error("Unsupported vf device type: %s ",
                      vf_device_type)

    def _config_vf_mac_address_mlnx4(self, vguid, dev, pf_fabric_details):
        hca_port = pf_fabric_details['hca_port']
        pf_mlx_dev = pf_fabric_details['pf_mlx_dev']
        self._config_vf_pkey(
            INVALID_PKEY, DEFAULT_PKEY_IDX, pf_mlx_dev, dev, hca_port)
        guid_idx = self._get_guid_idx(pf_mlx_dev, dev, hca_port)
        path = constants.MLNX4_ADMIN_GUID_PATH % (
            pf_mlx_dev, hca_port, guid_idx)
        cmd = ['ebrctl', 'write-sys', path, vguid]
        command_utils.execute(*cmd)
        ppkey_idx = self._get_pkey_idx(
            int(DEFAULT_PKEY, 16), pf_mlx_dev, hca_port)
        # Fix: _get_pkey_idx returns a str index or None; the original
        # 'ppkey_idx >= 0' raises TypeError on Python 3 in both cases.
        if ppkey_idx is not None:
            self._config_vf_pkey(
                ppkey_idx, PARTIAL_PKEY_IDX, pf_mlx_dev, dev, hca_port)
        else:
            LOG.error("Can't find partial management pkey for "
                      "%(pf)s:%(dev)s", {'pf': pf_mlx_dev, 'dev': dev})

    def _config_vf_mac_address_mlnx5(self, vguid, dev, pf_fabric_details):
        vf_num = pf_fabric_details['vfs'][dev]['vf_num']
        pf_mlx_dev = pf_fabric_details['pf_mlx_dev']
        guid_node = constants.MLNX5_GUID_NODE_PATH % {'module': pf_mlx_dev,
                                                      'vf_num': vf_num}
        guid_port = constants.MLNX5_GUID_PORT_PATH % {'module': pf_mlx_dev,
                                                      'vf_num': vf_num}
        # (renamed from 'guid_poliy' typo)
        guid_policy = constants.MLNX5_GUID_POLICY_PATH % {
            'module': pf_mlx_dev, 'vf_num': vf_num}
        for path in (guid_node, guid_port):
            cmd = ['ebrctl', 'write-sys', path, vguid]
            command_utils.execute(*cmd)
        if vguid == constants.MLNX5_INVALID_GUID:
            # Invalid GUID: force policy Down and rebind the VF so the
            # driver picks up the cleared GUID.
            cmd = ['ebrctl', 'write-sys', guid_policy, 'Down\n']
            command_utils.execute(*cmd)
            cmd = ['ebrctl', 'write-sys', constants.UNBIND_PATH, dev]
            command_utils.execute(*cmd)
            cmd = ['ebrctl', 'write-sys', constants.BIND_PATH, dev]
            command_utils.execute(*cmd)
        else:
            cmd = ['ebrctl', 'write-sys', guid_policy, 'Up\n']
            command_utils.execute(*cmd)

    def _config_vlan_ib(self, fabric, dev, vlan):
        pf_fabric_details = self._get_pf_fabric(fabric, dev)
        hca_port = pf_fabric_details['hca_port']
        pf_mlx_dev = pf_fabric_details['pf_mlx_dev']
        vf_device_type = pf_fabric_details['vfs'][dev]['vf_device_type']
        if vf_device_type == constants.MLNX4_DEVICE_TYPE:
            self._config_vlan_ib_mlnx4(vlan, pf_mlx_dev, dev, hca_port)
        elif vf_device_type == constants.MLNX5_DEVICE_TYPE:
            # VLAN/pkey programming is not applicable on MLNX5 here.
            pass
        else:
            LOG.error("Unsupported vf device type: %s ",
                      vf_device_type)

    def _config_vlan_ib_mlnx4(self, vlan, pf_mlx_dev, dev, hca_port):
        if vlan == 0:
            ppkey_idx = self._get_pkey_idx(
                int(DEFAULT_PKEY, 16), pf_mlx_dev, hca_port)
            # Fix: same str/None vs int '>=' TypeError as in
            # _config_vf_mac_address_mlnx4.
            if ppkey_idx is not None:
                self._config_vf_pkey(
                    ppkey_idx, DEFAULT_PKEY_IDX, pf_mlx_dev, dev,
                    hca_port)
        else:
            ppkey_idx = self._get_pkey_idx(str(vlan), pf_mlx_dev,
                                           hca_port)
            if ppkey_idx:
                self._config_vf_pkey(
                    ppkey_idx, DEFAULT_PKEY_IDX, pf_mlx_dev, dev,
                    hca_port)

    def _get_pkey_idx(self, vlan, pf_mlx_dev, hca_port):
        """Return the pkey index (str) whose value matches ``vlan``
        modulo the membership bit, or None when not found."""
        PKEYS_PATH = "/sys/class/infiniband/%s/ports/%s/pkeys/*"
        paths = PKEYS_PATH % (pf_mlx_dev, hca_port)
        for path in glob.glob(paths):
            # Fix: context manager instead of manual open/close.
            with open(path) as fd:
                pkey = fd.readline()
            # the MSB in pkey is the membership bit (0 - partial,
            # 1 - full); the other 15 bits are the pkey number, so we
            # mask off the 16th bit when comparing against the vlan.
            is_match = (int(pkey, 16) & DEFAULT_MASK ==
                        int(vlan) & DEFAULT_MASK)
            if is_match:
                return path.split('/')[-1]
        return None

    def _config_port_up(self, dev):
        cmd = ['ip', 'link', 'set', dev, 'up']
        command_utils.execute(*cmd)

    def _get_pf_fabric(self, fabric, dev):
        """Return the PF details dict owning VF ``dev``."""
        fabric_details = self.rm.get_fabric_details(fabric)
        for pf_fabric in fabric_details.values():
            if dev in pf_fabric['vfs']:
                return pf_fabric
from oslo_log import log as logging from networking_mlnx.eswitchd.common import constants LOG = logging.getLogger(__name__) class BasicMessageHandler(object): MSG_ATTRS_MANDATORY_MAP = set() def __init__(self, msg): self.msg = msg def execute(self): raise Exception(_("execute method MUST be implemented!")) def validate(self): ret = True msg_attr = set(self.msg.keys()) for attr in self.MSG_ATTRS_MANDATORY_MAP: if attr not in msg_attr: return False if 'vnic_type' in self.msg.keys(): ret = self.validate_vnic_type(self.msg['vnic_type']) return ret def validate_vnic_type(self, vnic_type): if vnic_type in (constants.VIF_TYPE_HOSTDEV, ): return True return False def build_response(self, status, reason=None, response=None): if status: msg = {'status': 'OK', 'response': response} else: msg = {'status': 'FAIL', 'reason': reason} return msg class PlugVnic(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', 'device_id', 'vnic_mac', 'dev_name') def __init__(self, msg): super(PlugVnic, self).__init__(msg) def execute(self, eswitch_handler): fabric = self.msg['fabric'] device_id = self.msg['device_id'] vnic_mac = (self.msg['vnic_mac']).lower() dev_name = self.msg['dev_name'] dev = eswitch_handler.plug_nic(fabric, device_id, vnic_mac, dev_name) if dev: return self.build_response(True, response={'dev': dev}) else: return self.build_response(False, reason='Plug vnic failed') class DetachVnic(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', 'vnic_mac') def __init__(self, msg): super(DetachVnic, self).__init__(msg) def execute(self, eswitch_handler): fabric = self.msg['fabric'] vnic_mac = (self.msg['vnic_mac']).lower() dev = eswitch_handler.delete_port(fabric, vnic_mac) if dev: return self.build_response(True, response={'dev': dev}) else: return self.build_response(True, response={}) class SetVLAN(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', 'port_mac', 'vlan') def __init__(self, msg): super(SetVLAN, self).__init__(msg) def execute(self, 
eswitch_handler): fabric = self.msg['fabric'] vnic_mac = (self.msg['port_mac']).lower() vlan = self.msg['vlan'] ret = eswitch_handler.set_vlan(fabric, vnic_mac, vlan) reason = None if not ret: reason = 'Set VLAN Failed' if reason: return self.build_response(False, reason=reason) return self.build_response(True, response={}) class GetVnics(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', ) def __init__(self, msg): super(GetVnics, self).__init__(msg) def execute(self, eswitch_handler): fabric = self.msg['fabric'] if fabric == '*': fabrics = eswitch_handler.eswitches.keys() LOG.info("fabrics = %s", fabrics) else: fabrics = [fabric] vnics = eswitch_handler.get_vnics(fabrics) return self.build_response(True, response=vnics) class PortRelease(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', 'ref_by', 'mac') def __init__(self, msg): super(PortRelease, self).__init__(msg) def execute(self, eswitch_handler): ref_by_keys = ['mac_address'] fabric = self.msg['fabric'] vnic_mac = (self.msg['mac']).lower() ref_by = self.msg['ref_by'] reason = None if ref_by not in ref_by_keys: reason = "reb_by %s is not supported" % ref_by else: try: eswitch_handler.port_release(fabric, vnic_mac) except Exception: reason = "port release failed" LOG.exception("PortRelease failed") if reason: return self.build_response(False, reason=reason) return self.build_response(True, response={}) class SetFabricMapping(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', 'interface') def __init__(self, msg): super(SetFabricMapping, self).__init__(msg) def execute(self, eswitch_handler): fabric = self.msg['fabric'] interface = self.msg['interface'] response = {'fabric': fabric, 'dev': interface} return self.build_response(True, response=response) class PortUp(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', 'mac') def __init__(self, msg): super(PortUp, self).__init__(msg) def execute(self, eswitch_handler): # fabric = self.msg['fabric'] # mac = self.msg['mac'] # 
eswitch_handler.port_up(fabric, mac) return self.build_response(True, response={}) class PortDown(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric', 'mac') def __init__(self, msg): super(PortDown, self).__init__(msg) def execute(self, eswitch_handler): # fabric = self.msg['fabric'] # mac = self.msg['mac'] # eswitch_handler.port_down(fabric, mac) return self.build_response(True, response={}) class GetEswitchTables(BasicMessageHandler): MSG_ATTRS_MANDATORY_MAP = ('fabric',) def __init__(self, msg): super(GetEswitchTables, self).__init__(msg) def execute(self, eswitch_handler): fabric = self.msg.get('fabric', '*') if fabric == '*': fabrics = eswitch_handler.eswitches.keys() LOG.info("fabrics = %s", fabrics) else: fabrics = [fabric] response = {'tables': eswitch_handler.get_eswitch_tables(fabrics)} return self.build_response(True, response=response) class MessageDispatch(object): MSG_MAP = {'delete_port': DetachVnic, 'set_vlan': SetVLAN, 'get_vnics': GetVnics, 'port_release': PortRelease, 'port_up': PortUp, 'port_down': PortDown, 'define_fabric_mapping': SetFabricMapping, 'plug_nic': PlugVnic, 'get_eswitch_tables': GetEswitchTables} def __init__(self, eswitch_handler): self.eswitch_handler = eswitch_handler def handle_msg(self, msg): LOG.info("Handling message - %s", msg) result = {} action = msg.pop('action') if action in MessageDispatch.MSG_MAP.keys(): msg_handler = MessageDispatch.MSG_MAP[action](msg) if msg_handler.validate(): result = msg_handler.execute(self.eswitch_handler) else: LOG.error('Invalid message - cannot handle') result = {'status': 'FAIL', 'reason': 'validation failed'} else: LOG.error("Unsupported action - %s", action) result = {'action': action, 'status': 'FAIL', 'reason': 'unknown action'} result['action'] = action return result networking-mlnx-15.0.2/networking_mlnx/eswitchd/resource_mngr.py0000644000413600001450000001152313575645041025344 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the 
Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from defusedxml import ElementTree as ET import libvirt from oslo_log import log as logging from networking_mlnx.eswitchd.common import constants from networking_mlnx.eswitchd.db import device_db from networking_mlnx.eswitchd.utils import pci_utils LOG = logging.getLogger(__name__) class ResourceManager(object): def __init__(self): self.pci_utils = pci_utils.pciUtils() self.device_db = device_db.DeviceDB() def add_fabric(self, fabric, pf): hca_port, pf_mlx_dev = self._get_pf_details(pf) self.device_db.add_fabric(fabric, pf, hca_port, pf_mlx_dev) vfs = self.discover_devices(pf) LOG.info("PF %(pf)s, vfs = %(vf)s", {'pf': pf, 'vf': vfs}) self.device_db.set_fabric_devices(fabric, pf, vfs) def scan_attached_devices(self): devices = [] vm_ids = {} conn = libvirt.openReadOnly('qemu:///system') domains = [] self.macs_map = self._get_vfs_macs() domains_names = conn.listDefinedDomains() defined_domains = map(conn.lookupByName, domains_names) domains_ids = conn.listDomainsID() running_domains = map(conn.lookupByID, domains_ids) for domain in defined_domains: [state, maxmem, mem, ncpu, cputime] = domain.info() if state in (libvirt.VIR_DOMAIN_PAUSED, libvirt.VIR_DOMAIN_SHUTDOWN, libvirt.VIR_DOMAIN_SHUTOFF): domains.append(domain) domains += running_domains for domain in domains: raw_xml = domain.XMLDesc(0) xml_root = ET.fromstring(raw_xml) hostdevs = xml_root.findall("devices/hostdev/source/address") vm_id = xml_root.find('uuid').text for dev in 
self._get_attached_hostdevs(hostdevs): devices.append(dev) vm_ids[dev[0]] = vm_id return devices, vm_ids def get_fabric_details(self, fabric, pf=None): return self.device_db.get_fabric_details(fabric, pf) def discover_devices(self, pf): return self.pci_utils.get_vfs_info(pf) def get_fabric_for_dev(self, dev): return self.device_db.get_dev_fabric(dev) def _get_vfs_macs(self): macs_map = {} fabrics = self.device_db.device_db.keys() for fabric in fabrics: fabric_details = self.device_db.get_fabric_details(fabric) try: macs_map[fabric] = \ self.pci_utils.get_vfs_macs_ib(fabric_details) except Exception: LOG.exception("Failed to get vfs macs for fabric %s ", fabric) continue return macs_map def _get_attached_hostdevs(self, hostdevs): devs = [] for hostdev in hostdevs: dev = self.pci_utils.get_device_address(hostdev) fabric = self.get_fabric_for_dev(dev) if fabric: fabric_details = self.get_fabric_details(fabric) for pf_fabric_details in fabric_details.values(): if (pf_fabric_details['pf_device_type'] == constants.MLNX4_DEVICE_TYPE): hca_port = pf_fabric_details['hca_port'] pf_mlx_dev = pf_fabric_details['pf_mlx_dev'] vf_index = self.pci_utils.get_guid_index( pf_mlx_dev, dev, hca_port) elif (pf_fabric_details['pf_device_type'] == constants.MLNX5_DEVICE_TYPE): if dev in pf_fabric_details['vfs']: vf_index = pf_fabric_details['vfs'][dev]['vf_num'] else: continue try: mac = self.macs_map[fabric][str(vf_index)] devs.append((dev, mac, fabric)) except KeyError: LOG.warning("Failed to retrieve Hostdev MAC" "for dev %s", dev) else: LOG.info("No Fabric defined for device %s", hostdev) return devs def _get_pf_details(self, pf): hca_port = self.pci_utils.get_eth_port(pf) pf_mlx_dev = self.pci_utils.get_pf_mlx_dev(pf) return (hca_port, pf_mlx_dev) networking-mlnx-15.0.2/networking_mlnx/journal/0000755000413600001450000000000013575645772021771 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/journal/__init__.py0000644000413600001450000000000013566516767024071 
0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/journal/cleanup.py0000644000413600001450000000335313575645017023766 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import timedelta from oslo_config import cfg from oslo_log import log as logging from networking_mlnx.db import db from networking_mlnx.plugins.ml2.drivers.sdn import config from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const LOG = logging.getLogger(__name__) cfg.CONF.register_opts(config.sdn_opts, sdn_const.GROUP_OPT) class JournalCleanup(object): """Journal maintenance operation for deleting completed rows.""" def __init__(self): self._rows_retention = cfg.CONF.sdn.completed_rows_retention self._processing_timeout = cfg.CONF.sdn.processing_timeout def delete_completed_rows(self, session): if self._rows_retention is not -1: LOG.debug("Deleting completed rows") db.delete_rows_by_state_and_time( session, sdn_const.COMPLETED, timedelta(seconds=self._rows_retention)) def cleanup_processing_rows(self, session): row_count = db.reset_processing_rows(session, self._processing_timeout) if row_count: LOG.info("Reset %(num)s orphaned rows back to pending", {"num": row_count}) networking-mlnx-15.0.2/networking_mlnx/journal/dependency_validations.py0000644000413600001450000000604313575645017027051 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from networking_mlnx.db import db from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const def _is_valid_operation(session, row): # Check if there are older updates in the queue if db.check_for_older_ops(session, row): return False return True def validate_network_operation(session, row): """Validate the network operation based on dependencies. Validate network operation depending on whether it's dependencies are still in 'pending' or 'processing' state. e.g. """ if row.operation == sdn_const.DELETE: # Check for any pending or processing create or update # ops on this uuid itself if db.check_for_pending_or_processing_ops( session, row.object_uuid, [sdn_const.PUT, sdn_const.POST]): return False if db.check_for_pending_delete_ops_with_parent( session, sdn_const.PORT, row.object_uuid): return False elif (row.operation == sdn_const.PUT and not _is_valid_operation(session, row)): return False return True def validate_port_operation(session, row): """Validate port operation based on dependencies. Validate port operation depending on whether it's dependencies are still in 'pending' or 'processing' state. e.g. 
""" if row.operation in (sdn_const.POST, sdn_const.PUT): network_dict = jsonutils.loads(row.data) network_id = network_dict['network_id'] # Check for pending or processing network operations ops = db.check_for_pending_or_processing_ops( session, network_id, [sdn_const.POST]) if ops: return False return _is_valid_operation(session, row) _VALIDATION_MAP = { sdn_const.NETWORK: validate_network_operation, sdn_const.PORT: validate_port_operation, } def validate(session, row): """Validate resource dependency in journaled operations. :param session: db session :param row: entry in journal entry to be validated """ return _VALIDATION_MAP[row.object_type](session, row) def register_validator(object_type, validator): """Register validator function for given resource. :param object_type: neutron resource type :param validator: function to be registered which validates resource dependencies """ _VALIDATION_MAP[object_type] = validator networking-mlnx-15.0.2/networking_mlnx/journal/journal.py0000644000413600001450000002357313575645041024014 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re import threading from neutron_lib import context as nl_context from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import requests from six.moves import html_parser from networking_mlnx.db import db from networking_mlnx.journal import dependency_validations from networking_mlnx.plugins.ml2.drivers.sdn import client from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const from networking_mlnx.plugins.ml2.drivers.sdn import exceptions as sdn_exc from networking_mlnx.plugins.ml2.drivers.sdn import utils as sdn_utils LOG = logging.getLogger(__name__) def call_thread_on_end(func): def new_func(obj, *args, **kwargs): return_value = func(obj, *args, **kwargs) obj.journal.set_sync_event() return return_value return new_func def record(db_session, object_type, object_uuid, operation, data, context=None): db.create_pending_row(db_session, object_type, object_uuid, operation, data) class SdnJournalThread(object): """Thread worker for the SDN Journal Database.""" def __init__(self): self.client = client.SdnRestClient.create_client() self._sync_timeout = cfg.CONF.sdn.sync_timeout self._row_retry_count = cfg.CONF.sdn.retry_count self.event = threading.Event() self.lock = threading.Lock() self._sync_thread = self.start_sync_thread() self._start_sync_timer() def start_sync_thread(self): # Start the sync thread LOG.debug("Starting a new sync thread") sync_thread = threading.Thread( name='sync', target=self.run_sync_thread) sync_thread.start() return sync_thread def set_sync_event(self): # Prevent race when starting the timer with self.lock: LOG.debug("Resetting thread timer") self._timer.cancel() self._start_sync_timer() self.event.set() def _start_sync_timer(self): self._timer = threading.Timer(self._sync_timeout, self.set_sync_event) self._timer.start() def run_sync_thread(self, exit_after_run=False): while True: try: self.event.wait() self.event.clear() context = nl_context.get_admin_context() 
self._sync_pending_rows(context.session, exit_after_run) self._sync_progress_rows(context.session) LOG.debug("Clearing sync thread event") if exit_after_run: # Permanently waiting thread model breaks unit tests # Adding this arg to exit here only for unit tests break except Exception: # Catch exceptions to protect the thread while running LOG.exception("Error on run_sync_thread") def _sync_pending_rows(self, session, exit_after_run): while True: LOG.debug("sync_pending_rows operation walking database") row = db.get_oldest_pending_db_row_with_lock(session) if not row: LOG.debug("No rows to sync") break # Validate the operation valid = dependency_validations.validate(session, row) if not valid: LOG.info("%(operation)s %(type)s %(uuid)s is not a " "valid operation yet, skipping for now", {'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid}) # Set row back to pending. db.update_db_row_state(session, row, sdn_const.PENDING) if exit_after_run: break continue LOG.info("Syncing %(operation)s %(type)s %(uuid)s", {'operation': row.operation, 'type': row.object_type, 'uuid': row.object_uuid}) # Add code to sync this to NEO urlpath = sdn_utils.strings_to_url(row.object_type) if row.operation != sdn_const.POST: urlpath = sdn_utils.strings_to_url(urlpath, row.object_uuid) try: client_operation_method = ( getattr(self.client, row.operation.lower())) response = ( client_operation_method( urlpath, jsonutils.loads(row.data))) if response.status_code == requests.codes.not_implemented: db.update_db_row_state(session, row, sdn_const.COMPLETED) elif (response.status_code == requests.codes.not_found and row.operation == sdn_const.DELETE): db.update_db_row_state(session, row, sdn_const.COMPLETED) else: # update in progress and job_id job_id = None try: try: job_id = response.json() except ValueError: # Note(moshele) workaround for NEO # because for POST port it return html # and not json parser = html_parser.HTMLParser() parser.feed(response.text) 
parser.handle_starttag('a', []) url = parser.get_starttag_text() match = re.match( r'', url) if match: job_id = match.group(1) except Exception as e: LOG.error("Failed to extract job_id %s", e) if job_id: db.update_db_row_job_id( session, row, job_id=job_id) db.update_db_row_state( session, row, sdn_const.MONITORING) else: LOG.warning("object %s has NULL job_id", row.object_uuid) except (sdn_exc.SDNConnectionError, sdn_exc.SDNLoginError): # Log an error and raise the retry count. If the retry count # exceeds the limit, move it to the failed state. LOG.error("Cannot connect to the NEO Controller") db.update_pending_db_row_retry(session, row, self._row_retry_count) # Break out of the loop and retry with the next # timer interval break def _sync_progress_rows(self, session): # 1. get all progressed job # 2. get status for NEO # 3. Update status if completed/failed LOG.debug("sync_progress_rows operation walking database") rows = db.get_all_monitoring_db_row_by_oldest(session) if not rows: LOG.debug("No rows to sync") return for row in rows: try: if row.job_id is None: LOG.warning("object %s has NULL job_id", row.object_uuid) continue response = self.client.get(row.job_id.strip("/")) if response: try: job_status = response.json().get('Status') if job_status == 'Completed': db.update_db_row_state( session, row, sdn_const.COMPLETED) continue elif job_status in ("Pending", "Running"): LOG.debug("NEO Job id %(job_id)s is %(status)s " "continue monitoring", {'job_id': row.job_id, 'status': job_status}) continue else: LOG.error("NEO Job id %(job_id)s, failed with" " %(status)s", {'job_id': row.job_id, 'status': job_status}) db.update_db_row_state( session, row, sdn_const.PENDING) except (ValueError, AttributeError): LOG.error("failed to extract response for job" "id %s", row.job_id) else: LOG.error("NEO Job id %(job_id)s, failed with " "%(status)s", {'job_id': row.job_id, 'status': job_status}) db.update_db_row_state(session, row, sdn_const.PENDING) except 
(sdn_exc.SDNConnectionError, sdn_exc.SDNLoginError): # Don't raise the retry count, just log an error LOG.error("Cannot connect to the NEO Controller") db.update_db_row_state(session, row, sdn_const.PENDING) # Break out of the loop and retry with the next # timer interval break networking-mlnx-15.0.2/networking_mlnx/journal/maintenance.py0000644000413600001450000000511113575645017024613 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import api as neutron_db_api from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from networking_mlnx.db import db LOG = logging.getLogger(__name__) class MaintenanceThread(object): def __init__(self): self.timer = loopingcall.FixedIntervalLoopingCall(self.execute_ops) self.maintenance_interval = cfg.CONF.sdn.maintenance_interval self.maintenance_ops = [] def start(self): self.timer.start(self.maintenance_interval, stop_on_exception=False) def _execute_op(self, operation, session): op_details = operation.__name__ if operation.__doc__: op_details += " (%s)" % operation.func_doc try: LOG.info("Starting maintenance operation %s.", op_details) db.update_maintenance_operation(session, operation=operation) operation(session=session) LOG.info("Finished maintenance operation %s.", op_details) except Exception: LOG.exception("Failed during maintenance operation %s.", op_details) def execute_ops(self): LOG.info("Starting journal maintenance run.") session = neutron_db_api.get_reader_session() if not db.lock_maintenance(session): LOG.info("Maintenance already running, aborting.") return try: for operation in self.maintenance_ops: self._execute_op(operation, session) finally: db.update_maintenance_operation(session, operation=None) db.unlock_maintenance(session) LOG.info("Finished journal maintenance run.") def register_operation(self, f): """Register a function to be run by the maintenance thread. :param f: Function to call when the thread runs. The function will receive a DB session to use for DB operations. 
""" self.maintenance_ops.append(f) networking-mlnx-15.0.2/networking_mlnx/linux/0000755000413600001450000000000013575645772021456 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/linux/interface_drivers/0000755000413600001450000000000013575645772025154 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/linux/interface_drivers/__init__.py0000644000413600001450000000000013575645017027243 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/linux/interface_drivers/config.py0000644000413600001450000000316313575645017026766 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from networking_mlnx._i18n import _ interface_driver_opts = [ cfg.StrOpt('ipoib_physical_interface', default="ib0", help=_("Name of the IPoIB root device to use with" "ipoib interface driver.")), cfg.StrOpt('multi_interface_driver_mappings', default=None, help=_("A per physnet interface driver mapping used by " "multidriver interface driver to manage the virtual " "interface per physnet. a virtual network e.g vxlan " "will map to the 'nil' physnet.")), cfg.BoolOpt('enable_multi_interface_driver_cache_maintenance', default=True, help=_("Enable periodic job to perform maintenance to the" "embedded network cache for multi interface driver. 
" "Set to true if a multi interface driver instance will " "be active for an extended amount of time.")) ] networking-mlnx-15.0.2/networking_mlnx/linux/interface_drivers/constants.py0000644000413600001450000000202413575645017027530 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import neutron_lib.api.definitions.provider_net as prov_net_attr SEGMENTATION_ID = prov_net_attr.SEGMENTATION_ID PHYSICAL_NETWORK = prov_net_attr.PHYSICAL_NETWORK ADMIN_STATE_UP = 'admin_state_up' # Types of interfaces created by various neutron interface drivers INTERFACE_KIND_UNKNOWN = 'unknown' INTERFACE_KIND_OVS = 'openvswitch' INTERFACE_KIND_VETH = 'veth' INTERFACE_KIND_NULL = 'null' INTERFACE_KIND_IPOIB = 'ipoib' networking-mlnx-15.0.2/networking_mlnx/linux/interface_drivers/interface.py0000644000413600001450000003056513575645017027467 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import threading from neutron.agent.common import utils as n_agent_utils from neutron.agent.linux import interface as n_interface from neutron.agent.linux import ip_lib as n_ip_lib from neutron_lib.utils import helpers from neutron_lib.utils import runtime from oslo_config import cfg from oslo_log import log from oslo_service import loopingcall import six from networking_mlnx.linux.interface_drivers import config from networking_mlnx.linux.interface_drivers import constants from networking_mlnx.linux.interface_drivers import network_cache from networking_mlnx.linux import ip_lib # Register interface driver Opts when module is loaded cfg.CONF.register_opts(config.interface_driver_opts) LOG = log.getLogger(__name__) DEFAULT_CACHE_CLEANING_INTERVAL = 86400 class IPoIBInterfaceDriver(n_interface.LinuxInterfaceDriver): """Driver for creating ipoib interfaces.""" def __init__(self, conf, get_networks_callback=None, **kwargs): super(IPoIBInterfaceDriver, self).__init__( conf, get_networks_callback=get_networks_callback, **kwargs) self.get_networks = get_networks_callback self.root_dev = conf.ipoib_physical_interface if not n_ip_lib.device_exists(self.root_dev): LOG.error("IPoIB root device %s does not exist.", self.root_dev) def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None): """Plugin the interface.""" ip = ip_lib.IPoIBWrapper(namespace=namespace) try: net = self.get_networks(filters={"id": [network_id]}, fields=[constants.SEGMENTATION_ID])[0] segmentation_id = net.get(constants.SEGMENTATION_ID) dev = ip.add_ipoib(device_name, self.root_dev, segmentation_id) dev.link.set_up() except RuntimeError as e: LOG.error("Failed plugging interface '%s' - %s", device_name, str(e)) def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" ip = 
ip_lib.IPoIBWrapper(namespace=namespace) try: ip.del_ipoib(device_name) except RuntimeError as e: LOG.error("Failed unplugging interface '%s' - %s", device_name, str(e)) class MultiInterfaceDriver(n_interface.LinuxInterfaceDriver): """Driver for invoking interface driver per physnet. This driver Delegates the plug/unplug methods to other interface drivers per physnet according to mapping provided in conf file. This driver assumes uniqueness in regards to the kind of interface created by each delegated driver. """ # Mapping between interface driver and the Type of interface it creates # during a call to 'plug()' method. # NOTE(adrianc): OVSInterfaceDriver can create either veth or openvswitch # interfaces, however in the mapping we assume that the # latter will be used. It is the responsibility of the # user of this map to check `ovs_use_veth` is false. driver_to_interface_kind = { n_interface.BridgeInterfaceDriver: constants.INTERFACE_KIND_VETH, n_interface.OVSInterfaceDriver: constants.INTERFACE_KIND_OVS, n_interface.NullDriver: constants.INTERFACE_KIND_NULL, IPoIBInterfaceDriver: constants.INTERFACE_KIND_IPOIB} network_cache = None _cache_init_lock = threading.RLock() def __init__(self, conf, get_networks_callback=None, **kwargs): super(MultiInterfaceDriver, self).__init__( conf, get_networks_callback=get_networks_callback, **kwargs) fields = ['id', constants.PHYSICAL_NETWORK, constants.SEGMENTATION_ID] MultiInterfaceDriver._init_network_cache(conf, get_networks_callback, fields) # Use a cache backed get_networks callback to avoid un-needed RPC calls self.drivers = MultiInterfaceDriver.load_interface_driver_mappings( conf, get_networks_callback=( MultiInterfaceDriver._get_networks_from_cache), **kwargs) @staticmethod def _check_drivers(driver_mapping): """Check the loaded interface drivers are dealing with a unique interface kind. :raises SystemExit of 1 in case an inconsistency was found. 
""" uniq_intf_drivers = [] driver_types = [] for driver in six.itervalues(driver_mapping): if driver.__class__ not in driver_types: driver_types.append(driver.__class__) uniq_intf_drivers.append(driver) interface_kinds = [driver._interface_kind_ for driver in uniq_intf_drivers] if len(interface_kinds) != len(set(interface_kinds)): LOG.error("MultiInterfaceDriver cannot operate with interface" "drivers working with the same interface kind.") raise SystemExit(1) @staticmethod def _process_driver_obj(obj): """Process a loaded interface driver object. :param obj: n_interface.LinuxInterfaceDriver obj. :return: An augmented object containing _interface_kind_ attribute that represents the kind of interface a driver creates. """ LOG.debug("Processing driver object of type:%s", obj.__class__) if (obj.__class__ is n_interface.OVSInterfaceDriver and obj.conf.ovs_use_veth): interface_kind = constants.INTERFACE_KIND_VETH else: interface_kind = ( MultiInterfaceDriver.driver_to_interface_kind.get( obj.__class__, constants.INTERFACE_KIND_UNKNOWN)) if hasattr(obj, '_interface_kind_'): LOG.error("Attribute '_interface_kind_' defined, unexpected err.") raise SystemExit(1) obj._interface_kind_ = interface_kind return obj @staticmethod def load_interface_driver_mappings(conf, **kwargs): """Load interface drivers for agents like DHCP or L3 agent. 
:param conf: driver configuration object :param kwargs: additional keyword arguments :raises SystemExit of 1 if drivers cannot be loaded """ try: driver_mapping = {} mappings = conf.multi_interface_driver_mappings.split(',') mappings = helpers.parse_mappings(mappings, False) for physnet, intf_driver_name in mappings.items(): loaded_class = runtime.load_class_by_alias_or_classname( n_agent_utils.INTERFACE_NAMESPACE, intf_driver_name) obj = MultiInterfaceDriver._process_driver_obj( loaded_class(conf, **kwargs)) driver_mapping[physnet] = obj MultiInterfaceDriver._check_drivers(driver_mapping) return driver_mapping except ImportError: LOG.error("Error loading interface driver '%s'", conf.interface_driver) raise SystemExit(1) @classmethod def _init_network_cache(cls, conf, get_networks_cb, fields): """Initialize a global network cache to be shared between all Multi interface driver instances. :param conf: configuration object :param get_networks_cb: RPC callback to get network information :param fields: a list of fields to retrieve per network """ if cls.network_cache: # Already initialized return with cls._cache_init_lock: if cls.network_cache: # Double checking if already initialized by someone else return cls.network_cache = network_cache.SafeNetworkCache( get_networks_cb, fields) # Populate cache cls.network_cache.refresh() if conf.enable_multi_interface_driver_cache_maintenance: remove_stale_entries_loop = ( loopingcall.FixedIntervalLoopingCall( cls.network_cache.remove_stale_networks)) remove_stale_entries_loop.start( DEFAULT_CACHE_CLEANING_INTERVAL, initial_delay=DEFAULT_CACHE_CLEANING_INTERVAL) @classmethod def _get_networks_from_cache(cls, fields=None, filters=None): nets = [] if cls.network_cache is None: LOG.Error("MultiInterfaceDriver network cache was not initialized") return nets # NOTE(adrianc): since we want to work solely against cls.network_cache # it is required to impose some restrictions on filters and fields. 
if filters and list(six.iterkeys(filters)) != ['id']: LOG.error("Cache backed get_networks() does not support provided " "filters: %s", filters) return nets if fields and not set(fields).issubset(set( cls.network_cache.network_fields)): LOG.warning("Cache backed get_networks() does not contain all " "provided fields: %s", fields) if filters is None: nets = cls.network_cache.get_all() else: ids = filters.get('id') for id in ids: nets.append(cls.network_cache.get(id)) if fields: nets = [{field: net[field] for field in fields if field in net.keys()} for net in nets] # In case of an empty entry due to missing fields # remove from response. nets = [net for net in nets if len(net)] return nets def _get_driver_for_existing_interface(self, device_name, namespace=None): dev = n_ip_lib.IPDevice(device_name, namespace) kind = dev.link.link_kind for driver in six.itervalues(self.drivers): if driver._interface_kind_ == kind: return driver LOG.error("No interface driver found for interface %s of kind %s", device_name, kind) return None def plug_new(self, network_id, port_id, device_name, mac_address, bridge=None, namespace=None, prefix=None, mtu=None): """Plugin the interface.""" network = MultiInterfaceDriver.network_cache.get(network_id) physnet = network.get(constants.PHYSICAL_NETWORK) if physnet is None: # Tunnels(e.g vxlan, gre) map to the 'nil' interface driver. physnet = 'nil' try: driver = self.drivers[physnet] driver.plug_new(network_id, port_id, device_name, mac_address, bridge, namespace, prefix, mtu) except KeyError: LOG.error("Interface driver not found for physnet: %s", physnet) def unplug(self, device_name, bridge=None, namespace=None, prefix=None): """Unplug the interface.""" if not n_ip_lib.device_exists(device_name, namespace): # Nothing to unplug. 
return driver = self._get_driver_for_existing_interface(device_name, namespace) if driver: driver.unplug(device_name, bridge, namespace, prefix) else: LOG.error("Failed to unplug interface %s, did not find matching " "interface driver to unplug interface.", device_name) def set_mtu(self, device_name, mtu, namespace=None, prefix=None): """Set interface MTU""" driver = self._get_driver_for_existing_interface(device_name, namespace) if driver: driver.set_mtu(device_name, mtu, namespace, prefix) else: LOG.error("Failed to set MTU for %s, did not find matching " "interface driver for interface.", device_name) networking-mlnx-15.0.2/networking_mlnx/linux/interface_drivers/network_cache.py0000644000413600001450000001543613575645017030343 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_concurrency import lockutils
from oslo_log import log

from networking_mlnx.linux.interface_drivers import constants

LOG = log.getLogger(__name__)


class SimpleCache(object):
    """A simple object cache.

    Plain dict-backed id -> object store. Provides no locking of its own;
    see SafeNetworkCache below for a thread-safe variant.
    """

    def __init__(self):
        # id -> object mapping backing the cache
        self.object_cache = {}

    def __contains__(self, id):
        return id in self.object_cache

    def get(self, id):
        """Get object from cache

        :param id: object id
        :return: object or None if id is not in cache
        """
        return self.object_cache.get(id)

    def get_all(self):
        """Get all object from cache

        :return: list of objects
        """
        return list(self.object_cache.values())

    def put(self, id, obj):
        """Add object to cache

        :param id: object id
        :param obj: object to add
        """
        self.object_cache[id] = obj

    def remove(self, id):
        """Remove object from cache

        :param id: object id
        """
        # Membership check makes removal of an unknown id a no-op.
        if id in self:
            del self.object_cache[id]

    def clear(self):
        """Clear the cache"""
        self.object_cache = {}


class NetworkCache(SimpleCache):
    """A network cache that retrieves missing entries via callback."""

    def __init__(self, get_networks_cb, fields):
        """__init__

        :param get_networks_cb: a callback to get network object
            the callback should accept filters and fields parameters.
            e.g: def get_networks(filters, fields)
        :param fields: a list of fields to get for a network object
            used as an argument for get_networks_cb when getting new
            network objects. None value will fetch all available fields.
        """
        super(NetworkCache, self).__init__()
        self.get_networks_cb = get_networks_cb
        self.network_fields = fields
        # 'id' is required as the cache key, so force it into the requested
        # fields. NOTE: this mutates the caller-provided list in place.
        if self.network_fields and 'id' not in self.network_fields:
            self.network_fields.append('id')

    def _get_no_callback(self, network_id):
        """Get network from cache in case of a cache miss, return None.

        :param network_id: network id
        :return: network dict or None
        """
        return super(NetworkCache, self).get(network_id)

    def get(self, network_id):
        """Get network from cache in case of a cache miss, attempt to get
        network from callback.

        :param network_id: network id
        :return: network dict
        """
        net = self._get_no_callback(network_id)
        if net is None:
            LOG.debug("Network %s not in cache, fetching via callback.",
                      network_id)
            # NOTE(review): assumes the callback returns at least one match;
            # an unknown network_id would raise IndexError here - TODO
            # confirm callers only request existing networks.
            net = self.get_networks_cb(filters={'id': [network_id]},
                                       fields=self.network_fields)[0]
            self.put(net['id'], net)
        return net

    def refresh(self):
        """Refresh network cache"""
        # Only networks with admin_state_up=True are fetched and cached.
        nets = self.get_networks_cb(filters={constants.ADMIN_STATE_UP: [True]},
                                    fields=self.network_fields)
        self.clear()
        for net in nets:
            self.put(net['id'], net)

    def remove_stale_networks(self):
        """Remove networks that no longer exist or are inactive from cache

        This method is intended to be used as a periodic job to prevent
        the cache from containing too many stale entries.
        """
        nets = self.get_networks_cb(filters={constants.ADMIN_STATE_UP: [True]},
                                    fields=['id'])
        current_net_ids = set([net['id'] for net in nets])
        cached_net_ids = set([net['id'] for net in self.get_all()])
        # Anything cached but no longer reported by the server is stale.
        ids_to_remove = cached_net_ids - current_net_ids
        if ids_to_remove:
            LOG.debug("Removing stale networks from cache: %s", ids_to_remove)
        for id in ids_to_remove:
            self.remove(id)


class SafeNetworkCache(NetworkCache):
    """Thread safe implementation of NetworkCache

    Which basically wraps operations with a ReaderWriter Lock
    """

    def __init__(self, get_networks_cb, fields):
        """__init__

        :param get_networks_cb: a callback to get network object
            the callback should accept filters and fields parameters.
            e.g: def get_networks(filters, fields)
        :param fields: a list of fields to get for a network object
            used as an argument for get_networks_cb when getting new
            network objects. None value will fetch all available fields.
        """
        super(SafeNetworkCache, self).__init__(get_networks_cb, fields)
        # Readers may run concurrently; writers (put/remove/clear/refresh)
        # are exclusive.
        self.__rw_lock = lockutils.ReaderWriterLock()

    def get(self, id):
        """Get network from cache in case of a cache miss, attempt to get
        network from callback.

        :param id: network id
        :return: network dict
        """
        with self.__rw_lock.read_lock():
            net = self._get_no_callback(id)
        if net is None:
            LOG.debug("Network %s not in cache, fetching via callback.",
                      id)
            # NOTE(review): same IndexError caveat as NetworkCache.get() if
            # the callback returns no match - TODO confirm.
            net = self.get_networks_cb(filters={'id': [id]},
                                       fields=self.network_fields)[0]
            # Put operation is already protected by a writer lock
            self.put(net['id'], net)
        return net

    def get_all(self):
        """Get all object from cache

        :return: list of objects
        """
        with self.__rw_lock.read_lock():
            return super(SafeNetworkCache, self).get_all()

    def put(self, id, obj):
        """Add object to cache

        :param id: object id
        :param obj: object to add
        """
        with self.__rw_lock.write_lock():
            return super(SafeNetworkCache, self).put(id, obj)

    def remove(self, id):
        """Remove object from cache

        :param id: object id
        """
        with self.__rw_lock.write_lock():
            return super(SafeNetworkCache, self).remove(id)

    def clear(self):
        """Clear the cache"""
        with self.__rw_lock.write_lock():
            return super(SafeNetworkCache, self).clear()

    def refresh(self):
        """Refresh network cache"""
        # Takes the write lock for the whole fetch-clear-repopulate cycle so
        # readers never observe a partially rebuilt cache.
        with self.__rw_lock.write_lock():
            return super(SafeNetworkCache, self).refresh()
networking-mlnx-15.0.2/networking_mlnx/linux/__init__.py0000644000413600001450000000000013575645017023545 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/linux/constants.py0000644000413600001450000000120113575645017024036 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DEFAULT_PKEY = 0x7fff networking-mlnx-15.0.2/networking_mlnx/linux/ip_lib.py0000644000413600001450000000453013575645017023260 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron.agent.linux import ip_lib as n_ip_lib from neutron.privileged.agent.linux import ip_lib as n_privileged from networking_mlnx.linux import constants LOG = logging.getLogger(__name__) class IPoIBWrapper(n_ip_lib.IPWrapper): def __init__(self, *args, **kwargs): super(IPoIBWrapper, self).__init__(*args, **kwargs) def _segmentation_id_to_pkey(self, segmentation_id): # NOTE(adrianc): use the default pkey (7fff) in case: # 1. network is flat (segmentation_id is None) # 2. segmentation_id is 0 # PKEY length is 15 bits. if segmentation_id is None or segmentation_id == 0: return constants.DEFAULT_PKEY return int(segmentation_id) def add_ipoib(self, name, src_dev, segmentation_id=None): LOG.debug("Adding IPoIB device: name:%s, src_dev:%s, " "segmentation_id:%s", name, src_dev, segmentation_id) pkey = self._segmentation_id_to_pkey(segmentation_id) # NOTE(adrianc): ipoib child interface needs to be created in the # same namespace as its root device (i.e the physical interface). # Create in default namespace and then move. 
n_privileged.create_interface(name, None, "ipoib", physical_interface=src_dev, pkey=pkey) n_privileged.set_link_attribute( name, None, net_ns_fd=self.namespace) return n_ip_lib.IPDevice(name, namespace=self.namespace) def del_ipoib(self, name): LOG.debug("Deleting IPoIB device: name:%s", name) n_privileged.delete_interface(name, self.namespace) networking-mlnx-15.0.2/networking_mlnx/plugins/0000755000413600001450000000000013575645772022000 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/0000755000413600001450000000000013575645772022472 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/0000755000413600001450000000000013575645772024150 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/0000755000413600001450000000000013575645772025126 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/agent/0000755000413600001450000000000013575645772026224 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/agent/__init__.py0000644000413600001450000000000013566516767030324 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/agent/comm_utils.py0000644000413600001450000000442613566516767030760 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import time from oslo_config import cfg from oslo_log import log as logging from networking_mlnx.plugins.ml2.drivers.mlnx.agent import config # noqa LOG = logging.getLogger(__name__) class RetryDecorator(object): """Retry decorator reruns a method 'retries' times if an exception occurs. Decorator for retrying a method if exceptionToCheck exception occurs If method raises exception, retries 'retries' times with increasing back off period between calls with 'interval' multiplier :param exceptionToCheck: the exception to check :param interval: initial delay between retries in seconds :param retries: number of times to try before giving up :raises: exceptionToCheck """ def __init__(self, exceptionToCheck, interval=cfg.CONF.ESWITCH.request_timeout / 1000, retries=cfg.CONF.ESWITCH.retries, backoff_rate=cfg.CONF.ESWITCH.backoff_rate): self.exc = exceptionToCheck self.interval = interval self.retries = retries self.backoff_rate = backoff_rate def __call__(self, original_func): def decorated(*args, **kwargs): sleep_interval = self.interval num_of_iter = self.retries while num_of_iter > 0: try: return original_func(*args, **kwargs) except self.exc: LOG.debug("Request timeout - call again after " "%s seconds", sleep_interval) time.sleep(sleep_interval) num_of_iter -= 1 sleep_interval *= self.backoff_rate return original_func(*args, **kwargs) return decorated networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/agent/config.py0000644000413600001450000000374513566516767030055 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron.conf.agent import common as config from oslo_config import cfg from networking_mlnx._i18n import _ DEFAULT_INTERFACE_MAPPINGS = [] eswitch_opts = [ cfg.ListOpt('physical_interface_mappings', default=DEFAULT_INTERFACE_MAPPINGS, help=_("List of :")), cfg.StrOpt('daemon_endpoint', default='tcp://127.0.0.1:60001', help=_('eswitch daemon end point')), cfg.IntOpt('request_timeout', default=3000, help=_("The number of milliseconds the agent will wait for " "response on request to daemon.")), cfg.IntOpt('retries', default=3, help=_("The number of retries the agent will send request " "to daemon before giving up")), cfg.IntOpt('backoff_rate', default=2, help=_("backoff rate multiplier for waiting period between " "retries for request to daemon, i.e. value of 2 will " " double the request timeout each retry")), ] agent_opts = [ cfg.IntOpt('polling_interval', default=2, help=_("The number of seconds the agent will wait between " "polling for local device changes.")), ] cfg.CONF.register_opts(eswitch_opts, "ESWITCH") cfg.CONF.register_opts(agent_opts, "AGENT") config.register_agent_state_opts_helper(cfg.CONF) networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/agent/exceptions.py0000644000413600001450000000171313566516767030762 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron_lib import exceptions as qexc from networking_mlnx._i18n import _ class MlnxException(qexc.NeutronException): message = _("Mlnx Exception: %(err_msg)s") class RequestTimeout(qexc.NeutronException): message = _("Request Timeout: no response from eSwitchD") class OperationFailed(qexc.NeutronException): message = _("Operation Failed: %(err_msg)s") networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py0000644000413600001450000004145013575645041034223 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import socket import sys import time from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as agent_sg_rpc from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.common import config as common_config from neutron_lib.agent import topics from neutron_lib import constants from neutron_lib import context from neutron_lib.utils import helpers as q_utils from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_service import loopingcall import six from networking_mlnx.plugins.ml2.drivers.mlnx.agent import config # noqa from networking_mlnx.plugins.ml2.drivers.mlnx.agent import exceptions from networking_mlnx.plugins.ml2.drivers.mlnx.agent import utils from networking_mlnx.plugins.ml2.drivers.mlnx import mech_mlnx LOG = logging.getLogger(__name__) class EswitchManager(object): def __init__(self, interface_mappings, endpoint, timeout): self.utils = utils.EswitchUtils(endpoint, timeout) self.interface_mappings = interface_mappings self.network_map = {} self.utils.define_fabric_mappings(interface_mappings) def get_port_id_by_mac(self, port_mac): for network_id, data in six.iteritems(self.network_map): for port in data['ports']: if port['port_mac'] == port_mac: return port['port_id'] LOG.error("Agent cache inconsistency - port id " "is not stored for %s", port_mac) raise exceptions.MlnxException(err_msg=("Agent cache inconsistency, " "check logs")) def get_vnics_mac(self): return set(self.utils.get_attached_vnics().keys()) def vnic_port_exists(self, port_mac): return port_mac in self.utils.get_attached_vnics() def remove_network(self, network_id): if network_id in self.network_map: del self.network_map[network_id] else: LOG.debug("Network %s not defined on Agent.", network_id) def port_down(self, network_id, physical_network, port_mac): """Sets port to down. Check internal network map for port data. 
If port exists set port to Down """ for network_id, data in six.iteritems(self.network_map): for port in data['ports']: if port['port_mac'] == port_mac: self.utils.port_down(physical_network, port_mac) return LOG.info('Network %s is not available on this agent', network_id) def port_up(self, network_id, network_type, physical_network, seg_id, port_id, port_mac): """Sets port to up. Update internal network map with port data. - Check if vnic defined - configure eswitch vport - set port to Up """ LOG.debug("Connecting port %s", port_id) if network_id not in self.network_map: self.provision_network(port_id, port_mac, network_id, network_type, physical_network, seg_id) net_map = self.network_map[network_id] net_map['ports'].append({'port_id': port_id, 'port_mac': port_mac}) if network_type == constants.TYPE_VLAN: LOG.info('Binding Segmentation ID %(seg_id)s ' 'to eSwitch for vNIC mac_address %(mac)s', {'seg_id': seg_id, 'mac': port_mac}) elif network_type == constants.TYPE_FLAT: LOG.info('Binding eSwitch for vNIC mac_address %(mac)s' 'to flat network', {'mac': port_mac}) seg_id = 0 self.utils.set_port_vlan_id(physical_network, seg_id, port_mac) self.utils.port_up(physical_network, port_mac) def port_release(self, port_mac): """Clear port configuration from eSwitch.""" for network_id, net_data in six.iteritems(self.network_map): for port in net_data['ports']: if port['port_mac'] == port_mac: self.utils.port_release(net_data['physical_network'], port['port_mac']) return LOG.info('Port_mac %s is not available on this agent', port_mac) def provision_network(self, port_id, port_mac, network_id, network_type, physical_network, segmentation_id): LOG.info("Provisioning network %s", network_id) data = { 'physical_network': physical_network, 'network_type': network_type, 'ports': [], 'vlan_id': segmentation_id} self.network_map[network_id] = data class MlnxEswitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin): # Set RPC API version to 1.3 by default. 
# history # 1.1 Support Security Group RPC # 1.2 Support DVR (Distributed Virtual Router) RPC (not supported) # 1.3 Added param devices_to_update to security_groups_provider_updated # (works with NoopFirewallDriver) target = oslo_messaging.Target(version='1.3') def __init__(self, context, agent, sg_agent): super(MlnxEswitchRpcCallbacks, self).__init__() self.context = context self.agent = agent self.eswitch = agent.eswitch self.sg_agent = sg_agent def network_delete(self, context, **kwargs): LOG.debug("network_delete received") network_id = kwargs.get('network_id') if not network_id: LOG.warning("Invalid Network ID, cannot remove Network") else: LOG.debug("Delete network %s", network_id) self.eswitch.remove_network(network_id) def port_update(self, context, **kwargs): port = kwargs.get('port') self.agent.add_port_update(port['mac_address']) LOG.debug("port_update message processed for port with mac %s", port['mac_address']) class MlnxEswitchNeutronAgent(object): def __init__(self, interface_mapping): self._polling_interval = cfg.CONF.AGENT.polling_interval self._setup_eswitches(interface_mapping) configurations = {'interface_mappings': interface_mapping} self.conf = cfg.CONF self.agent_state = { 'binary': 'neutron-mlnx-agent', 'host': self.conf.host, 'topic': constants.L2_AGENT_TOPIC, 'configurations': configurations, 'agent_type': mech_mlnx.AGENT_TYPE_MLNX, 'start_flag': True} # Stores port update notifications for processing in main rpc loop self.updated_ports = set() self.context = context.get_admin_context_without_session() self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(self.context, self.sg_plugin_rpc) self._setup_rpc() def _setup_eswitches(self, interface_mapping): daemon = cfg.CONF.ESWITCH.daemon_endpoint timeout = cfg.CONF.ESWITCH.request_timeout self.eswitch = EswitchManager(interface_mapping, daemon, timeout) def 
_report_state(self): try: devices = len(self.eswitch.get_vnics_mac()) self.agent_state.get('configurations')['devices'] = devices self.state_rpc.report_state(self.context, self.agent_state) self.agent_state.pop('start_flag', None) except Exception: LOG.exception("Failed reporting state!") def _setup_rpc(self): self.agent_id = 'mlnx-agent.%s' % socket.gethostname() LOG.info("RPC agent_id: %s", self.agent_id) self.topic = topics.AGENT self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) # RPC network init # Handle updates from service self.endpoints = [MlnxEswitchRpcCallbacks(self.context, self, self.sg_agent)] # Define the listening consumers for the agent consumers = [[topics.PORT, topics.UPDATE], [topics.NETWORK, topics.DELETE], [topics.SECURITY_GROUP, topics.UPDATE]] self.connection = agent_rpc.create_consumers(self.endpoints, self.topic, consumers) report_interval = cfg.CONF.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) heartbeat.start(interval=report_interval) def add_port_update(self, port): self.updated_ports.add(port) def scan_ports(self, previous, sync): cur_ports = self.eswitch.get_vnics_mac() port_info = {'current': cur_ports} updated_ports = self.updated_ports self.updated_ports = set() if sync: # Either it's the first iteration or previous iteration had # problems. 
port_info['added'] = cur_ports port_info['removed'] = ((previous['removed'] | previous['current']) - cur_ports) port_info['updated'] = ((previous['updated'] | updated_ports) & cur_ports) else: # Shouldn't process updates for not existing ports port_info['added'] = cur_ports - previous['current'] port_info['removed'] = previous['current'] - cur_ports port_info['updated'] = updated_ports & cur_ports return port_info def process_network_ports(self, port_info): resync_a = False resync_b = False device_added_updated = port_info['added'] | port_info['updated'] if device_added_updated: resync_a = self.treat_devices_added_or_updated( device_added_updated) if port_info['removed']: resync_b = self.treat_devices_removed(port_info['removed']) # If one of the above opertaions fails => resync with plugin return (resync_a | resync_b) def treat_vif_port(self, port_id, port_mac, network_id, network_type, physical_network, segmentation_id, admin_state_up): if self.eswitch.vnic_port_exists(port_mac): if admin_state_up: self.eswitch.port_up(network_id, network_type, physical_network, segmentation_id, port_id, port_mac) else: self.eswitch.port_down(network_id, physical_network, port_mac) else: LOG.debug("No port %s defined on agent.", port_id) def treat_devices_added_or_updated(self, devices): try: devs_details_list = self.plugin_rpc.get_devices_details_list( self.context, devices, self.agent_id, self.conf.host) except Exception as e: LOG.debug("Unable to get device details for devices " "with MAC address %(devices)s: due to %(exc)s", {'devices': devices, 'exc': e}) # resync is needed return True for dev_details in devs_details_list: device = dev_details['device'] LOG.info("Adding or updating port with mac %s", device) if 'port_id' in dev_details: LOG.info("Port %s updated", device) LOG.debug("Device details %s", str(dev_details)) self.treat_vif_port(dev_details['port_id'], dev_details['device'], dev_details['network_id'], dev_details['network_type'], dev_details['physical_network'], 
dev_details['segmentation_id'], dev_details['admin_state_up']) LOG.debug("Setting status for %s to UP", device) self.plugin_rpc.update_device_up( self.context, device, self.agent_id, self.conf.host) else: LOG.debug("Setting status for %s to DOWN", device) self.plugin_rpc.update_device_down( self.context, device, self.agent_id, self.conf.host) LOG.error("Device with mac_address %s not defined " "on Neutron Plugin", device) return False def treat_devices_removed(self, devices): resync = False for device in devices: LOG.info("Removing device with mac_address %s", device) try: port_id = self.eswitch.get_port_id_by_mac(device) dev_details = self.plugin_rpc.update_device_down(self.context, port_id, self.agent_id, self.conf.host ) except Exception as e: LOG.debug("Removing port failed for device %(device)s " "due to %(exc)s", {'device': device, 'exc': e}) resync = True continue if dev_details['exists']: LOG.info("Port %s updated.", device) else: LOG.debug("Device %s not defined on plugin", device) self.eswitch.port_release(device) return resync def _port_info_has_changes(self, port_info): return (port_info['added'] or port_info['removed'] or port_info['updated']) def run(self): LOG.info("eSwitch Agent Started!") sync = True port_info = {'current': set(), 'added': set(), 'removed': set(), 'updated': set()} while True: start = time.time() try: port_info = self.scan_ports(previous=port_info, sync=sync) except exceptions.RequestTimeout: LOG.exception("Request timeout in agent event loop " "eSwitchD is not responding - exiting...") sync = True continue if sync: LOG.info("Agent out of sync with plugin!") sync = False if self._port_info_has_changes(port_info): LOG.debug("Starting to process devices in:%s", port_info) try: sync = self.process_network_ports(port_info) except Exception: LOG.exception("Error in agent event loop") sync = True # sleep till end of polling interval elapsed = (time.time() - start) if (elapsed < self._polling_interval): time.sleep(self._polling_interval - 
elapsed) else: LOG.debug("Loop iteration exceeded interval " "(%(polling_interval)s vs. %(elapsed)s)", {'polling_interval': self._polling_interval, 'elapsed': elapsed}) def main(): config.config.register_root_helper(cfg.CONF) common_config.init(sys.argv[1:]) common_config.setup_logging() try: interface_mappings = q_utils.parse_mappings( cfg.CONF.ESWITCH.physical_interface_mappings, unique_keys=False) except ValueError as e: LOG.error("Parsing physical_interface_mappings failed: %s. " "Agent terminated!", e) sys.exit(1) LOG.info("Interface mappings: %s", interface_mappings) try: agent = MlnxEswitchNeutronAgent(interface_mappings) except Exception: LOG.exception("Failed on Agent initialisation: Agent terminated!") sys.exit(1) # Start everything. LOG.info("Agent initialised successfully, now running... ") agent.run() sys.exit(0) networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/agent/utils.py0000644000413600001450000001304613575645041027727 0ustar lennybmtl00000000000000# Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import importutils import six from networking_mlnx._i18n import _LE from networking_mlnx.plugins.ml2.drivers.mlnx.agent import comm_utils from networking_mlnx.plugins.ml2.drivers.mlnx.agent import exceptions zmq = importutils.try_import('eventlet.green.zmq') LOG = logging.getLogger(__name__) class EswitchUtils(object): def __init__(self, daemon_endpoint, timeout): if not zmq: LOG.error("Failed to import eventlet.green.zmq. " "Won't connect to eSwitchD - exiting...") raise SystemExit(1) self.__conn = None self.daemon = daemon_endpoint self.timeout = timeout @property def _conn(self): if self.__conn is None: context = zmq.Context() socket = context.socket(zmq.REQ) socket.setsockopt(zmq.LINGER, 0) socket.connect(self.daemon) self.__conn = socket self.poller = zmq.Poller() self.poller.register(self._conn, zmq.POLLIN) return self.__conn @comm_utils.RetryDecorator(exceptions.RequestTimeout) def send_msg(self, msg): self._conn.send_string(msg) socks = dict(self.poller.poll(self.timeout)) if socks.get(self._conn) == zmq.POLLIN: recv_msg = self._conn.recv_string() response = self.parse_response_msg(recv_msg) return response else: self._conn.setsockopt(zmq.LINGER, 0) self._conn.close() self.poller.unregister(self._conn) self.__conn = None raise exceptions.RequestTimeout() def parse_response_msg(self, recv_msg): msg = jsonutils.loads(recv_msg) if msg['status'] == 'OK': if 'response' in msg: return msg.get('response') return elif msg['status'] == 'FAIL': msg_dict = dict(action=msg['action'], reason=msg['reason']) error_msg = _LE("Action %(action)s failed: %(reason)s") % msg_dict else: error_msg = _LE("Unknown operation status %s") % msg['status'] LOG.error(error_msg) raise exceptions.OperationFailed(err_msg=error_msg) def get_attached_vnics(self): LOG.debug("get_attached_vnics") msg = jsonutils.dumps({'action': 'get_vnics', 'fabric': '*'}) vnics = self.send_msg(msg) return vnics def 
set_port_vlan_id(self, physical_network, segmentation_id, port_mac): LOG.debug("Set Vlan %(segmentation_id)s on Port %(port_mac)s " "on Fabric %(physical_network)s", {'port_mac': port_mac, 'segmentation_id': segmentation_id, 'physical_network': physical_network}) msg = jsonutils.dumps({'action': 'set_vlan', 'fabric': physical_network, 'port_mac': port_mac, 'vlan': segmentation_id}) self.send_msg(msg) def define_fabric_mappings(self, interface_mapping): for fabric, phy_interface in six.iteritems(interface_mapping): LOG.debug("Define Fabric %(fabric)s on interface %(ifc)s", {'fabric': fabric, 'ifc': phy_interface}) msg = jsonutils.dumps({'action': 'define_fabric_mapping', 'fabric': fabric, 'interface': phy_interface}) self.send_msg(msg) def port_up(self, fabric, port_mac): LOG.debug("Port Up for %(port_mac)s on fabric %(fabric)s", {'port_mac': port_mac, 'fabric': fabric}) msg = jsonutils.dumps({'action': 'port_up', 'fabric': fabric, 'ref_by': 'mac_address', 'mac': 'port_mac'}) self.send_msg(msg) def port_down(self, fabric, port_mac): LOG.debug("Port Down for %(port_mac)s on fabric %(fabric)s", {'port_mac': port_mac, 'fabric': fabric}) msg = jsonutils.dumps({'action': 'port_down', 'fabric': fabric, 'ref_by': 'mac_address', 'mac': port_mac}) self.send_msg(msg) def port_release(self, fabric, port_mac): LOG.debug("Port Release for %(port_mac)s on fabric %(fabric)s", {'port_mac': port_mac, 'fabric': fabric}) msg = jsonutils.dumps({'action': 'port_release', 'fabric': fabric, 'ref_by': 'mac_address', 'mac': port_mac}) self.send_msg(msg) def get_eswitch_ports(self, fabric): # TODO(irena) - to implement for next phase return {} def get_eswitch_id(self, fabric): # TODO(irena) - to implement for next phase return "" networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/README0000644000413600001450000000035613566516767026013 0ustar lennybmtl00000000000000The Neutron Mellanox plugin has removed from the tree in Kilo. 
This directory includes Mellanox L2 agent for MLNX mechanism driver. For more details, please refer to the following link: https://wiki.openstack.org/wiki/Mellanox-Neutron-ML2networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/__init__.py0000644000413600001450000000000013566516767027226 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/mlnx/mech_mlnx.py0000644000413600001450000001121313575645017027440 0ustar lennybmtl00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from neutron.plugins.ml2.drivers import mech_agent from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import port from neutron_lib.api.definitions import portbindings from neutron_lib import constants as p_constants from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api AGENT_TYPE_MLNX = 'Mellanox plugin agent' VIF_TYPE_IB_HOSTDEV = 'ib_hostdev' class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """Attach to networks using Mellanox eSwitch L2 agent. The MellanoxMechanismDriver integrates the ml2 plugin with the Mellanox eswitch L2 agent. Port binding with this driver requires the Mellanox eswitch agent to be running on the port's host, and that agent to have connectivity to at least one segment of the port's network. 
""" def __init__(self): super(MlnxMechanismDriver, self).__init__( agent_type=AGENT_TYPE_MLNX, vif_type=VIF_TYPE_IB_HOSTDEV, vif_details={portbindings.CAP_PORT_FILTER: False}, supported_vnic_types=[portbindings.VNIC_DIRECT]) def get_allowed_network_types(self, agent=None): return [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT, p_constants.TYPE_VLAN] def get_mappings(self, agent): return agent['configurations'].get('interface_mappings', {}) def try_to_bind_segment_for_agent(self, context, segment, agent): if self.check_segment_for_agent(segment, agent): if (segment[api.NETWORK_TYPE] in (p_constants.TYPE_FLAT, p_constants.TYPE_VLAN)): self.vif_details['physical_network'] = segment[ 'physical_network'] context.set_binding(segment[api.ID], self.vif_type, self.vif_details) def _gen_client_id(self, port): _PREFIX = 'ff:00:00:00:00:00:02:00:00:02:c9:00:' _MIDDLE = ':00:00:' mac_address = port["mac_address"] mac_first = mac_address[:8] mac_last = mac_address[9:] client_id = ''.join([_PREFIX, mac_first, _MIDDLE, mac_last]) return client_id def _gen_client_id_opt(self, port): client_id = self._gen_client_id(port) return [{"opt_name": edo_ext.DHCP_OPT_CLIENT_ID, "opt_value": client_id}] def _gen_none_client_id_opt(self, port): updated_extra_dhcp_opts = [] for opt in port["extra_dhcp_opts"]: opt["opt_value"] = None updated_extra_dhcp_opts.append(opt) return updated_extra_dhcp_opts def _process_port_info(self, context): original_port = context.original updated_port = context.current original_host_id = original_port.get("binding:host_id") current_host_id = updated_port.get("binding:host_id") # in case migration did not take place or delete port if original_host_id == current_host_id or current_host_id is None: return plugin = directory.get_plugin() if updated_port.get("extra_dhcp_opts"): if updated_port["extra_dhcp_opts"] == \ self._gen_client_id_opt(updated_port): return if not updated_port["device_owner"]: updated_port["extra_dhcp_opts"] = \ 
self._gen_none_client_id_opt(updated_port) plugin._update_extra_dhcp_opts_on_port( context._plugin_context, updated_port["id"], {port.RESOURCE_NAME: updated_port}) elif ("compute" in updated_port["device_owner"] and updated_port["binding:vnic_type"] in ("direct", "normal")): updated_port["extra_dhcp_opts"] = \ self._gen_client_id_opt(updated_port) plugin._update_extra_dhcp_opts_on_port( context._plugin_context, updated_port["id"], {port.RESOURCE_NAME: updated_port}) def update_port_precommit(self, context): self._process_port_info(context) networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/0000755000413600001450000000000013575645772024734 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/__init__.py0000644000413600001450000000000013566516767027034 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/client.py0000644000413600001450000001102213575645041026545 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils import requests from networking_mlnx.plugins.ml2.drivers.sdn import config from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const from networking_mlnx.plugins.ml2.drivers.sdn import exceptions as sdn_exc from networking_mlnx.plugins.ml2.drivers.sdn import utils as sdn_utils LOG = log.getLogger(__name__) cfg.CONF.register_opts(config.sdn_opts, sdn_const.GROUP_OPT) class SdnRestClient(object): MANDATORY_ARGS = ('url', 'username', 'password') @classmethod def create_client(cls): return cls( cfg.CONF.sdn.url, cfg.CONF.sdn.domain, cfg.CONF.sdn.username, cfg.CONF.sdn.password, cfg.CONF.sdn.timeout) def __init__(self, url, domain, username, password, timeout): self.url = url self.domain = domain self.timeout = timeout self.username = username self.password = password self._validate_mandatory_params_exist() self.url.rstrip("/") def _validate_mandatory_params_exist(self): for arg in self.MANDATORY_ARGS: if not getattr(self, arg): raise cfg.RequiredOptError( arg, cfg.OptGroup(sdn_const.GROUP_OPT)) def _get_session(self): login_url = sdn_utils.strings_to_url(str(self.url), "login") login_data = "username=%s&password=%s" % (self.username, self.password) login_headers = sdn_const.LOGIN_HTTP_HEADER try: session = requests.session() LOG.debug("Login to SDN Provider. 
Login URL %(url)s", {'url': login_url}) r = session.request(sdn_const.POST, login_url, data=login_data, headers=login_headers, timeout=self.timeout) LOG.debug("request status: %d", r.status_code) r.raise_for_status() except Exception as e: raise sdn_exc.SDNLoginError(login_url=login_url, msg=e) return session def get(self, urlpath='', data=None): urlpath = sdn_utils.strings_to_url(self.url, urlpath) return self.request(sdn_const.GET, urlpath, data) def put(self, urlpath='', data=None): urlpath = sdn_utils.strings_to_url(self.url, self.domain, urlpath) return self.request(sdn_const.PUT, urlpath, data) def post(self, urlpath='', data=None): urlpath = sdn_utils.strings_to_url(self.url, self.domain, urlpath) return self.request(sdn_const.POST, urlpath, data) def delete(self, urlpath='', data=None): urlpath = sdn_utils.strings_to_url(self.url, self.domain, urlpath) return self.request(sdn_const.DELETE, urlpath, data) def request(self, method, urlpath='', data=None): data = jsonutils.dumps(data, indent=2) if data else None session = self._get_session() LOG.debug("Sending METHOD %(method)s URL %(url)s JSON %(data)s", {'method': method, 'url': urlpath, 'data': data}) return self._check_response(session.request( method, url=str(urlpath), headers=sdn_const.JSON_HTTP_HEADER, data=data, timeout=self.timeout), method) def _check_response(self, response, method): try: LOG.debug("request status: %d", response.status_code) request_found = True if response.text: LOG.debug("request text: %s", response.text) if (response.status_code == requests.codes.not_found and method == sdn_const.DELETE): request_found = False if (request_found and response.status_code != requests.codes.not_implemented): response.raise_for_status() except Exception as e: raise sdn_exc.SDNConnectionError(msg=e) return response networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/config.py0000644000413600001450000000766413575645041026555 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox 
Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from networking_mlnx._i18n import _ from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const sdn_opts = [ cfg.BoolOpt('sync_enabled', help=_("Whether synchronising state to an SDN provider is " "enabled."), default=True), cfg.StrOpt('url', help=_("HTTP URL of SDN Provider."), ), cfg.StrOpt('domain', help=_("Cloud domain name in SDN provider " "(for example: cloudx)"), default='cloudx' ), cfg.StrOpt('username', help=_("HTTP username for authentication."), ), cfg.StrOpt('password', help=_("HTTP password for authentication."), secret=True, default='123456' ), cfg.IntOpt('timeout', help=_("HTTP timeout in seconds."), default=10 ), cfg.IntOpt('sync_timeout', default=10, help=_("Sync thread timeout in seconds.")), cfg.IntOpt('retry_count', default=-1, help=_("Number of times to retry a row " "before failing." "To disable retry count value should be -1")), cfg.IntOpt('maintenance_interval', default=300, help=_("Journal maintenance operations interval " "in seconds.")), cfg.IntOpt('completed_rows_retention', default=600, help=_("Time to keep completed rows in seconds." "Completed rows retention will be checked every " "maintenance_interval by the cleanup thread." 
"To disable completed rows deletion " "value should be -1")), cfg.IntOpt('processing_timeout', default='100', help=_("Time in seconds to wait before a " "processing row is marked back to pending.")), cfg.ListOpt('physical_networks', default=sdn_const.ANY, help=_("Comma-separated list of " "that it will send notification. * " "means all physical_networks")), cfg.BoolOpt('bind_normal_ports', default=False, help=_("Allow the binding of normal ports for ports " "associated with an InfiniBand physnet from " "bind_normal_ports_physnets.")), # TODO(adrianc): The name here is a bit missleading, since there is no # usecase where SDN mechanism driver should bind normal ports for ETH # physnets. this should be renamed to: `infiniband_physnets` which will # require updates to deployment projects as well. cfg.ListOpt('bind_normal_ports_physnets', default=[], help=_("A list of InfiniBand physnets in which binding of " "normal ports is allowed. This option is used in " "conjuction with bind_normal_ports. " "The list must be a subset of physical_networks")), ] networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/constants.py0000644000413600001450000000220213575645041027303 0ustar lennybmtl00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Config file groups name GROUP_OPT = "sdn" # RESTful API paths: NETWORK = "Network" PORT = "Port" # HTTP request methods: DELETE = "DELETE" POST = "POST" PUT = "PUT" GET = "GET" # HTTP headers LOGIN_HTTP_HEADER = {'content-type': 'application/x-www-form-urlencoded'} JSON_HTTP_HEADER = {"Content-Type": "application/json"} # Constants for journal operation states PENDING = 'pending' PROCESSING = 'processing' MONITORING = 'monitoring' FAILED = 'failed' COMPLETED = 'completed' # Constants for physical_networks option ANY = '*' networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/exceptions.py0000644000413600001450000000176213575645041027462 0ustar lennybmtl00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as exc from networking_mlnx._i18n import _ class SDNConnectionError(exc.NeutronException): message = _("Failed to send request %(msg)s") class SDNLoginError(exc.NeutronException): message = _("Failed login to URL: %(login_url)s %(msg)s") class SDNDriverConfError(exc.NeutronException): message = _("Driver configuration error: %(msg)s") networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/sdn_mech_driver.py0000644000413600001450000004363313575645041030437 0ustar lennybmtl00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron.objects.qos import policy as policy_object from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import portbindings from neutron_lib import constants as neutron_const from neutron_lib.db import api as db_api from neutron_lib.plugins.ml2 import api from oslo_config import cfg from oslo_log import log from networking_mlnx.journal import cleanup from networking_mlnx.journal import journal from networking_mlnx.journal import maintenance from networking_mlnx.plugins.ml2.drivers.sdn import config from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const from networking_mlnx.plugins.ml2.drivers.sdn import exceptions as sdn_excpt LOG = log.getLogger(__name__) cfg.CONF.register_opts(config.sdn_opts, sdn_const.GROUP_OPT) NETWORK_QOS_POLICY = 'network_qos_policy' DHCP_OPT_CLIENT_ID_NUM = '61' def context_validator(context_type=None): def real_decorator(func): @functools.wraps(func) def wrapper(instance, context, *args, **kwargs): if context_type == sdn_const.PORT: # port context contain network_context # which include the segments segments = getattr(context.network, "network_segments", None) elif context_type == sdn_const.NETWORK: segments = getattr(context, "network_segments", None) else: segments = getattr(context, "segments_to_bind", None) if segments and getattr(instance, "check_segments", None): if instance.check_segments(segments): return func(instance, context, *args, **kwargs) return wrapper return real_decorator def error_handler(func): @functools.wraps(func) def 
wrapper(instance, *args, **kwargs): try: return func(instance, *args, **kwargs) except Exception as e: LOG.error("%(function_name)s %(exception_desc)s", {'function_name': func.__name__, 'exception_desc': str(e)}) return wrapper class SDNMechanismDriver(api.MechanismDriver): """Mechanism Driver for SDN. This driver send notifications to SDN provider. The notifications are for port/network changes. """ supported_device_owners = [neutron_const.DEVICE_OWNER_DHCP, neutron_const.DEVICE_OWNER_ROUTER_INTF, neutron_const.DEVICE_OWNER_ROUTER_GW, neutron_const.DEVICE_OWNER_FLOATINGIP] def initialize(self): if self._is_sdn_sync_enabled(): self.journal = journal.SdnJournalThread() self._start_maintenance_thread() self.supported_vnic_types = [portbindings.VNIC_BAREMETAL] self.supported_network_types = ( [neutron_const.TYPE_VLAN, neutron_const.TYPE_FLAT]) self.vif_type = portbindings.VIF_TYPE_OTHER self.vif_details = {} SDNMechanismDriver._check_physnet_confs() self.allowed_physical_networks = cfg.CONF.sdn.physical_networks self.bind_normal_ports = cfg.CONF.sdn.bind_normal_ports self.bind_normal_ports_physnets = ( cfg.CONF.sdn.bind_normal_ports_physnets) @staticmethod def _check_physnet_confs(): """Check physical network related ML2 driver configuration options""" def _is_sublist(sub, lst): return functools.reduce( lambda x, y: x & y, map(lambda x: x in lst, sub)) LOG.debug("physnet Config opts: physical_networks=%s, " "bind_normal_ports=%s, bind_normal_ports_physnets=%s", cfg.CONF.sdn.physical_networks, cfg.CONF.sdn.bind_normal_ports, cfg.CONF.sdn.bind_normal_ports_physnets) # Note(adrianc): if `bind_normal_ports` is set then # `bind_normal_ports_physnets` must be a subset of `physical_networks` if (cfg.CONF.sdn.bind_normal_ports and not (sdn_const.ANY in cfg.CONF.sdn.physical_networks) and _is_sublist( cfg.CONF.sdn.bind_normal_ports_physnets, cfg.CONF.sdn.physical_networks)): raise sdn_excpt.SDNDriverConfError( msg="'bind_normal_ports_physnets' configuration option is " 
"expected to be a subset of 'physical_networks'.") @staticmethod def _is_sdn_sync_enabled(): """Whether to synchronise events to an SDN controller.""" return cfg.CONF.sdn.sync_enabled def _is_allowed_physical_network(self, physical_network): if (sdn_const.ANY in self.allowed_physical_networks or physical_network in self.allowed_physical_networks): return True return False def _is_allowed_physical_networks(self, network_context): for network_segment in network_context.network_segments: physical_network = network_segment.get('physical_network') if not self._is_allowed_physical_network(physical_network): return False return True def _start_maintenance_thread(self): # start the maintenance thread and register all the maintenance # operations : # (1) JournalCleanup - Delete completed rows from journal # (2) CleanupProcessing - Mark orphaned processing rows to pending cleanup_obj = cleanup.JournalCleanup() self._maintenance_thread = maintenance.MaintenanceThread() self._maintenance_thread.register_operation( cleanup_obj.delete_completed_rows) self._maintenance_thread.register_operation( cleanup_obj.cleanup_processing_rows) self._maintenance_thread.start() @staticmethod def _record_in_journal(context, object_type, operation, data=None): if not SDNMechanismDriver._is_sdn_sync_enabled(): return if data is None: data = context.current if object_type == sdn_const.PORT: SDNMechanismDriver._replace_port_dhcp_opt_name( data, DHCP_OPT_CLIENT_ID_NUM, edo_ext.DHCP_OPT_CLIENT_ID) journal.record(context._plugin_context.session, object_type, context.current['id'], operation, data) @context_validator(sdn_const.NETWORK) @error_handler def create_network_precommit(self, context): network_dic = context.current if (self._is_allowed_physical_networks(context) and network_dic.get('provider:segmentation_id')): network_dic[NETWORK_QOS_POLICY] = ( self._get_network_qos_policy(context, network_dic['id'])) SDNMechanismDriver._record_in_journal( context, sdn_const.NETWORK, sdn_const.POST, 
network_dic) @context_validator() @error_handler def bind_port(self, context): if not self._is_allowed_physical_networks(context.network): return port_dic = context.current if self._is_send_bind_port(port_dic): port_dic[NETWORK_QOS_POLICY] = ( self._get_network_qos_policy(context, port_dic['network_id'])) SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.POST, port_dic) segments = context.network.network_segments for segment in segments: if not self._is_port_binding_supported(port_dic, segment): continue # set port to active if supported context.set_binding(segment[api.ID], self.vif_type, self.vif_details, neutron_const.PORT_STATUS_ACTIVE) @context_validator(sdn_const.NETWORK) @error_handler def update_network_precommit(self, context): network_dic = context.current if (self._is_allowed_physical_networks(context)): network_dic[NETWORK_QOS_POLICY] = ( self._get_network_qos_policy(context, network_dic['id'])) SDNMechanismDriver._record_in_journal( context, sdn_const.NETWORK, sdn_const.PUT, network_dic) def _get_client_id_from_port(self, port): dhcp_opts = port.get('extra_dhcp_opts', []) for dhcp_opt in dhcp_opts: if (isinstance(dhcp_opt, dict) and dhcp_opt.get('opt_name') in (edo_ext.DHCP_OPT_CLIENT_ID, DHCP_OPT_CLIENT_ID_NUM)): return dhcp_opt.get('opt_value') @staticmethod def _replace_port_dhcp_opt_name(port, old_opt_name, new_opt_name): dhcp_opts = port.get('extra_dhcp_opts', []) for dhcp_opt in dhcp_opts: if (isinstance(dhcp_opt, dict) and dhcp_opt.get('opt_name') == old_opt_name): dhcp_opt['opt_name'] = new_opt_name return def _get_local_link_information(self, port): binding_profile = port.get('binding:profile') if binding_profile: return binding_profile.get('local_link_information') def create_port_precommit(self, context): if not self._is_allowed_physical_networks(context.network): return port_dic = context.current port_dic[NETWORK_QOS_POLICY] = ( self._get_network_qos_policy(context, port_dic['network_id'])) vnic_type = 
port_dic[portbindings.VNIC_TYPE] if (vnic_type == portbindings.VNIC_BAREMETAL and (self._get_client_id_from_port(port_dic) or self._get_local_link_information(port_dic))): SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.POST, port_dic) def update_port_precommit(self, context): if not self._is_allowed_physical_networks(context.network): return port_dic = context.current orig_port_dict = context.original port_dic[NETWORK_QOS_POLICY] = ( self._get_network_qos_policy(context, port_dic['network_id'])) vnic_type = port_dic[portbindings.VNIC_TYPE] # Check if we get a client id after binding the bare metal port, # and report the port to neo if vnic_type == portbindings.VNIC_BAREMETAL: # Ethernet Case link__info = self._get_local_link_information(port_dic) orig_link_info = self._get_local_link_information(orig_port_dict) if link__info != orig_link_info and link__info: SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.POST, port_dic) return elif (orig_link_info and orig_port_dict[portbindings.HOST_ID] and not port_dic[portbindings.HOST_ID]): SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.DELETE, orig_port_dict) return # InfiniBand Case current_client_id = self._get_client_id_from_port(port_dic) orig_client_id = self._get_client_id_from_port(orig_port_dict) if current_client_id != orig_client_id: SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.POST, port_dic) return elif (orig_client_id and orig_port_dict[portbindings.HOST_ID] and not port_dic[portbindings.HOST_ID]): SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.DELETE, orig_port_dict) return # delete the port in case instance is deleted # and port is created separately elif (orig_port_dict[portbindings.HOST_ID] and not port_dic[portbindings.HOST_ID] and self._is_send_bind_port(orig_port_dict)): SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.DELETE, orig_port_dict) # 
delete the port in case instance is migrated to another hypervisor elif (orig_port_dict[portbindings.HOST_ID] and port_dic[portbindings.HOST_ID] != orig_port_dict[portbindings.HOST_ID] and self._is_send_bind_port(orig_port_dict)): SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.DELETE, orig_port_dict) else: SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.PUT, port_dic) @context_validator(sdn_const.NETWORK) @error_handler def delete_network_precommit(self, context): if not self._is_allowed_physical_networks(context): return network_dic = context.current network_dic[NETWORK_QOS_POLICY] = ( self._get_network_qos_policy(context, network_dic['id'])) SDNMechanismDriver._record_in_journal( context, sdn_const.NETWORK, sdn_const.DELETE, data=network_dic) @context_validator(sdn_const.PORT) @error_handler def delete_port_precommit(self, context): if not self._is_allowed_physical_networks(context.network): return port_dic = context.current # delete the port only if attached to a host vnic_type = port_dic[portbindings.VNIC_TYPE] if (port_dic[portbindings.HOST_ID] and (vnic_type == portbindings.VNIC_BAREMETAL or self._is_send_bind_port(port_dic))): port_dic[NETWORK_QOS_POLICY] = ( self._get_network_qos_policy(context, port_dic['network_id'])) SDNMechanismDriver._record_in_journal( context, sdn_const.PORT, sdn_const.DELETE, port_dic) @journal.call_thread_on_end def sync_from_callback(self, operation, res_type, res_id, resource_dict): object_type = res_type.singular object_uuid = (resource_dict[object_type]['id'] if operation == sdn_const.POST else res_id) if resource_dict is not None: resource_dict = resource_dict[object_type] journal.record(db_api.get_session(), object_type, object_uuid, operation, resource_dict) def _postcommit(self, context): if not self._is_sdn_sync_enabled(): return self.journal.set_sync_event() create_network_postcommit = _postcommit update_network_postcommit = _postcommit create_port_postcommit = 
_postcommit update_port_postcommit = _postcommit delete_network_postcommit = _postcommit delete_port_postcommit = _postcommit def _is_send_bind_port(self, port_context): """Verify that bind port is occur in compute context The request HTTP will occur only when the device owner is compute or when device owner is in self.supported_device_owners """ device_owner = port_context['device_owner'] return (device_owner and (device_owner.lower().startswith( neutron_const.DEVICE_OWNER_COMPUTE_PREFIX) or device_owner in self.supported_device_owners)) def _is_port_binding_supported(self, port, segment): """Check if driver is able to bind the port Port binding is supported if: a. Port VNIC type in supported_vnic_types (currently VNIC_BAREMETAL) and sdn sync disabled and not flat network. Or b. Port is of VNIC type normal and: 1. bind_normal_ports cfg opt is set. 2. The segment's physnet is in bind_normal_ports_physnets cfg opt. 3. The device owner is DHCP/Router(Non DVR) related port. :param port: port object :param segment: Segment dictionary representing the network segment to bind on. :return: True if port binding is supported by the driver else False. """ vnic_type = port[portbindings.VNIC_TYPE] if vnic_type in self.supported_vnic_types: if (segment[api.NETWORK_TYPE] != neutron_const.TYPE_FLAT and not self._is_sdn_sync_enabled()): # Don't bind to non-flat networks if not syncing to an SDN # controller. return False return True if (vnic_type == portbindings.VNIC_NORMAL and self.bind_normal_ports and port['device_owner'] in self.supported_device_owners and segment.get('physical_network') in self.bind_normal_ports_physnets): return True return False def check_segment(self, segment): """Verify if a segment is valid for the SDN MechanismDriver. Verify if the requested segment is supported by SDN MD and return True or False to indicate this to callers. 
""" network_type = segment[api.NETWORK_TYPE] return network_type in self.supported_network_types def check_segments(self, segments): """Verify if there is a segment in a list of segments that valid for the SDN MechanismDriver. Verify if the requested segments are supported by SDN MD and return True or False to indicate this to callers. """ if segments: for segment in segments: if self.check_segment(segment): return True return False def _get_network_qos_policy(self, context, net_id): return policy_object.QosPolicy.get_network_policy( context._plugin_context, net_id) networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/sdn/utils.py0000644000413600001450000000125713566516767026454 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def strings_to_url(*args): return "/".join(filter(None, args)) networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/drivers/__init__.py0000644000413600001450000000000013566516767026250 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/ml2/__init__.py0000644000413600001450000000000013566516767024572 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/plugins/__init__.py0000644000413600001450000000000013566516767024100 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/0000755000413600001450000000000013575645772021461 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/0000755000413600001450000000000013575645772022440 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/db/0000755000413600001450000000000013575645772023025 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/db/__init__.py0000644000413600001450000000000013566516767025125 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/db/test_db.py0000644000413600001450000002616313575645041025020 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from datetime import datetime from datetime import timedelta import mock from neutron.tests.unit import testlib_api from neutron_lib import context from oslo_db import exception from networking_mlnx.db import db from networking_mlnx.db.models import sdn_journal_db from networking_mlnx.db.models import sdn_maintenance_db from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const class DbTestCase(testlib_api.SqlTestCaseLight): UPDATE_ROW = [sdn_const.NETWORK, 'id', sdn_const.PUT, {'test': 'data'}] def setUp(self): super(DbTestCase, self).setUp() self.db_context = context.get_admin_context() self.db_session = self.db_context.session self.addCleanup(self._db_cleanup) def _db_cleanup(self): self.db_session.query(sdn_journal_db.SdnJournal).delete() def _update_row(self, row): self.db_session.merge(row) self.db_session.flush() def _test_validate_updates(self, rows, time_deltas, expected_validations): for row in rows: db.create_pending_row(self.db_session, *row) # update row created_at rows = db.get_all_db_rows(self.db_session) now = datetime.now() for row, time_delta in zip(rows, time_deltas): row.created_at = now - timedelta(hours=time_delta) self._update_row(row) # validate if there are older rows for row, expected_valid in zip(rows, expected_validations): valid = not db.check_for_older_ops(self.db_session, row) self.assertEqual(expected_valid, valid) def _test_retry_count(self, retry_num, max_retry, expected_retry_count, expected_state): # add new pending row db.create_pending_row(self.db_session, *self.UPDATE_ROW) # update the row with the requested retry_num row = db.get_all_db_rows(self.db_session)[0] row.retry_count = retry_num - 1 db.update_pending_db_row_retry(self.db_session, row, max_retry) # validate the state and the retry_count of the row row = db.get_all_db_rows(self.db_session)[0] self.assertEqual(expected_state, row.state) self.assertEqual(expected_retry_count, row.retry_count) def _test_update_row_state(self, from_state, to_state): # add 
new pending row db.create_pending_row(self.db_session, *self.UPDATE_ROW) row = db.get_all_db_rows(self.db_session)[0] for state in [from_state, to_state]: # update the row state db.update_db_row_state(self.db_session, row, state) # validate the new state row = db.get_all_db_rows(self.db_session)[0] self.assertEqual(state, row.state) def test_validate_updates_same_object_uuid(self): self._test_validate_updates( [self.UPDATE_ROW, self.UPDATE_ROW], [1, 0], [True, False]) def test_validate_updates_same_created_time(self): self._test_validate_updates( [self.UPDATE_ROW, self.UPDATE_ROW], [0, 0], [True, True]) def test_validate_updates_different_object_uuid(self): other_row = list(self.UPDATE_ROW) other_row[1] += 'a' self._test_validate_updates( [self.UPDATE_ROW, other_row], [1, 0], [True, True]) def test_validate_updates_different_object_type(self): other_row = list(self.UPDATE_ROW) other_row[0] = sdn_const.PORT other_row[1] += 'a' self._test_validate_updates( [self.UPDATE_ROW, other_row], [1, 0], [True, True]) def test_get_oldest_pending_row_none_when_no_rows(self): row = db.get_oldest_pending_db_row_with_lock(self.db_session) self.assertIsNone(row) def _test_get_oldest_pending_row_none(self, state): db.create_pending_row(self.db_session, *self.UPDATE_ROW) row = db.get_all_db_rows(self.db_session)[0] row.state = state self._update_row(row) row = db.get_oldest_pending_db_row_with_lock(self.db_session) self.assertIsNone(row) def test_get_oldest_pending_row_none_when_row_processing(self): self._test_get_oldest_pending_row_none(sdn_const.PROCESSING) def test_get_oldest_pending_row_none_when_row_failed(self): self._test_get_oldest_pending_row_none(sdn_const.FAILED) def test_get_oldest_pending_row_none_when_row_completed(self): self._test_get_oldest_pending_row_none(sdn_const.COMPLETED) def test_get_oldest_pending_row_none_when_row_monitoring(self): self._test_get_oldest_pending_row_none(sdn_const.MONITORING) def test_get_oldest_pending_row(self): 
db.create_pending_row(self.db_session, *self.UPDATE_ROW) row = db.get_oldest_pending_db_row_with_lock(self.db_session) self.assertIsNotNone(row) self.assertEqual(sdn_const.PROCESSING, row.state) def test_get_oldest_pending_row_order(self): db.create_pending_row(self.db_session, *self.UPDATE_ROW) older_row = db.get_all_db_rows(self.db_session)[0] older_row.last_retried -= timedelta(minutes=1) self._update_row(older_row) db.create_pending_row(self.db_session, *self.UPDATE_ROW) row = db.get_oldest_pending_db_row_with_lock(self.db_session) self.assertEqual(older_row, row) def test_get_all_monitoring_db_row_by_oldest_order(self): db.create_pending_row(self.db_session, *self.UPDATE_ROW) db.create_pending_row(self.db_session, *self.UPDATE_ROW) older_row = db.get_all_db_rows(self.db_session)[1] older_row.last_retried -= timedelta(minutes=1) older_row.state = sdn_const.MONITORING self._update_row(older_row) newer_row = db.get_all_db_rows(self.db_session)[0] newer_row.state = sdn_const.MONITORING self._update_row(newer_row) rows = db.get_all_monitoring_db_row_by_oldest(self.db_session) self.assertEqual(older_row, rows[0]) self.assertEqual(newer_row, rows[1]) def test_get_oldest_pending_row_when_deadlock(self): db.create_pending_row(self.db_session, *self.UPDATE_ROW) update_mock = ( mock.MagicMock(side_effect=(exception.DBDeadlock, mock.DEFAULT))) # Mocking is mandatory to achieve a deadlock regardless of the DB # backend being used when running the tests with mock.patch.object(db, 'update_db_row_state', new=update_mock): row = db.get_oldest_pending_db_row_with_lock(self.db_session) self.assertIsNotNone(row) self.assertEqual(2, update_mock.call_count) def _test_delete_rows_by_state_and_time(self, last_retried, row_retention, state, expected_rows): db.create_pending_row(self.db_session, *self.UPDATE_ROW) # update state and last retried row = db.get_all_db_rows(self.db_session)[0] row.state = state row.last_retried = row.last_retried - timedelta(seconds=last_retried) 
self._update_row(row) db.delete_rows_by_state_and_time(self.db_session, sdn_const.COMPLETED, timedelta(seconds=row_retention)) # validate the number of rows in the journal rows = db.get_all_db_rows(self.db_session) self.assertEqual(expected_rows, len(rows)) def test_delete_completed_rows_no_new_rows(self): self._test_delete_rows_by_state_and_time(0, 10, sdn_const.COMPLETED, 1) def test_delete_completed_rows_one_new_row(self): self._test_delete_rows_by_state_and_time(6, 5, sdn_const.COMPLETED, 0) def test_delete_completed_rows_wrong_state(self): self._test_delete_rows_by_state_and_time(10, 8, sdn_const.PENDING, 1) def test_valid_retry_count(self): self._test_retry_count(1, 1, 1, sdn_const.PENDING) def test_invalid_retry_count(self): self._test_retry_count(2, 1, 1, sdn_const.FAILED) def test_update_row_state_to_pending(self): self._test_update_row_state(sdn_const.PROCESSING, sdn_const.PENDING) def test_update_row_state_to_processing(self): self._test_update_row_state(sdn_const.PENDING, sdn_const.PROCESSING) def test_update_row_state_to_failed(self): self._test_update_row_state(sdn_const.PROCESSING, sdn_const.FAILED) def test_update_row_state_to_monitoring(self): self._test_update_row_state(sdn_const.PROCESSING, sdn_const.MONITORING) def test_update_row_state_to_completed(self): self._test_update_row_state(sdn_const.PROCESSING, sdn_const.COMPLETED) def test_update_row_job_id(self): # add new pending row expected_job_id = 'job_id' db.create_pending_row(self.db_session, *self.UPDATE_ROW) row = db.get_all_db_rows(self.db_session)[0] db.update_db_row_job_id(self.db_session, row, expected_job_id) row = db.get_all_db_rows(self.db_session)[0] self.assertEqual(expected_job_id, row.job_id) def _test_maintenance_lock_unlock(self, db_func, existing_state, expected_state, expected_result): row = sdn_maintenance_db.SdnMaintenance(id='test', state=existing_state) self.db_session.add(row) self.db_session.flush() self.assertEqual(expected_result, db_func(self.db_session)) row = 
self.db_session.query(sdn_maintenance_db.SdnMaintenance).one() self.assertEqual(expected_state, row['state']) def test_lock_maintenance(self): self._test_maintenance_lock_unlock(db.lock_maintenance, sdn_const.PENDING, sdn_const.PROCESSING, True) def test_lock_maintenance_fails_when_processing(self): self._test_maintenance_lock_unlock(db.lock_maintenance, sdn_const.PROCESSING, sdn_const.PROCESSING, False) def test_unlock_maintenance(self): self._test_maintenance_lock_unlock(db.unlock_maintenance, sdn_const.PROCESSING, sdn_const.PENDING, True) def test_unlock_maintenance_fails_when_pending(self): self._test_maintenance_lock_unlock(db.unlock_maintenance, sdn_const.PENDING, sdn_const.PENDING, False) networking-mlnx-15.0.2/networking_mlnx/tests/unit/eswitchd/0000755000413600001450000000000013575645772024252 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/eswitchd/__init__.py0000644000413600001450000000000013566516770026344 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/eswitchd/test_pci_utils.py0000644000413600001450000000573013575645041027650 0ustar lennybmtl00000000000000#!/usr/bin/env python # Copyright 2013 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import contextlib import subprocess import mock import six from networking_mlnx._i18n import _LE from networking_mlnx.eswitchd.utils import pci_utils from networking_mlnx.tests import base if six.PY3: @contextlib.contextmanager def nested(*contexts): with contextlib.ExitStack() as stack: yield [stack.enter_context(c) for c in contexts] else: nested = contextlib.nested class TestPciUtils(base.TestCase): def setUp(self): super(TestPciUtils, self).setUp() self.pci_utils = pci_utils.pciUtils() def test_get_vfs_info_not_found_device(self): pf = "pf_that_does_not_exist" with mock.patch.object(pci_utils, 'LOG') as LOG: self.pci_utils.get_vfs_info(pf) LOG.error.assert_called_with(_LE("PCI device %s not found"), pf) def test_get_dev_attr_valid_attr(self): cmd = "find /sys/class/net/*/device/vendor | head -1 | cut -d '/' -f5" pf = subprocess.check_output(cmd, shell=True) pf = pf.strip().decode("utf-8") if pf: attr_path = "/sys/class/net/%s/device/vendor" % pf return_val = self.pci_utils.get_dev_attr(attr_path) self.assertIsNotNone(return_val) def test_get_dev_attr_invalid_attr(self): attr_path = "/path/that/does/not/exist" return_val = self.pci_utils.get_dev_attr(attr_path) self.assertIsNone(return_val) def test_verify_vendor_pf_valid_vendor(self): cmd = "find /sys/class/net/*/device/vendor | head -1 | cut -d '/' -f5" pf = subprocess.check_output(cmd, shell=True) pf = pf.strip().decode("utf-8") if pf: attr_path = "/sys/class/net/%s/device/vendor" % pf attr = subprocess.check_output("cat %s" % attr_path, shell=True) attr = attr.strip().decode("utf-8") return_val = self.pci_utils.verify_vendor_pf(pf, attr) self.assertTrue(return_val) def test_verify_vendor_pf_invalid_vendor(self): cmd = "ls -U /sys/class/net | head -1" pf = subprocess.check_output(cmd, shell=True) pf = pf.strip() attr = "0x0000" return_val = self.pci_utils.verify_vendor_pf(pf, attr) self.assertFalse(return_val) def test_is_sriov_pf_false(self): pf = "pf_that_does_not_exist" is_sriov = 
self.pci_utils.is_sriov_pf(pf) self.assertFalse(is_sriov) networking-mlnx-15.0.2/networking_mlnx/tests/unit/journal/0000755000413600001450000000000013575645772024112 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/journal/__init__.py0000644000413600001450000000126713566516770026224 0ustar lennybmtl00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg cfg.CONF.use_stderr = False networking-mlnx-15.0.2/networking_mlnx/tests/unit/journal/test_dependency_validations.py0000644000413600001450000000315713566516770032237 0ustar lennybmtl00000000000000# # Copyright (C) 2016 Intel Corp. Isaku Yamahata # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import mock from neutron.tests import base from networking_mlnx.journal import dependency_validations class DependencyValidationsTestCase(base.DietTestCase): _RESOURCE_DUMMY = 'test_type' def setUp(self): super(DependencyValidationsTestCase, self).setUp() mock_validation_map = mock.patch.dict( dependency_validations._VALIDATION_MAP) mock_validation_map.start() self.addCleanup(mock_validation_map.stop) def test_register_validator(self): mock_session = mock.Mock() mock_validator = mock.Mock(return_value=False) mock_row = mock.Mock() mock_row.object_type = self._RESOURCE_DUMMY dependency_validations.register_validator(self._RESOURCE_DUMMY, mock_validator) valid = dependency_validations.validate(mock_session, mock_row) mock_validator.assert_called_once_with(mock_session, mock_row) self.assertFalse(valid) networking-mlnx-15.0.2/networking_mlnx/tests/unit/journal/test_maintenance.py0000644000413600001450000000636113575645017030003 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import threading import mock from neutron.tests.unit import testlib_api from neutron_lib.db import api as neutron_db_api from networking_mlnx.db.models import sdn_maintenance_db from networking_mlnx.journal import maintenance from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const class MaintenanceThreadTestCase(testlib_api.SqlTestCaseLight): def setUp(self): super(MaintenanceThreadTestCase, self).setUp() self.db_session = neutron_db_api.get_writer_session() row = sdn_maintenance_db.SdnMaintenance(state=sdn_const.PENDING) self.db_session.add(row) self.db_session.flush() self.thread = maintenance.MaintenanceThread() self.thread.maintenance_interval = 0.01 def test__execute_op_no_exception(self): with mock.patch.object(maintenance, 'LOG') as mock_log: operation = mock.MagicMock() operation.__name__ = "test" self.thread._execute_op(operation, self.db_session) self.assertTrue(operation.called) self.assertTrue(mock_log.info.called) self.assertFalse(mock_log.exception.called) def test__execute_op_with_exception(self): with mock.patch.object(maintenance, 'LOG') as mock_log: operation = mock.MagicMock(side_effect=Exception()) operation.__name__ = "test" self.thread._execute_op(operation, self.db_session) self.assertTrue(mock_log.exception.called) def test_thread_works(self): callback_event = threading.Event() count = [0] def callback_op(**kwargs): count[0] += 1 # The following should be true on the second call, so we're making # sure that the thread runs more than once. 
if count[0] > 1: callback_event.set() self.thread.register_operation(callback_op) self.thread.start() # Make sure the callback event was called and not timed out self.assertTrue(callback_event.wait(timeout=5)) def test_thread_continues_after_exception(self): exception_event = threading.Event() callback_event = threading.Event() def exception_op(**kwargs): if not exception_event.is_set(): exception_event.set() raise Exception() def callback_op(**kwargs): callback_event.set() for op in [exception_op, callback_op]: self.thread.register_operation(op) self.thread.start() # Make sure the callback event was called and not timed out self.assertTrue(callback_event.wait(timeout=5)) networking-mlnx-15.0.2/networking_mlnx/tests/unit/linux/0000755000413600001450000000000013575645772023577 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/linux/interface_drivers/0000755000413600001450000000000013575645772027275 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/linux/interface_drivers/__init__.py0000644000413600001450000000000013575645017031364 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/linux/interface_drivers/test_interface.py0000644000413600001450000004504413575645017032645 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.agent.linux import interface as n_interface from neutron.agent.linux import ip_lib as n_ip_lib from neutron_lib.utils import runtime from oslo_config import cfg from oslo_service import loopingcall from oslo_utils.fixture import uuidsentinel as uuids import six from networking_mlnx.linux.interface_drivers import constants from networking_mlnx.linux.interface_drivers import interface from networking_mlnx.linux.interface_drivers import network_cache from networking_mlnx.linux import ip_lib from networking_mlnx.tests import base network_db = { uuids.vlan_net: {'id': uuids.vlan_net, constants.SEGMENTATION_ID: "14", constants.PHYSICAL_NETWORK: "datacenter"}, uuids.flat_net: {'id': uuids.flat_net, constants.SEGMENTATION_ID: None, constants.PHYSICAL_NETWORK: "ib-physnet"}, uuids.vxlan_net: {'id': uuids.vxlan_net, constants.SEGMENTATION_ID: 900, constants.PHYSICAL_NETWORK: None} } class TestIPoIBInterfaceDriver(base.TestCase): def _get_networks_cb(self, filters=None, fields=None): # Assert calls to callback with single filter and single field self.assertEqual(['id'], list(filters.keys())) self.assertEqual(1, len(filters["id"])) self.assertEqual([constants.SEGMENTATION_ID], fields) net_id = filters["id"][0] return [network_db[net_id]] def setUp(self): super(TestIPoIBInterfaceDriver, self).setUp() self.root_dev = "ib0" cfg.CONF.set_override('ipoib_physical_interface', self.root_dev) with mock.patch.object( n_interface.LinuxInterfaceDriver, "__init__") as init_mock, \ mock.patch.object(n_ip_lib, "device_exists"): # TODO(adrianc): Bump test-requirements neutron version to Train # and remove the __init__ mock above. 
def custom_init(*args, **kwargs): pass init_mock.side_effect = custom_init self.driver = interface.IPoIBInterfaceDriver( cfg.CONF, get_networks_callback=self._get_networks_cb) def _mock_ipoib_wrapper(self): patcher = mock.patch.object(ip_lib, "IPoIBWrapper") ipoib_cls_mock = patcher.start() ip_mock_inst = mock.MagicMock() ipoib_cls_mock.return_value = ip_mock_inst self.addCleanup(patcher.stop) return ip_mock_inst def _test_plug_new(self, net_id, dev_name): ip_mock = self._mock_ipoib_wrapper() ip_dev_mock = mock.Mock() ip_mock.add_ipoib.return_value = ip_dev_mock return self.driver.plug_new(net_id, uuids.port_id, dev_name, None) ip_mock.add_ipoib.assert_called_with( dev_name, self.root_dev, int(network_db[uuids.uuids.vlan_net][constants.SEGMENTATION_ID])) ip_dev_mock.link.set_up.asset_called_once() def test_plug_new_vlan_network(self): self._test_plug_new(uuids.vlan_net, "my-ipoib-netdev") def test_plug_new_flat_network(self): self._test_plug_new(uuids.flat_net, "my-ipoib-netdev") @mock.patch("networking_mlnx.linux.interface_drivers.interface.LOG") def test_plug_new_ip_lib_raises(self, log_mock): ip_mock = self._mock_ipoib_wrapper() ip_mock.add_ipoib.side_effect = RuntimeError("Unexpected!") self.driver.plug_new( uuids.vlan_net, uuids.port_a, "my-ipoib-netdev", None) log_mock.error.assert_called_once() def test_unplug(self): ip_mock = self._mock_ipoib_wrapper() self.driver.unplug("my-ipoib-netdev", None, namespace="test-ns") ip_mock.del_ipoib.assert_called_with("my-ipoib-netdev") @mock.patch("networking_mlnx.linux.interface_drivers.interface.LOG") def test_unplug_ip_lib_raises(self, log_mock): ip_mock = self._mock_ipoib_wrapper() ip_mock.del_ipoib.side_effect = RuntimeError("Unexpected!") self.driver.unplug("my-ipoib-netdev") log_mock.error.assert_called_once() class TestMultiInterfaceDriver(base.TestCase): def _get_networks_cb(self, filters=None, fields=None): if 'id' in filters.keys(): net_id = filters["id"][0] return [network_db[net_id]] return 
[list(network_db.values())] def setUp(self): super(TestMultiInterfaceDriver, self).setUp() interface.MultiInterfaceDriver.network_cache = None interface.MultiInterfaceDriver._cache_init_lock = mock.MagicMock() self.fields = ['id', constants.PHYSICAL_NETWORK, constants.SEGMENTATION_ID] # TODO(adrianc): Bump test-requirements neutron version to Train # and remove the __init__ mock above. self.init_patcher = mock.patch.object( n_interface.LinuxInterfaceDriver, "__init__") self.init_patcher.start() def tearDown(self): super(TestMultiInterfaceDriver, self).tearDown() self.init_patcher.stop() def _get_mock_drivers(self): drivers = {'datacenter': mock.Mock(), 'ib-physnet': mock.Mock(), 'nil': mock.Mock()} drivers['datacenter']._interface_kind_ = 'openvswitch' drivers['ib-physnet']._interface_kind_ = 'ipoib' drivers['nil']._interface_kind_ = 'openvswitch' return drivers def _init_network_cache(self): get_networks_mock = mock.Mock() get_networks_mock.return_value = {"IfYouSeeThis": "ThenYouBrokeIt"} fields = ['id', constants.PHYSICAL_NETWORK, constants.SEGMENTATION_ID] interface.MultiInterfaceDriver.network_cache = ( network_cache.NetworkCache(get_networks_mock, fields)) for net in six.itervalues(network_db): interface.MultiInterfaceDriver.network_cache.put(net['id'], net) @mock.patch.object(loopingcall, "FixedIntervalLoopingCall") @mock.patch.object(network_cache, "SafeNetworkCache") def test__init_network_cache(self, net_cache_cls_mock, looping_mock): conf = mock.Mock() conf.enable_multi_interface_driver_cache_maintenance = False interface.MultiInterfaceDriver._init_network_cache( conf, self._get_networks_cb, self.fields) net_cache_cls_mock.assert_called_once_with(self._get_networks_cb, self.fields) self.assertFalse(looping_mock.called) # Make sure consecutive calls dont re-initialize network cache net_cache_cls_mock.reset_mock() interface.MultiInterfaceDriver._init_network_cache( conf, self._get_networks_cb, self.fields) self.assertFalse(net_cache_cls_mock.called) 
@mock.patch.object(loopingcall, "FixedIntervalLoopingCall") @mock.patch.object(network_cache, "SafeNetworkCache") def test__init_network_cache_with_cache_maintenance(self, net_cache_cls_mock, looping_mock): conf = mock.Mock() conf.enable_multi_interface_driver_cache_maintenance = True net_cache_mock = mock.Mock() net_cache_cls_mock.return_value = net_cache_mock loop_obj = mock.Mock() looping_mock.return_value = loop_obj interface.MultiInterfaceDriver._init_network_cache( conf, self._get_networks_cb, self.fields) looping_mock.assert_called_once_with( net_cache_mock.remove_stale_networks) loop_obj.start.called_once_with() # Make sure consecutive calls dont re-spawn cleanup thread looping_mock.reset_mock() loop_obj.start.reset_mock() looping_mock.return_value = loop_obj interface.MultiInterfaceDriver._init_network_cache( conf, self._get_networks_cb, self.fields) looping_mock.assert_not_called() loop_obj.start.assert_not_called() @mock.patch.object(interface.MultiInterfaceDriver, "_process_driver_obj") @mock.patch.object(runtime, 'load_class_by_alias_or_classname') def test_load_interface_driver_mappings(self, load_cls_mock, process_mock): loaded_intf_drivers = [] def load_cls(namespace, name): loaded_intf_drivers.append(name) return mock.Mock() load_cls_mock.side_effect = load_cls conf = mock.Mock() conf.multi_interface_driver_mappings = ( "physnetA:openvswitch,physnetB:ipoib") mapping = ( interface.MultiInterfaceDriver.load_interface_driver_mappings( conf, get_networks_callback=mock.Mock())) self.assertEqual(set(["physnetA", "physnetB"]), set(mapping.keys())) self.assertEqual(set(["openvswitch", "ipoib"]), set(loaded_intf_drivers)) def _check_drivers(self, first_driver_spec, second_driver_spec, should_raise=False): type, kind = first_driver_spec first_mock = mock.Mock(spec=type) first_mock._interface_kind_ = kind type, kind = second_driver_spec second_mock = mock.Mock(spec=type) second_mock._interface_kind_ = kind mapping = {"physnetA": first_mock, "physnetB": 
second_mock} if should_raise: self.assertRaises(SystemExit, interface.MultiInterfaceDriver._check_drivers, mapping) else: interface.MultiInterfaceDriver._check_drivers(mapping) def test__check_drivers_different_type(self): self._check_drivers((n_interface.OVSInterfaceDriver, 'openvswitch'), (interface.IPoIBInterfaceDriver, 'ipoib'), False) def test__check_drivers_same_type(self): self._check_drivers((n_interface.OVSInterfaceDriver, 'openvswitch'), (n_interface.OVSInterfaceDriver, 'openvswitch'), False) def test__check_drivers_raises(self): self._check_drivers((n_interface.BridgeInterfaceDriver, 'veth'), (n_interface.OVSInterfaceDriver, 'veth'), True) def _check_process_driver_obj_ovs(self, conf, expected_kind): ovs_driver = mock.Mock(spec=n_interface.OVSInterfaceDriver) ovs_driver.conf = conf ovs_driver = interface.MultiInterfaceDriver._process_driver_obj( ovs_driver) self.assertTrue(hasattr(ovs_driver, "_interface_kind_")) self.assertEqual(expected_kind, ovs_driver._interface_kind_) def test__process_driver_obj_ovs(self): conf = mock.Mock() conf.ovs_use_veth = False self._check_process_driver_obj_ovs(conf, "openvswitch") def test__process_driver_obj_ovs_veth(self): conf = mock.Mock() conf.ovs_use_veth = True self._check_process_driver_obj_ovs(conf, "veth") @mock.patch.object(n_ip_lib, "device_exists") def test__process_ipoib_driver_obj(self, dev_exist_mock): conf = mock.Mock() conf.ipoib_physical_interface = "ib0" ipoib_driver = interface.IPoIBInterfaceDriver( conf, get_networks_callback=mock.Mock()) ipoib_driver = interface.MultiInterfaceDriver._process_driver_obj( ipoib_driver) self.assertTrue(hasattr(ipoib_driver, "_interface_kind_")) self.assertEqual('ipoib', ipoib_driver._interface_kind_) def test__process_unknown_driver_obj(self): unknown_driver = mock.Mock(spec=['conf']) unknown_driver = interface.MultiInterfaceDriver._process_driver_obj( unknown_driver) self.assertTrue(hasattr(unknown_driver, "_interface_kind_")) self.assertEqual('unknown', 
unknown_driver._interface_kind_) def test__get_networks_from_cache_all_nets(self): self._init_network_cache() nets = interface.MultiInterfaceDriver._get_networks_from_cache( filters=None, fields=None) for net in nets: # assert net object are identical self.assertDictEqual(network_db[net['id']], net) # assert all objects where retrieved self.assertEqual(set(network_db.keys()), set( [net['id'] for net in nets])) def test__get_networks_from_cache_some_fields(self): self._init_network_cache() some_fields = ['id', constants.PHYSICAL_NETWORK] nets = interface.MultiInterfaceDriver._get_networks_from_cache( filters=None, fields=some_fields) for net in nets: # assert retrieved net obj contains only the requested fields # 'id' field self.assertIn('id', net) self.assertIn(net['id'], network_db) # 'provider:physical_network' field self.assertIn(constants.PHYSICAL_NETWORK, net) self.assertEqual( network_db[net['id']][constants.PHYSICAL_NETWORK], net[constants.PHYSICAL_NETWORK]) # No additional fields self.assertEqual(set(some_fields), set(net.keys())) # assert all networks were retrieved self.assertEqual(set(network_db.keys()), set( [net['id'] for net in nets])) def test__get_networks_from_cache_some_filters(self): self._init_network_cache() nets = interface.MultiInterfaceDriver._get_networks_from_cache( filters={'id': [uuids.vlan_net]}, fields=None) self.assertEqual(1, len(nets)) self.assertDictEqual(network_db[uuids.vlan_net], nets[0]) def test__get_networks_from_cache_invalid_field(self): self._init_network_cache() nets = interface.MultiInterfaceDriver._get_networks_from_cache( fields=['invalid']) self.assertEqual([], nets) def test__get_networks_from_cache_invalid_filter(self): self._init_network_cache() nets = interface.MultiInterfaceDriver._get_networks_from_cache( filters={'invalid': ['i', 'n', 'v', 'a', 'l', 'i', 'd']}) self.assertEqual([], nets) @mock.patch.object(n_ip_lib, 'IPDevice') @mock.patch.object(interface.MultiInterfaceDriver, '_init_network_cache') 
@mock.patch.object(interface.MultiInterfaceDriver, 'load_interface_driver_mappings') def test__get_driver_for_existing_interface(self, load_mapping_mock, init_net_cache_mock, ip_device_mock): drivers = self._get_mock_drivers() load_mapping_mock.return_value = drivers ipd_mock = mock.Mock() ipd_mock.link.link_kind = 'ipoib' ip_device_mock.return_value = ipd_mock multi_driver = interface.MultiInterfaceDriver(mock.Mock, None) driver = multi_driver._get_driver_for_existing_interface("dev-name") self.assertEqual(drivers['ib-physnet'], driver) # in case a matching driver is not found - ensure None is returned ipd_mock.link.link_kind = 'veth' driver = multi_driver._get_driver_for_existing_interface("dummy-dev") self.assertIsNone(driver) @mock.patch.object(n_ip_lib, 'device_exists') @mock.patch.object(interface.MultiInterfaceDriver, '_get_driver_for_existing_interface') @mock.patch.object(interface.MultiInterfaceDriver, '_init_network_cache') @mock.patch.object(interface.MultiInterfaceDriver, 'load_interface_driver_mappings') def _unplug_device(self, device_exists, load_mapping_mock, init_net_cache_mock, get_driver_mock, device_exists_mock): device_exists_mock.return_value = device_exists mock_driver = mock.Mock() get_driver_mock.return_value = mock_driver driver = interface.MultiInterfaceDriver(mock.Mock, None) driver.unplug("dummy-dev", namespace="dummy-ns") if device_exists: mock_driver.unplug.assert_called_once_with("dummy-dev", None, "dummy-ns", None) else: mock_driver.unplug.assert_not_called() def test_unplug_device_exist(self): self._unplug_device(True) def test_unplug_device_does_not_exist(self): self._unplug_device(False) @mock.patch.object(interface.MultiInterfaceDriver, '_get_driver_for_existing_interface') @mock.patch.object(interface.MultiInterfaceDriver, '_init_network_cache') @mock.patch.object(interface.MultiInterfaceDriver, 'load_interface_driver_mappings') def test_set_mtu(self, load_mapping_mock, init_net_cache_mock, get_driver_mock): driver_mock = 
mock.Mock() get_driver_mock.return_value = driver_mock driver = interface.MultiInterfaceDriver(mock.Mock, None) driver.set_mtu("dummy-dev", 1500, namespace="dummy-ns", prefix=None) driver_mock.set_mtu.assert_called_once_with( "dummy-dev", 1500, "dummy-ns", None) @mock.patch.object(interface.MultiInterfaceDriver, '_init_network_cache') @mock.patch.object(interface.MultiInterfaceDriver, 'load_interface_driver_mappings') def test_plug_new(self, load_mapping_mock, init_net_cache_mock): self._init_network_cache() driver = interface.MultiInterfaceDriver(mock.Mock, None) driver.drivers = self._get_mock_drivers() device_name = 'test-dev' mac = 'fa:18:64:2d:07:d5' ns = 'test-ns' # network with physnet driver.plug_new(uuids.vlan_net, uuids.vlan_port, device_name, mac, bridge=None, namespace=ns, prefix=None, mtu=None) driver.drivers['datacenter'].plug_new.assert_called_once_with( uuids.vlan_net, uuids.vlan_port, device_name, mac, None, ns, None, None) # network without physnet driver.plug_new(uuids.vxlan_net, uuids.vxlan_port, device_name, mac, bridge=None, namespace=ns, prefix=None, mtu=None) driver.drivers['nil'].plug_new.assert_called_once_with( uuids.vxlan_net, uuids.vxlan_port, device_name, mac, None, ns, None, None) networking-mlnx-15.0.2/networking_mlnx/tests/unit/linux/interface_drivers/test_network_cache.py0000644000413600001450000001326513575645017033521 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from oslo_concurrency import lockutils from oslo_utils.fixture import uuidsentinel as uuids from networking_mlnx.linux.interface_drivers import constants from networking_mlnx.linux.interface_drivers import network_cache from networking_mlnx.tests import base network_db = { uuids.obj_a: {'id': uuids.obj_a, 'attrA': 'a-attr', 'attrB': 'b-attr'}, uuids.obj_b: {'id': uuids.obj_b, 'attrA': 'a-attr', 'attrB': 'b-attr'}, uuids.obj_c: {'id': uuids.obj_c, 'attrA': 'a-attr', 'attrB': 'b-attr'}, } class TestSimpleCache(base.TestCase): def setUp(self): super(TestSimpleCache, self).setUp() self.cache = network_cache.SimpleCache() self._init_cache() def _init_cache(self): self.cache.object_cache = copy.deepcopy(network_db) def test_contains_operator(self): self.assertIn(uuids.obj_a, self.cache) self.assertNotIn(uuids.obj_non_existent, self.cache) self.assertNotIn(uuids.obj_non_existent, self.cache.object_cache) def test_get(self): obj = self.cache.get(uuids.obj_b) self.assertDictEqual(network_db[uuids.obj_b], obj) def test_get_non_existent_entry(self): self.assertIsNone(self.cache.get(uuids.obj_non_existent)) def test_get_all(self): objs = self.cache.get_all() objs_ids = set([obj['id'] for obj in objs]) self.assertEqual(objs_ids, set(network_db.keys())) for obj in objs: self.assertDictEqual(network_db[obj['id']], obj) def test_put(self): new_entry = {'id': uuids.new_obj, 'attrA': 'a-attr', 'attrB': 'b-attr'} self.cache.put(uuids.new_obj, new_entry) obj = self.cache.get(uuids.new_obj) self.assertDictEqual(new_entry, obj) def test_remove(self): self.cache.remove(uuids.obj_a) self.assertIsNone(self.cache.get(uuids.obj_a)) self.assertNotIn(uuids.obj_a, self.cache.object_cache) def test_clear(self): self.cache.clear() self.assertListEqual([], self.cache.get_all()) class TestNetworkCache(base.TestCase): def _get_networks_cb(self, filters, fields): if 'id' in filters: net_id = filters["id"][0] nets = [self.network_db[net_id]] else: nets = 
list(self.network_db.values()) return nets def setUp(self): super(TestNetworkCache, self).setUp() self.cb_mock = mock.Mock() self.cb_mock.side_effect = self._get_networks_cb self.network_fields = ['attrA', 'attrB', 'id'] self.cache = network_cache.NetworkCache( self.cb_mock, self.network_fields) self.network_db = copy.deepcopy(network_db) def test_get(self): self.cache.put(uuids.obj_a, self.network_db[uuids.obj_a]) obj = self.cache.get(uuids.obj_a) self.assertDictEqual(self.network_db[uuids.obj_a], obj) def test_get_with_miss(self): self.cache.put(uuids.obj_a, self.network_db[uuids.obj_a]) obj = self.cache.get(uuids.obj_b) self.assertTrue(self.cb_mock.called) self.assertDictEqual(self.network_db[uuids.obj_b], obj) def test_refresh(self): self.cache.refresh() self.cb_mock.assert_called_once_with( filters={constants.ADMIN_STATE_UP: [True]}, fields=self.network_fields) objs = self.cache.get_all() self.assertEqual( set(self.network_db.keys()), set([obj['id'] for obj in objs])) def test_remove_stale_networks(self): self.cache.put(uuids.obj_a, self.network_db[uuids.obj_a]) self.cache.put(uuids.obj_b, self.network_db[uuids.obj_b]) del self.network_db[uuids.obj_a] self.cache.remove_stale_networks() self.assertNotIn(uuids.obj_a, self.cache) self.assertIn(uuids.obj_b, self.cache) class TestSafeNetworkCache(TestNetworkCache): def setUp(self): super(TestSafeNetworkCache, self).setUp() with mock.patch.object(lockutils, 'ReaderWriterLock') as rw_cls: self.rw_lock_mock = rw_cls.return_value = mock.MagicMock() self.cache = network_cache.SafeNetworkCache( self.cb_mock, ['attrA', 'attrB']) def test_get(self): self.cache.get(uuids.obj_a) self.assertTrue(self.rw_lock_mock.read_lock.called) def test_get_all(self): self.cache.get_all() self.assertTrue(self.rw_lock_mock.read_lock.called) def test_put(self): self.cache.put(uuids.new_obj_id, {}) self.assertTrue(self.rw_lock_mock.write_lock.called) def test_remove(self): self.cache.remove(uuids.obj_a) 
self.assertTrue(self.rw_lock_mock.write_lock.called) def test_clear(self): self.cache.clear() self.assertTrue(self.rw_lock_mock.write_lock.called) def test_refresh(self): self.cache.refresh() self.assertTrue(self.rw_lock_mock.write_lock.called) def test_cache_miss_with_real_lock(self): # re-instantiate cache with a real lock self.cache = self.cache = network_cache.SafeNetworkCache( self.cb_mock, ['attrA', 'attrB']) self.test_get_with_miss() networking-mlnx-15.0.2/networking_mlnx/tests/unit/linux/__init__.py0000644000413600001450000000000013575645017025666 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/linux/test_ip_lib.py0000644000413600001450000000543413575645017026444 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.privileged.agent.linux import ip_lib as n_privileged from networking_mlnx.linux import constants from networking_mlnx.linux import ip_lib from networking_mlnx.tests import base class TestIPoIBWrapper(base.TestCase): def setUp(self): super(TestIPoIBWrapper, self).setUp() self.execute_p = mock.patch.object(ip_lib.IPoIBWrapper, '_execute') self.execute = self.execute_p.start() self.namespace = "test-ns" self.ipoib = ip_lib.IPoIBWrapper(namespace=self.namespace) def test_segmentation_id_to_pkey_default(self): pkey = self.ipoib._segmentation_id_to_pkey(None) self.assertEqual(constants.DEFAULT_PKEY, pkey) pkey = self.ipoib._segmentation_id_to_pkey(0) self.assertEqual(constants.DEFAULT_PKEY, pkey) def test_segmentation_id_to_pkey_int_val(self): seg_id = 4 pkey = self.ipoib._segmentation_id_to_pkey(seg_id) self.assertEqual(seg_id, pkey) def test_segmentation_id_to_pkey_str_val(self): seg_id = "14" pkey = self.ipoib._segmentation_id_to_pkey(seg_id) self.assertEqual(int(seg_id), pkey) def test_del_ipoib(self): with mock.patch.object( n_privileged, 'delete_interface') as del_ifc_mock: self.ipoib.del_ipoib("ipoib0") del_ifc_mock.assert_called_with("ipoib0", self.namespace) def test_add_ipoib(self): with mock.patch.object( n_privileged, 'create_interface') as create_ifc_mock, \ mock.patch.object( n_privileged, 'set_link_attribute') as set_link_attr_mock: name = "ipoib0" src_dev = "ib0" pkey = 0x4 ipdev = self.ipoib.add_ipoib(name, src_dev, pkey) # Assert device created in default namespace create_ifc_mock.assert_called_with( name, None, 'ipoib', physical_interface=src_dev, pkey=pkey) # Assert device moved to namespace set_link_attr_mock.assert_called_with( name, None, net_ns_fd=self.namespace) self.assertEqual(name, ipdev.name) self.assertEqual(self.namespace, ipdev.namespace) networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/0000755000413600001450000000000013575645772023132 5ustar 
lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/0000755000413600001450000000000013575645772024610 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/mlnx/0000755000413600001450000000000013575645772025566 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/mlnx/__init__.py0000644000413600001450000000000013566516770027660 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py0000644000413600001450000001451713575645017031151 0ustar lennybmtl00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base from neutron.tests.unit.plugins.ml2 import test_plugin from neutron_lib.api.definitions import portbindings from neutron_lib import context from neutron_lib.plugins.ml2 import api from oslo_utils import uuidutils from networking_mlnx.plugins.ml2.drivers.mlnx import mech_mlnx class MlnxMechanismBaseTestCase(base.AgentMechanismBaseTestCase): VIF_TYPE = mech_mlnx.VIF_TYPE_IB_HOSTDEV CAP_PORT_FILTER = False AGENT_TYPE = mech_mlnx.AGENT_TYPE_MLNX VNIC_TYPE = portbindings.VNIC_DIRECT GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS} BAD_MAPPINGS = {'wrong_physical_network': 'wrong_bridge'} BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS} AGENTS = [{'alive': True, 'configurations': GOOD_CONFIGS, 'host': 'host'}] AGENTS_DEAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'dead_host'}] AGENTS_BAD = [{'alive': False, 'configurations': GOOD_CONFIGS, 'host': 'bad_host_1'}, {'alive': True, 'configurations': BAD_CONFIGS, 'host': 'bad_host_2'}] def setUp(self): super(MlnxMechanismBaseTestCase, self).setUp() self.driver = mech_mlnx.MlnxMechanismDriver() self.driver.initialize() class MlnxMechanismGenericTestCase(MlnxMechanismBaseTestCase, base.AgentMechanismGenericTestCase): pass class MlnxMechanismLocalTestCase(MlnxMechanismBaseTestCase, base.AgentMechanismLocalTestCase): pass class MlnxMechanismFlatTestCase(MlnxMechanismBaseTestCase, base.AgentMechanismFlatTestCase): pass class MlnxMechanismVlanTestCase(MlnxMechanismBaseTestCase, base.AgentMechanismVlanTestCase): pass class MlnxMechanismVifDetailsTestCase(MlnxMechanismBaseTestCase): def setUp(self): super(MlnxMechanismVifDetailsTestCase, self).setUp() def test_vif_details_contains_physical_net(self): VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234}] _context = 
base.FakePortContext(self.AGENT_TYPE, self.AGENTS, VLAN_SEGMENTS, portbindings.VNIC_DIRECT) segment = VLAN_SEGMENTS[0] agent = self.AGENTS[0] self.driver.try_to_bind_segment_for_agent(_context, segment, agent) set({"physical_network": "fake_physical_network"}).issubset( set(_context._bound_vif_details.items())) class FakeContext(base.FakePortContext): def __init__(self, agent_type, agents, segments, vnic_type=portbindings.VNIC_NORMAL, original=None, current=None): super(FakeContext, self).__init__(agent_type, agents, segments, vnic_type) self._original = original self._current = current self._plugin_context = context.Context('', 'test-tenant') @property def current(self): if self._current: return self._current return super(FakeContext, self).current @current.setter def set_current(self, value): self._current = value @property def original(self): return self._original @original.setter def set_original(self, value): self._original = value class MlnxMechanismIbPortTestCase(MlnxMechanismBaseTestCase, test_plugin.Ml2PluginV2TestCase): mechanism_drivers = ['mlnx_infiniband'] expected_client_id = ( "ff:00:00:00:00:00:02:00:00:02:c9:00:01:23:45:00:00:67:89:ab") def setUp(self): super(MlnxMechanismIbPortTestCase, self).setUp() def _get_context(self): VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id', api.NETWORK_TYPE: 'vlan', api.PHYSICAL_NETWORK: 'fake_physical_network', api.SEGMENTATION_ID: 1234}] original_context = {'id': uuidutils.generate_uuid(), 'binding:host_id': None} current_context = {'id': uuidutils.generate_uuid(), 'binding:host_id': 'host1', 'mac_address': '01:23:45:67:89:ab', 'binding:vnic_type': portbindings.VNIC_DIRECT, 'device_owner': 'compute'} return FakeContext(self.AGENT_TYPE, self.AGENTS, VLAN_SEGMENTS, portbindings.VNIC_DIRECT, original=original_context, current=current_context) def test_precommit_same_host_id(self): _context = self._get_context() with mock.patch('neutron_lib.plugins.directory.get_plugin'): self.driver.update_port_precommit(_context) 
self.assertIsNotNone(_context.current.get('extra_dhcp_opts')) self.assertEqual(self.expected_client_id, _context.current['extra_dhcp_opts'][0]['opt_value']) def test_percommit_migrete_port(self): _context = self._get_context() _context.current['binding:host_id'] = 'host2' with mock.patch('neutron_lib.plugins.directory.get_plugin'): self.driver.update_port_precommit(_context) self.assertIsNotNone(_context.current.get('extra_dhcp_opts')) networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mlnx_comm_utils.py0000644000413600001450000001105313575645017032400 0ustar lennybmtl00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.tests import base from oslo_config import cfg from networking_mlnx.plugins.ml2.drivers.mlnx.agent import comm_utils from networking_mlnx.plugins.ml2.drivers.mlnx.agent import config # noqa from networking_mlnx.plugins.ml2.drivers.mlnx.agent import exceptions class WrongException(Exception): pass class TestRetryDecorator(base.BaseTestCase): def setUp(self): super(TestRetryDecorator, self).setUp() self.sleep_fn_p = mock.patch("time.sleep") self.sleep_fn = self.sleep_fn_p.start() def test_no_retry_required(self): self.counter = 0 @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval=2, retries=3, backoff_rate=2) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(self.sleep_fn.called) self.assertEqual(ret, 'success') self.assertEqual(self.counter, 1) def test_retry_zero_times(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 0 @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval, retries, backoff_rate) def always_fails(): self.counter += 1 raise exceptions.RequestTimeout() self.assertRaises(exceptions.RequestTimeout, always_fails) self.assertEqual(self.counter, 1) self.assertFalse(self.sleep_fn.called) def test_retries_once(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval, retries, backoff_rate) def fails_once(): self.counter += 1 if self.counter < 2: raise exceptions.RequestTimeout() else: return 'success' ret = fails_once() self.assertEqual(ret, 'success') self.assertEqual(self.counter, 2) self.assertEqual(self.sleep_fn.call_count, 1) self.sleep_fn.assert_called_with(interval) def test_limit_is_reached(self): self.counter = 0 retries = 3 interval = 2 backoff_rate = 4 @comm_utils.RetryDecorator(exceptions.RequestTimeout, interval, retries, backoff_rate) def always_fails(): self.counter += 1 raise exceptions.RequestTimeout() self.assertRaises(exceptions.RequestTimeout, always_fails) 
self.assertEqual(self.counter, retries + 1) self.assertEqual(self.sleep_fn.call_count, retries) expected_sleep_fn_arg = [] for i in range(retries): expected_sleep_fn_arg.append(interval) interval *= backoff_rate self.sleep_fn.assert_has_calls(map(mock.call, expected_sleep_fn_arg)) def test_limit_is_reached_with_conf(self): self.counter = 0 @comm_utils.RetryDecorator(exceptions.RequestTimeout) def always_fails(): self.counter += 1 raise exceptions.RequestTimeout() retry = cfg.CONF.ESWITCH.retries interval = cfg.CONF.ESWITCH.request_timeout / 1000 delay_rate = cfg.CONF.ESWITCH.backoff_rate expected_sleep_fn_arg = [] for i in range(retry): expected_sleep_fn_arg.append(interval) interval *= delay_rate self.assertRaises(exceptions.RequestTimeout, always_fails) self.assertEqual(self.counter, retry + 1) self.assertEqual(self.sleep_fn.call_count, retry) self.sleep_fn.assert_has_calls(map(mock.call, expected_sleep_fn_arg)) def test_wrong_exception_no_retry(self): @comm_utils.RetryDecorator(exceptions.RequestTimeout) def raise_unexpected_error(): raise WrongException("wrong exception") self.assertRaises(WrongException, raise_unexpected_error) self.assertFalse(self.sleep_fn.called) networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mlnx_neutron_agent.py0000644000413600001450000002510213575645041033072 0ustar lennybmtl00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.tests import base from oslo_config import cfg import testtools from networking_mlnx.plugins.ml2.drivers.mlnx.agent import ( mlnx_eswitch_neutron_agent) from networking_mlnx.plugins.ml2.drivers.mlnx.agent import exceptions from networking_mlnx.plugins.ml2.drivers.mlnx.agent import utils class TestEswichManager(base.BaseTestCase): def setUp(self): super(TestEswichManager, self).setUp() class MockEswitchUtils(object): def __init__(self, endpoint, timeout): pass mock.patch('networking_mlnx.plugins.mlnx.agent.utils.EswitchManager', new=MockEswitchUtils) with mock.patch.object(utils, 'zmq'): self.manager = mlnx_eswitch_neutron_agent.EswitchManager( {}, None, None) def test_get_not_exist_port_id(self): with testtools.ExpectedException(exceptions.MlnxException): self.manager.get_port_id_by_mac('no-such-mac') class TestMlnxEswitchRpcCallbacks(base.BaseTestCase): def setUp(self): super(TestMlnxEswitchRpcCallbacks, self).setUp() agent = mock.Mock() self.rpc_callbacks = \ mlnx_eswitch_neutron_agent.MlnxEswitchRpcCallbacks( 'context', agent, agent) def test_port_update(self): port = {'mac_address': '10:20:30:40:50:60'} add_port_update = self.rpc_callbacks.agent.add_port_update self.rpc_callbacks.port_update('context', port=port) add_port_update.assert_called_once_with(port['mac_address']) class TestEswitchAgent(base.BaseTestCase): def setUp(self): super(TestEswitchAgent, self).setUp() cfg.CONF.set_default('firewall_driver', 'noop', group='SECURITYGROUP') class MockFixedIntervalLoopingCall(object): def __init__(self, f): self.f = f def start(self, interval=0): self.f() mock.patch('neutron.openstack.common.loopingcall.' 
'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall) with mock.patch.object(utils, 'zmq'): self.agent = mlnx_eswitch_neutron_agent.MlnxEswitchNeutronAgent( {}) self.agent.plugin_rpc = mock.Mock() self.agent.context = mock.Mock() self.agent.agent_id = mock.Mock() self.agent.eswitch = mock.Mock() self.agent.eswitch.get_vnics_mac.return_value = [] def test_treat_devices_added_returns_true_for_missing_device(self): attrs = {'get_devices_details_list.side_effect': Exception()} self.agent.plugin_rpc.configure_mock(**attrs) with mock.patch('networking_mlnx.plugins.ml2.drivers.mlnx.agent.' 'mlnx_eswitch_neutron_agent.EswitchManager.' 'get_vnics_mac', return_value=[]): self.assertTrue(self.agent.treat_devices_added_or_updated([{}])) def _mock_treat_devices_added_updated(self, details, func_name): """Mock treat devices added. :param details: the details to return for the device :param func_name: the function that should be called :returns: whether the named function was called """ with mock.patch('networking_mlnx.plugins.ml2.drivers.mlnx.agent.' 'mlnx_eswitch_neutron_agent.EswitchManager.' 
'get_vnics_mac', return_value=[]),\ mock.patch.object(self.agent.plugin_rpc, 'get_devices_details_list', return_value=[details]),\ mock.patch.object(self.agent.plugin_rpc, 'update_device_up') as upd_dev_up,\ mock.patch.object(self.agent.plugin_rpc, 'update_device_down') as upd_dev_down,\ mock.patch.object(self.agent, func_name) as func: self.assertFalse(self.agent.treat_devices_added_or_updated([{}])) return (func.called, upd_dev_up.called, upd_dev_down.called) def test_treat_devices_added_updates_known_port(self): details = mock.MagicMock() details.__contains__.side_effect = lambda x: True func, dev_up, dev_down = self._mock_treat_devices_added_updated( details, 'treat_vif_port') self.assertTrue(func) self.assertTrue(dev_up) def test_treat_devices_added_updates_known_port_admin_down(self): details = {'device': '01:02:03:04:05:06', 'network_id': '123456789', 'network_type': 'vlan', 'physical_network': 'default', 'segmentation_id': 2, 'admin_state_up': False} func, dev_up, dev_down = self._mock_treat_devices_added_updated( details, 'treat_vif_port') self.assertFalse(func) self.assertFalse(dev_up) self.assertTrue(dev_down) def test_treat_devices_added_updates_known_port_admin_up(self): details = {'port_id': '1234567890', 'device': '01:02:03:04:05:06', 'network_id': '123456789', 'network_type': 'vlan', 'physical_network': 'default', 'segmentation_id': 2, 'admin_state_up': False} func, dev_up, dev_down = self._mock_treat_devices_added_updated( details, 'treat_vif_port') self.assertTrue(func) self.assertTrue(dev_up) self.assertFalse(dev_down) def test_treat_devices_removed_returns_true_for_missing_device(self): with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', side_effect=Exception()): self.assertTrue(self.agent.treat_devices_removed([{}])) def test_treat_devices_removed_releases_port(self): details = dict(exists=False) with mock.patch.object(self.agent.plugin_rpc, 'update_device_down', return_value=details): with mock.patch.object(self.agent.eswitch, 
'port_release') as port_release: self.assertFalse(self.agent.treat_devices_removed([{}])) self.assertTrue(port_release.called) def _test_process_network_ports(self, port_info): with mock.patch.object(self.agent, 'treat_devices_added_or_updated', return_value=False) as device_added_updated,\ mock.patch.object(self.agent, 'treat_devices_removed', return_value=False) as device_removed: self.assertFalse(self.agent.process_network_ports(port_info)) device_added_updated.assert_called_once_with( port_info['added'] | port_info['updated']) device_removed.assert_called_once_with(port_info['removed']) def test_process_network_ports(self): self._test_process_network_ports( {'current': set(['10:20:30:40:50:60']), 'updated': set(), 'added': set(['11:21:31:41:51:61']), 'removed': set(['13:23:33:43:53:63'])}) def test_process_network_ports_with_updated_ports(self): self._test_process_network_ports( {'current': set(['10:20:30:40:50:60']), 'updated': set(['12:22:32:42:52:62']), 'added': set(['11:21:31:41:51:61']), 'removed': set(['13:23:33:43:53:63'])}) def test_add_port_update(self): mac_addr = '10:20:30:40:50:60' self.agent.add_port_update(mac_addr) self.assertEqual(set([mac_addr]), self.agent.updated_ports) def _mock_scan_ports(self, vif_port_set, previous, updated_ports, sync=False): self.agent.updated_ports = updated_ports with mock.patch.object(self.agent.eswitch, 'get_vnics_mac', return_value=vif_port_set): return self.agent.scan_ports(previous, sync) def test_scan_ports_return_current_for_unchanged_ports(self): vif_port_set = set([1, 2]) previous = dict(current=set([1, 2]), added=set(), removed=set(), updated=set()) expected = dict(current=vif_port_set, added=set(), removed=set(), updated=set()) actual = self._mock_scan_ports(vif_port_set, previous, set()) self.assertEqual(expected, actual) def test_scan_ports_return_port_changes(self): vif_port_set = set([1, 3]) previous = dict(current=set([1, 2]), added=set(), removed=set(), updated=set()) expected = 
dict(current=vif_port_set, added=set([3]), removed=set([2]), updated=set()) actual = self._mock_scan_ports(vif_port_set, previous, set()) self.assertEqual(expected, actual) def test_scan_ports_with_updated_ports(self): vif_port_set = set([1, 3, 4]) previous = dict(current=set([1, 2, 4]), added=set(), removed=set(), updated=set()) expected = dict(current=vif_port_set, added=set([3]), removed=set([2]), updated=set([4])) actual = self._mock_scan_ports(vif_port_set, previous, set([4])) self.assertEqual(expected, actual) def test_scan_ports_with_unknown_updated_ports(self): vif_port_set = set([1, 3, 4]) previous = dict(current=set([1, 2, 4]), added=set(), removed=set(), updated=set()) expected = dict(current=vif_port_set, added=set([3]), removed=set([2]), updated=set([4])) actual = self._mock_scan_ports(vif_port_set, previous, updated_ports=set([4, 5])) self.assertEqual(expected, actual) networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/sdn/0000755000413600001450000000000013575645772025374 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/sdn/__init__.py0000644000413600001450000000000013566516770027466 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/sdn/test_client.py0000644000413600001450000001505513575645041030256 0ustar lennybmtl00000000000000# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_config import cfg from oslo_config import fixture as fixture_config from networking_mlnx.plugins.ml2.drivers.sdn import client from networking_mlnx.plugins.ml2.drivers.sdn import config from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const from networking_mlnx.tests import base class TestClient(base.TestCase): def setUp(self): super(TestClient, self).setUp() self.conf_fixture = self.useFixture(fixture_config.Config()) self.conf = self.conf_fixture.conf self._set_args() self.client = client.SdnRestClient.create_client() self.url = 'some_url' self.data = {'some': 'data'} def _set_args(self): self.conf.register_opts(config.sdn_opts, sdn_const.GROUP_OPT) for arg in client.SdnRestClient.MANDATORY_ARGS: self.conf.set_override(name=arg, override="http://some_val", group=sdn_const.GROUP_OPT) def test_mandatory_args(self): mandatory_arg_objects = filter( lambda obj: obj.name in client.SdnRestClient.MANDATORY_ARGS, config.sdn_opts) for arg in mandatory_arg_objects: self._set_args() self.conf.unregister_opt(opt=arg, group=sdn_const.GROUP_OPT) self.assertRaises(cfg.NoSuchOptError, client.SdnRestClient.create_client) @mock.patch('networking_mlnx.plugins.ml2.drivers.' 'sdn.client.SdnRestClient.request') def test_get(self, mocked_request): self.client.get(self.url, self.data) expected_url = '/'.join((self.conf.sdn.url, self.url)) mocked_request.assert_called_once_with(sdn_const.GET, expected_url, self.data) mocked_request.reset_mock() self.client.get(self.url) mocked_request.assert_called_once_with(sdn_const.GET, expected_url, None) mocked_request.reset_mock() self.client.get() mocked_request.assert_called_once_with(sdn_const.GET, self.conf.sdn.url, None) @mock.patch('networking_mlnx.plugins.ml2.drivers.' 
'sdn.client.SdnRestClient.request') def test_put(self, mocked_request): self.client.put(self.url, self.data) expected_url = '/'.join((self.conf.sdn.url, self.conf.sdn.domain, self.url)) mocked_request.assert_called_once_with(sdn_const.PUT, expected_url, self.data) mocked_request.reset_mock() self.client.put(self.url) mocked_request.assert_called_once_with(sdn_const.PUT, expected_url, None) mocked_request.reset_mock() self.client.put() expected_url = '/'.join((self.conf.sdn.url, self.conf.sdn.domain)) mocked_request.assert_called_once_with(sdn_const.PUT, expected_url, None) @mock.patch('networking_mlnx.plugins.ml2.drivers.' 'sdn.client.SdnRestClient.request') def test_post(self, mocked_request): self.client.post(self.url, self.data) expected_url = '/'.join((self.conf.sdn.url, self.conf.sdn.domain, self.url)) mocked_request.assert_called_once_with(sdn_const.POST, expected_url, self.data) mocked_request.reset_mock() self.client.post(self.url) mocked_request.assert_called_once_with(sdn_const.POST, expected_url, None) mocked_request.reset_mock() self.client.post() expected_url = '/'.join((self.conf.sdn.url, self.conf.sdn.domain)) mocked_request.assert_called_once_with(sdn_const.POST, expected_url, None) @mock.patch('networking_mlnx.plugins.ml2.drivers.' 'sdn.client.SdnRestClient.request') def test_delete(self, mocked_request): self.client.delete(self.url, self.data) expected_url = '/'.join((self.conf.sdn.url, self.conf.sdn.domain, self.url)) mocked_request.assert_called_once_with(sdn_const.DELETE, expected_url, self.data) mocked_request.reset_mock() self.client.delete(self.url) mocked_request.assert_called_once_with(sdn_const.DELETE, expected_url, None) mocked_request.reset_mock() self.client.delete() expected_url = '/'.join((self.conf.sdn.url, self.conf.sdn.domain)) mocked_request.assert_called_once_with(sdn_const.DELETE, expected_url, None) @mock.patch('networking_mlnx.plugins.ml2.drivers.' 
'sdn.client.SdnRestClient._get_session', return_value=mock.Mock()) def test_request_bad_data(self, mocked_get_session): # non serialized json data data = self self.assertRaises(ValueError, self.client.request, sdn_const.DELETE, '', data) networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/sdn/test_mechanism_sdn.py0000644000413600001450000006331613575645041031613 0ustar lennybmtl00000000000000# Copyright 2015 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock import requests from neutron.plugins.ml2 import plugin from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit import testlib_api from neutron_lib import constants from neutron_lib import context as nl_context from oslo_config import cfg from oslo_config import fixture as fixture_config from oslo_serialization import jsonutils from oslo_utils import uuidutils from networking_mlnx.db import db from networking_mlnx.journal import cleanup from networking_mlnx.journal import journal from networking_mlnx.plugins.ml2.drivers.sdn import client from networking_mlnx.plugins.ml2.drivers.sdn import config from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const from networking_mlnx.plugins.ml2.drivers.sdn import sdn_mech_driver from networking_mlnx.plugins.ml2.drivers.sdn import utils as sdn_utils PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' SEG_ID = 4 DEVICE_OWNER_COMPUTE = 'compute:None' MECHANISM_DRIVER_NAME = 
'mlnx_sdn_assist' cfg.CONF.import_group(sdn_const.GROUP_OPT, 'networking_mlnx.plugins.ml2.drivers.sdn') class SdnConfigBase(test_plugin.Ml2PluginV2TestCase): def setUp(self): super(SdnConfigBase, self).setUp() self.conf_fixture = self.useFixture(fixture_config.Config()) self.conf = self.conf_fixture.conf self.conf.register_opts(config.sdn_opts, sdn_const.GROUP_OPT) self.conf.set_override('mechanism_drivers', ['logger', MECHANISM_DRIVER_NAME], 'ml2') self.conf.set_override('url', 'http://127.0.0.1/neo', sdn_const.GROUP_OPT) self.conf.set_override('username', 'admin', sdn_const.GROUP_OPT) class SdnTestCase(SdnConfigBase): def setUp(self): super(SdnTestCase, self).setUp() self.mech = sdn_mech_driver.SDNMechanismDriver() mock.patch.object(journal.SdnJournalThread, 'start_sync_thread').start() self.mock_request = mock.patch.object(client.SdnRestClient, 'request').start() self.mock_request.side_effect = self.check_request def check_request(self, method, urlpath, obj): self.assertFalse(urlpath.startswith("http://")) class SdnMechanismConfigTests(testlib_api.SqlTestCase): def _set_config(self, url='http://127.0.0.1/neo', username='admin', password='123456', sync_enabled=True): self.conf_fixture = self.useFixture(fixture_config.Config()) self.conf = self.conf_fixture.conf self.conf.register_opts(config.sdn_opts, sdn_const.GROUP_OPT) self.conf.set_override('mechanism_drivers', ['logger', MECHANISM_DRIVER_NAME], 'ml2') self.conf.set_override('url', url, sdn_const.GROUP_OPT) self.conf.set_override('username', username, sdn_const.GROUP_OPT) self.conf.set_override('password', password, sdn_const.GROUP_OPT) self.conf.set_override('sync_enabled', sync_enabled, sdn_const.GROUP_OPT) def _test_missing_config(self, **kwargs): self._set_config(**kwargs) self.assertRaises(cfg.RequiredOptError, plugin.Ml2Plugin) def test_valid_config(self): self._set_config() plugin.Ml2Plugin() def test_missing_url_raises_exception(self): self._test_missing_config(url=None) def 
test_missing_username_raises_exception(self): self._test_missing_config(username=None) def test_missing_password_raises_exception(self): self._test_missing_config(password=None) def test_missing_config_ok_when_disabled(self): self._set_config(url=None, username=None, password=None, sync_enabled=False) plugin.Ml2Plugin() class SdnMechanismTestBasicGet(test_plugin.TestMl2BasicGet, SdnTestCase): pass class SdnMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2, SdnTestCase): pass class SdnMechanismTestPortsV2(test_plugin.TestMl2PortsV2, SdnTestCase): pass class DataMatcher(object): def __init__(self, context, object_type): self._data = context.__dict__["_" + object_type.lower()] def __eq__(self, data): return jsonutils.loads(data) == self._data def __repr__(self): return jsonutils.dumps(self._data) class SdnDriverTestCase(SdnConfigBase): OPERATION_MAPPING = { sdn_const.PUT: 'update', sdn_const.DELETE: 'delete', sdn_const.POST: 'create', } def setUp(self): super(SdnDriverTestCase, self).setUp() context = nl_context.get_admin_context() self.db_session = context.session self.mech = sdn_mech_driver.SDNMechanismDriver() self.mock_sync_thread = mock.patch.object( journal.SdnJournalThread, 'start_sync_thread').start() self.mech.initialize() self.thread = journal.SdnJournalThread() self.addCleanup(self._db_cleanup) def _get_segments_list(self, seg_id=SEG_ID, net_type=constants.TYPE_VLAN): return [{'segmentation_id': seg_id, 'physical_network': u'physnet1', 'id': u'c13bba05-eb07-45ba-ace2-765706b2d701', 'network_type': net_type}] def _get_mock_network_operation_context(self): current = {"provider:segmentation_id": SEG_ID, 'id': 'c13bba05-eb07-45ba-ace2-765706b2d701', 'name': 'net1', 'provider:network_type': 'vlan', 'network_qos_policy': None} context = mock.Mock(current=current, _network=current, _segments=self._get_segments_list(), network_segments=self._get_segments_list()) context._plugin_context.session = ( nl_context.get_admin_context().session) return context def 
_get_mock_port_operation_context(self): current = {'binding:host_id': 'r-ufm177', 'binding:profile': {u'pci_slot': u'0000:02:00.4', u'physical_network': u'physnet1', u'pci_vendor_info': u'15b3:1004'}, 'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839', 'binding:vnic_type': 'direct', 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': '12:34:56:78:21:b6', 'name': 'port_test1', 'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701', 'network_qos_policy': None} original = {'binding:host_id': None, 'binding:profile': {u'pci_slot': None, u'physical_network': u'physnet1', u'pci_vendor_info': u'15b3:1004'}, 'id': None, 'binding:vnic_type': None, 'device_owner': DEVICE_OWNER_COMPUTE, 'mac_address': None, 'name': None, 'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701', 'network_qos_policy': None} # The port context should have NetwrokContext object that contain # the segments list network_context = type('NetworkContext', (object,), {"network_segments": self._get_segments_list()}) context = mock.Mock(current=current, _port=current, original=original, network=network_context) context._plugin_context.session = ( nl_context.get_admin_context().session) return context def _get_mock_bind_operation_context(self): current = {'binding:host_id': 'r-ufm177', 'binding:profile': {u'pci_slot': u'0000:02:00.4', u'physical_network': u'physnet1', u'pci_vendor_info': u'15b3:1004'}, 'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839', 'binding:vnic_type': 'direct', 'mac_address': '12:34:56:78:21:b6', 'name': 'port_test1', 'device_owner': DEVICE_OWNER_COMPUTE, 'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701', 'network_qos_policy': None} # The port context should have NetwrokContext object that contain # the segments list network_context = type('NetworkContext', (object,), {"network_segments": self._get_segments_list()}) context = mock.Mock(current=current, _port=current, segments_to_bind=self._get_segments_list(), network=network_context) context._plugin_context.session = ( 
nl_context.get_admin_context().session) return context def _get_mock_operation_context(self, object_type): getter = getattr(self, '_get_mock_%s_operation_context' % object_type.lower()) return getter() _status_code_msgs = { 200: '', 201: '', 204: '', 400: '400 Client Error: Bad Request', 401: '401 Client Error: Unauthorized', 403: '403 Client Error: Forbidden', 404: '404 Client Error: Not Found', 409: '409 Client Error: Conflict', 501: '501 Server Error: Not Implemented', 503: '503 Server Error: Service Unavailable', } def _get_http_request_codes(self): for err_code in (requests.codes.ok, requests.codes.created, requests.codes.no_content, requests.codes.bad_request, requests.codes.unauthorized, requests.codes.forbidden, requests.codes.not_found, requests.codes.conflict, requests.codes.not_implemented, requests.codes.service_unavailable): yield err_code def _db_cleanup(self): rows = db.get_all_db_rows(self.db_session) for row in rows: db.delete_row(self.db_session, row=row) @classmethod def _get_mock_request_response(cls, status_code, job_url): response = mock.Mock(status_code=status_code) if status_code < 400: response.raise_for_status = mock.Mock() response.json = mock.Mock( side_effect=[job_url, {"Status": "Completed"}]) else: mock.Mock(side_effect=requests.exceptions.HTTPError( cls._status_code_msgs[status_code])) return response def _test_operation(self, method, status_code, expected_calls, *args, **kwargs): job_url = 'app/jobs/' + uuidutils.generate_uuid() urlpath = sdn_utils.strings_to_url( cfg.CONF.sdn.url, job_url) request_response = self._get_mock_request_response( status_code, job_url) if expected_calls == 4 and status_code < 400: job_url2 = 'app/jobs/' + uuidutils.generate_uuid() urlpath2 = sdn_utils.strings_to_url( cfg.CONF.sdn.url, job_url) request_response.json = mock.Mock( side_effect=[job_url, job_url2, {"Status": "Completed"}, {"Status": "Completed"}]) with mock.patch('requests.Session.request', return_value=request_response) as mock_method: 
method(exit_after_run=True) login_args = mock.call( sdn_const.POST, mock.ANY, headers=sdn_const.LOGIN_HTTP_HEADER, data=mock.ANY, timeout=cfg.CONF.sdn.timeout) job_get_args = mock.call( sdn_const.GET, data=None, headers=sdn_const.JSON_HTTP_HEADER, url=urlpath, timeout=cfg.CONF.sdn.timeout) if status_code < 400: if expected_calls: operation_args = mock.call( headers=sdn_const.JSON_HTTP_HEADER, timeout=cfg.CONF.sdn.timeout, *args, **kwargs) if expected_calls == 4: urlpath2 = sdn_utils.strings_to_url( cfg.CONF.sdn.url, job_url2) job_get_args2 = mock.call( sdn_const.GET, data=None, headers=sdn_const.JSON_HTTP_HEADER, url=urlpath2, timeout=cfg.CONF.sdn.timeout) self.assertEqual( login_args, mock_method.mock_calls[4]) self.assertEqual( job_get_args, mock_method.mock_calls[5]) self.assertEqual( login_args, mock_method.mock_calls[6]) self.assertEqual( job_get_args2, mock_method.mock_calls[7]) else: self.assertEqual( login_args, mock_method.mock_calls[0]) self.assertEqual( operation_args, mock_method.mock_calls[1]) self.assertEqual( login_args, mock_method.mock_calls[2]) self.assertEqual( job_get_args, mock_method.mock_calls[3]) # we need to reduce the login call_cout self.assertEqual(expected_calls * 2, mock_method.call_count) def _call_operation_object(self, operation, object_type): if object_type == sdn_const.PORT and operation == sdn_const.POST: context = self._get_mock_bind_operation_context() method = getattr(self.mech, 'bind_port') else: context = self._get_mock_operation_context(object_type) operation = self.OPERATION_MAPPING[operation] object_type = object_type.lower() method = getattr(self.mech, '%s_%s_precommit' % (operation, object_type)) method(context) def _test_operation_object(self, operation, object_type): self._call_operation_object(operation, object_type) context = self._get_mock_operation_context(object_type) row = db.get_oldest_pending_db_row_with_lock(self.db_session) self.assertEqual(operation, row['operation']) self.assertEqual(object_type, 
row['object_type']) self.assertEqual(context.current['id'], row['object_uuid']) def _test_thread_processing(self, operation, object_type, expected_calls=2): status_codes = {sdn_const.POST: requests.codes.created, sdn_const.PUT: requests.codes.ok, sdn_const.DELETE: requests.codes.no_content} http_request = operation status_code = status_codes[operation] self._call_operation_object(operation, object_type) if object_type == sdn_const.PORT and operation == sdn_const.POST: context = self._get_mock_bind_operation_context() else: context = self._get_mock_operation_context(object_type) url_object_type = object_type.replace('_', '-') url = '%s/%s/%s' % (cfg.CONF.sdn.url, cfg.CONF.sdn.domain, url_object_type) if operation in (sdn_const.PUT, sdn_const.DELETE): uuid = context.current['id'] url = '%s/%s' % (url, uuid) kwargs = {'url': url, 'data': DataMatcher(context, object_type)} with mock.patch.object(self.thread.event, 'wait', return_value=False): self._test_operation(self.thread.run_sync_thread, status_code, expected_calls, http_request, **kwargs) def _test_object_type(self, object_type): # Add and process create request. self._test_thread_processing(sdn_const.POST, object_type) rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.COMPLETED) self.assertEqual(1, len(rows)) # Add and process update request. Adds to database. self._test_thread_processing(sdn_const.PUT, object_type) rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.COMPLETED) self.assertEqual(2, len(rows)) # Add and process update request. Adds to database. self._test_thread_processing(sdn_const.DELETE, object_type) rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.COMPLETED) self.assertEqual(3, len(rows)) def _test_object_type_pending_network(self, object_type): # Create a network (creates db row in pending state). self._call_operation_object(sdn_const.POST, sdn_const.NETWORK) # Create object_type database row and process. 
This results in both # the object_type and network rows being processed. self._test_thread_processing(sdn_const.POST, object_type, expected_calls=4) # Verify both rows are now marked as completed. rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.COMPLETED) self.assertEqual(2, len(rows)) def _test_object_type_processing_network(self, object_type): self._test_object_operation_pending_another_object_operation( object_type, sdn_const.POST, sdn_const.NETWORK, sdn_const.POST) def _test_object_operation_pending_object_operation( self, object_type, operation, pending_operation): self._test_object_operation_pending_another_object_operation( object_type, operation, object_type, pending_operation) def _test_object_operation_pending_another_object_operation( self, object_type, operation, pending_type, pending_operation): # Create the object_type (creates db row in pending state). self._call_operation_object(pending_operation, pending_type) # Get pending row and mark as processing so that # this row will not be processed by journal thread. row = db.get_all_db_rows_by_state(self.db_session, sdn_const.PENDING) db.update_db_row_state(self.db_session, row[0], sdn_const.PROCESSING) # Create the object_type database row and process. # Verify that object request is not processed because the # dependent object operation has not been marked as 'completed'. self._test_thread_processing(operation, object_type, expected_calls=0) # Verify that all rows are still in the database. 
rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.PROCESSING) self.assertEqual(1, len(rows)) rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.PENDING) self.assertEqual(1, len(rows)) def _test_parent_delete_pending_child_delete(self, parent, child): self._test_object_operation_pending_another_object_operation( parent, sdn_const.DELETE, child, sdn_const.DELETE) def _test_cleanup_processing_rows(self, last_retried, expected_state): # Create a dummy network (creates db row in pending state). self._call_operation_object(sdn_const.POST, sdn_const.NETWORK) # Get pending row and mark as processing and update # the last_retried time row = db.get_all_db_rows_by_state(self.db_session, sdn_const.PENDING)[0] row.last_retried = last_retried db.update_db_row_state(self.db_session, row, sdn_const.PROCESSING) # Test if the cleanup marks this in the desired state # based on the last_retried timestamp cleanup.JournalCleanup().cleanup_processing_rows(self.db_session) # Verify that the Db row is in the desired state rows = db.get_all_db_rows_by_state(self.db_session, expected_state) self.assertEqual(1, len(rows)) def test_driver(self): for operation in (sdn_const.POST, sdn_const.PUT, sdn_const.DELETE): for object_type in (sdn_const.NETWORK, sdn_const.PORT): self._test_operation_object(operation, object_type) def test_network(self): self._test_object_type(sdn_const.NETWORK) def test_network_update_pending_network_create(self): self._test_object_operation_pending_object_operation( sdn_const.NETWORK, sdn_const.PUT, sdn_const.POST) def test_network_delete_pending_network_create(self): self._test_object_operation_pending_object_operation( sdn_const.NETWORK, sdn_const.DELETE, sdn_const.POST) def test_network_delete_pending_network_update(self): self._test_object_operation_pending_object_operation( sdn_const.NETWORK, sdn_const.DELETE, sdn_const.PUT) def test_network_delete_pending_port_delete(self): self._test_parent_delete_pending_child_delete( sdn_const.NETWORK, 
sdn_const.PORT) def test_port(self): self._test_object_type(sdn_const.PORT) def test_port_update_pending_port_create(self): self._test_object_operation_pending_object_operation( sdn_const.PORT, sdn_const.PUT, sdn_const.POST) def test_port_delete_pending_port_create(self): self._test_object_operation_pending_object_operation( sdn_const.PORT, sdn_const.DELETE, sdn_const.POST) def test_port_delete_pending_port_update(self): self._test_object_operation_pending_object_operation( sdn_const.PORT, sdn_const.DELETE, sdn_const.PUT) def test_port_pending_network(self): self._test_object_type_pending_network(sdn_const.PORT) def test_port_processing_network(self): self._test_object_type_processing_network(sdn_const.PORT) def test_cleanup_processing_rows_time_not_expired(self): self._test_cleanup_processing_rows(datetime.datetime.utcnow(), sdn_const.PROCESSING) def test_cleanup_processing_rows_time_expired(self): old_time = datetime.datetime.utcnow() - datetime.timedelta(hours=24) self._test_cleanup_processing_rows(old_time, sdn_const.PENDING) def test_thread_call(self): """Verify that the sync thread method is called.""" # Create any object that would spin up the sync thread via the # decorator call_thread_on_end() used by all the event handlers. self._call_operation_object(sdn_const.POST, sdn_const.NETWORK) # Verify that the thread call was made. 
self.assertTrue(self.mock_sync_thread.called) def _decrease_row_created_time(self, row): row.created_at -= datetime.timedelta(hours=1) self.db_session.merge(row) self.db_session.flush() def test_sync_multiple_updates(self): # add 2 updates for i in range(2): self._call_operation_object(sdn_const.PUT, sdn_const.NETWORK) # get the last update row last_row = db.get_all_db_rows(self.db_session)[-1] # change the last update created time self._decrease_row_created_time(last_row) # create 1 more operation to trigger the sync thread # verify that there are no calls to NEO controller, because the # first row was not valid (exit_after_run = true) self._test_thread_processing(sdn_const.PUT, sdn_const.NETWORK, expected_calls=0) # validate that all the rows are in 'pending' state # first row should be set back to 'pending' because it was not valid rows = db.get_all_db_rows_by_state(self.db_session, sdn_const.PENDING) self.assertEqual(3, len(rows)) def test_network_filter_phynset(self): self.conf.set_override( 'physical_networks', 'datacenter', sdn_const.GROUP_OPT) self.mech = sdn_mech_driver.SDNMechanismDriver() self.mech.initialize() self._test_filtered_object_type(sdn_const.NETWORK) def test_port_filter_phynset(self): self.conf.set_override( 'physical_networks', 'datacenter', sdn_const.GROUP_OPT) self.mech = sdn_mech_driver.SDNMechanismDriver() self.mech.initialize() self._test_filtered_object_type(sdn_const.PORT) def _test_filtered_object_type(self, object_type): # Add and process create request. 
for operation in (sdn_const.POST, sdn_const.PUT, sdn_const.DELETE): self._call_operation_object(operation, object_type) rows = db.get_all_db_rows(self.db_session) self.assertEqual(0, len(rows)) networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/drivers/__init__.py0000644000413600001450000000000013566516770026702 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/ml2/__init__.py0000644000413600001450000000000013566516770025224 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/unit/__init__.py0000644000413600001450000000126713566516767024560 0ustar lennybmtl00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg cfg.CONF.use_stderr = False networking-mlnx-15.0.2/networking_mlnx/tests/__init__.py0000644000413600001450000000000013566516767023561 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/tests/base.py0000644000413600001450000000143213566516767022746 0ustar lennybmtl00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class TestCase(base.BaseTestCase): """Test case base class for all unit tests.""" networking-mlnx-15.0.2/networking_mlnx/__init__.py0000644000413600001450000000000013566516767022417 0ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx/_i18n.py0000644000413600001450000000264213575645017021603 0ustar lennybmtl00000000000000# Copyright 2016 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "networking-mlnx" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" # requires oslo.i18n >=2.1.0 _C = _translators.contextual_form # The plural translation function using the name "_P" # requires oslo.i18n >=2.1.0 _P = _translators.plural_form # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. 
_LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) networking-mlnx-15.0.2/networking_mlnx/version.py0000644000413600001450000000125713575645017022353 0ustar lennybmtl00000000000000# Copyright 2019 Mellanox Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('networking-mlnx') networking-mlnx-15.0.2/networking_mlnx.egg-info/0000755000413600001450000000000013575645771022010 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/networking_mlnx.egg-info/PKG-INFO0000644000413600001450000000224713575645770023111 0ustar lennybmtl00000000000000Metadata-Version: 1.1 Name: networking-mlnx Version: 15.0.2 Summary: Mellanox Networking Home-page: http://www.mellanox.com/ Author: Mellanox Author-email: openstack@mellanox.com License: UNKNOWN Description: =============== networking-mlnx =============== Networking MLNX contains the Mellanox vendor code for Openstack Neutron * Free software: Apache license * Documentation: https://wiki.openstack.org/wiki/Mellanox-Neutron * Source: https://opendev.org/x/networking-mlnx * Bugs: https://bugs.launchpad.net/networking-mlnx Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License 
Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 networking-mlnx-15.0.2/networking_mlnx.egg-info/SOURCES.txt0000644000413600001450000001431413575645770023676 0ustar lennybmtl00000000000000.coveragerc .mailmap .pylintrc .stestr.conf .testr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst TESTING.rst babel.cfg requirements.txt setup.cfg setup.py test-requirements.txt tox.ini devstack/README.rst devstack/plugin.sh devstack/lib/eswitchd devstack/lib/neutron_ml2_mlnx doc/source/conf.py doc/source/contributing.rst doc/source/index.rst doc/source/installation.rst doc/source/readme.rst doc/source/usage.rst etc/policy.json etc/neutron/plugins/ml2/eswitchd.conf etc/neutron/plugins/ml2/ml2_conf_sdn.ini etc/neutron/plugins/mlnx/mlnx_conf.ini etc/neutron/rootwrap.d/eswitchd.filters networking_mlnx/__init__.py networking_mlnx/_i18n.py networking_mlnx/version.py networking_mlnx.egg-info/PKG-INFO networking_mlnx.egg-info/SOURCES.txt networking_mlnx.egg-info/dependency_links.txt networking_mlnx.egg-info/entry_points.txt networking_mlnx.egg-info/not-zip-safe networking_mlnx.egg-info/pbr.json networking_mlnx.egg-info/requires.txt networking_mlnx.egg-info/top_level.txt networking_mlnx/cmd/__init__.py networking_mlnx/cmd/eventlet/__init__.py networking_mlnx/cmd/eventlet/agents/__init__.py networking_mlnx/cmd/eventlet/agents/mlnx_agent.py networking_mlnx/db/__init__.py networking_mlnx/db/db.py networking_mlnx/db/migration/__init__.py networking_mlnx/db/migration/alembic_migrations/__init__.py networking_mlnx/db/migration/alembic_migrations/env.py networking_mlnx/db/migration/alembic_migrations/script.py.mako 
networking_mlnx/db/migration/alembic_migrations/versions/CONTRACT_HEAD networking_mlnx/db/migration/alembic_migrations/versions/EXPAND_HEAD networking_mlnx/db/migration/alembic_migrations/versions/start_networking_mlnx.py networking_mlnx/db/migration/alembic_migrations/versions/newton/contract/dfd1a1f22c4180_initial.py networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/5d5e04ea01d5_sdn_journal_change_data_to_text.py networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/65b6db113427b9_initial.py networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/9f30890cfbd1_adding_sdn_journal_db.py networking_mlnx/db/migration/alembic_migrations/versions/newton/expand/d02c04effb34_adding_sdn_maintenance_db.py networking_mlnx/db/migration/models/__init__.py networking_mlnx/db/migration/models/head.py networking_mlnx/db/models/__init__.py networking_mlnx/db/models/sdn_journal_db.py networking_mlnx/db/models/sdn_maintenance_db.py networking_mlnx/eswitchd/__init__.py networking_mlnx/eswitchd/eswitch_daemon.py networking_mlnx/eswitchd/eswitch_handler.py networking_mlnx/eswitchd/msg_handler.py networking_mlnx/eswitchd/resource_mngr.py networking_mlnx/eswitchd/cli/__init__.py networking_mlnx/eswitchd/cli/conn_utils.py networking_mlnx/eswitchd/cli/ebr_dbg.py networking_mlnx/eswitchd/cli/ebrctl.py networking_mlnx/eswitchd/cli/exceptions.py networking_mlnx/eswitchd/common/__init__.py networking_mlnx/eswitchd/common/config.py networking_mlnx/eswitchd/common/constants.py networking_mlnx/eswitchd/common/exceptions.py networking_mlnx/eswitchd/db/__init__.py networking_mlnx/eswitchd/db/device_db.py networking_mlnx/eswitchd/db/eswitch_db.py networking_mlnx/eswitchd/utils/__init__.py networking_mlnx/eswitchd/utils/command_utils.py networking_mlnx/eswitchd/utils/helper_utils.py networking_mlnx/eswitchd/utils/pci_utils.py networking_mlnx/journal/__init__.py networking_mlnx/journal/cleanup.py networking_mlnx/journal/dependency_validations.py 
networking_mlnx/journal/journal.py networking_mlnx/journal/maintenance.py networking_mlnx/linux/__init__.py networking_mlnx/linux/constants.py networking_mlnx/linux/ip_lib.py networking_mlnx/linux/interface_drivers/__init__.py networking_mlnx/linux/interface_drivers/config.py networking_mlnx/linux/interface_drivers/constants.py networking_mlnx/linux/interface_drivers/interface.py networking_mlnx/linux/interface_drivers/network_cache.py networking_mlnx/plugins/__init__.py networking_mlnx/plugins/ml2/__init__.py networking_mlnx/plugins/ml2/drivers/__init__.py networking_mlnx/plugins/ml2/drivers/mlnx/README networking_mlnx/plugins/ml2/drivers/mlnx/__init__.py networking_mlnx/plugins/ml2/drivers/mlnx/mech_mlnx.py networking_mlnx/plugins/ml2/drivers/mlnx/agent/__init__.py networking_mlnx/plugins/ml2/drivers/mlnx/agent/comm_utils.py networking_mlnx/plugins/ml2/drivers/mlnx/agent/config.py networking_mlnx/plugins/ml2/drivers/mlnx/agent/exceptions.py networking_mlnx/plugins/ml2/drivers/mlnx/agent/mlnx_eswitch_neutron_agent.py networking_mlnx/plugins/ml2/drivers/mlnx/agent/utils.py networking_mlnx/plugins/ml2/drivers/sdn/__init__.py networking_mlnx/plugins/ml2/drivers/sdn/client.py networking_mlnx/plugins/ml2/drivers/sdn/config.py networking_mlnx/plugins/ml2/drivers/sdn/constants.py networking_mlnx/plugins/ml2/drivers/sdn/exceptions.py networking_mlnx/plugins/ml2/drivers/sdn/sdn_mech_driver.py networking_mlnx/plugins/ml2/drivers/sdn/utils.py networking_mlnx/tests/__init__.py networking_mlnx/tests/base.py networking_mlnx/tests/unit/__init__.py networking_mlnx/tests/unit/db/__init__.py networking_mlnx/tests/unit/db/test_db.py networking_mlnx/tests/unit/eswitchd/__init__.py networking_mlnx/tests/unit/eswitchd/test_pci_utils.py networking_mlnx/tests/unit/journal/__init__.py networking_mlnx/tests/unit/journal/test_dependency_validations.py networking_mlnx/tests/unit/journal/test_maintenance.py networking_mlnx/tests/unit/linux/__init__.py 
networking_mlnx/tests/unit/linux/test_ip_lib.py networking_mlnx/tests/unit/linux/interface_drivers/__init__.py networking_mlnx/tests/unit/linux/interface_drivers/test_interface.py networking_mlnx/tests/unit/linux/interface_drivers/test_network_cache.py networking_mlnx/tests/unit/ml2/__init__.py networking_mlnx/tests/unit/ml2/drivers/__init__.py networking_mlnx/tests/unit/ml2/drivers/mlnx/__init__.py networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mech_mlnx.py networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mlnx_comm_utils.py networking_mlnx/tests/unit/ml2/drivers/mlnx/test_mlnx_neutron_agent.py networking_mlnx/tests/unit/ml2/drivers/sdn/__init__.py networking_mlnx/tests/unit/ml2/drivers/sdn/test_client.py networking_mlnx/tests/unit/ml2/drivers/sdn/test_mechanism_sdn.py tools/coding-checks.sh tools/install_venv.py tools/install_venv_common.py tools/misc-sanity-checks.sh tools/pip_install_src_modules.sh tools/with_venv.shnetworking-mlnx-15.0.2/networking_mlnx.egg-info/dependency_links.txt0000644000413600001450000000000113575645770026055 0ustar lennybmtl00000000000000 networking-mlnx-15.0.2/networking_mlnx.egg-info/entry_points.txt0000644000413600001450000000127313575645770025310 0ustar lennybmtl00000000000000[console_scripts] ebrctl = networking_mlnx.eswitchd.cli.ebrctl:main eswitchd = networking_mlnx.eswitchd.eswitch_daemon:main neutron-mlnx-agent = networking_mlnx.cmd.eventlet.agents.mlnx_agent:main [neutron.db.alembic_migrations] networking-mlnx = networking_mlnx.db.migration:alembic_migrations [neutron.interface_drivers] ipoib = networking_mlnx.linux.interface_drivers.interface:IPoIBInterfaceDriver multi = networking_mlnx.linux.interface_drivers.interface:MultiInterfaceDriver [neutron.ml2.mechanism_drivers] mlnx_infiniband = networking_mlnx.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver mlnx_sdn_assist = networking_mlnx.plugins.ml2.drivers.sdn.sdn_mech_driver:SDNMechanismDriver 
networking-mlnx-15.0.2/networking_mlnx.egg-info/not-zip-safe0000644000413600001450000000000113575645770024235 0ustar lennybmtl00000000000000 networking-mlnx-15.0.2/networking_mlnx.egg-info/pbr.json0000644000413600001450000000005613575645770023466 0ustar lennybmtl00000000000000{"git_version": "8f8c33c", "is_release": true}networking-mlnx-15.0.2/networking_mlnx.egg-info/requires.txt0000644000413600001450000000047513575645770024415 0ustar lennybmtl00000000000000Babel>=1.3 pbr!=2.1.0,>=2.0.0 defusedxml>=0.5.0 eventlet!=0.18.3,>=0.18.2 netaddr>=0.7.18 pyroute2>=0.5.7 python-neutronclient>=5.1.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 six>=1.10.0 oslo.config>=5.2.0 oslo.concurrency>=3.26.0 python-openstackclient>=3.3.0 neutron-lib>=1.28.0 neutron>=13.0.0.0b2 pyzmq networking-mlnx-15.0.2/networking_mlnx.egg-info/top_level.txt0000644000413600001450000000002013575645770024531 0ustar lennybmtl00000000000000networking_mlnx networking-mlnx-15.0.2/tools/0000755000413600001450000000000013575645772016232 5ustar lennybmtl00000000000000networking-mlnx-15.0.2/tools/coding-checks.sh0000755000413600001450000000255713575645017021273 0ustar lennybmtl00000000000000#!/bin/sh set -eu usage () { echo "Usage: $0 [OPTION]..." echo "Run netwroking-mlnx's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire netwroking-mlnx module or just files changed in basecommit (e.g. HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="networking_mlnx" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." 
echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi networking-mlnx-15.0.2/tools/install_venv.py0000644000413600001450000000466113566516770021312 0ustar lennybmtl00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ source .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help) def main(argv): if 'tools_path' in os.environ: root = os.environ['tools_path'] else: root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if 'venv' in os.environ: venv = os.environ['venv'] else: venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': sys.exit(main(sys.argv)) networking-mlnx-15.0.2/tools/install_venv_common.py0000644000413600001450000001350713566516770022661 0ustar lennybmtl00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. 
Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() networking-mlnx-15.0.2/tools/misc-sanity-checks.sh0000755000413600001450000000343713575645017022266 0ustar lennybmtl00000000000000#! /bin/sh # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # The purpose of this script is to avoid casual introduction of more # bash dependency. Please consider alternatives before commiting code # which uses bash specific features. export TMPDIR=`/bin/mktemp -d` trap "rm -rf $TMPDIR" EXIT FAILURES=$TMPDIR/failures check_opinionated_shell () { # Check that shell scripts are not bash opinionated (ignore comments though) # If you cannot avoid the use of bash, please change the EXPECTED var below. OBSERVED=$(grep -E '^([^#]|#!).*bash' tox.ini tools/* | wc -l) EXPECTED=6 if [ ${EXPECTED} -ne ${OBSERVED} ]; then echo "Bash usage has been detected!" >>$FAILURES fi } check_no_symlinks_allowed () { # Symlinks break the package build process, so ensure that they # do not slip in, except hidden symlinks. if [ $(find . -type l ! 
-path '*/\.*' | wc -l) -ge 1 ]; then echo "Symlinks are not allowed!" >>$FAILURES fi } # Add your checks here... check_opinionated_shell check_no_symlinks_allowed # Fail, if there are emitted failures if [ -f $FAILURES ]; then cat $FAILURES exit 1 fi networking-mlnx-15.0.2/tools/pip_install_src_modules.sh0000755000413600001450000000121313566516770023476 0ustar lennybmtl00000000000000#!/bin/bash # For neutron unit tests, you can define git repos containing modules # that you want to use to override the requirements-based packages. # # Why, you ask? Because you made changes to neutron-lib, and you want # run the unit tests together. E.g.: # # env TOX_ENV_SRC_MODULES="$HOME/src/neutron-lib" tox -e py27 toxinidir="$1" if [ -z "$TOX_ENV_SRC_MODULES" ]; then exit 0 fi for repo in $TOX_ENV_SRC_MODULES; do d="${toxinidir}/${repo}" if [ ! -d "$d" ]; then echo "tox_env_src: error: no directory found at $d" continue fi echo "tox_env_src: pip installing from $d" pip install -e "$d" done networking-mlnx-15.0.2/tools/with_venv.sh0000755000413600001450000000132313566516770020574 0ustar lennybmtl00000000000000#!/bin/bash # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
TOOLS=`dirname $0` VENV=$TOOLS/../.venv source $VENV/bin/activate && "$@" networking-mlnx-15.0.2/.coveragerc0000644000413600001450000000020713566516767017213 0ustar lennybmtl00000000000000[run] branch = True source = networking_mlnx omit = networking_mlnx/tests/*,networking_mlnx/openstack/* [report] ignore-errors = True networking-mlnx-15.0.2/.mailmap0000644000413600001450000000111613566516767016513 0ustar lennybmtl00000000000000# Format is: # # lawrancejing Jiajun Liu Zhongyue Luo Kun Huang Zhenguo Niu Isaku Yamahata Isaku Yamahata Morgan Fainberg networking-mlnx-15.0.2/.pylintrc0000644000413600001450000000724613575645017016740 0ustar lennybmtl00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. ignore=.git,tests [MESSAGES CONTROL] # NOTE(gus): This is a long list. A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise c-extension-no-member, locally-disabled, # "E" Error for important programming issues (likely bugs) access-member-before-definition, no-member, no-method-argument, no-self-argument, not-an-iterable, # "W" Warnings for stylistic problems or minor programming issues abstract-method, arguments-differ, attribute-defined-outside-init, bad-builtin, bad-indentation, broad-except, dangerous-default-value, deprecated-lambda, expression-not-assigned, fixme, global-statement, keyword-arg-before-vararg, literal-comparison, no-init, non-parent-init-called, not-callable, protected-access, redefined-builtin, redefined-outer-name, signature-differs, star-args, super-init-not-called, super-on-old-class, unpacking-non-sequence, unused-argument, unused-import, unused-variable, useless-super-delegation, # TODO(dougwig) - disable nonstandard-exception 
while we have neutron_lib shims nonstandard-exception, # "C" Coding convention violations bad-continuation, consider-iterating-dictionary, consider-using-enumerate, invalid-name, len-as-condition, misplaced-comparison-constant, missing-docstring, singleton-comparison, superfluous-parens, ungrouped-imports, wrong-import-order, # "R" Refactor recommendations abstract-class-little-used, abstract-class-not-used, consider-merging-isinstance, consider-using-ternary, duplicate-code, inconsistent-return-statements, interface-not-implemented, no-else-return, no-self-use, redefined-argument-from-local, simplifiable-if-statement, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-nested-blocks, too-many-public-methods, too-many-return-statements, too-many-statements, # new for python3 version of pylint consider-using-set-comprehension, unnecessary-pass, useless-object-inheritance [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [CLASSES] # List of interface methods to ignore, separated by a comma. 
ignore-iface-methods= [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use oslo_serialization.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no networking-mlnx-15.0.2/.stestr.conf0000644000413600001450000000011513566516767017341 0ustar lennybmtl00000000000000[DEFAULT] test_path=${OS_TEST_PATH:-./networking_mlnx/tests/unit} top_dir=./ networking-mlnx-15.0.2/.testr.conf0000644000413600001450000000047613566516767017170 0ustar lennybmtl00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--listnetworking-mlnx-15.0.2/.zuul.yaml0000644000413600001450000000010113575645017017013 0ustar lennybmtl00000000000000- project: templates: - openstack-python3-train-jobs networking-mlnx-15.0.2/AUTHORS0000644000413600001450000001632713575645770016151 0ustar lennybmtl00000000000000Aaron Rosen Abhishek Chanda Adam Harwell Adrian Chiris Akihiro MOTOKI Akihiro Motoki Aleks Chirko Alessandro Pilotti Alessio Ababilov Alessio Ababilov AmichayPolishuk Amir Sadoughi Andre Pech Andreas Jaeger Andreas Jaeger Angus Lees Ann Kamyshnikova Arvind Somy Arvind Somya Assaf Muller Bhuvan Arumugam Blair Bethwaite Bob Kukura Bob Melander Boden R Brad Hall Brad Hall Brant Knudson Carl Baldwin Cedric Brandily Chang Bo Guo Christian Berendt Chuck Short Clark Boylan Clint Byrum Dan Prince Dan Wendlandt Davanum Srinivas Deepak N Dirk Mueller Doug Hellmann Doug Wiegley Edan David Edgar Magana Edgar Magana Emilien Macchi Eugene Nikanorov Gary Kotton Gary Kotton Gauvain Pocentek Gordon Chung Hamdy Khader Hareesh Puthalath He Jie Xu Hemanth Ravi Henry Gessau 
Henry Gessau HenryVIII Hirofumi Ichihara Hong Hui Xiao Ignacio Scopetta Ihar Hrachyshka Ionuț Arțăriși Irena Berezovsky Isaku Yamahata Isaku Yamahata JJ Asghar Jacek Swiderski Jakub Libosvar James E. Blair James E. Blair Janonymous Jason Kölker Javier Pena Jay Pipes Jeremy Stanley Jiajun Liu Joe Gordon John Dunning Jordan Tardif Juliano Martinez Julien Danjou Justin Hammond Justin Lund Keshava Bharadwaj Kevin Benton Kevin L. Mitchell Kris Lindgren Kun Huang Kyle Mestery Lenny Verkhovsky Luke Gorrie Major Hayden Mark Goddard Mark McClain Mark McClain Mark McLoughlin Maru Newby Maru Newby Mate Lakat Matt Riedemann Matthew Treinish Matthew Treinish Miguel Angel Ajo Mohammad Banikazemi Monty Taylor Morgan Fainberg Moshe Levi Motohiro OTSUKA Murad Awawdeh Nachi Ueno Nachi Ueno Nader Lahouti Nguyen Hung Phuong Oleg Bondarev Omri Marcovitch Ondřej Nový Paul Michali Praneet Bachheti Rajaram Mallya Ralf Haferkamp Rich Curran Roey Chen Roman Podoliaka Romil Gupta Rossella Sblendido Rui Zang Russell Bryant Ryota MIBU Salvatore Orlando Salvatore Orlando Samer Deeb Santhosh Santhosh Kumar Sanu Madhavan Sascha Peilicke Sascha Peilicke Sean Dague Sean Dague Sean M. 
Collins Sergey Lukjanov Sergey Skripnick Shiv Haris Shweta P Somik Behera Somik Behera Sukhdev Sulaiman Radwan Sumit Naiksatam Sushil Kumar Sylvain Afchain Terry Wilson Thierry Carrez Thomas Bechtold Tim Miller Trinath Somanchi Tyler Smith Vieri <15050873171@163.com> Weidong Shao Wu Wenxiang XiaojueGuan Xu Han Peng YAMAMOTO Takashi Yaguang Tang Ying Liu Yong Sheng Gong Yong Sheng Gong Yoshihiro Kaneko Zang MingJie Zhenguo Niu ZhiQiang Fan ZhiQiang Fan Zhongyue Luo abdallahyas alexpilotti armando-migliaccio armando-migliaccio fumihiko kakuma gongysh gongysh gordon chung hamdy khader huang.zhiping justin Lund lawrancejing liu-sheng liuqing llg8212 mark mcclain mathieu-rohon motyz rohitagarwalla ronak rossella shihanzhang sukhdev trinaths vinkesh banka wangqian zhhuabj zhouxinyong networking-mlnx-15.0.2/CONTRIBUTING.rst0000644000413600001450000000106013566516767017531 0ustar lennybmtl00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/neutron networking-mlnx-15.0.2/ChangeLog0000644000413600001450000012151513575645770016647 0ustar lennybmtl00000000000000CHANGES ======= 15.0.2 ------ * Fix zmq send/rcv for python3 * Fixed zmq python3 send\_string * devstack: configure bind\_normal\_ports and bind\_normal\_ports\_physnets * always bind normal ports for IB physnet * Update some config opts description * Added DHCP and L3 params for IB multi interface 15.0.1 ------ * Refactoring of process\_port\_info * Refactor the way a device is identified * Add pyroute2 v0.5.7 * Remove redundant call to set\_port\_vlan\_id 15.0.0 ------ * updated .gitreview * remote ethtool and ip link from rootwrap * Change cache maintenance config opt default * Fix deadlock of read and write lock taken by the same thread * Add support for binding normal ports * remove unused clean.sh and make misc-sanity-checks.sh executable * add checks from neutron pylint and fix them * Add multi interface driver * Replace openstack.org URLs with opendev.org URs * Trivial: update url to new url * Update url in HACKING.rst * Add network cache module * Add IPoIB interface driver * add bandit security linter * update hacking>=1.1.0, enable E125 E129 N537 and fix pep8 issues * update pep8 check and fix new pep8 errors * add bashate * Add py37 jobs to Zuul * remove MlnxDHCP * remove debug print * Set port status to ACTIVE when binding a port * remove py37 and add py36 * skip test\_release\_unused\_leases\_one\_lease\_with\_client\_id\_none * OpenDev Migration Patch * Remove version from PKG-INFO file * Replacing the link in HACKING.rst * Update min tox version to 2.0 * Switch testing from Python 3.5 to 3.7 * Fix some minor typos and comments * Fix exception handling in journal thread 13.1.0 ------ * fixed host id * Fixed: Use db\_api from neutron\_lib * Use neutron\_lib for db\_api instead of neutron * Support disabling SDN controller sync * Disable 
test\_client\_id\_num\_str test * fix tox python3 overrides * Use Param DHCP\_OPT\_CLIENT\_ID\_NUM * Disable test\_client\_id\_num test * fix unused\_leases\_one\_lease\_with\_client\_id test * Also run unit tests under Python 3 * uncap eventlet * delete baremetal ports when they are unbounded * Don't bind ports on disallowed physical networks * Avoid tox-install.sh Fix import topics * Fix issue that causes job to stuck on monitoring * Remove obsolete tempest-lib * Fix neutron\_db\_api.get\_session with the latest neutron db api * Fix MLNX\_GUID\_INDEX\_PATH for eswitch * Fixed neutron.plugins.common import * Revert "Fix MLNX4\_GUID\_INDEX\_PATH" * Fix MLNX4\_GUID\_INDEX\_PATH * FIX: moving import driver\_api to neutron\_lib.plugins.ml2 * Remove double import of neutron lib constants * Update requirements versions to be compatible with neutron * Fixing p\_constants import from neutron.plugins.common to neutron\_lib * remove fabric\_type * add support to plx * Fix devstack's bash script, fix 2 logical expressions * Remove unused ebrctl message flow "create-port" * Use 'port' definition from neutron-lib instead of deprecated neutron * Added the ability to update client-id dhcp option * Cp only eswtich conf file * fixed neutron\_lib.api.definitions import and CLIENT\_ID * Fix deprecation warning Modules moved from 'neutron.agent.securitygroups\_rpc' to 'neutron.api.rpc.handlers.securitygroups\_rpc' * add physical\_networks option to filter phynet * update requirements * Add support for multi port on the same phynet * ignore [N537] Don't translate logs * neo sdn: fix delete port to * remove pbr from requirements * Fixed neutron.agent.common import config * update pbr version to 2.0 and use neutron\_lib context * Fixed neutron\_lib.utils import * TrivialFix: Merge imports in code * reset driver using bind/unbind when vm is deleted * requirements zmq changed to pyzmq * baremetal send POST if we have the local\_link\_information * Use neutron-lib portbindings api-def * 
Remove white space between print () * check port device\_owner before sending DELETE port * add missing \_\_init\_\_.py for jornal unit test * update files to 644 * fix race condition in migration when delete job done after create port * fix dhcp test\_spawn\_no\_dns\_domain test * pass old port dict when calling PORT DELETE api * Move tests located at ml2/sdn to ml2/drivers/sdn folder * Add unit test for sdn client * Remove unused method 'try\_delete' in sdn client * Fixed CX5 device\_type handling * Fixed import model\_base * Add support for Ironic multi tenant with ETH * use neutron\_lib constants * Send delete port request for unattached ports * Add support for Ironic multi tenant with IB * Adjust unit tests to python 3 * add support for baremetal port binding * Update test-requirements * fix mlnx\_sdn\_assist documentation 9.0.0b1 ------- * don't send network with null provider:segmentation\_id * fix NEO sync when deleteing port and network * Put py34 first in the env order of tox and remove py33 * fix retry count * catch login exception in jouranl * Add Sync mechanism to NEO * add get method to client * rearrange unitest folder * fix jouranl dependency validation * DB: Refactor networking-mlnx db * db: change Enum field to use constants * db: Remove depreciation warnings from db * Fix to allow use of param PHYSICAL\_INTERFACE\_MAPPINGS * Fix deprecation errors * mlnx\_sdn: create REST client * Enable DeprecationWarning in test environments * add sdn maintenance table * add Journal cleanup * Add dependency validation * add sdn journal database * Initial alembic migrations * fix missing translation in pep8 * centrelize sdn config options * change remove PATH from constant names * add domain with cloudx default value to sdn conf * Use separate queue for agent state reports * Use neutron\_lib hacking and fix epep8 issues * Adding root\_helper registering * Use noop instead of neutron.agent.firewall:NoopFirewallDriver * Don't log Error on http 501 * Log rest 
response text on debug * replace assert with assertFalse * remove session\_timeout option * rearrange import in sdn driver * tox: Update import\_exceptions to import networking\_mlnx.\_i18n * Move class properties to instances for dhcp tests * move to oslo\_concurrency * remove unused var and function in eswitchd * Added zmq to requirements.txt * fixed removing ESWITCHD\_CONF\_DIR on clean * Renaming Mellanox Mechanism driver * Fixed dependencies lxml * removed version from metadata in setup.cfg * deleted non working tests * added unit tests for eswitchd/db/device\_db 8.0.0.0b3 --------- * Fixed Stucking plugin in case of missing/wrong message * added more unit tests for pci\_utils * change log level from exception to error * deleted pci\_utils.py from unit test folder * i18n imports fixed * updated test requirements according to neutron * sdn mech driver add support for flat network * Fix mlnx mech driver to work with flat network * removed eswitchd-rootwrap leftover * replace eswitchd-rootwrap with neutron-rootwrap * removed datafiles from setup.cfg 8.0.0.0b2 --------- * Bump version to mark Mitaka branch * Update link to correct wiki docs location * change RPC version to 1.3 * Deprecated tox -downloadcache option removed * Fixed exception on port deallocate for eswitch if network is not present * Moving eswitchd to networking-mlnx repo * Add to sdn mechanism driver to send DHCP port information * Decompose ML2 mechanism driver for Mellanox * Update .gitreview with openstack instead of stackforge * updates due to moving from stackforge to openstack github * copy eswitchd files with sudo * Add debug log for NEO response code * Add config NoopDriver as firewall\_driver * update sync to true if we get request timeout * Add network qos policy to sdn mech driver * Retry when esiwtchd is down instead of existing * fix test\_host\_file\_on\_net\_with\_v6\_slaac\_and\_v4 in dchp test * fix move MLNX AGENT constant to mlnx\_mech and some dhcp unitests * update devstack 
eswitchd to work with eswitch pbr * Add support for IB dhcp * Fix files permission to be 644 * update devstack readme.rst * add mlnx dnsmasq support * Change the connection authentication of the SDN provider to a session * Update tests files to use oslo\_config and not oslo.config * added missing PKG-INFO for package building * loopingcall was removed from neutron.openstack.common fixed unitest Change-Id: Ie6b4eea23a3a1df79ed24e6e2556735d39b15758 Signed-off-by: Lenny Verkhovsky * mlnx MD: mlnx\_direct removal * Updating version to 2015.2.1 * Add /usr/local/bin to exec\_dirs in nova rootwrap.conf * Change the entry point name of sdn provider mechanism driver plugin * make hostdev defualt vnic type in devstack * Add README.rst file * Add SDN mechanism driver * Fix import in unit test tree * Move mlnx agent to be under ml2/drivers/mlnx * Migrate to oslo.log * oslo: migrate to namespace-less import paths and add hacking rule * Remove root\_helper arg from SecurityGroupAgentRpc and from mlnx agent * Update networking-mlnx README file * Update the requirements.txt to point to the real neutron repository * Fix devstack external plugin * Untangle and drop SecurityGroupAgentRpcMixin usage and replace it with SecurityGroupAgentRpc. 
This patch separates the use of SecurityGroupAgentRpcMixin out to its own class * networking-mlnx devstack external plugin * Initial creation for networking-mlnx * Generated new .gitreview file for networking-mlnx * Disable unbalanced-tuple-unpacking * Updated from global requirements * Dropped fixture module * Move agent cleanup scripts to cmd module * misc-sanity-checks.sh: Some cleanups * Service split: cleaned up setup.cfg * hacking: enable H238 (old style class declaration, use new style) * hacking: enable W292 (no newline at end of file) * Update hacking to 0.10 * Move metadata agent entry to its own file * Updated from global requirements * Break out config and entry point out of l3/agent file * Move postcommit ops out of transaction for bulk * Add support for retargetable functional api testing * Replace mention of nose with nose2 in devref * Delete the console scripts for lbaas and vpnaas * Enable the "not-callable" pylint check * Retry on unassigned ofport instead of treating it as a failure * Clean-up sanity checks done via shell scripts * Enable pylint checks for "anomalous" string escapes * Combine author\_tag and log\_translation\_hint regexes * Prevent symlinks to be added to the tree * Move DB TestModelsMigrations from unit to functional * Backward compatibility for advanced services 2015.1.0b1 ---------- * Updated from global requirements * Removed unused iso8601 dependency * Remove mlnx plugin * Set timeout for functional job * Remove unused dependencies * Migrate to oslo.context * Have L3 agent catch the correct exception * Updated from global requirements * Switch to using subunit-trace from tempest-lib * Move classes out of l3\_agent.py * Prettify tox output for functional tests * Services split, pass 2 * Remove TODO for H404 * Updated from global requirements * Use comments rather than no-op string statements * Enforce log hints * Disallow log hints in LOG.debug * Enforce log hints in ofagent and oneconvergence * Update i18n translation for 
NEC plugin log msg's * Update i18n translation for IBM plugin log msg's * Workflow documentation is now in infra-manual * tox.ini: Prevent casual addition of bash dependency * Updated from global requirements * Convert several uses of RpcCallback * Get rid of py26 references: OrderedDict, httplib, xml testing * Enforce log hints in opencontrail * Update i18n translation for Metaplugin plugin * Update i18n translation for Brocade plugin log msg's * Update i18n translation for Nuage plugin * Update i18n translation for Embrane plugin * Enforce log hints in neutron.plugins.plumgrid * Update i18n translation for Midonet plugin * Enforce log hints in neutron.plugins.sriovnicagent * Enforce log hints in neutron.plugins.hyperv * Updated the README.rst * Update i18n translation for BigSwitch plugin log msg's * pretty\_tox.sh: Portablity improvement * test\_dhcp\_agent: Fix no-op tests * Update i18n translation for Mellanox plugin and agent log msg's * Update i18n translation for VMware NSX plugin log msg's * hacking: Check if correct log markers are used * Enable undefined-loop-variable pylint check * Fix incorrect exception order in \_execute\_request * Migrate to oslo.i18n * Migrate to oslo.middleware * Migrate to oslo.utils * Remove Python 2.6 classifier * Update i18n translation for Cisco plugins and cfg agent log msg's * Remove ryu plugin * Update i18n translation for linuxbridge log msg's * Update i18n translation for openvswitch log msg's * Update i18n translation for ML2 plugin log msg's * Updated from global requirements * Enforce log hints in neutron.services * Enforce log hints in neutron.services.metering * Show progress output while running unit tests * Enforce log hints in neutron.services.loadbalancer * Enforce log hints in neutron.services.firewall * Enforce log hints in neutron.services.l3\_router * enable H401 hacking check * enable H237 check * Updated from global requirements * Update i18n translation for neutron.server/scheduler log msg's * Update i18n 
translation for neutron.notifiers log msg's * Update i18n translation for neutron.common/debug log msg's * Update i18n translation for neutron.api log msg's * Updated from global requirements * Update i18n translation for neutron.extension log msg's * Update i18n translation for neutron.db log msg's * Update i18n translation for neutron.cmd log msg's * Update i18n translation for neutron.agents log msg's * enable F812 check for flake8 * enable F811 check for flake8 * Support pudb as a different post mortem debugger * switch to oslo.serialization * Add rootwrap filters for ofagent * Remove openvswitch core plugin entry point * Updated from global requirements * Updated from global requirements * enable F402 check for flake8 * enable E713 in pep8 tests * Hyper-V: Remove useless use of "else" clause on for loop * Enable no-name-in-module pylint check * Updated from global requirements * Remove duplicate import of constants module * Switch run-time import to using importutils.import\_module * Enable assignment-from-no-return pylint check * tox.ini: Avoid using bash where unnecessary * Empty files should not contain copyright or license * Remove single occurrence of lost-exception warning * Updated fileutils and its dependencies 2014.2 ------ * remove E251 exemption from pep8 check * mock.assert\_called\_once() is not a valid method * Add pylint tox environment and disable all existing warnings * Updated from global requirements * Ignore top-level hidden dirs/files by default * Remove some duplicate unit tests * Drop sslutils and versionutils modules 2014.2.rc2 ---------- * Removed kombu from requirements * Updated from global requirements * Updated from global requirements * Remove sslutils from openstack.common 2014.2.rc1 ---------- * remove linuxbridge plugin * Fix sleep function call * Open Kilo development * Implement ModelsMigrationsSync test from oslo.db * Fix entrypoint of OneConvergencePlugin plugin * Set dsvm-functional job to use system packages * Separate 
Configuration from Freescale SDN ML2 mechanism Driver * Remove @author(s) from copyright statements * Updated from global requirements * Adds ipset support for Security Groups * Add requests\_mock to test-requirements.txt * Removed kombu from requirements * Supply missing cisco\_cfg\_agent.ini file * Remove unused arg to config.setup\_logging() * Updated from global requirements 2014.2.b3 --------- * Work toward Python 3.4 support and testing * Revert "Cisco DFA ML2 Mechanism Driver" * Big Switch: Separate L3 functions into L3 service * Remove reference to cisco\_cfg\_agent.ini from setup.cfg again * Adds router service plugin for CSR1kv * Support for extensions in ML2 * Cisco DFA ML2 Mechanism Driver * Adding mechanism driver in ML2 plugin for Nuage Networks * Fix state\_path in tests * Remove ovs dependency in embrane plugin * Remove binding:profile update from Mellanox ML2 MD * Use lockutils module for tox functional env * Updated from global requirements * Refresh rpc\_backend values in unit tests to those from oslo.messaging * Add specific docs build option to tox * Fix bigswitch setup.cfg lines * Updated from global requirements * Use jsonutils instead of stdlib json * Opencontrail plug-in implementation for core resources * MLNX Agent: ensure removed ports get treated on resyncs * MLNX Agent: Process port\_update notifications in the main agent loop * Add a tox test environment for random hashseed testing * Updated from global requirements * Remove reference to cisco\_cfg\_agent.ini from setup.cfg * Removed configobj from test requirements * Updated from global requirements * Functional tests work fine with random PYTHONHASHSEED * Set python hash seed to 0 in tox.ini * Configuration agent for Cisco devices * Updated from global requirements * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2 2014.2.b2 --------- * This patch changes the name of directory from mech\_arista to arista * ML2 mechanism driver for SR-IOV capable NIC based 
switching, Part 1 * Allow to import \_LC, \_LE, \_LI and \_LW functions directly * Make readme reference git.openstack.org not github * Bump hacking to version 0.9.2 * Use auth\_token from keystonemiddleware * Remove reference to setuptools\_git * Add a gate-specific tox env for functional tests * Add CONTRIBUTING.rst * Updated from global requirements * Updated from global requirements * Updated from global requirements * Fix example for running individual tests * Switch to using of oslo.db * remove unsupported middleware * Add config for performance gate job * Introduce bulk calls for get device details * Synced log module and its dependencies from olso-incubator * don't ignore rules that are already enforced * Moved rpc\_compat.py code back into rpc.py * Updated from global requirements * Updated from global requirements * ofagent: move main module from ryu repository * Remove the useless vim modelines * Removed 'rpc' and 'notifier' incubator modules * Removed create\_rpc\_dispatcher methods * Use openstack.common.lockutils module for locks in tox functional tests * Port to oslo.messaging * Updated from global requirements * Ignore emacs checkpoint files * Configure agents using neutron.common.config.init (formerly .parse) * Added missing core\_plugins symbolic names * Introduce RpcCallback class * remove pep8 E122 exemption and correct style 2014.2.b1 --------- * remove E112 hacking exemption and fix errors * Updated from global requirements * Monkey patch threading module as early as possible * Introduced transition RPC exception types * Freescale SDN Mechanism Driver for ML2 Plugin * Remove run-time version checking for openvswitch features * Added missing plugin .ini files to setup.cfg * Updated from global requirements * Synced jsonutils from oslo-incubator * Cisco APIC ML2 mechanism driver, part 2 * NSX: get rid of the last Nicira/NVP bits * Add missing translation support * Add mailmap entry * Updated from global requirements * Remove explicit dependency 
on amqplib * eswitch\_neutron\_agent: Whitespace fixes in comments * Remove duplicate module-rgx line in .pylintrc * Fix H302 violations * Fix H302 violations in plugins package * Fix H302 violations in unit tests * Don't print duplicate messages on SystemExit * Updated from global requirements * Add physical\_network to binding:vif\_details dictionary * Updated from global requirements * Switch over to FixedIntervalLoopingCall * Exclude .ropeproject from flake8 checks * Remove mock.patch.stop from tests that inherit from BaseTestCase * Enable flake8 E711 and E712 checking * Updated from global requirements * Sync service and systemd modules from oslo-incubator * Move bash whitelisting to pep8 testenv * Fix Jenkins translation jobs * ignore build directory for pep8 * Enable hacking H301 check * Updated from global requirements 2014.1.rc1 ---------- * Remove last parts of Quantum compatibility shim * Open Juno development * Start using oslosphinx theme for docs * Fixed TypeError when creating MlnxException * Remove extra space in help string * Updated from global requirements * Add enable\_security\_group option * add HEAD sentinel file that contains migration revision * Add update binding:profile with physical\_network * Bugfix and refactoring for ovs\_lib flow methods * Removes calls to mock.patch.stopall in unit tests * Updated from global requirements * Updated from global requirements * Updated from global requirements * One Convergence Neutron Plugin l3 ext support * One Convergence Neutron Plugin Implementation * BigSwitch: Add SSL Certificate Validation 2014.1.b3 --------- * Updated from global requirements * Add OpenDaylight ML2 MechanismDriver * Implementaion of Mechanism driver for Brocade VDX cluster of switches * Implement Mellanox ML2 MechanismDriver * Remove call to addCleanup(cfg.CONF.reset) * Implement OpenFlow Agent mechanism driver * Finish off rebranding of the Nicira NVP plugin * BigSwitch: Add agent to support neutron sec groups * Adds the new 
IBM SDN-VE plugin * Updated from global requirements * Developer documentation * Change tenant network type usage for IB Fabric * Rename Neutron core/service plugins for VMware NSX * Updated from global requirements * Sync minimum requirements * Copy cache package from oslo-incubator * Add update from agent to plugin on device up * Remove dependent module py3kcompat * Add migration support from agent to NSX dhcp/metadata services * Remove psutil dependency * LBaaS: move agent based driver files into a separate dir * mailmap: update .mailmap * Return request-id in API response * Prepare for multiple cisco ML2 mech drivers * Support building wheels (PEP-427) * Use oslo.rootwrap library instead of local copy * Enables BigSwitch/Restproxy ML2 VLAN driver * Add an explicit tox job for functional tests * Base ML2 bulk support on the loaded drivers * Enable hacking H233 rule 2014.1.b2 --------- * Update RPC code from oslo * Configure plugins by name * Update lockutils and fixture in openstack.common * Rename nicira configuration elements to match new naming structure * Remove unused imports * Rename check\_nvp\_config utility tool * Corrects broken format strings in check\_i18n.py * Change default eswitchd port to avoid conflict * Updates tox.ini to use new features * Updated from global requirements * Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2 * Update common network type consts to same origin * Add fwaas\_driver.ini to setup.cfg 2014.1.b1 --------- * Add vpnaas and debug filters to setup.cfg * Add request timeout handling for Mellanox Neutron Agent * Updates .gitignore * Update Zhenguo Niu's mailmap * Replace stubout with fixtures * Ensure get\_pid\_to\_kill works with rootwrap script * Updated from global requirements * Cleanup HACKING.rst * Fix import log\_handler error with publish\_errors set * Updated from global requirements * Updated from global requirements * Fix incorrect indentations found by Pep 1.4.6+ * Cleanup and make HACKING.rst DRYer * 
Add support for managing async processes * Remove obsolete redhat-eventlet.patch * Change rpc\_support\_old\_agents default to False 2013.2.rc1 ---------- * Open Icehouse development * Updated from global requirements * Require oslo.config 1.2.0 final * Use built-in print() instead of print statement * Add l2 population base classes * Fix message i18n error * Install metering\_agent.ini and vpn\_agent.ini * fix conversion type missing * Enclose command args in with\_venv.sh * ML2 Mechanism Driver for Cisco Nexus * Reference driver implementation (IPsec) for VPNaaS * Implement ML2 port binding * Arista ML2 Mechanism driver * ML2 Mechanism Driver for Tail-f Network Control System (NCS) * Default to not capturing log output in tests * Add Neutron l3 metering agent * Add recent neutron extentions and IB support * Update mailmap * Fix wrong example in HACKING.rst * Bumps hacking to 0.7.0 * remove binaries under bin * Fixes Windows setup dependency bug * Restore Babel to requirements.txt * Remove DHCP lease logic * Remove last vestiges of nose * Updated from global requirements * Ignore pbr\*.egg directory * Fix H102, H103 Apache 2.0 license hacking check error * Remove openstack.common.exception usage * Adds Babel dependency missing from 555d27c * Fix the alphabetical order in requirement files * Remove comments from requirements.txt (workaround pbr bug) * remove netifaces dependency of ryu-agent 2013.2.b2 --------- * Add gre tunneling support for the ML2 plugin * Add VXLAN tunneling support for the ML2 plugin * xenapi - rename quantum to neutron * Fix issue with pip installing oslo.config-1.2.0 * Initial Modular L2 Mechanism Driver implementation * Add cover/ to .gitignore * fix some missing change from quantum to neutron * Rename Quantum to Neutron * Rename quantum to neutron in .gitreview * Sync install\_venv\_common from oslo * Update to use OSLO db * Require greenlet 0.3.2 (or later) * Remove single-version-externally-managed in setup.cfg * Fix 
single-version-externally-mananged typo in setup.cfg * Allow use of lowercase section names in conf files * Require pbr 0.5.16 or newer * Update to the latest stevedore * Rename agent\_loadbalancer directory to loadbalancer * Remove unit tests that are no longer run * Remove explicit distribute depend * Fix and enable H90x tests * Remove generic Exception when using assertRaises * Add \*.swo/swp to .gitignore * python3: Introduce py33 to tox.ini * Rename README to README.rst * Rename requires files to standard names * Initial Modular L2 plugin implementation * Perform a sync with oslo-incubator * update mailmap * Revert "Fix ./run\_tests.sh --pep8" * Move to pbr * Fix ./run\_tests.sh --pep8 * blueprint mellanox-quantum-plugin * Let the cover venv run individual tests * Copy the RHEL6 eventlet workaround from Oslo * Remove locals() from strings substitutions * Enable automatic validation of many HACKING rules * Shorten the path of the nicira nvp plugin * Allow pdb debugging in manually-invoked tests * Reformat openstack-common.conf * Switch to flake8 from pep8 * Parallelize quantum unit testing: * blueprint cisco-single-config * Add lbaas\_agent files to setup.py * Add VIRTUAL\_ENV key to enviroment passed to patch\_tox\_env * Sync latest Oslo components for updated copyright * Replace "OpenStack LLC" with "OpenStack Foundation" * First havana commit * remove references to netstack in setup.py * Update tox.ini to support RHEL 6.x * Add common test base class to hold common things * Pin pep8 to 1.3.3 * Add initial testr support * LBaaS Agent Reference Implementation * Add scheduling feature basing on agent management extension * Use testtools instead of unittest or unittest2 * Add midonet to setup.py * Sync latest install\_venv\_common.py with olso * Add check-nvp-config utility * Use oslo-config-2013.1b3 * Adds Brocade Plugin implementation * Synchronize code from oslo * PLUMgrid quantum plugin * Update .coveragerc * Allow tools/install\_venv\_common.py to be run 
from within the source directory * Updated to latest oslo-version code * Use install\_venv\_common.py from oslo * Cisco plugin cleanup * Use babel to generate translation file * Adds support for deploying Quantum on Windows * Add migration support to Quantum * .gitignore cleanup * Logging module cleanup * Add OVS cleanup utility * Add tox artifacts to .gitignore * Add restproxy.ini to config\_path in setup.py * Add script for checking i18n message * l3 agent rpc * Add metadata\_agent.ini to config\_path in setup.py * add metadata proxy support for Quantum Networks * Add QUANTUM\_ prefix for env used by quantum-debug * Make tox.ini run pep8 checks on bin * Explicitly include versioninfo in tarball * Import lockutils and fileutils from openstack-common * Updated openstack-common setup and version code * Add uuidutils module * Add eventlet\_backdoor and threadgroup from openstack-common * Add loopingcall from openstack-common * Added service from openstack-common * Import order clean-up * Correct Intended Audience * Add OpenStack trove classifier for PyPI * l3\_nat\_agent was renamed to l3\_agent and this was missed * Support for several HA RabbitMQ servers * add missing files from setup.py * Create .mailmap file * Implements agent for Quantum Networking testing * Create utility to clean-up netns * Update rootwrap; track changes in nova/cinder * Add lease expiration script support for dnsmasq * quantum l3 + floating IP support * NEC OpenFlow plugin support * Initial implemention of MetaPlugin * Exempt openstack-common from pep8 check * fix bug lp:1025526,update iniparser.py to accept empty value * Introduce files from openstack common * fix bug lp:1019230,update rpc from openstack-common * implement dhcp agent for quantum * Use setuptools git plugin for file inclusion * Remove paste configuration details to a seperate file. blueprint use-common-cfg * Implements the blueprint use-common-cfg for the quantum service. 
More specifically uses global CONF for the quantum.conf file * Add authZ through incorporation of policy checks * Bug #1013967 - Quantum is breaking on tests with pep 1.3 * Use openstack.common.exception * Fix up test running to match jenkins expectation * Add build\_sphinx options * Quantum should use openstack.common.jsonutils * Quantum should use openstack.common.importutils * PEP8 fixes * Parse linuxbridge plugins using openstack.common.cfg * Add HACKING.rst to tarball generation bug 1001220 * Include AUTHORS in release package * Add HACKING.rst coding style doc * bug 963152: add a few missing files to sdist tarball * Split out pip requires and aligned tox file * Fix missing files in sdist package [bug 954906] * more files missing in sdist tarball * make sure pip-requires is included in setup.py sdist * remove pep8 and strict lxml version from setup.py * plugin: introduce ryu plugin * bug 934459: pip no longer supports -E * Initial commit: nvp plugin * Cleanup the source distribution * blueprint quantum-linux-bridge-plugin * Remove quantum CLI console script * Make tox config work * Split out quantum.client and quantum.common * Quantum was missing depend on lxml * Getting ready for the client split * Removed erroneous print from setup.py * Base version.py on glance * Fix lp bug 897882 * Install a good version of pip in the venv * Rename .quantum-venv to .venv * Remove plugin pip-requires * Bug #890028 * Fix for bug 900316 * Second round of packaging changes * Changes to make pip-based tests work with jenkins * Fix for Bug #888820 - pip-requires file support for plugins * blueprint quantum-packaging * Add .gitreview config file for gerrit 2011.3 ------ * merge tyler's unit tests for cisco plugin changes lp845140 * merge salv's no-cheetah CLI branch lp 842190 * merge sumit's branch for lp837752 * merge salv's branch for bug834013 * merge salv's branch for keystone token on client bug838006 * merge rohit's db test branch: lp838318 * merge salv fix for bug 841982, 
fix minor pep8 violation * merge salv fix for bug834008 * Merging latest from lp:quantum * Merging lo:~salvatore-orlando/quantum/quantum-api-auth * syncing diverged branches * merging from lp:quantum * merging from lp:quantum * Mergin from lp:quantum * merge salv's branch to remove dummy plugin * Merging Shweta's test cases for mutliport resource * Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions * Merging from Cisco branch * Merging changes from Ying's branch (new mutliport resource) * Merging from lp:quantum * merge cisco consolidated plugin changes * Merging lp:~salvatore-orlando/quantum/bug834449 * Merging Ying's changes (minor) * merge trunk * merge trunk * merging changes from cisco consolidated branch * Merging fixes from Sumit's branch for extension API version number and to UCS inventory to associated VIF-ID with ports * Merging from the Cisco branch * Merging Shweta's fix for extensions' test cases (clean up was not happening completely) * Merging from lp:quantum * Merging Shweta's fixes in the tests for key names changes in the Core API * merge salvatore's new cli code * Merging lp:quantum, resolving conflict * merge two pep8 branch * Merging Ying's pep8 fixes * Merging quantum trunk * Merging lp:~danwent/quantum/lp834491 Fixing Bug #834491: api alignment merge broke ovs plugin (Critical) * Merging from quantum * merge cisco extensions branch * Merging Shweta's fixes to the test cases for the extensions * Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review * Syncing with Cisco extensions branch * Merging changes from Sumit's branch * Mergin Ying's branch * Merging from Sumit's branch, import ordering related changes * Merging the Cisco branch * Finishing cli work Fixing bug with XML deserialization * Merging lp:~salvatore-orlando/quantum/quantum-api-alignment * Merging 
from Sumit's branch * Merging Rohit's changes * merge latest quantum branch and resolve conflicts * Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical) * PEP8 fixes for setup.py * Made changes according to reviewer's comments. Add addtional information on extension test in README * Merging changes from Sumit's branch * Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler * Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence * Merging lp:quantum * merging with lp:quantum * merge lp:~bgh/quantum/lp837174 * Merging from Sumit's latest branch - Fixed loading of Nexus DB tables; moved imports to l2nework\_db.py; Refactoring of code to generalize inventory handling (enhancement) * Making Keystone version configurable * Merging lp:~raxnetworking/quantum/bug827272 * Merging branch: lp:~danwent/quantum/test-refactor * Merging UCS inventory state initialization fix from Sumit's branch * Fixes an issue with loading the UCS inventory when a dynamic nic has been used outside of Quantum * Pep8, pylint fixes * Merging rohit's UCS persistence support * Merging changes from Rohit's branch * Merging changes from cisco extensions * merged Shweta's branch for ext test. Minor fix for review comments * merged Shweta's ext test branch * Syncing with lp:quantum * sync up with l2network exception handling for extension * merged Cisco branch's latest changes * Adding changes from Sumit's latest merge * merge with lp:~cisco-openstack/quantum/l2network-plugin-extensions * Raising exceptions in extension resources handling (where missing). Changing exception name to QosNotFound * Mergin from Cisco branch * Merging fixes to client side exception handling. Thanks lp:tylesmit ! * Merging fixes and changes batch-config script. Thanks lp:danwent ! 
* merge with ying's branch * merging with Ying's extension branch * merging with ~cisco-openstack/quantum/l2network-plugin-extensions * fix pylint issuses * Merging bug fix for Bug 821733. Thanks lp:salvatore-orlando ! * Mering Sumit's branch with plugin support for Credentials, QoS, NovaTenant resources. Also merging latest from lp:~cisco-openstack/quantum/l2network-plugin-persistence * Merging from Sumit's branch, VIF-driver and Quantum-aware scheduler * Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum * merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions * merge trunk * Pulling in changes from lp:quantum * Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin * Merging Shweta's change to fix a function call in the test code * Merging from Sumit's branch pylint fixes and incorporating review comments * Changes to README file and merging Shweta's changes * Mergin Shweta's test changes, also README file * Mergin from cisco brach * Merging from lp:quantum * Pulling changes from Cisco branch * Merging Nexus pylint changes and other enhancements from Edgar * Merging Rohit's changes * Merging plugin and tests' changes * Pulling in changes from Rohit's branch * Pulling in changes from Shweta's branch * Merging rohit's changes * Merging: lp:~danwent/quantum/client-lib * Merging: lp:~tylesmit/quantum/api-client-fix-serialization Adding automattic serialization to all requests by moving it to do\_request * fixes from rohit's branch * from rohit's branch * Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work! 
* lp Bug#824145 : Adding a setup script for quantum * merge trunk * Merged quantum trunk * - Adding setup script * Added tests directory to list of modules in the README file * Merging changes addressing Bug # 802772. Thanks lp:danwent ! * Merging bugfix for Bug 822890 - Added License file for Quantum code distribution * L2 Network Plugin Framework merge * Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community * Merging test cases from Shwetas' branch, and further modified README file * Merging the test framework from Shweta's branch * another merge * merge * merge heckj's pip-requires fixes * Merged quantum trunk * Removing extra file in Nexus Driver * Merging changes * Merging changes from lp:quantum * Completing API spec alignment Unit tests aligned with changes in the API spec * Applying fix for bug #814518 Merging from lp:~salvatore-orlando/quantum/bug814518 * Merging the port profile client name fix * Mergin fix for Bug 818321 * Merging approved OVS plugin configuration change branch. Thanks lp:danwent ! 
* Merging the brand new Quantum-client-library feature * merging branch for bug802772, which this branch is stacked on top of * Merging lp:quantum updates * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * Merged from trunk * merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db\_test\_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network\_models/db and ucs\_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db\_conn.ini - updated database name from cisco\_naas to quantum\_l2network unit test cases ran successfully and pep8 checks done again * merge branch for to fix bug817826 * Merging the latest changes from lp:quantum * merge Salvatore's api branch with fixes for tests. 
Tweaking branch to remove unwanted bin/quantum.py as part of merge * Merging in main repo updates * Apply fix for bug #817813 Merging lp:~danwent/quantum/bug817813 * Apply fix for bug #814012 Merging lp:~danwent/quantum/bug814012 * Apply fix for bug #814517 merging lp:~tylesmit/quantum/quantum-bug-814517 * Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin * Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419 * Merging branch lp:~salvatore-orlando/quantum/bug802892 Fixing bug #802892 * Merging branch lp:~netstack/quantum/quantum-unit-tests * Fixing silly pep8 error * Merged from quantum trunk * Applying fix for bug #804237 from branch lp:~salvatore-orlando/quantum/bug804237 * Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs * Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM! * merge * Merge: bzr merge lp:~bgh/quantum/bugfixes * merge and pep8 cleanup * Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum\_testing\_framework , which has now been merged into lp:network-service * Merging pep8 and functional test related changes lp:~santhom/network-service/quantum\_testing\_framework branch * Santhosh/Vinkesh | Added the testing framework. Moved the smoketest to tests/functional * merged remote README changes * Merged Brad's ovsplugin code * Initial version of openvswitch plugin * \* Merged changes from Salvatore's branch - quantum-api-workinprogress \* Removed spurious methods from quantum\_base\_plugin class. 
\* Updated the sample plugins to be compliant with the new QuantumBase class * Adding first files for quantum API * merged salvatore's changes to local branch * Pushing initial started code based on Glance project and infrstructure work done by the melange team * Merging in Shweta's fixes from the review by Sumit * Merging in latest changes from lp:quantum * Merging in Shweta's test changes networking-mlnx-15.0.2/HACKING.rst0000644000413600001450000000252513566516767016675 0ustar lennybmtl00000000000000Neutron Style Commandments ======================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Neutron Specific Commandments -------------------------- - [N319] Validate that debug level logs are not translated - [N320] Validate that LOG messages, except debug ones, have translations - [N321] Validate that jsonutils module is used instead of json - [N322] Detect common errors with assert_called_once_with - [N323] Enforce namespace-less imports for oslo libraries Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. In the Neutron test suite, this should be done by inheriting from neutron.tests.base.BaseTestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. 
networking-mlnx-15.0.2/LICENSE0000644000413600001450000002363713566516767016113 0ustar lennybmtl00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
networking-mlnx-15.0.2/README.rst0000644000413600001450000000050713575645017016553 0ustar lennybmtl00000000000000=============== networking-mlnx =============== Networking MLNX contains the Mellanox vendor code for Openstack Neutron * Free software: Apache license * Documentation: https://wiki.openstack.org/wiki/Mellanox-Neutron * Source: https://opendev.org/x/networking-mlnx * Bugs: https://bugs.launchpad.net/networking-mlnx networking-mlnx-15.0.2/TESTING.rst0000644000413600001450000001611313575645017016733 0ustar lennybmtl00000000000000Testing Neutron ============================================================= Overview -------- The unit tests (neutron/test/unit/) are meant to cover as much code as possible and should be executed without the service running. They are designed to test the various pieces of the neutron tree to make sure any new changes don't break existing functionality. The functional tests (neutron/tests/functional/) are intended to validate actual system interaction. Mocks should be used sparingly, if at all. Care should be taken to ensure that existing system resources are not modified and that resources created in tests are properly cleaned up. Development process ------------------- It is expected that any new changes that are proposed for merge come with tests for that feature or code area. Ideally any bugs fixes that are submitted also have tests to prove that they stay fixed! In addition, before proposing for merge, all of the current tests should be passing. Virtual environments ~~~~~~~~~~~~~~~~~~~~ Testing OpenStack projects, including Neutron, is made easier with `DevStack `_. Create a machine (such as a VM or Vagrant box) running a distribution supported by DevStack and install DevStack there. For example, there is a Vagrant script for DevStack at https://github.com/bcwaldon/vagrant_devstack. .. note:: If you prefer not to use DevStack, you can still check out source code on your local machine and develop from there. 
Running unit tests ------------------ There are three mechanisms for running tests: run_tests.sh, tox, and nose2. Before submitting a patch for review you should always ensure all test pass; a tox run is triggered by the jenkins gate executed on gerrit for each patch pushed for review. With these mechanisms you can either run the tests in the standard environment or create a virtual environment to run them in. By default after running all of the tests, any pep8 errors found in the tree will be reported. With `run_tests.sh` ~~~~~~~~~~~~~~~~~~~ You can use the `run_tests.sh` script in the root source directory to execute tests in a virtualenv:: ./run_tests -V With `nose2` ~~~~~~~~~~~ You can use `nose2`_ to run individual tests, as well as use for debugging portions of your code:: source .venv/bin/activate pip install nose2 nose2 There are disadvantages to running nose2 - the tests are run sequentially, so race condition bugs will not be triggered, and the full test suite will take significantly longer than tox & testr. The upside is that testr has some rough edges when it comes to diagnosing errors and failures, and there is no easy way to set a breakpoint in the Neutron code, and enter an interactive debugging session while using testr. It is also possible to use nose2's predecessor, `nose`_, to run the tests:: source .venv/bin/activate pip install nose nosetests nose has one additional disadvantage over nose2 - it does not understand the `load_tests protocol`_ introduced in Python 2.7. This limitation will result in errors being reported for modules that depend on load_tests (usually due to use of `testscenarios`_). .. _nose2: http://nose2.readthedocs.org/en/latest/index.html .. _nose: https://nose.readthedocs.org/en/latest/index.html .. _load_tests protocol: https://docs.python.org/2/library/unittest.html#load-tests-protocol .. 
_testscenarios: https://pypi.org/project/testscenarios/ With `tox` ~~~~~~~~~~ Neutron, like other OpenStack projects, uses `tox`_ for managing the virtual environments for running test cases. It uses `Testr`_ for managing the running of the test cases. Tox handles the creation of a series of `virtualenvs`_ that target specific versions of Python (2.6, 2.7, 3.3, etc). Testr handles the parallel execution of series of test cases as well as the tracking of long-running tests and other things. Running unit tests is as easy as executing this in the root directory of the Neutron source code:: tox To run functional tests that do not require sudo privileges or specific-system dependencies:: tox -e functional To run all the functional tests in an environment that has been configured by devstack to support sudo and system-specific dependencies:: tox -e dsvm-functional For more information on the standard Tox-based test infrastructure used by OpenStack and how to do some common test/debugging procedures with Testr, see this wiki page: https://wiki.openstack.org/wiki/Testr .. _Testr: https://wiki.openstack.org/wiki/Testr .. _tox: http://tox.readthedocs.org/en/latest/ .. _virtualenvs: https://pypi.org/project/virtualenv Running individual tests ~~~~~~~~~~~~~~~~~~~~~~~~ For running individual test modules or cases, you just need to pass the dot-separated path to the module you want as an argument to it. For executing a specific test case, specify the name of the test case class separating it from the module path with a colon. For example, the following would run only the JSONV2TestCase tests from neutron/tests/unit/test_api_v2.py:: $ ./run_tests.sh neutron.tests.unit.test_api_v2.JSONV2TestCase or:: $ tox -e py27 neutron.tests.unit.test_api_v2.JSONV2TestCase Adding more tests ~~~~~~~~~~~~~~~~~ Neutron has a fast growing code base and there is plenty of areas that need to be covered by unit and functional tests. 
To get a grasp of the areas where tests are needed, you can check current coverage by running:: $ ./run_tests.sh -c Debugging --------- By default, calls to pdb.set_trace() will be ignored when tests are run. For pdb statements to work, invoke run_tests as follows:: $ ./run_tests.sh -d [test module path] It's possible to debug tests in a tox environment:: $ tox -e venv -- python -m testtools.run [test module path] Tox-created virtual environments (venv's) can also be activated after a tox run and reused for debugging:: $ tox -e venv $ . .tox/venv/bin/activate $ python -m testtools.run [test module path] Tox packages and installs the neutron source tree in a given venv on every invocation, but if modifications need to be made between invocation (e.g. adding more pdb statements), it is recommended that the source tree be installed in the venv in editable mode:: # run this only after activating the venv $ pip install --editable . Editable mode ensures that changes made to the source tree are automatically reflected in the venv, and that such changes are not overwritten during the next tox run. Post-mortem debugging ~~~~~~~~~~~~~~~~~~~~~ Setting OS_POST_MORTEM_DEBUGGER in the shell environment will ensure that the debugger .post_mortem() method will be invoked on test failure:: $ OS_POST_MORTEM_DEBUGGER=pdb ./run_tests.sh -d [test module path] Supported debuggers are pdb, and pudb. Pudb is full-screen, console-based visual debugger for Python which let you inspect variables, the stack, and breakpoints in a very visual way, keeping a high degree of compatibility with pdb:: $ ./.venv/bin/pip install pudb $ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path] References ========== .. 
[#pudb] PUDB debugger: https://pypi.org/project/pudb networking-mlnx-15.0.2/babel.cfg0000644000413600001450000000002113566516767016612 0ustar lennybmtl00000000000000[python: **.py] networking-mlnx-15.0.2/requirements.txt0000644000413600001450000000121213575645041020337 0ustar lennybmtl00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. Babel>=1.3 pbr!=2.1.0,>=2.0.0 # Apache-2.0 defusedxml>=0.5.0 # PSF eventlet!=0.18.3,>=0.18.2 # MIT netaddr>=0.7.18 # BSD pyroute2>=0.5.7 python-neutronclient>=5.1.0 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT six>=1.10.0 # MIT oslo.config>=5.2.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0 python-openstackclient>=3.3.0 # Apache-2.0 neutron-lib>=1.28.0 # Apache-2.0 neutron>=13.0.0.0b2 # Apache-2.0 pyzmq networking-mlnx-15.0.2/setup.cfg0000644000413600001450000000362113575645772016715 0ustar lennybmtl00000000000000[metadata] name = networking-mlnx summary = Mellanox Networking description-file = README.rst author = Mellanox author-email = openstack@mellanox.com home-page = http://www.mellanox.com/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = networking_mlnx [global] setup-hooks = pbr.hooks.setup_hook [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = networking-mlnx/locale/networking-mlnx.pot [compile_catalog] 
directory = networking-mlnx/locale domain = networking-mlnx [update_catalog] domain = networking-mlnx output_dir = networking-mlnx/locale input_file = networking-mlnx/locale/networking-mlnx.pot [wheel] universal = 1 [entry_points] console_scripts = neutron-mlnx-agent = networking_mlnx.cmd.eventlet.agents.mlnx_agent:main eswitchd = networking_mlnx.eswitchd.eswitch_daemon:main ebrctl = networking_mlnx.eswitchd.cli.ebrctl:main neutron.ml2.mechanism_drivers = mlnx_sdn_assist = networking_mlnx.plugins.ml2.drivers.sdn.sdn_mech_driver:SDNMechanismDriver mlnx_infiniband = networking_mlnx.plugins.ml2.drivers.mlnx.mech_mlnx:MlnxMechanismDriver neutron.db.alembic_migrations = networking-mlnx = networking_mlnx.db.migration:alembic_migrations neutron.interface_drivers = ipoib = networking_mlnx.linux.interface_drivers.interface:IPoIBInterfaceDriver multi = networking_mlnx.linux.interface_drivers.interface:MultiInterfaceDriver [egg_info] tag_build = tag_date = 0 networking-mlnx-15.0.2/setup.py0000644000413600001450000000203413575645017016573 0ustar lennybmtl00000000000000#!/usr/bin/env python # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. 
# solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) networking-mlnx-15.0.2/test-requirements.txt0000644000413600001450000000163013575645017021323 0ustar lennybmtl00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking>=1.1.0 # Apache-2.0 cliff>=1.15.0 # Apache-2.0 coverage>=3.6 discover fixtures>=1.3.1 # Apache-2.0/BSD mock>=1.2 python-subunit>=0.0.18 requests-mock>=0.7.0 # Apache-2.0 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 testrepository>=0.0.18 testtools>=1.4.0 testscenarios>=0.4 WebTest>=2.0 oslotest>=1.5.1 # Apache-2.0 testresources>=0.2.4 # Apache-2.0/BSD os-testr>=1.0.0 # Apache-2.0 ddt>=1.0.1 # MIT reno>=0.1.1 # Apache2 # Needed to run DB commands in virtualenvs flake8-import-order==0.12 # LGPLv3 pylint==1.9.2;python_version<"3.0" # GPLv2 pylint==2.3.0;python_version>="3.0" # GPLv2 bashate>=0.5.1 # Apache-2.0 flake8==2.6.2 bandit!=1.6.0,>=1.1.0 # Apache-2.0 networking-mlnx-15.0.2/tox.ini0000644000413600001450000001264713575645017016407 0ustar lennybmtl00000000000000[tox] envlist = py37,py36,py27,pep8 minversion = 2.3.2 skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY TOX_ENV_SRC_MODULES usedevelop = True install_command = pip install {opts} {packages} deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} 
-r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = sh commands = {toxinidir}/tools/pip_install_src_modules.sh "{toxinidir}" stestr run {posargs} [testenv:common] basepython = python3 # Fake job to define environment variables shared between dsvm/non-dsvm jobs setenv = OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:180} commands = false [testenv:pep8] basepython = python3 envdir = {toxworkdir}/shared deps = {[testenv]deps} commands= # If it is easier to add a check via a shell script, consider adding it in this file #{toxinidir}/tools/check_unit_test_structure.sh # Checks for coding and style guidelines flake8 sh ./tools/coding-checks.sh --pylint '{posargs}' neutron-db-manage --subproject=networking-mlnx check_migration #{[testenv:genconfig]commands} {[testenv:bashate]commands} {[testenv:bandit]commands} whitelist_externals = sh bash [testenv:debug] basepython = python3 envdir = {toxworkdir}/shared commands = oslo_debug_helper -t networking_mlnx/tests {posargs} [testenv:cover] basepython = python3 envdir = {toxworkdir}/shared setenv = {[testenv]setenv} PYTHON=coverage run --source networking-mlnx --parallel-mode commands = stestr run --no-subunit-trace {posargs} coverage combine coverage report --fail-under=82 --skip-covered coverage html -d cover coverage xml -o cover/coverage.xml [testenv:venv] basepython = python3 commands = {posargs} [testenv:docs] basepython = python3 envdir = {toxworkdir}/docs deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -W -b html doc/source doc/build/html [flake8] # E126 continuation line over-indented for hanging indent # E128 continuation line under-indented for visual indent # H405 multi line docstring summary not separated with an empty line # N530 direct neutron imports not allowed # TODO(amotoki) check the following new rules should be fixed or ignored # E731 do not assign a lambda expression, use a def # W504 line break after binary operator ignore = 
E125,E126,E128,E129,E731,H405,N530,W504 # H106: Don't put vim configuration in source files # H203: Use assertIs(Not)None to check for None # H204: Use assert(Not)Equal to check for equality # H205: Use assert(Greater|Less)(Equal) for comparison # H904: Delay string interpolations at logging calls enable-extensions = H106,H203,H204,H205,H904 show-source = true exclude = ./.*,build,dist,doc import-order-style = pep8 [hacking] import_exceptions = networking_mlnx._i18n local-check-factory = neutron_lib.hacking.checks.factory [testenv:bashate] basepython = python3 envdir = {toxworkdir}/shared commands = bash -c "find {toxinidir} \ -not \( -type d -name .tox\* -prune \) \ -not \( -type d -name .venv\* -prune \) \ -type f \ -name \*.sh \ # E005 file does not begin with #! or have a .sh prefix # E006 check for lines longer than 79 columns # E042 local declaration hides errors # E043 Arithmetic compound has inconsistent return semantics -print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043" [testenv:bandit] basepython = python3 envdir = {toxworkdir}/shared # B104: Possible binding to all interfaces # B303: blacklist calls: md5, sha1 # B311: Standard pseudo-random generators are not suitable for security/cryptographic purpose # B604: any_other_function_with_shell_equals_true deps = -r{toxinidir}/test-requirements.txt commands = bandit -r networking_mlnx -x tests -n5 -s B104,B303,B311,B604 [testenv:genconfig] basepython = python3 envdir = {toxworkdir}/shared commands = {toxinidir}/tools/generate_config_file_samples.sh # This environment can be used to quickly validate that all needed system # packages required to successfully execute test targets are installed [testenv:bindep] basepython = python3 # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files. 
deps = bindep commands = bindep test [testenv:lower-constraints] basepython = python3 setenv = OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:60} deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt [testenv:dev] # run locally (not in the gate) using editable mode # https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs commands = pip install -q -e "git+https://git.openstack.org/openstack/neutron#egg=neutron" [testenv:py3-dev] basepython = python3 commands = {[testenv:dev]commands} {[testenv]commands} [testenv:pep8-dev] basepython = python3 deps = {[testenv]deps} commands = {[testenv:dev]commands} {[testenv:pep8]commands} networking-mlnx-15.0.2/PKG-INFO0000644000413600001450000000224713575645772016174 0ustar lennybmtl00000000000000Metadata-Version: 1.1 Name: networking-mlnx Version: 15.0.2 Summary: Mellanox Networking Home-page: http://www.mellanox.com/ Author: Mellanox Author-email: openstack@mellanox.com License: UNKNOWN Description: =============== networking-mlnx =============== Networking MLNX contains the Mellanox vendor code for Openstack Neutron * Free software: Apache license * Documentation: https://wiki.openstack.org/wiki/Mellanox-Neutron * Source: https://opendev.org/x/networking-mlnx * Bugs: https://bugs.launchpad.net/networking-mlnx Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7