pax_global_header00006660000000000000000000000064131431267060014515gustar00rootroot0000000000000052 comment=a69e2f47fa0b6c8a0dc065740c16cfc8dd770094 tap-as-a-service-2.0.0/000077500000000000000000000000001314312670600145555ustar00rootroot00000000000000tap-as-a-service-2.0.0/.coveragerc000066400000000000000000000001511314312670600166730ustar00rootroot00000000000000[run] branch = True source = neutron_taas omit = neutron_taas/openstack/* [report] ignore_errors = True tap-as-a-service-2.0.0/.gitignore000066400000000000000000000007231314312670600165470ustar00rootroot00000000000000*.py[cod] # C extensions *.so # Packages *.egg *.egg-info dist build .eggs eggs parts bin var sdist develop-eggs .installed.cfg lib lib64 # Installer logs pip-log.txt # Unit test / coverage reports .coverage .tox nosetests.xml .testrepository .venv # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject # Complexity output/*.html output/*/index.html # Sphinx doc/build # pbr generates these AUTHORS ChangeLog # Editors *~ .*.swp .*sw? tap-as-a-service-2.0.0/.gitreview000066400000000000000000000001251314312670600165610ustar00rootroot00000000000000[gerrit] host=review.openstack.org port=29418 project=openstack/tap-as-a-service.git tap-as-a-service-2.0.0/.mailmap000066400000000000000000000001311314312670600161710ustar00rootroot00000000000000# Format is: # # tap-as-a-service-2.0.0/.testr.conf000066400000000000000000000005501314312670600166430ustar00rootroot00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./neutron_taas/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list tap-as-a-service-2.0.0/API_REFERENCE.rst000066400000000000000000000174201314312670600173020ustar00rootroot00000000000000============================== Tap as a Service API REFERENCE ============================== This documents is an API REFERENCE for Tap-as-a-Service Neutron extension. The documents is organized into the following sections: * TaaS Resources * API Reference * TaaS CLI Reference * Workflow TaaS Resources ============== TaaS consists of two resources, TapService and TapFlow. TapService ---------- TapService Represents the port on which the mirrored traffic is delivered. Any service (VM) that uses the mirrored data is attached to the port. .. code-block:: python 'tap_services': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'port_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, } TapFlow ------- TapFlow Represents the port from which the traffic needs to be mirrored. .. 
code-block:: python 'tap_flows': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'tap_service_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'required_by_policy': True, 'is_visible': True}, 'source_port': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'required_by_policy': True, 'is_visible': True}, 'direction': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': direction_enum}, 'is_visible': True} } direction_enum = ['IN', 'OUT', 'BOTH'] Multiple TapFlow instances can be associated with a single TapService instance. API REFERENCE ============= Below is the list of REST APIs that can be used to interact with TaaS Neutron extension 1. Create TapService \ **POST /v2.0/taas/tap_services** \ Json Request: .. code-block:: python { "tap_service": { "description": "Test_Tap", "name": "Test", "port_id": "c9beb5a1-21f5-4225-9eaf-02ddccdd50a9", "tenant_id": "97e1586d580745d7b311406697aaf097" } } \ Json Response: .. code-block:: python { "tap_service": { "description": "Test_Tap", "id": "c352f537-ad49-48eb-ab05-1c6b8cb900ff", "name": "Test", "port_id": "c9beb5a1-21f5-4225-9eaf-02ddccdd50a9", "tenant_id": "97e1586d580745d7b311406697aaf097" } } 2. List TapServices \ **GET /v2.0/taas/tap_services/{tap_service_uuid}** \ Json Response: .. code-block:: python { "tap_services": [ { "description": "Test_Tap", "id": "c352f537-ad49-48eb-ab05-1c6b8cb900ff", "name": "Test", "port_id": "c9beb5a1-21f5-4225-9eaf-02ddccdd50a9", "tenant_id": "97e1586d580745d7b311406697aaf097" } ] } 3. Delete TapService \ **DELETE /v2.0/taas/tap_services/{tap_service_uuid}** \ 4. Create TapFlow \ **POST /v2.0/taas/tap_flows** \ Json Request: .. code-block:: python { "tap_flow": { "description": "Test_flow1", "direction": "BOTH", "name": "flow1", "source_port": "775a58bb-e2c6-4529-a918-2f019169b5b1", "tap_service_id": "69bd12b2-0e13-45ec-9045-b674fd9f0468", "tenant_id": "97e1586d580745d7b311406697aaf097" } } \ Json Response: .. code-block:: python { "tap_flow": { "description": "Test_flow1", "direction": "BOTH", "id": "cc47f881-345f-4e62-ad24-bea79eb28304", "name": "flow1", "source_port": "775a58bb-e2c6-4529-a918-2f019169b5b1", "tap_service_id": "69bd12b2-0e13-45ec-9045-b674fd9f0468", "tenant_id": "97e1586d580745d7b311406697aaf097" } } 5. List TapFlows \ **GET /v2.0/taas/tap_flows/{tap_flow_uuid}** \ Json Response: .. code-block:: python { "tap_flows": [ { "description": "Test_flow1", "direction": "BOTH", "id": "cc47f881-345f-4e62-ad24-bea79eb28304", "name": "flow1", "source_port": "775a58bb-e2c6-4529-a918-2f019169b5b1", "tap_service_id": "c352f537-ad49-48eb-ab05-1c6b8cb900ff", "tenant_id": "97e1586d580745d7b311406697aaf097" } ] } 6. Delete TapFlow \ **DELETE /v2.0/taas/tap_flows/{tap_flow_uuid}** \ TaaS CLI Reference ================== The TaaS commands can be executed using TaaS CLI, which is integrated with neutron. It can be used to send REST request and interact with the TaaS extension. Given below are the detail of the CLIs: - **neutron tap-service-create**: Creates a Tap service. 
- **neutron tap-service-list**: Lists all the Tap services. - **neutron tap-service-show**: Show the details for a Tap service. - **neutron tap-service-update**: Update the information for a Tap service. - **neutron tap-service-delete**: Delete an existing Tap service. - **neutron tap-flow-create**: Creates a Tap flow. - **neutron tap-flow-list**: Lists all the Tap flows. - **neutron tap-flow-show**: Show the details for a Tap flow. - **neutron tap-flow-update**: Update the information for a Tap flow. - **neutron tap-flow-delete**: Delete an existing Tap flow. For usage type **--help** after any of the above commands in the terminal after TaaS has been installed. Workflow ========= In this section we describe a simple sequence of steps to use TaaS. Workflow Sequence ------------------ 1. Create a Neutron port with 'port_security_enabled' set to 'false'. 2. Launch a VM (VM on which you want to monitor/receive the mirrored data). Associate the Neutron port created in step 1 while creating the VM. 3. Using Neutron Client command for TaaS **neutron tap-service-create** or via REST APIs create a Tap Service instance by associating the port created in step 1. 4. Using Neutron Client command for TaaS **neutron tap-flow-create** or via REST APIs create a Tap Flow instance by associating the Tap Service instance created in step 3 and the target Neutron port from which you want to mirror traffic (assuming the Neutron port from which the traffic needs to be monitored already exists.) Mirroring can be done for both incoming and/or outgoing traffic from the target Neutron port. 5. Observe the mirrored traffic on the monitoring VM by running tools such as tcpdump. tap-as-a-service-2.0.0/CONTRIBUTING.rst000066400000000000000000000012241314312670600172150ustar00rootroot00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/tap-as-a-service tap-as-a-service-2.0.0/HACKING.rst000066400000000000000000000002501314312670600163500ustar00rootroot00000000000000tap-as-a-service Style Commandments =============================================== Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ tap-as-a-service-2.0.0/INSTALL.rst000066400000000000000000000016151314312670600164200ustar00rootroot00000000000000=================================== Tap as a Service installation guide =================================== This is the installation guide for enabling Tap-as-a-Service(TaaS) feature in OpenStack Neutron We have tested TaaS with latest version DevStack running on Ubuntu 12.04 and 14.04. TaaS is currently under active development and we will update you of new features and capabilities as and when they become available. Feel free to approach us with any issues related to installing or using TaaS. Dependencies ============ TaaS requires the 'Port Security' Neutron ML2 extension. Please make sure that this extension has been enabled. 
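If you want to verify this on an existing deployment, list the loaded Neutron extensions and look for the ``port-security`` alias (the example below uses the legacy ``neutron`` CLI, the same client used elsewhere in this documentation)::

    neutron ext-list | grep port-security
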
Adding the following to 'local.conf' while installing DevStack will enable 'Port Security' extension. (It's enabled by default) Q_ML2_PLUGIN_EXT_DRIVERS=port_security Installation ============ You can use DevStack external plugin. See `devstack/README.rst`. tap-as-a-service-2.0.0/LICENSE000066400000000000000000000236371314312670600155750ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. tap-as-a-service-2.0.0/MANIFEST.in000066400000000000000000000003541314312670600163150ustar00rootroot00000000000000include AUTHORS include ChangeLog include neutron_taas/db/migration/alembic_migration/script.py.mako recursive-include neutron_taas/db/migration/alembic_migration/versions * exclude .gitignore exclude .gitreview global-exclude *.pyc tap-as-a-service-2.0.0/README.rst000066400000000000000000000023041314312670600162430ustar00rootroot00000000000000================ Tap as a Service ================ Tap-as-a-Service (TaaS) is an extension to the OpenStack network service (Neutron). It provides remote port mirroring capability for tenant virtual networks. Port mirroring involves sending a copy of packets entering and/or leaving one port to another port, which is usually different from the original destinations of the packets being mirrored. 
This service has been primarily designed to help tenants (or the cloud administrator) debug complex virtual networks and gain visibility into their VMs, by monitoring the network traffic associated with them. TaaS honors tenant boundaries and its mirror sessions are capable of spanning across multiple compute and network nodes. It serves as an essential infrastructure component that can be utilized for supplying data to a variety of network analytics and security applications (e.g. IDS). * Free software: Apache license * API Reference: https://github.com/openstack/tap-as-a-service/blob/master/API_REFERENCE.rst * Source: https://git.openstack.org/cgit/openstack/tap-as-a-service * Bugs: https://bugs.launchpad.net/tap-as-a-service For installing Tap-as-a-Service with Devstack please read the INSTALL.rst file tap-as-a-service-2.0.0/babel.cfg000066400000000000000000000000211314312670600162740ustar00rootroot00000000000000[python: **.py] tap-as-a-service-2.0.0/devstack/000077500000000000000000000000001314312670600163615ustar00rootroot00000000000000tap-as-a-service-2.0.0/devstack/README.rst000066400000000000000000000005531314312670600200530ustar00rootroot00000000000000======================== DevStack external plugin ======================== A `local.conf` recipe to enable tap-as-a-service:: [[local|localrc]] enable_plugin tap-as-a-service https://github.com/openstack/tap-as-a-service enable_service taas TAAS_SERVICE_DRIVER=TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc.TaasRpcDriver:default tap-as-a-service-2.0.0/devstack/devstackgaterc000066400000000000000000000027271314312670600213060ustar00rootroot00000000000000# Copyright 2015 Midokura SARL # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This script is executed in the OpenStack CI job that runs DevStack + tempest. # You can find the CI job configuration here: # # http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/tap-as-a-service.yaml # OVERRIDE_ENABLED_SERVICES=key,mysql,rabbit OVERRIDE_ENABLED_SERVICES+=,g-api,g-reg OVERRIDE_ENABLED_SERVICES+=,n-api,n-cond,n-cpu,n-crt,n-sch,placement-api OVERRIDE_ENABLED_SERVICES+=,n-api-meta OVERRIDE_ENABLED_SERVICES+=,q-agt,q-dhcp,q-l3,q-meta,q-metering,q-svc,quantum OVERRIDE_ENABLED_SERVICES+=,taas,taas_openvswitch_agent OVERRIDE_ENABLED_SERVICES+=,tempest,dstat export OVERRIDE_ENABLED_SERVICES # Begin list of exclusions. r="^(?!.*" # exclude the slow tag (part of the default for 'full') r="$r(?:.*\[.*\bslow\b.*\])" # End list of exclusions. r="$r)" r="$r(tempest\.(api.network\.|scenario.test_network)|neutron_taas\.).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" tap-as-a-service-2.0.0/devstack/plugin.sh000066400000000000000000000046441314312670600202230ustar00rootroot00000000000000#!/bin/bash # Copyright 2015 Midokura SARL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script is meant to be sourced from devstack. It is a wrapper of # devmido scripts that allows proper exporting of environment variables. function install_taas { pip_install --no-deps --editable $TAAS_PLUGIN_PATH } function configure_taas_plugin { if [ ! -d $NEUTRON_CONF_DIR ]; then _create_neutron_conf_dir fi cp $TAAS_PLUGIN_PATH/etc/taas_plugin.ini $TAAS_PLUGIN_CONF_FILE neutron_server_config_add $TAAS_PLUGIN_CONF_FILE _neutron_service_plugin_class_add taas } if is_service_enabled taas; then if [[ "$1" == "stack" ]]; then if [[ "$2" == "pre-install" ]]; then : elif [[ "$2" == "install" ]]; then install_taas configure_taas_plugin elif [[ "$2" == "post-config" ]]; then neutron-db-manage --subproject tap-as-a-service upgrade head echo "Configuring taas" if [ "$TAAS_SERVICE_DRIVER" ]; then inicomment $TAAS_PLUGIN_CONF_FILE service_providers service_provider iniadd $TAAS_PLUGIN_CONF_FILE service_providers service_provider $TAAS_SERVICE_DRIVER fi elif [[ "$2" == "extra" ]]; then : fi elif [[ "$1" == "unstack" ]]; then : fi fi if is_service_enabled q-agt neutron-agent; then if [[ "$1" == "stack" ]]; then if [[ "$2" == "pre-install" ]]; then : elif [[ "$2" == "install" ]]; then install_taas elif [[ "$2" == "post-config" ]]; then if is_service_enabled q-agt neutron-agent; then source $NEUTRON_DIR/devstack/lib/l2_agent plugin_agent_add_l2_agent_extension taas configure_l2_agent fi elif [[ "$2" == "extra" ]]; then : fi elif [[ "$1" == "unstack" ]]; then : fi fi tap-as-a-service-2.0.0/devstack/settings000066400000000000000000000004361314312670600201470ustar00rootroot00000000000000# Devstack settings ABSOLUTE_PATH=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd) TAAS_PLUGIN_PATH=$ABSOLUTE_PATH/.. TAAS_PLUGIN_CONF_FILE="/etc/neutron/taas_plugin.ini" TAAS_OVS_AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-taas-openvswitch-agent" TAAS_OVS_AGENT_CONF_FILE="/etc/neutron/taas.ini" tap-as-a-service-2.0.0/doc/000077500000000000000000000000001314312670600153225ustar00rootroot00000000000000tap-as-a-service-2.0.0/doc/source/000077500000000000000000000000001314312670600166225ustar00rootroot00000000000000tap-as-a-service-2.0.0/doc/source/api_reference.rst000066400000000000000000000000451314312670600221420ustar00rootroot00000000000000.. include:: ../../API_REFERENCE.rst tap-as-a-service-2.0.0/doc/source/conf.py000066400000000000000000000046401314312670600201250ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'oslosphinx' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'tap-as-a-service' copyright = u'2013, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. #intersphinx_mapping = {'http://docs.python.org/': None} tap-as-a-service-2.0.0/doc/source/contributing.rst000066400000000000000000000001131314312670600220560ustar00rootroot00000000000000============ Contributing ============ .. include:: ../../CONTRIBUTING.rst tap-as-a-service-2.0.0/doc/source/index.rst000066400000000000000000000010531314312670600204620ustar00rootroot00000000000000.. tap-as-a-service documentation master file, created by sphinx-quickstart on Tue Jul 9 22:26:36 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to tap-as-a-service's documentation! ======================================================== Contents: .. toctree:: :maxdepth: 2 readme installation api_reference contributing specs/index presentations Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` tap-as-a-service-2.0.0/doc/source/installation.rst000066400000000000000000000000371314312670600220550ustar00rootroot00000000000000.. include:: ../../INSTALL.rst tap-as-a-service-2.0.0/doc/source/presentations.rst000066400000000000000000000015711314312670600222560ustar00rootroot00000000000000============= Presentations ============= - `Tap-As-A-Service What You Need to Know Now `_ 40 min presentation at OpenStack Summit Austin, April 2016, including a demo with Horizon. - `Using Open Source Security Architecture to Defend against Targeted Attacks` 40 min presentation at OpenStack Summit Austin, April 2016, including IDS/IPS use cases and a demo with snort. - `Tap-as-a-Service (TaaS): Port Monitoring for Neutron Networks `_ 40 min presentation at OpenStack Summit Vancouver, May 2015, including a demo. 
tap-as-a-service-2.0.0/doc/source/readme.rst000066400000000000000000000000361314312670600206100ustar00rootroot00000000000000.. include:: ../../README.rst tap-as-a-service-2.0.0/doc/source/specs000077700000000000000000000000001314312670600213432../../specsustar00rootroot00000000000000tap-as-a-service-2.0.0/etc/000077500000000000000000000000001314312670600153305ustar00rootroot00000000000000tap-as-a-service-2.0.0/etc/taas.ini000066400000000000000000000002151314312670600167570ustar00rootroot00000000000000[taas] driver = neutron_taas.services.taas.drivers.linux.ovs_taas.OvsTaasDriver enabled = True vlan_range_start = 3000 vlan_range_end = 3500 tap-as-a-service-2.0.0/etc/taas_plugin.ini000066400000000000000000000003751314312670600203440ustar00rootroot00000000000000[DEFAULT] [service_providers] # Defines providers for advanced services using the format: # ::[:default] (multi valued) service_provider = TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc.TaasRpcDriver:default tap-as-a-service-2.0.0/neutron_taas/000077500000000000000000000000001314312670600172575ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/__init__.py000066400000000000000000000000001314312670600213560ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/_i18n.py000066400000000000000000000020061314312670600205450ustar00rootroot00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = 'neutron_taas' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) tap-as-a-service-2.0.0/neutron_taas/common/000077500000000000000000000000001314312670600205475ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/common/__init__.py000066400000000000000000000000001314312670600226460ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/common/constants.py000066400000000000000000000012101314312670600231270ustar00rootroot00000000000000# Copyright (C) 2015 Midokura SARL. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. TAAS = 'TAAS' tap-as-a-service-2.0.0/neutron_taas/common/topics.py000066400000000000000000000013531314312670600224240ustar00rootroot00000000000000# Copyright (C) 2015 Midokura SARL. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(yamamoto): Move these to neutron.common.topics TAAS_PLUGIN = 'n-taas-plugin' TAAS_AGENT = 'n-taas_agent' tap-as-a-service-2.0.0/neutron_taas/db/000077500000000000000000000000001314312670600176445ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/__init__.py000066400000000000000000000000001314312670600217430ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/head.py000066400000000000000000000013671314312670600211260ustar00rootroot00000000000000# Copyright 2016 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db.migration.models import head from neutron_taas.db import taas_db # noqa def get_metadata(): return head.model_base.BASEV2.metadata tap-as-a-service-2.0.0/neutron_taas/db/migration/000077500000000000000000000000001314312670600216355ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/__init__.py000066400000000000000000000000001314312670600237340ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/000077500000000000000000000000001314312670600253025ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/README000066400000000000000000000000461314312670600261620ustar00rootroot00000000000000Generic single-database configuration.tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/__init__.py000066400000000000000000000000001314312670600274010ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/env.py000066400000000000000000000046121314312670600264470ustar00rootroot00000000000000# Copyright 2015 Midokura SARL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
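# Alembic migration environment for the tap-as-a-service subproject. It
# reuses the neutron server configuration (database connection, MySQL
# storage engine) but records applied revisions in a dedicated version
# table, TAAS_VERSION_TABLE ('alembic_version_taas'), so that TaaS
# migrations are tracked independently of core Neutron's.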
from logging import config as logging_config from neutron_lib.db import model_base from alembic import context from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event MYSQL_ENGINE = None TAAS_VERSION_TABLE = 'alembic_version_taas' config = context.config neutron_config = config.neutron_config logging_config.fileConfig(config.config_file_name) target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def run_migrations_offline(): set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['version_table'] = TAAS_VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, version_table=TAAS_VERSION_TABLE ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/script.py.mako000066400000000000000000000006371314312670600301140ustar00rootroot00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} %endif from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/000077500000000000000000000000001314312670600271525ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/CONTRACT_HEAD000066400000000000000000000000151314312670600310670ustar00rootroot00000000000000bac61f603e39 tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/EXPAND_HEAD000066400000000000000000000000151314312670600306310ustar00rootroot00000000000000fddbdec8711a tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/000077500000000000000000000000001314312670600304645ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/contract/000077500000000000000000000000001314312670600323015ustar00rootroot000000000000001817af933379_remove_network_id_from_tap_service.py000066400000000000000000000016721314312670600434240ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/contract# Copyright (c) 2016 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove network-id from tap-service Revision ID: 1817af933379 Revises: 80c85b675b6e Create Date: 2016-04-05 21:59:28.829793 """ # revision identifiers, used by Alembic. revision = '1817af933379' down_revision = '80c85b675b6e' from alembic import op def upgrade(): op.drop_column('tap_services', 'network_id') 2ecce0368a62_add_foreign_key_constraint_on_tap_id_association.py000066400000000000000000000021121314312670600464500ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/contract# Copyright 2016 Midokura SARL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """add foreign key constraint on tap id association Revision ID: 2ecce0368a62 Revises: 1817af933379 Create Date: 2016-05-19 11:39:52.892610 """ # revision identifiers, used by Alembic. revision = '2ecce0368a62' down_revision = '1817af933379' from alembic import op def upgrade(): op.create_foreign_key( constraint_name=None, source_table='tap_id_associations', referent_table='tap_services', local_cols=['tap_service_id'], remote_cols=['id'], ondelete='CASCADE') 4086b3cffc01_rename_tenant_to_project.py000066400000000000000000000054671314312670600415510ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/contract# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """rename tenant to project Revision ID: 4086b3cffc01 Revises: 2ecce0368a62 Create Date: 2016-07-30 22:09:16.372917 """ # revision identifiers, used by Alembic. revision = '4086b3cffc01' down_revision = '2ecce0368a62' from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection from neutron.db import migration _INSPECTOR = None # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.NEWTON, migration.OCATA] def get_inspector(): """Reuse inspector.""" global _INSPECTOR if _INSPECTOR: return _INSPECTOR else: bind = op.get_bind() _INSPECTOR = reflection.Inspector.from_engine(bind) return _INSPECTOR def get_tables(): """Returns hardcoded list of tables which have ``tenant_id`` column. 
The list is hard-coded to match the state of the schema when this upgrade script is run. """ tables = [ 'tap_services', 'tap_flows', ] return tables def get_columns(table): """Returns list of columns for given table.""" inspector = get_inspector() return inspector.get_columns(table) def get_data(): """Returns combined list of tuples: [(table, column)]. The list is built from tables with a tenant_id column. """ output = [] tables = get_tables() for table in tables: columns = get_columns(table) for column in columns: if column['name'] == 'tenant_id': output.append((table, column)) return output def alter_column(table, column): old_name = 'tenant_id' new_name = 'project_id' op.alter_column( table_name=table, column_name=old_name, new_column_name=new_name, existing_type=column['type'], existing_nullable=column['nullable'] ) def upgrade(): data = get_data() for table, column in data: alter_column(table, column) def contract_creation_exceptions(): """Special migration for the blueprint to support Keystone V3. We drop all tenant_id columns and create project_id columns instead. """ return { sa.Column: ['.'.join([table, 'project_id']) for table in get_tables()], sa.Index: get_tables() } 80c85b675b6e_initial_newton_no_op_contract_script.py000066400000000000000000000016711314312670600441270ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/contract# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """initial Newton no op contract script Revision ID: 80c85b675b6e Revises: start_neutron_taas Create Date: 2016-05-06 04:58:04.510568 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = '80c85b675b6e' down_revision = 'start_neutron_taas' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/expand/000077500000000000000000000000001314312670600317435ustar00rootroot0000000000000004625466c6fa_initial_newton_no_op_expand_script.py000066400000000000000000000016651314312670600431460ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/expand# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """initial Newton no op expand script Revision ID: 04625466c6fa Revises: start_neutron_taas Create Date: 2016-05-06 05:17:30.172181 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '04625466c6fa' down_revision = 'start_neutron_taas' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): pass fddbdec8711a_add_status.py000066400000000000000000000026761314312670600365210ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/newton/expand# Copyright 2016 FUJITSU LABORATORIES LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add status Revision ID: fddbdec8711a Revises: 04625466c6fa Create Date: 2016-06-06 10:54:42.252898 """ # revision identifiers, used by Alembic. revision = 'fddbdec8711a' down_revision = '04625466c6fa' from alembic import op from neutron.db import migration from neutron_lib import constants import sqlalchemy as sa # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.NEWTON, migration.OCATA] def upgrade(): op.add_column('tap_services', sa.Column('status', sa.String(16), server_default=constants.ACTIVE, nullable=False)) op.add_column('tap_flows', sa.Column('status', sa.String(16), server_default=constants.ACTIVE, nullable=False)) tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/pike/000077500000000000000000000000001314312670600301025ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/pike/contract/000077500000000000000000000000001314312670600317175ustar00rootroot00000000000000bac61f603e39_alter_tap_id_associations_to_support_tap_id_reuse.py000066400000000000000000000032611314312670600463370ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/pike/contract# Copyright 2016-17 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Alter TapIdAssociations to support tap id reuse Revision ID: bac61f603e39 Revises: 4086b3cffc01 Create Date: 2016-07-27 09:31:54.200165 """ # revision identifiers, used by Alembic. 
revision = 'bac61f603e39' down_revision = '4086b3cffc01' from alembic import op from sqlalchemy.engine import reflection import sqlalchemy as sa TABLE_NAME = 'tap_id_associations' def upgrade(): inspector = reflection.Inspector.from_engine(op.get_bind()) fk_constraints = inspector.get_foreign_keys(TABLE_NAME) for fk in fk_constraints: op.drop_constraint(fk['name'], TABLE_NAME, type_='foreignkey') op.create_foreign_key('fk_tap_id_assoc_tap_service', TABLE_NAME, 'tap_services', ['tap_service_id'], ['id'], ondelete='SET NULL') op.alter_column(TABLE_NAME, 'taas_id', autoincrement=False, existing_type=sa.INTEGER, nullable=False) op.alter_column(TABLE_NAME, 'tap_service_id', existing_type=sa.String(36), nullable=True) op.create_unique_constraint('unique_taas_id', TABLE_NAME, ['taas_id']) tap-as-a-service-2.0.0/neutron_taas/db/migration/alembic_migration/versions/start_neutron_taas.py000066400000000000000000000015731314312670600334510ustar00rootroot00000000000000# Copyright 2015 Midokura SARL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """start neutron-taas chain Revision ID: start_neutron_taas Revises: None Create Date: 2015-11-11 02:36:00.209301 """ # revision identifiers, used by Alembic. revision = 'start_neutron_taas' down_revision = None from neutron_taas.db.migration import taas_init_ops def upgrade(): taas_init_ops.upgrade() tap-as-a-service-2.0.0/neutron_taas/db/migration/taas_init_ops.py000066400000000000000000000041741314312670600250510ustar00rootroot00000000000000# Copyright 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
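# (This module creates the three original TaaS tables -- tap_services,
# tap_flows and tap_id_associations -- and is invoked by the initial
# 'start_neutron_taas' revision.)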
# # Initial schema operations for Tap-as-a-Service service plugin from alembic import op import sqlalchemy as sa direction_types = sa.Enum('IN', 'OUT', 'BOTH', name='tapflows_direction') def upgrade(): op.create_table( 'tap_services', sa.Column('id', sa.String(length=36), primary_key=True, nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('port_id', sa.String(36), nullable=False), sa.Column('network_id', sa.String(36), nullable=True)) op.create_table( 'tap_flows', sa.Column('id', sa.String(length=36), primary_key=True, nullable=False), sa.Column('tenant_id', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=255), nullable=True), sa.Column('description', sa.String(length=1024), nullable=True), sa.Column('tap_service_id', sa.String(length=36), sa.ForeignKey("tap_services.id", ondelete="CASCADE"), nullable=False), sa.Column('source_port', sa.String(length=36), nullable=False), sa.Column('direction', direction_types, nullable=False)) op.create_table( 'tap_id_associations', sa.Column('tap_service_id', sa.String(length=36)), sa.Column('taas_id', sa.INTEGER, primary_key=True, autoincrement=True)) tap-as-a-service-2.0.0/neutron_taas/db/taas_db.py000066400000000000000000000257241314312670600216250ustar00rootroot00000000000000# Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
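# Database models and mixin for the Tap-as-a-Service service plugin:
#   * TapService and TapFlow back the API resources defined in
#     neutron_taas.extensions.taas.
#   * TapIdAssociation maps a tap service to an integer taas_id used by
#     the agents, allocated from the [taas] vlan_range_start /
#     vlan_range_end options.
#   * Taas_db_Mixin provides the create/get/update/delete entry points
#     on top of these models.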
import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from neutron.db import common_db_mixin as base_db from neutron_lib import constants from neutron_lib.db import model_base from neutron_lib.plugins import directory from neutron_taas.extensions import taas from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils LOG = logging.getLogger(__name__) class TapService(model_base.BASEV2, model_base.HasId, model_base.HasProjectNoIndex): # Represents a V2 TapService Object __tablename__ = 'tap_services' name = sa.Column(sa.String(255), nullable=True) description = sa.Column(sa.String(1024), nullable=True) port_id = sa.Column(sa.String(36), nullable=False) status = sa.Column(sa.String(16), nullable=False, server_default=constants.ACTIVE) class TapFlow(model_base.BASEV2, model_base.HasId, model_base.HasProjectNoIndex): # Represents a V2 TapFlow Object __tablename__ = 'tap_flows' name = sa.Column(sa.String(255), nullable=True) description = sa.Column(sa.String(1024), nullable=True) tap_service_id = sa.Column(sa.String(36), sa.ForeignKey("tap_services.id", ondelete="CASCADE"), nullable=False) source_port = sa.Column(sa.String(36), nullable=False) direction = sa.Column(sa.Enum('IN', 'OUT', 'BOTH', name='tapflows_direction'), nullable=False) status = sa.Column(sa.String(16), nullable=False, server_default=constants.ACTIVE) class TapIdAssociation(model_base.BASEV2): # Internal mapping between a TAP Service and # id to be used by the Agents __tablename__ = 'tap_id_associations' tap_service_id = sa.Column(sa.String(36), sa.ForeignKey("tap_services.id", ondelete='SET NULL'), nullable=True) taas_id = sa.Column(sa.Integer, primary_key=True, unique=True) tap_service = orm.relationship( TapService, backref=orm.backref("tap_service_id", lazy="joined"), primaryjoin='TapService.id==TapIdAssociation.tap_service_id') class Taas_db_Mixin(taas.TaasPluginBase, base_db.CommonDbMixin): def _core_plugin(self): return directory.get_plugin() def _get_tap_service(self, context, id): try: return self._get_by_id(context, TapService, id) except exc.NoResultFound: raise taas.TapServiceNotFound(tap_id=id) def _get_tap_id_association(self, context, tap_service_id): try: query = self._model_query(context, TapIdAssociation) return query.filter(TapIdAssociation.tap_service_id == tap_service_id).one() except exc.NoResultFound: raise taas.TapServiceNotFound(tap_id=tap_service_id) def _get_tap_flow(self, context, id): try: return self._get_by_id(context, TapFlow, id) except Exception: raise taas.TapFlowNotFound(flow_id=id) def _make_tap_service_dict(self, tap_service, fields=None): res = {'id': tap_service['id'], 'tenant_id': tap_service['tenant_id'], 'name': tap_service['name'], 'description': tap_service['description'], 'port_id': tap_service['port_id'], 'status': tap_service['status']} return self._fields(res, fields) def _make_tap_id_association_dict(self, tap_id_association): res = {'tap_service_id': tap_id_association['tap_service_id'], 'taas_id': tap_id_association['taas_id']} return res def _make_tap_flow_dict(self, tap_flow, fields=None): res = {'id': tap_flow['id'], 'tenant_id': tap_flow['tenant_id'], 'tap_service_id': tap_flow['tap_service_id'], 'name': tap_flow['name'], 'description': tap_flow['description'], 'source_port': tap_flow['source_port'], 'direction': tap_flow['direction'], 'status': tap_flow['status']} return self._fields(res, fields) def create_tap_service(self, context, tap_service): LOG.debug("create_tap_service() called") t_s = 
tap_service['tap_service'] tenant_id = t_s['tenant_id'] with context.session.begin(subtransactions=True): tap_service_db = TapService( id=uuidutils.generate_uuid(), tenant_id=tenant_id, name=t_s['name'], description=t_s['description'], port_id=t_s['port_id'], status=constants.ACTIVE, ) context.session.add(tap_service_db) return self._make_tap_service_dict(tap_service_db) def _rebuild_taas_id_allocation_range(self, context): query = context.session.query( TapIdAssociation).all() allocate_taas_id_list = [_q.taas_id for _q in query] first_taas_id = cfg.CONF.taas.vlan_range_start # Exclude range end last_taas_id = cfg.CONF.taas.vlan_range_end all_taas_id_set = set(range(first_taas_id, last_taas_id)) vaild_taas_id_set = all_taas_id_set - set(allocate_taas_id_list) for _id in vaild_taas_id_set: # new taas id context.session.add(TapIdAssociation( taas_id=_id)) def _allocate_taas_id_with_tap_service_id(self, context, tap_service_id): query = context.session.query(TapIdAssociation).filter_by( tap_service_id=None).first() if not query: self._rebuild_taas_id_allocation_range(context) # try again query = context.session.query(TapIdAssociation).filter_by( tap_service_id=None).first() if query: query.update({"tap_service_id": tap_service_id}) return query # not found raise taas.TapServiceLimitReached() def create_tap_id_association(self, context, tap_service_id): LOG.debug("create_tap_id_association() called") # create the TapIdAssociation object with context.session.begin(subtransactions=True): # allocate Taas id. # if conflict happened, it will raise db.DBDuplicateEntry. # this will be retry request again in neutron controller framework. # so we just make sure TapIdAssociation field taas_id is unique tap_id_association_db = self._allocate_taas_id_with_tap_service_id( context, tap_service_id) return self._make_tap_id_association_dict(tap_id_association_db) def create_tap_flow(self, context, tap_flow): LOG.debug("create_tap_flow() called") t_f = tap_flow['tap_flow'] tenant_id = t_f['tenant_id'] # TODO(Vinay): Check for the tenant_id validation # TODO(Vinay): Check for the source port validation with context.session.begin(subtransactions=True): tap_flow_db = TapFlow( id=uuidutils.generate_uuid(), tenant_id=tenant_id, name=t_f['name'], description=t_f['description'], tap_service_id=t_f['tap_service_id'], source_port=t_f['source_port'], direction=t_f['direction'], status=constants.ACTIVE, ) context.session.add(tap_flow_db) return self._make_tap_flow_dict(tap_flow_db) def delete_tap_service(self, context, id): LOG.debug("delete_tap_service() called") count = context.session.query(TapService).filter_by(id=id).delete() if not count: raise taas.TapServiceNotFound(tap_id=id) def delete_tap_flow(self, context, id): LOG.debug("delete_tap_flow() called") count = context.session.query(TapFlow).filter_by(id=id).delete() if not count: raise taas.TapFlowNotFound(flow_id=id) def get_tap_service(self, context, id, fields=None): LOG.debug("get_tap_service() called") t_s = self._get_tap_service(context, id) return self._make_tap_service_dict(t_s, fields) def get_tap_id_association(self, context, tap_service_id): LOG.debug("get_tap_id_association() called") t_a = self._get_tap_id_association(context, tap_service_id) return self._make_tap_id_association_dict(t_a) def get_tap_flow(self, context, id, fields=None): LOG.debug("get_tap_flow() called") t_f = self._get_tap_flow(context, id) return self._make_tap_flow_dict(t_f, fields) def get_tap_services(self, context, filters=None, fields=None, sorts=None, limit=None, 
marker=None, page_reverse=False): LOG.debug("get_tap_services() called") return self._get_collection(context, TapService, self._make_tap_service_dict, filters=filters, fields=fields) def get_tap_flows(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): LOG.debug("get_tap_flows() called") return self._get_collection(context, TapFlow, self._make_tap_flow_dict, filters=filters, fields=fields) def _get_port_details(self, context, port_id): with context.session.begin(subtransactions=True): port = self._core_plugin().get_port(context, port_id) return port def update_tap_service(self, context, id, tap_service): LOG.debug("update_tap_service() called") t_s = tap_service['tap_service'] with context.session.begin(subtransactions=True): tap_service_db = self._get_tap_service(context, id) tap_service_db.update(t_s) return self._make_tap_service_dict(tap_service_db) def update_tap_flow(self, context, id, tap_flow): LOG.debug("update_tap_flow() called") t_f = tap_flow['tap_flow'] with context.session.begin(subtransactions=True): tap_flow_db = self._get_tap_flow(context, id) tap_flow_db.update(t_f) return self._make_tap_flow_dict(tap_flow_db) tap-as-a-service-2.0.0/neutron_taas/extensions/000077500000000000000000000000001314312670600214565ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/extensions/__init__.py000066400000000000000000000000001314312670600235550ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/extensions/taas.py000066400000000000000000000203261314312670600227630ustar00rootroot00000000000000# Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.api import extensions from neutron_lib import exceptions as qexception from neutron_lib.services import base as service_base from neutron.api.v2 import resource_helper from neutron_taas._i18n import _ from neutron_taas.common import constants from oslo_config import cfg import six # TaaS exception handling classes class TapServiceNotFound(qexception.NotFound): message = _("Tap Service %(tap_id)s does not exist") class TapFlowNotFound(qexception.NotFound): message = _("Tap Flow %(flow_id)s does not exist") class InvalidDestinationPort(qexception.NotFound): message = _("Destination Port %(port)s does not exist") class InvalidSourcePort(qexception.NotFound): message = _("Source Port %(port)s does not exist") class PortDoesNotBelongToTenant(qexception.NotAuthorized): message = _("The specified port does not belong to the tenant") class TapServiceNotBelongToTenant(qexception.NotAuthorized): message = _("Specified Tap Service does not belong to the tenant") class TapServiceLimitReached(qexception.OverQuota): message = _("Reached the maximum quota for Tap Services") direction_enum = ['IN', 'OUT', 'BOTH'] ''' Resource Attribute Map: Note: 'tap_services' data model refers to the Tap Service created. port_id specifies destination port to which the mirrored data is sent. 
''' RESOURCE_ATTRIBUTE_MAP = { 'tap_services': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'port_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True} }, 'tap_flows': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'tap_service_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'required_by_policy': True, 'is_visible': True}, 'source_port': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'required_by_policy': True, 'is_visible': True}, 'direction': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': direction_enum}, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True} } } taas_quota_opts = [ cfg.IntOpt('quota_tap_service', default=1, help=_('Number of Tap Service instances allowed per tenant')), cfg.IntOpt('quota_tap_flow', default=10, help=_('Number of Tap flows allowed per tenant')) ] cfg.CONF.register_opts(taas_quota_opts, 'QUOTAS') TaasOpts = [ cfg.StrOpt( 'driver', default='', help=_("Name of the TaaS Driver")), cfg.BoolOpt( 'enabled', default=False, help=_("Enable TaaS")), cfg.IntOpt( 'vlan_range_start', default=3900, help=_("Starting range of TAAS VLAN IDs")), cfg.IntOpt( 'vlan_range_end', default=4000, help=_("End range of TAAS VLAN IDs")), ] cfg.CONF.register_opts(TaasOpts, 'taas') class Taas(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron Tap as a Service" @classmethod def get_alias(cls): return "taas" @classmethod def get_description(cls): return "Neutron Tap as a Service Extension." 
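    # Illustrative note (added comment; values are examples only): the [taas]
    # and [QUOTAS] options registered above are read from the neutron server
    # configuration.  A deployment might set, for instance:
    #
    #     [taas]
    #     enabled = True
    #     driver = <TaaS driver name, deployment specific>
    #     vlan_range_start = 3900
    #     vlan_range_end = 4000
    #
    #     [QUOTAS]
    #     quota_tap_service = 1
    #     quota_tap_flow = 10
    #
    # The numeric values shown match the defaults declared in TaasOpts and
    # taas_quota_opts above.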
@classmethod def get_namespace(cls): return "http://wiki.openstack.org/wiki/Neutron/Taas/#API" @classmethod def get_updated(cls): return "2015-01-14T10:00:00-00:00" @classmethod def get_plugin_interface(cls): return TaasPluginBase @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.TAAS, translate_name=False, allow_bulk=True) def update_attributes_map(self, attributes): super(Taas, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} @six.add_metaclass(abc.ABCMeta) class TaasPluginBase(service_base.ServicePluginBase): def get_plugin_name(self): return constants.TAAS def get_plugin_description(self): return "Tap Service Plugin" @classmethod def get_plugin_type(cls): return constants.TAAS @abc.abstractmethod def create_tap_service(self, context, tap_service): """Create a Tap Service.""" pass @abc.abstractmethod def delete_tap_service(self, context, id): """Delete a Tap Service.""" pass @abc.abstractmethod def get_tap_service(self, context, id, fields=None): """Get a Tap Service.""" pass @abc.abstractmethod def get_tap_services(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """List all Tap Services.""" pass @abc.abstractmethod def update_tap_service(self, context, id, tap_service): """Update a Tap Service.""" pass @abc.abstractmethod def create_tap_flow(self, context, tap_flow): """Create a Tap Flow.""" pass @abc.abstractmethod def get_tap_flow(self, context, id, fields=None): """Get a Tap Flow.""" pass @abc.abstractmethod def delete_tap_flow(self, context, id): """Delete a Tap Flow.""" pass @abc.abstractmethod def get_tap_flows(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """List all Tap Flows.""" pass @abc.abstractmethod def update_tap_flow(self, context, id, tap_flow): """Update a Tap Flow.""" pass tap-as-a-service-2.0.0/neutron_taas/services/000077500000000000000000000000001314312670600211025ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/__init__.py000066400000000000000000000000001314312670600232010ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/000077500000000000000000000000001314312670600220325ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/__init__.py000066400000000000000000000000001314312670600241310ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/000077500000000000000000000000001314312670600233135ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/__init__.py000066400000000000000000000000001314312670600254120ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/extensions/000077500000000000000000000000001314312670600255125ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/extensions/__init__.py000066400000000000000000000000001314312670600276110ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/extensions/taas.py000066400000000000000000000052101314312670600270120ustar00rootroot00000000000000# Copyright 2017 FUJITSU LABORATORIES LTD. # Copyright 2016 NEC Technologies India Pvt. Ltd. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from neutron.agent.l2 import l2_agent_extension from neutron_taas.services.taas.agents.ovs import taas_ovs_agent from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) OPTS = [ cfg.IntOpt( 'taas_agent_periodic_interval', default=5, help=_('Seconds between periodic task runs') ) ] cfg.CONF.register_opts(OPTS) @six.add_metaclass(abc.ABCMeta) class TaasAgentDriver(object): """Defines stable abstract interface for TaaS Agent Driver.""" @abc.abstractmethod def initialize(self): """Perform Taas agent driver initialization.""" def consume_api(self, agent_api): """Consume the AgentAPI instance from the TaasAgentExtension class :param agent_api: An instance of an agent specific API """ @abc.abstractmethod def create_tap_service(self, tap_service): """Create a Tap Service request in driver.""" @abc.abstractmethod def create_tap_flow(self, tap_flow): """Create a tap flow request in driver.""" @abc.abstractmethod def delete_tap_service(self, tap_service): """delete a Tap Service request in driver.""" @abc.abstractmethod def delete_tap_flow(self, tap_flow): """Delete a tap flow request in driver.""" class TaasAgentExtension(l2_agent_extension.L2AgentExtension): def initialize(self, connection, driver_type): """Initialize agent extension.""" self.taas_agent = taas_ovs_agent.TaasOvsAgentRpcCallback( cfg.CONF, driver_type) self.taas_agent.consume_api(self.agent_api) self.taas_agent.initialize() def consume_api(self, agent_api): """Receive neutron agent API object Allows an extension to gain access to resources internal to the neutron agent and otherwise unavailable to the extension. """ self.agent_api = agent_api def handle_port(self, context, port): pass def delete_port(self, context, port): pass tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/ovs/000077500000000000000000000000001314312670600241225ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/ovs/__init__.py000066400000000000000000000000001314312670600262210ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/ovs/taas_ovs_agent.py000066400000000000000000000110371314312670600274730ustar00rootroot00000000000000# Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
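# Illustrative note (added comment, not part of the original file): the RPC
# callbacks in TaasOvsAgentRpcCallback below receive the message dictionaries
# built by the server-side service driver, roughly of the form
#
#     {'tap_service': <tap service dict>, 'taas_id': <allocated id>,
#      'port': <port dict including 'binding:host_id'>}
#
# for tap services, and analogously ('tap_flow', 'port_mac', 'taas_id',
# 'port') for tap flows.  Most handlers first check that the 'host' argument
# matches cfg.CONF.host; delete_tap_service deliberately skips that check so
# that every host can perform cleanup.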
from neutron.common import rpc as n_rpc from neutron import manager from neutron_taas.common import topics from neutron_taas.services.taas.agents import taas_agent_api as api from oslo_config import cfg from oslo_log import log as logging from oslo_service import service LOG = logging.getLogger(__name__) class TaasOvsPluginApi(api.TaasPluginApiMixin): # Currently there are not any APIs from the the agent towards plugin def __init__(self, topic, host): super(TaasOvsPluginApi, self).__init__(topic, host) return class TaasOvsAgentRpcCallback(api.TaasAgentRpcCallbackMixin): def __init__(self, conf, driver_type): LOG.debug("TaaS OVS Agent initialize called") self.conf = conf self.driver_type = driver_type super(TaasOvsAgentRpcCallback, self).__init__() def initialize(self): self.taas_driver = manager.NeutronManager.load_class_for_provider( 'neutron_taas.taas.agent_drivers', self.driver_type)() self.taas_driver.consume_api(self.agent_api) self.taas_driver.initialize() self._taas_rpc_setup() TaasOvsAgentService(self).start() def consume_api(self, agent_api): self.agent_api = agent_api def _invoke_driver_for_plugin_api(self, context, args, func_name): LOG.debug("Invoking Driver for %(func_name)s from agent", {'func_name': func_name}) try: self.taas_driver.__getattribute__(func_name)(args) except Exception: LOG.debug("Failed to invoke the driver") return def create_tap_service(self, context, tap_service, host): """Handle Rpc from plugin to create a firewall.""" if host != self.conf.host: return LOG.debug("In RPC Call for Create Tap Service: MSG=%s" % tap_service) return self._invoke_driver_for_plugin_api( context, tap_service, 'create_tap_service') def create_tap_flow(self, context, tap_flow_msg, host): if host != self.conf.host: return LOG.debug("In RPC Call for Create Tap Flow: MSG=%s" % tap_flow_msg) return self._invoke_driver_for_plugin_api( context, tap_flow_msg, 'create_tap_flow') def delete_tap_service(self, context, tap_service, host): # # Cleanup operations must be performed by all hosts # where the source and/or destination ports associated # with this tap service were residing. # LOG.debug("In RPC Call for Delete Tap Service: MSG=%s" % tap_service) return self._invoke_driver_for_plugin_api( context, tap_service, 'delete_tap_service') def delete_tap_flow(self, context, tap_flow_msg, host): if host != self.conf.host: return LOG.debug("In RPC Call for Delete Tap Flow: MSG=%s" % tap_flow_msg) return self._invoke_driver_for_plugin_api( context, tap_flow_msg, 'delete_tap_flow') def _taas_rpc_setup(self): # setup RPC to msg taas plugin self.taas_plugin_rpc = TaasOvsPluginApi( topics.TAAS_PLUGIN, self.conf.host) endpoints = [self] conn = n_rpc.create_connection() conn.create_consumer(topics.TAAS_AGENT, endpoints, fanout=False) conn.consume_in_threads() def periodic_tasks(self): # # Regenerate the flow in br-tun's TAAS_SEND_FLOOD table # to ensure all existing tunnel ports are included. 
# self.taas_driver.update_tunnel_flood_flow() class TaasOvsAgentService(service.Service): def __init__(self, driver): super(TaasOvsAgentService, self).__init__() self.driver = driver def start(self): super(TaasOvsAgentService, self).start() self.tg.add_timer( int(cfg.CONF.taas_agent_periodic_interval), self.driver.periodic_tasks, None ) tap-as-a-service-2.0.0/neutron_taas/services/taas/agents/taas_agent_api.py000066400000000000000000000044411314312670600266270ustar00rootroot00000000000000# Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_taas._i18n import _ from oslo_config import cfg import oslo_messaging as messaging from neutron.common import rpc as n_rpc TaasOpts = [ cfg.StrOpt( 'driver', default='', help=_("Name of the TaaS Driver")), cfg.BoolOpt( 'enabled', default=False, help=_("Enable TaaS")), ] cfg.CONF.register_opts(TaasOpts, 'taas') class TaasPluginApiMixin(object): # Currently there are no Calls the Agent makes towards the Plugin. def __init__(self, topic, host): self.host = host target = messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) super(TaasPluginApiMixin, self).__init__() return class TaasAgentRpcCallbackMixin(object): """Mixin for Taas agent Implementations.""" def __init__(self): super(TaasAgentRpcCallbackMixin, self).__init__() def consume_api(self, agent_api): """Receive neutron agent API object Allows an extension to gain access to resources internal to the neutron agent and otherwise unavailable to the extension. """ self.agent_api = agent_api def create_tap_service(self, context, tap_service, host): """Handle RPC cast from plugin to create a tap service.""" pass def delete_tap_service(self, context, tap_service, host): """Handle RPC cast from plugin to delete a tap service.""" pass def create_tap_flow(self, context, tap_flow_msg, host): """Handle RPC cast from plugin to create a tap flow""" pass def delete_tap_flow(self, context, tap_flow_msg, host): """Handle RPC cast from plugin to delete a tap flow""" pass tap-as-a-service-2.0.0/neutron_taas/services/taas/drivers/000077500000000000000000000000001314312670600235105ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/drivers/__init__.py000066400000000000000000000000001314312670600256070ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/drivers/linux/000077500000000000000000000000001314312670600246475ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/drivers/linux/__init__.py000066400000000000000000000000001314312670600267460ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/drivers/linux/ovs_constants.py000066400000000000000000000015341314312670600301270ustar00rootroot00000000000000# Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # OVS tables used by TaaS in br-tap TAAS_RECV_LOC = 1 TAAS_RECV_REM = 2 # OVS tables used by TaaS in br-tun TAAS_SEND_UCAST = 30 TAAS_SEND_FLOOD = 31 TAAS_CLASSIFY = 35 TAAS_DST_CHECK = 36 TAAS_SRC_CHECK = 37 TAAS_DST_RESPOND = 38 TAAS_SRC_RESPOND = 39 tap-as-a-service-2.0.0/neutron_taas/services/taas/drivers/linux/ovs_taas.py000066400000000000000000000504611314312670600270460ustar00rootroot00000000000000# Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.common import ovs_lib from neutron.agent.linux import utils from neutron.conf.agent import common # from neutron.plugins.openvswitch.common import constants as ovs_consts from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ as ovs_consts from neutron_taas.services.taas.agents.extensions import taas as taas_base from oslo_config import cfg from oslo_log import log as logging import ovs_constants as taas_ovs_consts import ovs_utils as taas_ovs_utils LOG = logging.getLogger(__name__) TaaS_DRIVER_NAME = 'Taas OVS driver' class OVSBridge_tap_extension(ovs_lib.OVSBridge): def __init__(self, br_name, root_helper): super(OVSBridge_tap_extension, self).__init__(br_name) class OvsTaasDriver(taas_base.TaasAgentDriver): def __init__(self): super(OvsTaasDriver, self).__init__() LOG.debug("Initializing Taas OVS Driver") self.agent_api = None self.root_helper = common.get_root_helper(cfg.CONF) def initialize(self): self.int_br = self.agent_api.request_int_br() self.tun_br = self.agent_api.request_tun_br() self.tap_br = OVSBridge_tap_extension('br-tap', self.root_helper) # Prepare OVS bridges for TaaS self.setup_ovs_bridges() # Setup key-value manager for ingress BCMC flows self.bcmc_kvm = taas_ovs_utils.key_value_mgr(4096) def setup_ovs_bridges(self): # # br-int : Integration Bridge # br-tap : Tap Bridge # br-tun : Tunnel Bridge # # Create br-tap self.tap_br.create() # Connect br-tap to br-int and br-tun self.int_br.add_patch_port('patch-int-tap', 'patch-tap-int') self.tap_br.add_patch_port('patch-tap-int', 'patch-int-tap') self.tun_br.add_patch_port('patch-tun-tap', 'patch-tap-tun') self.tap_br.add_patch_port('patch-tap-tun', 'patch-tun-tap') # Get patch port IDs patch_tap_int_id = self.tap_br.get_port_ofport('patch-tap-int') patch_tap_tun_id = self.tap_br.get_port_ofport('patch-tap-tun') patch_tun_tap_id = self.tun_br.get_port_ofport('patch-tun-tap') # Purge all existing Taas flows from br-tap and br-tun self.tap_br.delete_flows(table=0) self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC) self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM) 
self.tun_br.delete_flows(table=0, in_port=patch_tun_tap_id) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_UCAST) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_FLOOD) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_CLASSIFY) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_RESPOND) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_RESPOND) # # Configure standard TaaS flows in br-tap # self.tap_br.add_flow(table=0, priority=1, in_port=patch_tap_int_id, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_RECV_LOC) self.tap_br.add_flow(table=0, priority=1, in_port=patch_tap_tun_id, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_RECV_REM) self.tap_br.add_flow(table=0, priority=0, actions="drop") self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC, priority=0, actions="output:%s" % str(patch_tap_tun_id)) self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM, priority=0, actions="drop") # # Configure standard Taas flows in br-tun # self.tun_br.add_flow(table=0, priority=1, in_port=patch_tun_tap_id, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_SEND_UCAST) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_UCAST, priority=0, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_SEND_FLOOD) flow_action = self._create_tunnel_flood_flow_action() if flow_action != "": self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD, priority=0, actions=flow_action) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY, priority=2, reg0=0, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_DST_CHECK) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY, priority=1, reg0=1, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_DST_CHECK) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY, priority=1, reg0=2, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_SRC_CHECK) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK, priority=0, actions="drop") self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK, priority=0, actions="drop") self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND, priority=2, reg0=0, actions="output:%s" % str(patch_tun_tap_id)) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND, priority=1, reg0=1, actions=( "output:%s," "move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID" "[0..11],mod_vlan_vid:2,output:in_port" % str(patch_tun_tap_id))) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_RESPOND, priority=1, actions=( "learn(table=%s,hard_timeout=60," "priority=1,NXM_OF_VLAN_TCI[0..11]," "load:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID" "[0..11],load:0->NXM_OF_VLAN_TCI[0..11]," "output:NXM_OF_IN_PORT[])" % taas_ovs_consts.TAAS_SEND_UCAST)) return def consume_api(self, agent_api): self.agent_api = agent_api def create_tap_service(self, tap_service): taas_id = tap_service['taas_id'] port = tap_service['port'] # Get OVS port id for tap service port ovs_port = self.int_br.get_vif_port_by_id(port['id']) ovs_port_id = ovs_port.ofport # Get VLAN id for tap service port port_dict = self.int_br.get_port_tag_dict() port_vlan_id = port_dict[ovs_port.port_name] # Get patch port IDs patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap') patch_tap_int_id = self.tap_br.get_port_ofport('patch-tap-int') # Add flow(s) in br-int self.int_br.add_flow(table=0, priority=25, in_port=patch_int_tap_id, dl_vlan=taas_id, actions="mod_vlan_vid:%s,output:%s" % (str(port_vlan_id), str(ovs_port_id))) # Add flow(s) in br-tap 
self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC, priority=1, dl_vlan=taas_id, actions="output:in_port") self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM, priority=1, dl_vlan=taas_id, actions="output:%s" % str(patch_tap_int_id)) # Add flow(s) in br-tun for tunnel_type in ovs_consts.TUNNEL_NETWORK_TYPES: self.tun_br.add_flow(table=ovs_consts.TUN_TABLE[tunnel_type], priority=1, tun_id=taas_id, actions=( "move:NXM_OF_VLAN_TCI[0..11]->" "NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID" "[0..11]->NXM_OF_VLAN_TCI[0..11]," "resubmit(,%s)" % taas_ovs_consts.TAAS_CLASSIFY)) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK, priority=1, tun_id=taas_id, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_DST_RESPOND) # # Disable mac-address learning in the Linux bridge to which # the OVS port is attached (via the veth pair). This will # effectively turn the bridge into a hub, ensuring that all # incoming mirrored traffic reaches the tap interface (used # for attaching a VM to the bridge) irrespective of the # destination mac addresses in mirrored packets. # ovs_port_name = ovs_port.port_name linux_br_name = ovs_port_name.replace('qvo', 'qbr') utils.execute(['brctl', 'setageing', linux_br_name, 0], run_as_root=True) return def delete_tap_service(self, tap_service): taas_id = tap_service['taas_id'] # Get patch port ID patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap') # Delete flow(s) from br-int self.int_br.delete_flows(table=0, in_port=patch_int_tap_id, dl_vlan=taas_id) # Delete flow(s) from br-tap self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC, dl_vlan=taas_id) self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM, dl_vlan=taas_id) # Delete flow(s) from br-tun for tunnel_type in ovs_consts.TUNNEL_NETWORK_TYPES: self.tun_br.delete_flows(table=ovs_consts.TUN_TABLE[tunnel_type], tun_id=taas_id) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK, tun_id=taas_id) self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK, tun_id=taas_id) return def create_tap_flow(self, tap_flow): taas_id = tap_flow['taas_id'] port = tap_flow['port'] direction = tap_flow['tap_flow']['direction'] # Get OVS port id for tap flow port ovs_port = self.int_br.get_vif_port_by_id(port['id']) ovs_port_id = ovs_port.ofport # Get patch port ID patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap') # Add flow(s) in br-int if direction == 'OUT' or direction == 'BOTH': self.int_br.add_flow(table=0, priority=20, in_port=ovs_port_id, actions="normal,mod_vlan_vid:%s,output:%s" % (str(taas_id), str(patch_int_tap_id))) if direction == 'IN' or direction == 'BOTH': port_mac = tap_flow['port_mac'] # # Note: The ingress side flow (for unicast traffic) should # include a check for the 'VLAN id of the Neutron # network the port belongs to' + 'MAC address of the # port', to comply with the requirement that port MAC # addresses are unique only within a Neutron network. # Unfortunately, at the moment there is no clean way # to implement such a check, given OVS's handling of # VLAN tags and Neutron's use of the NORMAL action in # br-int. # # We are therefore temporarily disabling the VLAN id # check until a mechanism is available to implement # it correctly. The {broad,multi}cast flow, which is # also dependent on the VLAN id, has been disabled # for the same reason. 
# # Get VLAN id for tap flow port # port_dict = self.int_br.get_port_tag_dict() # port_vlan_id = port_dict[ovs_port.port_name] self.int_br.add_flow(table=0, priority=20, # dl_vlan=port_vlan_id, dl_dst=port_mac, actions="normal,mod_vlan_vid:%s,output:%s" % (str(taas_id), str(patch_int_tap_id))) # self._add_update_ingress_bcmc_flow(port_vlan_id, # taas_id, # patch_int_tap_id) # Add flow(s) in br-tun for tunnel_type in ovs_consts.TUNNEL_NETWORK_TYPES: self.tun_br.add_flow(table=ovs_consts.TUN_TABLE[tunnel_type], priority=1, tun_id=taas_id, actions=( "move:NXM_OF_VLAN_TCI[0..11]->" "NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID" "[0..11]->NXM_OF_VLAN_TCI[0..11]," "resubmit(,%s)" % taas_ovs_consts.TAAS_CLASSIFY)) self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK, priority=1, tun_id=taas_id, actions="resubmit(,%s)" % taas_ovs_consts.TAAS_SRC_RESPOND) return def delete_tap_flow(self, tap_flow): port = tap_flow['port'] direction = tap_flow['tap_flow']['direction'] # Get OVS port id for tap flow port ovs_port = self.int_br.get_vif_port_by_id(port['id']) ovs_port_id = ovs_port.ofport # Delete flow(s) from br-int if direction == 'OUT' or direction == 'BOTH': self.int_br.delete_flows(table=0, in_port=ovs_port_id) if direction == 'IN' or direction == 'BOTH': port_mac = tap_flow['port_mac'] # # The VLAN id related checks have been temporarily disabled. # Please see comment in create_tap_flow() for details. # # taas_id = tap_flow['taas_id'] # Get VLAN id for tap flow port # port_dict = self.int_br.get_port_tag_dict() # port_vlan_id = port_dict[ovs_port.port_name] # Get patch port ID # patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap') self.int_br.delete_flows(table=0, # dl_vlan=port_vlan_id, dl_dst=port_mac) # self._del_update_ingress_bcmc_flow(port_vlan_id, # taas_id, # patch_int_tap_id) return def update_tunnel_flood_flow(self): flow_action = self._create_tunnel_flood_flow_action() if flow_action != "": self.tun_br.mod_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD, actions=flow_action) def _create_tunnel_flood_flow_action(self): args = ["ovs-vsctl", "list-ports", "br-tun"] res = utils.execute(args, run_as_root=True) port_name_list = res.splitlines() flow_action = ("move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID[0..11]," "mod_vlan_vid:1") tunnel_ports_exist = False for port_name in port_name_list: if (port_name != 'patch-int') and (port_name != 'patch-tun-tap'): flow_action += (",output:%d" % self.tun_br.get_port_ofport(port_name)) tunnel_ports_exist = True if tunnel_ports_exist: return flow_action else: return "" def _create_ingress_bcmc_flow_action(self, taas_id_list, out_port_id): flow_action = "normal" for taas_id in taas_id_list: flow_action += (",mod_vlan_vid:%d,output:%d" % (taas_id, out_port_id)) return flow_action # # Adds or updates a special flow in br-int to mirror (duplicate and # redirect to 'out_port_id') all ingress broadcast/multicast traffic, # associated with a VLAN, to possibly multiple tap service instances. # def _add_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id): # Add a tap service instance affiliation with VLAN self.bcmc_kvm.affiliate(vlan_id, taas_id) # Find all tap service instances affiliated with VLAN taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id) # # Add/update flow to mirror ingress BCMC traffic, associated # with VLAN, to all affiliated tap-service instances. 
# flow_action = self._create_ingress_bcmc_flow_action(taas_id_list, out_port_id) self.int_br.add_flow(table=0, priority=20, dl_vlan=vlan_id, dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", actions=flow_action) return # # Removes or updates a special flow in br-int to mirror (duplicate # and redirect to 'out_port_id') all ingress broadcast/multicast # traffic, associated with a VLAN, to possibly multiple tap-service # instances. # def _del_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id): # Remove a tap-service instance affiliation with VLAN self.bcmc_kvm.unaffiliate(vlan_id, taas_id) # Find all tap-service instances affiliated with VLAN taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id) # # If there are tap service instances affiliated with VLAN, update # the flow to mirror ingress BCMC traffic, associated with VLAN, # to all of them. Otherwise, remove the flow. # if taas_id_list: flow_action = self._create_ingress_bcmc_flow_action(taas_id_list, out_port_id) self.int_br.add_flow(table=0, priority=20, dl_vlan=vlan_id, dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", actions=flow_action) else: self.int_br.delete_flows(table=0, dl_vlan=vlan_id, dl_dst=("01:00:00:00:00:00/" "01:00:00:00:00:00")) return tap-as-a-service-2.0.0/neutron_taas/services/taas/drivers/linux/ovs_utils.py000066400000000000000000000052461314312670600272570ustar00rootroot00000000000000# Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # This class implements a simple key-value manager that can support # the following relationships. # # - Multiple values may be affiliated with a key. # - A value may be affiliated with multiple keys. # - A value may be affiliated with a key multiple times. # class key_value_mgr(object): # # Initializes internal state for specified # keys # def __init__(self, nr_keys): self.key_list = [] for i in range(nr_keys): self.key_list.append([]) return # # Returns specified key-value affilation, if it exists. 
# def _find_affiliation(self, key, value): aff_list = self.key_list[key] for aff in aff_list: if aff['value'] == value: return aff return None # # Adds an affiliation of 'value' with 'key' # def affiliate(self, key, value): # Locate key-value affiliation aff = self._find_affiliation(key, value) if aff is None: # Create a (new) key-value affiliation aff = { 'value': value, 'refcnt': 0, } aff_list = self.key_list[key] aff_list.append(aff) # Increment affiliation reference count aff['refcnt'] += 1 return # # Removes an affiliation of 'value' with 'key' # def unaffiliate(self, key, value): # Locate key-value affiliation aff = self._find_affiliation(key, value) if aff is None: return # Decrement affiliation reference count aff['refcnt'] -= 1 # Destroy affiliation iff no outstanding references if aff['refcnt'] <= 0: aff_list = self.key_list[key] aff_list.remove(aff) return # # Lists all values affiliated with 'key' # # Note: The returned list is a set (contains no duplicates) # def list_affiliations(self, key): aff_list = self.key_list[key] value_list = [] for aff in aff_list: value_list.append(aff['value']) return value_list tap-as-a-service-2.0.0/neutron_taas/services/taas/service_drivers/000077500000000000000000000000001314312670600252305ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/services/taas/service_drivers/__init__.py000066400000000000000000000027701314312670600273470ustar00rootroot00000000000000# Copyright (C) 2016 Midokura SARL. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class TaasBaseDriver(object): def __init__(self, service_plugin): self.service_plugin = service_plugin @property def service_type(self): pass @abc.abstractmethod def create_tap_service_precommit(self, context): pass @abc.abstractmethod def delete_tap_service_precommit(self, context): pass @abc.abstractmethod def create_tap_flow_precommit(self, context): pass @abc.abstractmethod def delete_tap_flow_precommit(self, context): pass @abc.abstractmethod def create_tap_service_postcommit(self, context): pass @abc.abstractmethod def delete_tap_service_postcommit(self, context): pass @abc.abstractmethod def create_tap_flow_postcommit(self, context): pass @abc.abstractmethod def delete_tap_flow_postcommit(self, context): pass tap-as-a-service-2.0.0/neutron_taas/services/taas/service_drivers/service_driver_context.py000066400000000000000000000043001314312670600323560ustar00rootroot00000000000000# Copyright (C) 2016 Midokura SARL. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
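# Usage sketch (added comment; "plugin" and "plugin_context" are assumed to be
# the TaaS service plugin and a neutron request context): the plugin wraps the
# tap service / tap flow dict in one of the context classes below before
# handing it to the configured service driver, e.g.
#
#     driver_context = TapServiceContext(plugin, plugin_context, ts_dict)
#     driver.create_tap_service_precommit(driver_context)
#     driver.create_tap_service_postcommit(driver_context)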
from neutron_taas.extensions import taas from oslo_log import log LOG = log.getLogger(__name__) class ServiceDriverContext(object): """ServiceDriverContext context base class""" def __init__(self, service_plugin, plugin_context): self._plugin = service_plugin self._plugin_context = plugin_context class TapServiceContext(ServiceDriverContext): def __init__(self, service_plugin, plugin_context, tap_service): super(TapServiceContext, self).__init__(service_plugin, plugin_context) self._tap_service = tap_service self._tap_id_association = None self._setup_tap_id_association(tap_service['id']) def _setup_tap_id_association(self, tap_service_id): try: self._tap_id_association = self._plugin.get_tap_id_association( self._plugin_context, tap_service_id) except taas.TapServiceNotFound: LOG.debug("Not found tap_ip_association for tap_service: %s", tap_service_id) @property def tap_service(self): return self._tap_service @property def tap_id_association(self): return self._tap_id_association @tap_id_association.setter def tap_id_association(self, tap_id_association): """Set tap_id_association in context""" self._tap_id_association = tap_id_association class TapFlowContext(ServiceDriverContext): def __init__(self, service_plugin, plugin_context, tap_flow): super(TapFlowContext, self).__init__(service_plugin, plugin_context) self._tap_flow = tap_flow @property def tap_flow(self): return self._tap_flow tap-as-a-service-2.0.0/neutron_taas/services/taas/service_drivers/taas_agent_api.py000066400000000000000000000051121314312670600305400ustar00rootroot00000000000000# Copyright (C) 2016 Midokura SARL. # Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
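# Illustrative sketch (added comment): TaasAgentApi below is the plugin-side
# RPC client.  The RPC service driver typically creates one instance and casts
# notifications to the agents, e.g.
#
#     agent_rpc = TaasAgentApi(topics.TAAS_AGENT, cfg.CONF.host)
#     agent_rpc.create_tap_service(plugin_context, rpc_msg, host)
#
# Every cast uses prepare(fanout=True), so all agents receive the message and
# filter on the 'host' argument themselves.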
from neutron.common import rpc as n_rpc from oslo_log import log as logging import oslo_messaging as messaging LOG = logging.getLogger(__name__) class TaasCallbacks(object): """Currently there are no callbacks to the Taas Plugin.""" def __init__(self, plugin): super(TaasCallbacks, self).__init__() self.plugin = plugin return class TaasAgentApi(object): """RPC calls to agent APIs""" def __init__(self, topic, host): self.host = host target = messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) return def create_tap_service(self, context, tap_service, host): LOG.debug("In RPC Call for Create Tap Service: Host=%s, MSG=%s" % (host, tap_service)) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, 'create_tap_service', tap_service=tap_service, host=host) return def create_tap_flow(self, context, tap_flow_msg, host): LOG.debug("In RPC Call for Create Tap Flow: Host=%s, MSG=%s" % (host, tap_flow_msg)) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, 'create_tap_flow', tap_flow_msg=tap_flow_msg, host=host) return def delete_tap_service(self, context, tap_service, host): LOG.debug("In RPC Call for Delete Tap Service: Host=%s, MSG=%s" % (host, tap_service)) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, 'delete_tap_service', tap_service=tap_service, host=host) return def delete_tap_flow(self, context, tap_flow_msg, host): LOG.debug("In RPC Call for Delete Tap Flow: Host=%s, MSG=%s" % (host, tap_flow_msg)) cctxt = self.client.prepare(fanout=True) cctxt.cast(context, 'delete_tap_flow', tap_flow_msg=tap_flow_msg, host=host) return tap-as-a-service-2.0.0/neutron_taas/services/taas/service_drivers/taas_rpc.py000066400000000000000000000135431314312670600274040ustar00rootroot00000000000000# Copyright (C) 2016 Midokura SARL. # Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
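# Overview sketch (added comment): for a tap service create, the plugin invokes
# this driver roughly as
#
#     create_tap_service_precommit(ctx)   # allocates the TapIdAssociation row
#     create_tap_service_postcommit(ctx)  # builds rpc_msg and casts it to the
#                                         # agent on the port's binding:host_id
#
# where ctx is a TapServiceContext carrying the tap service dict and, after
# precommit, its tap_id_association (the taas_id used as a VLAN id on the
# agent side).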
from neutron.common import rpc as n_rpc from neutron_lib import exceptions as n_exc from neutron_taas.common import topics from neutron_taas.services.taas import service_drivers from neutron_taas.services.taas.service_drivers import taas_agent_api from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) class TaasRpcDriver(service_drivers.TaasBaseDriver): """Taas Rpc Service Driver class""" def __init__(self, service_plugin): LOG.debug("Loading TaasRpcDriver.") super(TaasRpcDriver, self).__init__(service_plugin) self.endpoints = [taas_agent_api.TaasCallbacks(service_plugin)] self.conn = n_rpc.create_connection() self.conn.create_consumer(topics.TAAS_PLUGIN, self.endpoints, fanout=False) self.conn.consume_in_threads() self.agent_rpc = taas_agent_api.TaasAgentApi( topics.TAAS_AGENT, cfg.CONF.host ) return def _get_taas_id(self, context, tf): ts = self.service_plugin.get_tap_service(context, tf['tap_service_id']) taas_id = (self.service_plugin.get_tap_id_association( context, tap_service_id=ts['id']))['taas_id'] return taas_id def create_tap_service_precommit(self, context): ts = context.tap_service tap_id_association = context._plugin.create_tap_id_association( context._plugin_context, ts['id']) context.tap_id_association = tap_id_association def create_tap_service_postcommit(self, context): """Send tap service creation RPC message to agent. This RPC message includes taas_id that is added vlan_range_start to so that taas-ovs-agent can use taas_id as VLANID. """ # Get taas id associated with the Tap Service ts = context.tap_service tap_id_association = context.tap_id_association taas_vlan_id = tap_id_association['taas_id'] port = self.service_plugin._get_port_details(context._plugin_context, ts['port_id']) host = port['binding:host_id'] rpc_msg = {'tap_service': ts, 'taas_id': taas_vlan_id, 'port': port} self.agent_rpc.create_tap_service(context._plugin_context, rpc_msg, host) return def delete_tap_service_precommit(self, context): pass def delete_tap_service_postcommit(self, context): """Send tap service deletion RPC message to agent. This RPC message includes taas_id that is added vlan_range_start to so that taas-ovs-agent can use taas_id as VLANID. 
""" ts = context.tap_service tap_id_association = context.tap_id_association taas_vlan_id = tap_id_association['taas_id'] try: port = self.service_plugin._get_port_details( context._plugin_context, ts['port_id']) host = port['binding:host_id'] except n_exc.PortNotFound: # if not found, we just pass to None port = None host = None rpc_msg = {'tap_service': ts, 'taas_id': taas_vlan_id, 'port': port} self.agent_rpc.delete_tap_service(context._plugin_context, rpc_msg, host) return def create_tap_flow_precommit(self, context): pass def create_tap_flow_postcommit(self, context): """Send tap flow creation RPC message to agent.""" tf = context.tap_flow taas_id = self._get_taas_id(context._plugin_context, tf) # Extract the host where the source port is located port = self.service_plugin._get_port_details(context._plugin_context, tf['source_port']) host = port['binding:host_id'] port_mac = port['mac_address'] # Send RPC message to both the source port host and # tap service(destination) port host rpc_msg = {'tap_flow': tf, 'port_mac': port_mac, 'taas_id': taas_id, 'port': port} self.agent_rpc.create_tap_flow(context._plugin_context, rpc_msg, host) return def delete_tap_flow_precommit(self, context): pass def delete_tap_flow_postcommit(self, context): """Send tap flow deletion RPC message to agent.""" tf = context.tap_flow taas_id = self._get_taas_id(context._plugin_context, tf) # Extract the host where the source port is located port = self.service_plugin._get_port_details(context._plugin_context, tf['source_port']) host = port['binding:host_id'] port_mac = port['mac_address'] # Send RPC message to both the source port host and # tap service(destination) port host rpc_msg = {'tap_flow': tf, 'port_mac': port_mac, 'taas_id': taas_id, 'port': port} self.agent_rpc.delete_tap_flow(context._plugin_context, rpc_msg, host) return tap-as-a-service-2.0.0/neutron_taas/services/taas/taas_plugin.py000066400000000000000000000152061314312670600247160ustar00rootroot00000000000000# Copyright (C) 2016 Midokura SARL. # Copyright (C) 2015 Ericsson AB # Copyright (c) 2015 Gigamon # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.db import servicetype_db as st_db from neutron.services import provider_configuration as pconf from neutron.services import service_base from neutron_lib import exceptions as n_exc from neutron_taas.common import constants from neutron_taas.db import taas_db from neutron_taas.extensions import taas as taas_ex from neutron_taas.services.taas.service_drivers import (service_driver_context as sd_context) from oslo_log import log as logging from oslo_utils import excutils LOG = logging.getLogger(__name__) def add_provider_configuration(type_manager, service_type): type_manager.add_provider_configuration( service_type, pconf.ProviderConfiguration('neutron_taas')) class TaasPlugin(taas_db.Taas_db_Mixin): supported_extension_aliases = ["taas"] path_prefix = "/taas" def __init__(self): LOG.debug("TAAS PLUGIN INITIALIZED") self.service_type_manager = st_db.ServiceTypeManager.get_instance() add_provider_configuration(self.service_type_manager, constants.TAAS) self._load_drivers() self.driver = self._get_driver_for_provider(self.default_provider) return def _load_drivers(self): """Loads plugin-drivers specified in configuration.""" self.drivers, self.default_provider = service_base.load_drivers( 'TAAS', self) def _get_driver_for_provider(self, provider): if provider in self.drivers: return self.drivers[provider] raise n_exc.Invalid("Error retrieving driver for provider %s" % provider) def create_tap_service(self, context, tap_service): LOG.debug("create_tap_service() called") t_s = tap_service['tap_service'] tenant_id = t_s['tenant_id'] port_id = t_s['port_id'] # Get port details port = self._get_port_details(context, port_id) # Check if the port is owned by the tenant. if port['tenant_id'] != tenant_id: raise taas_ex.PortDoesNotBelongToTenant() # Extract the host where the port is located host = port['binding:host_id'] if host is not None: LOG.debug("Host on which the port is created = %s" % host) else: LOG.debug("Host could not be found, Port Binding disbaled!") # Create tap service in the db model with context.session.begin(subtransactions=True): ts = super(TaasPlugin, self).create_tap_service(context, tap_service) driver_context = sd_context.TapServiceContext(self, context, ts) self.driver.create_tap_service_precommit(driver_context) try: self.driver.create_tap_service_postcommit(driver_context) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to create tap service on driver," "deleting tap_service %s", ts['id']) super(TaasPlugin, self).delete_tap_service(context, ts['id']) return ts def delete_tap_service(self, context, id): LOG.debug("delete_tap_service() called") # Get all the tap Flows that are associated with the Tap service # and delete them as well t_f_collection = self.get_tap_flows( context, filters={'tap_service_id': [id]}, fields=['id']) for t_f in t_f_collection: self.delete_tap_flow(context, t_f['id']) with context.session.begin(subtransactions=True): ts = self.get_tap_service(context, id) driver_context = sd_context.TapServiceContext(self, context, ts) super(TaasPlugin, self).delete_tap_service(context, id) self.driver.delete_tap_service_precommit(driver_context) try: self.driver.delete_tap_service_postcommit(driver_context) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to delete tap service on driver. 
" "tap_sevice: %s", id) def create_tap_flow(self, context, tap_flow): LOG.debug("create_tap_flow() called") t_f = tap_flow['tap_flow'] tenant_id = t_f['tenant_id'] # Check if the tenant id of the source port is the same as the # tenant_id of the tap service we are attaching it to. ts = self.get_tap_service(context, t_f['tap_service_id']) ts_tenant_id = ts['tenant_id'] if tenant_id != ts_tenant_id: raise taas_ex.TapServiceNotBelongToTenant() # create tap flow in the db model with context.session.begin(subtransactions=True): tf = super(TaasPlugin, self).create_tap_flow(context, tap_flow) driver_context = sd_context.TapFlowContext(self, context, tf) self.driver.create_tap_flow_precommit(driver_context) try: self.driver.create_tap_flow_postcommit(driver_context) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to create tap flow on driver," "deleting tap_flow %s", tf['id']) super(TaasPlugin, self).delete_tap_flow(context, tf['id']) return tf def delete_tap_flow(self, context, id): LOG.debug("delete_tap_flow() called") with context.session.begin(subtransactions=True): tf = self.get_tap_flow(context, id) driver_context = sd_context.TapFlowContext(self, context, tf) super(TaasPlugin, self).delete_tap_flow(context, id) self.driver.delete_tap_flow_precommit(driver_context) try: self.driver.delete_tap_flow_postcommit(driver_context) except Exception: with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception(): LOG.error("Failed to delete tap flow on driver. " "tap_flow: %s", id) tap-as-a-service-2.0.0/neutron_taas/taas_client/000077500000000000000000000000001314312670600215455ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/taas_client/__init__.py000066400000000000000000000000001314312670600236440ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/taas_client/tapflow.py000066400000000000000000000073671314312670600236100ustar00rootroot00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_taas._i18n import _ from neutronclient.common import extension from neutronclient.common import utils from neutronclient.neutron import v2_0 as neutronv20 def _add_updatable_args(parser): parser.add_argument( '--name', help=_('Name of this Tap flow.')) parser.add_argument( '--description', help=_('Description for this Tap flow.')) def _updatable_args2body(parsed_args, body): neutronv20.update_dict(parsed_args, body, ['name', 'description']) class TapFlow(extension.NeutronClientExtension): # Define required variables for resource operations. 
resource = 'tap_flow' resource_plural = '%ss' % resource object_path = '/taas/%s' % resource_plural resource_path = '/taas/%s/%%s' % resource_plural versions = ['2.0'] class ListTapFlow(extension.ClientExtensionList, TapFlow): """List tap flows.""" shell_command = 'tap-flow-list' list_columns = ['id', 'name', 'source_port', 'tap_service_id', 'status'] pagination_support = True sorting_support = True class CreateTapFlow(extension.ClientExtensionCreate, TapFlow): """Create a tap flow.""" shell_command = 'tap-flow-create' list_columns = ['id', 'name', 'direction', 'source_port'] def add_known_arguments(self, parser): _add_updatable_args(parser) parser.add_argument( '--port', required=True, metavar="SOURCE_PORT", help=_('Source port to which the Tap Flow is connected.')) parser.add_argument( '--tap-service', required=True, metavar="TAP_SERVICE", help=_('Tap Service to which the Tap Flow belongs.')) parser.add_argument( '--direction', required=True, metavar="DIRECTION", choices=['IN', 'OUT', 'BOTH'], type=utils.convert_to_uppercase, help=_('Direction of the Tap flow.')) def args2body(self, parsed_args): client = self.get_client() source_port = neutronv20.find_resourceid_by_name_or_id( client, 'port', parsed_args.port) tap_service_id = neutronv20.find_resourceid_by_name_or_id( client, 'tap_service', parsed_args.tap_service) body = {'source_port': source_port, 'tap_service_id': tap_service_id} neutronv20.update_dict(parsed_args, body, ['tenant_id', 'direction']) _updatable_args2body(parsed_args, body) return {self.resource: body} class DeleteTapFlow(extension.ClientExtensionDelete, TapFlow): """Delete a tap flow.""" shell_command = 'tap-flow-delete' class ShowTapFlow(extension.ClientExtensionShow, TapFlow): """Show a tap flow.""" shell_command = 'tap-flow-show' class UpdateTapFlow(extension.ClientExtensionUpdate, TapFlow): """Update a tap flow.""" shell_command = 'tap-flow-update' list_columns = ['id', 'name'] def add_known_arguments(self, parser): _add_updatable_args(parser) def args2body(self, parsed_args): body = {} _updatable_args2body(parsed_args, body) return {self.resource: body} tap-as-a-service-2.0.0/neutron_taas/taas_client/tapservice.py000066400000000000000000000062101314312670600242630ustar00rootroot00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_taas._i18n import _ from neutronclient.common import extension from neutronclient.neutron import v2_0 as neutronv20 def _add_updatable_args(parser): parser.add_argument( '--name', help=_('Name of this Tap service.')) parser.add_argument( '--description', help=_('Description for this Tap service.')) def _updatable_args2body(parsed_args, body): neutronv20.update_dict(parsed_args, body, ['name', 'description']) class TapService(extension.NeutronClientExtension): # Define required variables for resource operations. 
resource = 'tap_service' resource_plural = '%ss' % resource object_path = '/taas/%s' % resource_plural resource_path = '/taas/%s/%%s' % resource_plural versions = ['2.0'] class ListTapService(extension.ClientExtensionList, TapService): """List tap services.""" shell_command = 'tap-service-list' list_columns = ['id', 'name', 'port', 'status'] pagination_support = True sorting_support = True class CreateTapService(extension.ClientExtensionCreate, TapService): """Create a tap service.""" shell_command = 'tap-service-create' list_columns = ['id', 'name', 'port'] def add_known_arguments(self, parser): _add_updatable_args(parser) parser.add_argument( '--port', dest='port_id', required=True, metavar="PORT", help=_('Port to which the Tap service is connected.')) def args2body(self, parsed_args): client = self.get_client() port_id = neutronv20.find_resourceid_by_name_or_id( client, 'port', parsed_args.port_id) body = {'port_id': port_id} if parsed_args.tenant_id: body['tenant_id'] = parsed_args.tenant_id _updatable_args2body(parsed_args, body) return {self.resource: body} class DeleteTapService(extension.ClientExtensionDelete, TapService): """Delete a tap service.""" shell_command = 'tap-service-delete' class ShowTapService(extension.ClientExtensionShow, TapService): """Show a tap service.""" shell_command = 'tap-service-show' class UpdateTapService(extension.ClientExtensionUpdate, TapService): """Update a tap service.""" shell_command = 'tap-service-update' list_columns = ['id', 'name'] def add_known_arguments(self, parser): _add_updatable_args(parser) def args2body(self, parsed_args): body = {} _updatable_args2body(parsed_args, body) return {self.resource: body} tap-as-a-service-2.0.0/neutron_taas/tests/000077500000000000000000000000001314312670600204215ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/__init__.py000066400000000000000000000000001314312670600225200ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/000077500000000000000000000000001314312670600234605ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/__init__.py000066400000000000000000000000001314312670600255570ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/plugin.py000066400000000000000000000023401314312670600253270ustar00rootroot00000000000000# Copyright (c) 2015 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from tempest.test_discover import plugins class NeutronTaaSPlugin(plugins.TempestPlugin): def get_opt_lists(self): return [] def load_tests(self): this_dir = os.path.dirname(os.path.abspath(__file__)) # top_level_dir = $(this_dir)/../../.. 
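# Each os.path.split()[0] below strips one trailing path component, so the
# three calls walk up from this plugin package
# (.../neutron_taas/tests/tempest_plugin) to the repository root, matching
# the "$(this_dir)/../../.." comment above.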
d = os.path.split(this_dir)[0] d = os.path.split(d)[0] top_level_dir = os.path.split(d)[0] test_dir = os.path.join(top_level_dir, 'neutron_taas/tests/tempest_plugin/tests') return (test_dir, top_level_dir) def register_opts(self, conf): return tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/services/000077500000000000000000000000001314312670600253035ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/services/__init__.py000066400000000000000000000000001314312670600274020ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/services/client.py000066400000000000000000000044511314312670600271370ustar00rootroot00000000000000# Copyright (c) 2015 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.network import base class TapServicesClient(base.BaseNetworkClient): def create_tap_service(self, **kwargs): uri = '/taas/tap_services' post_data = {'tap_service': kwargs} return self.create_resource(uri, post_data) def update_tap_service(self, tap_service_id, **kwargs): uri = '/taas/tap_services' post_data = {'tap_service': kwargs} return self.update_resource(uri, post_data) def show_tap_service(self, tap_service_id, **fields): uri = '/taas/tap_services/%s' % tap_service_id return self.show_resource(uri, **fields) def delete_tap_service(self, tap_service_id): uri = '/taas/tap_services/%s' % tap_service_id return self.delete_resource(uri) def list_tap_services(self, **filters): uri = '/taas/tap_services' return self.list_resources(uri, **filters) class TapFlowsClient(base.BaseNetworkClient): def create_tap_flow(self, **kwargs): uri = '/taas/tap_flows' post_data = {'tap_flow': kwargs} return self.create_resource(uri, post_data) def update_tap_flow(self, tap_flow_id, **kwargs): uri = '/taas/tap_flows' post_data = {'tap_flow': kwargs} return self.update_resource(uri, post_data) def show_tap_flow(self, tap_flow_id, **fields): uri = '/taas/tap_flows/%s' % tap_flow_id return self.show_resource(uri, **fields) def delete_tap_flow(self, tap_flow_id): uri = '/taas/tap_flows/%s' % tap_flow_id return self.delete_resource(uri) def list_tap_flows(self, **filters): uri = '/taas/tap_flows' return self.list_resources(uri, **filters) tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/000077500000000000000000000000001314312670600246225ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/__init__.py000066400000000000000000000000001314312670600267210ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/api/000077500000000000000000000000001314312670600253735ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/api/__init__.py000066400000000000000000000000001314312670600274720ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/api/base.py000066400000000000000000000014611314312670600266610ustar00rootroot00000000000000# Copyright (c) 2015 Midokura 
SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.network import base from neutron_taas.tests.tempest_plugin.tests import taas_client class BaseTaaSTest(taas_client.TaaSClientMixin, base.BaseNetworkTest): pass tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/api/test_taas.py000066400000000000000000000036061314312670600277410ustar00rootroot00000000000000# Copyright (c) 2015 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib import decorators from tempest import test from neutron_taas.tests.tempest_plugin.tests.api import base CONF = config.CONF class TaaSExtensionTestJSON(base.BaseTaaSTest): @classmethod def resource_setup(cls): super(TaaSExtensionTestJSON, cls).resource_setup() if not test.is_extension_enabled('taas', 'network'): msg = "TaaS Extension not enabled." raise cls.skipException(msg) @decorators.idempotent_id('b993c14e-797a-4c91-b4da-8cb1a450aa2f') def test_create_tap_service_and_flow(self): network = self.create_network() port = self.create_port(network) tap_service = self.create_tap_service(port_id=port['id']) self.create_tap_flow(tap_service_id=tap_service['id'], direction='BOTH', source_port=port['id']) @decorators.idempotent_id('d7a2115d-16b4-41cf-95a6-dcebc3682b24') def test_delete_tap_service_after_delete_port(self): network = self.create_network() port = self.create_port(network) tap_service = self.create_tap_service(port_id=port['id']) # delete port self.ports_client.delete_port(port['id']) self.tap_services_client.delete_tap_service(tap_service['id']) tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/scenario/000077500000000000000000000000001314312670600264255ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/scenario/__init__.py000066400000000000000000000000001314312670600305240ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/scenario/base.py000066400000000000000000000013761314312670600277200ustar00rootroot00000000000000# Copyright (c) 2015 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_taas.tests.tempest_plugin.tests.scenario import manager class TaaSScenarioTest(manager.NetworkScenarioTest): pass tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/scenario/manager.py000066400000000000000000001627411314312670600304240ustar00rootroot00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import subprocess import netaddr from oslo_log import log from oslo_serialization import jsonutils as json from oslo_utils import netutils from tempest.common import compute from tempest.common import image as common_image from tempest.common.utils.linux import remote_client from tempest.common.utils import net_utils from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = log.getLogger(__name__) class ScenarioTest(tempest.test.BaseTestCase): """Base class for scenario tests. Uses tempest own clients. """ credentials = ['primary'] @classmethod def setup_clients(cls): super(ScenarioTest, cls).setup_clients() # Clients (in alphabetical order) cls.flavors_client = cls.manager.flavors_client cls.compute_floating_ips_client = ( cls.manager.compute_floating_ips_client) if CONF.service_available.glance: # Check if glance v1 is available to determine which client to use. 
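# If neither image API feature flag is enabled, the else branch below raises
# InvalidConfiguration; when both are enabled, the v1 client takes precedence.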
if CONF.image_feature_enabled.api_v1: cls.image_client = cls.manager.image_client elif CONF.image_feature_enabled.api_v2: cls.image_client = cls.manager.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') # Compute image client cls.compute_images_client = cls.manager.compute_images_client cls.keypairs_client = cls.manager.keypairs_client # Nova security groups client cls.compute_security_groups_client = ( cls.manager.compute_security_groups_client) cls.compute_security_group_rules_client = ( cls.manager.compute_security_group_rules_client) cls.servers_client = cls.manager.servers_client cls.interface_client = cls.manager.interfaces_client # Neutron network client cls.networks_client = cls.manager.networks_client cls.ports_client = cls.manager.ports_client cls.routers_client = cls.manager.routers_client cls.subnets_client = cls.manager.subnets_client cls.floating_ips_client = cls.manager.floating_ips_client cls.security_groups_client = cls.manager.security_groups_client cls.security_group_rules_client = ( cls.manager.security_group_rules_client) if CONF.volume_feature_enabled.api_v2: cls.volumes_client = cls.manager.volumes_v2_client cls.snapshots_client = cls.manager.snapshots_v2_client else: cls.volumes_client = cls.manager.volumes_client cls.snapshots_client = cls.manager.snapshots_client # ## Test functions library # # The create_[resource] functions only return body and discard the # resp part which is not used in scenario tests def _create_port(self, network_id, client=None, namestart='port-quotatest', **kwargs): if not client: client = self.ports_client name = data_utils.rand_name(namestart) result = client.create_port( name=name, network_id=network_id, **kwargs) self.assertIsNotNone(result, 'Unable to allocate port') port = result['port'] self.addCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_port, port['id']) return port def create_keypair(self, client=None): if not client: client = self.keypairs_client name = data_utils.rand_name(self.__class__.__name__) # We don't need to create a keypair by pubkey in scenario body = client.create_keypair(name=name) self.addCleanup(client.delete_keypair, name) return body['keypair'] def create_server(self, name=None, image_id=None, flavor=None, validatable=False, wait_until='ACTIVE', clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. """ # NOTE(jlanoux): As a first step, ssh checks in the scenario # tests need to be run regardless of the run_validation and # validatable parameters and thus until the ssh validation job # becomes voting in CI. The test resources management and IP # association are taken care of in the scenario tests. # Therefore, the validatable parameter is set to false in all # those tests. In this way create_server just return a standard # server and the scenario tests always perform ssh checks. 
# Needed for the cross_tenant_traffic test: if clients is None: clients = self.manager if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-server") vnic_type = CONF.network.port_vnic_type # If vnic_type is configured create port for # every network if vnic_type: ports = [] create_port_body = {'binding:vnic_type': vnic_type, 'namestart': 'port-smoke'} if kwargs: # Convert security group names to security group ids # to pass to create_port if 'security_groups' in kwargs: security_groups = \ clients.security_groups_client.list_security_groups( ).get('security_groups') sec_dict = dict([(s['name'], s['id']) for s in security_groups]) sec_groups_names = [s['name'] for s in kwargs.pop( 'security_groups')] security_groups_ids = [sec_dict[s] for s in sec_groups_names] if security_groups_ids: create_port_body[ 'security_groups'] = security_groups_ids networks = kwargs.pop('networks', []) else: networks = [] # If there are no networks passed to us we look up # for the project's private networks and create a port. # The same behaviour as we would expect when passing # the call to the clients with no networks if not networks: networks = clients.networks_client.list_networks( **{'router:external': False, 'fields': 'id'})['networks'] # It's net['uuid'] if networks come from kwargs # and net['id'] if they come from # clients.networks_client.list_networks for net in networks: net_id = net.get('uuid', net.get('id')) if 'port' not in net: port = self._create_port(network_id=net_id, client=clients.ports_client, **create_port_body) ports.append({'port': port['id']}) else: ports.append({'port': net['port']}) if ports: kwargs['networks'] = ports self.ports = ports tenant_network = self.get_tenant_network() body, servers = compute.create_test_server( clients, tenant_network=tenant_network, wait_until=wait_until, name=name, flavor=flavor, image_id=image_id, **kwargs) self.addCleanup(waiters.wait_for_server_termination, clients.servers_client, body['id']) self.addCleanup(test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, body['id']) server = clients.servers_client.show_server(body['id'])['server'] return server def create_volume(self, size=None, name=None, snapshot_id=None, imageRef=None, volume_type=None): if size is None: size = CONF.volume.volume_size if imageRef: image = self.compute_images_client.show_image(imageRef)['image'] min_disk = image.get('minDisk') size = max(size, min_disk) if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-volume") kwargs = {'display_name': name, 'snapshot_id': snapshot_id, 'imageRef': imageRef, 'volume_type': volume_type, 'size': size} volume = self.volumes_client.create_volume(**kwargs)['volume'] self.addCleanup(self.volumes_client.wait_for_resource_deletion, volume['id']) self.addCleanup(test_utils.call_and_ignore_notfound_exc, self.volumes_client.delete_volume, volume['id']) # NOTE(e0ne): Cinder API v2 uses name instead of display_name if 'display_name' in volume: self.assertEqual(name, volume['display_name']) else: self.assertEqual(name, volume['name']) waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'available') # The volume retrieved on creation has a non-up-to-date status. # Retrieval after it becomes active ensures correct details. 
volume = self.volumes_client.show_volume(volume['id'])['volume'] return volume def create_volume_type(self, client=None, name=None, backend_name=None): if not client: client = self.admin_volume_types_client if not name: class_name = self.__class__.__name__ name = data_utils.rand_name(class_name + '-volume-type') randomized_name = data_utils.rand_name('scenario-type-' + name) LOG.debug("Creating a volume type: %s on backend %s", randomized_name, backend_name) extra_specs = {} if backend_name: extra_specs = {"volume_backend_name": backend_name} body = client.create_volume_type(name=randomized_name, extra_specs=extra_specs) volume_type = body['volume_type'] self.assertIn('id', volume_type) self.addCleanup(client.delete_volume_type, volume_type['id']) return volume_type def _create_loginable_secgroup_rule(self, secgroup_id=None): _client = self.compute_security_groups_client _client_rules = self.compute_security_group_rules_client if secgroup_id is None: sgs = _client.list_security_groups()['security_groups'] for sg in sgs: if sg['name'] == 'default': secgroup_id = sg['id'] # These rules are intended to permit inbound ssh and icmp # traffic from all sources, so no group_id is provided. # Setting a group_id would only permit traffic from ports # belonging to the same security group. rulesets = [ { # ssh 'ip_protocol': 'tcp', 'from_port': 22, 'to_port': 22, 'cidr': '0.0.0.0/0', }, { # ping 'ip_protocol': 'icmp', 'from_port': -1, 'to_port': -1, 'cidr': '0.0.0.0/0', } ] rules = list() for ruleset in rulesets: sg_rule = _client_rules.create_security_group_rule( parent_group_id=secgroup_id, **ruleset)['security_group_rule'] rules.append(sg_rule) return rules def _create_security_group(self): # Create security group sg_name = data_utils.rand_name(self.__class__.__name__) sg_desc = sg_name + " description" secgroup = self.compute_security_groups_client.create_security_group( name=sg_name, description=sg_desc)['security_group'] self.assertEqual(secgroup['name'], sg_name) self.assertEqual(secgroup['description'], sg_desc) self.addCleanup( test_utils.call_and_ignore_notfound_exc, self.compute_security_groups_client.delete_security_group, secgroup['id']) # Add rules to the security group self._create_loginable_secgroup_rule(secgroup['id']) return secgroup def get_remote_client(self, ip_address, username=None, private_key=None): """Get a SSH client to a remote server @param ip_address the server floating or fixed IP address to use for ssh validation @param username name of the Linux account on the remote server @param private_key the SSH private key to use @return a RemoteClient object """ if username is None: username = CONF.validation.image_ssh_user # Set this with 'keypair' or others to log in with keypair or # username/password. if CONF.validation.auth_method == 'keypair': password = None if private_key is None: private_key = self.keypair['private_key'] else: password = CONF.validation.image_ssh_password private_key = None linux_client = remote_client.RemoteClient(ip_address, username, pkey=private_key, password=password) try: linux_client.validate_authentication() except Exception as e: message = ('Initializing SSH connection to %(ip)s failed. 
' 'Error: %(error)s' % {'ip': ip_address, 'error': e}) caller = test_utils.find_test_caller() if caller: message = '(%s) %s' % (caller, message) LOG.exception(message) self._log_console_output() raise return linux_client def _image_create(self, name, fmt, path, disk_format=None, properties=None): if properties is None: properties = {} name = data_utils.rand_name('%s-' % name) params = { 'name': name, 'container_format': fmt, 'disk_format': disk_format or fmt, } if CONF.image_feature_enabled.api_v1: params['is_public'] = 'False' params['properties'] = properties params = {'headers': common_image.image_meta_to_headers(**params)} else: params['visibility'] = 'private' # Additional properties are flattened out in the v2 API. params.update(properties) body = self.image_client.create_image(**params) image = body['image'] if 'image' in body else body self.addCleanup(self.image_client.delete_image, image['id']) self.assertEqual("queued", image['status']) with open(path, 'rb') as image_file: if CONF.image_feature_enabled.api_v1: self.image_client.update_image(image['id'], data=image_file) else: self.image_client.store_image_file(image['id'], image_file) return image['id'] def glance_image_create(self): img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file img_container_format = CONF.scenario.img_container_format img_disk_format = CONF.scenario.img_disk_format img_properties = CONF.scenario.img_properties LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, " "properties: %s, ami: %s, ari: %s, aki: %s", img_path, img_container_format, img_disk_format, img_properties, ami_img_path, ari_img_path, aki_img_path) try: image = self._image_create('scenario-img', img_container_format, img_path, disk_format=img_disk_format, properties=img_properties) except IOError: LOG.debug("A qcow2 image was not found. 
Try to get a uec image.") kernel = self._image_create('scenario-aki', 'aki', aki_img_path) ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path) properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk} image = self._image_create('scenario-ami', 'ami', path=ami_img_path, properties=properties) LOG.debug("image:%s", image) return image def _log_console_output(self, servers=None): if not CONF.compute_feature_enabled.console_output: LOG.debug('Console output not supported, cannot log') return if not servers: servers = self.servers_client.list_servers() servers = servers['servers'] for server in servers: try: console_output = self.servers_client.get_console_output( server['id'])['output'] LOG.debug('Console output for %s\nbody=\n%s', server['id'], console_output) except lib_exc.NotFound: LOG.debug("Server %s disappeared(deleted) while looking " "for the console log", server['id']) def _log_net_info(self, exc): # network debug is called as part of ssh init if not isinstance(exc, lib_exc.SSHTimeout): LOG.debug('Network information on a devstack host') def create_server_snapshot(self, server, name=None): # Glance client _image_client = self.image_client # Compute client _images_client = self.compute_images_client if name is None: name = data_utils.rand_name(self.__class__.__name__ + 'snapshot') LOG.debug("Creating a snapshot image for server: %s", server['name']) image = _images_client.create_image(server['id'], name=name) image_id = image.response['location'].split('images/')[1] waiters.wait_for_image_status(_image_client, image_id, 'active') self.addCleanup(_image_client.wait_for_resource_deletion, image_id) self.addCleanup(test_utils.call_and_ignore_notfound_exc, _image_client.delete_image, image_id) if CONF.image_feature_enabled.api_v1: # In glance v1 the additional properties are stored in the headers. resp = _image_client.check_image(image_id) snapshot_image = common_image.get_image_meta_from_headers(resp) image_props = snapshot_image.get('properties', {}) else: # In glance v2 the additional properties are flattened. 
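# i.e. show_image() returns custom properties such as 'block_device_mapping'
# as top-level keys of the image dict, rather than nested under 'properties'
# as in the v1 header-based branch above.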
snapshot_image = _image_client.show_image(image_id) image_props = snapshot_image bdm = image_props.get('block_device_mapping') if bdm: bdm = json.loads(bdm) if bdm and 'snapshot_id' in bdm[0]: snapshot_id = bdm[0]['snapshot_id'] self.addCleanup( self.snapshots_client.wait_for_resource_deletion, snapshot_id) self.addCleanup(test_utils.call_and_ignore_notfound_exc, self.snapshots_client.delete_snapshot, snapshot_id) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot_id, 'available') image_name = snapshot_image['name'] self.assertEqual(name, image_name) LOG.debug("Created snapshot image %s for server %s", image_name, server['name']) return snapshot_image def nova_volume_attach(self, server, volume_to_attach): volume = self.servers_client.attach_volume( server['id'], volumeId=volume_to_attach['id'], device='/dev/%s' % CONF.compute.volume_device_name)['volumeAttachment'] self.assertEqual(volume_to_attach['id'], volume['id']) waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Return the updated volume after the attachment return self.volumes_client.show_volume(volume['id'])['volume'] def nova_volume_detach(self, server, volume): self.servers_client.detach_volume(server['id'], volume['id']) waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'available') volume = self.volumes_client.show_volume(volume['id'])['volume'] self.assertEqual('available', volume['status']) def rebuild_server(self, server_id, image=None, preserve_ephemeral=False, wait=True, rebuild_kwargs=None): if image is None: image = CONF.compute.image_ref rebuild_kwargs = rebuild_kwargs or {} LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)", server_id, image, preserve_ephemeral) self.servers_client.rebuild_server( server_id=server_id, image_ref=image, preserve_ephemeral=preserve_ephemeral, **rebuild_kwargs) if wait: waiters.wait_for_server_status(self.servers_client, server_id, 'ACTIVE') def ping_ip_address(self, ip_address, should_succeed=True, ping_timeout=None, mtu=None): timeout = ping_timeout or CONF.validation.ping_timeout cmd = ['ping', '-c1', '-w1'] if mtu: cmd += [ # don't fragment '-M', 'do', # ping receives just the size of ICMP payload '-s', str(net_utils.get_ping_payload_size(mtu, 4)) ] cmd.append(ip_address) def ping(): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc.communicate() return (proc.returncode == 0) == should_succeed caller = test_utils.find_test_caller() LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the' ' expected result is %(should_succeed)s', { 'caller': caller, 'ip': ip_address, 'timeout': timeout, 'should_succeed': 'reachable' if should_succeed else 'unreachable' }) result = test_utils.call_until_true(ping, timeout, 1) LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the ' 'ping result is %(result)s', { 'caller': caller, 'ip': ip_address, 'timeout': timeout, 'result': 'expected' if result else 'unexpected' }) return result def check_vm_connectivity(self, ip_address, username=None, private_key=None, should_connect=True, mtu=None): """Check server connectivity :param ip_address: server to test against :param username: server's ssh username :param private_key: server's ssh private key to be used :param should_connect: True/False indicates positive/negative test positive - attempt ping and ssh negative - attempt ping and fail if succeed :param mtu: network MTU to use for connectivity validation :raises: AssertError if the result of the 
connectivity check does not match the value of the should_connect param """ if should_connect: msg = "Timed out waiting for %s to become reachable" % ip_address else: msg = "ip address %s is reachable" % ip_address self.assertTrue(self.ping_ip_address(ip_address, should_succeed=should_connect, mtu=mtu), msg=msg) if should_connect: # no need to check ssh for negative connectivity self.get_remote_client(ip_address, username, private_key) def check_public_network_connectivity(self, ip_address, username, private_key, should_connect=True, msg=None, servers=None, mtu=None): # The target login is assumed to have been configured for # key-based authentication by cloud-init. LOG.debug('checking network connections to IP %s with user: %s', ip_address, username) try: self.check_vm_connectivity(ip_address, username, private_key, should_connect=should_connect, mtu=mtu) except Exception: ex_msg = 'Public network connectivity check failed' if msg: ex_msg += ": " + msg LOG.exception(ex_msg) self._log_console_output(servers) raise def create_floating_ip(self, thing, pool_name=None): """Create a floating IP and associates to a server on Nova""" if not pool_name: pool_name = CONF.network.floating_network_name floating_ip = (self.compute_floating_ips_client. create_floating_ip(pool=pool_name)['floating_ip']) self.addCleanup(test_utils.call_and_ignore_notfound_exc, self.compute_floating_ips_client.delete_floating_ip, floating_ip['id']) self.compute_floating_ips_client.associate_floating_ip_to_server( floating_ip['ip'], thing['id']) return floating_ip def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt', private_key=None): ssh_client = self.get_remote_client(ip_address, private_key=private_key) if dev_name is not None: ssh_client.make_fs(dev_name) ssh_client.mount(dev_name, mount_path) cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path ssh_client.exec_command(cmd_timestamp) timestamp = ssh_client.exec_command('sudo cat %s/timestamp' % mount_path) if dev_name is not None: ssh_client.umount(mount_path) return timestamp def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt', private_key=None): ssh_client = self.get_remote_client(ip_address, private_key=private_key) if dev_name is not None: ssh_client.mount(dev_name, mount_path) timestamp = ssh_client.exec_command('sudo cat %s/timestamp' % mount_path) if dev_name is not None: ssh_client.umount(mount_path) return timestamp def get_server_ip(self, server): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. """ if CONF.validation.connect_method == 'floating': # The tests calling this method don't have a floating IP # and can't make use of the validation resources. So the # method is creating the floating IP there. return self.create_floating_ip(server)['ip'] elif CONF.validation.connect_method == 'fixed': # Determine the network name to look for based on config or creds # provider network resources. 
if CONF.validation.network_for_ssh: addresses = server['addresses'][ CONF.validation.network_for_ssh] else: creds_provider = self._get_credentials_provider() net_creds = creds_provider.get_primary_creds() network = getattr(net_creds, 'network', None) addresses = (server['addresses'][network['name']] if network else []) for address in addresses: if (address['version'] == CONF.validation.ip_version_for_ssh and address['OS-EXT-IPS:type'] == 'fixed'): return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() class NetworkScenarioTest(ScenarioTest): """Base class for network scenario tests. This class provide helpers for network scenario tests, using the neutron API. Helpers from ancestor which use the nova network API are overridden with the neutron API. This Class also enforces using Neutron instead of novanetwork. Subclassed tests will be skipped if Neutron is not enabled """ credentials = ['primary', 'admin'] @classmethod def skip_checks(cls): super(NetworkScenarioTest, cls).skip_checks() if not CONF.service_available.neutron: raise cls.skipException('Neutron not available') def _create_network(self, networks_client=None, tenant_id=None, namestart='network-smoke-', port_security_enabled=True): if not networks_client: networks_client = self.networks_client if not tenant_id: tenant_id = networks_client.tenant_id name = data_utils.rand_name(namestart) network_kwargs = dict(name=name, tenant_id=tenant_id) # Neutron disables port security by default so we have to check the # config before trying to create the network with port_security_enabled if CONF.network_feature_enabled.port_security: network_kwargs['port_security_enabled'] = port_security_enabled result = networks_client.create_network(**network_kwargs) network = result['network'] self.assertEqual(network['name'], name) self.addCleanup(test_utils.call_and_ignore_notfound_exc, networks_client.delete_network, network['id']) return network def _create_subnet(self, network, subnets_client=None, routers_client=None, namestart='subnet-smoke', **kwargs): """Create a subnet for the given network within the cidr block configured for tenant networks. """ if not subnets_client: subnets_client = self.subnets_client if not routers_client: routers_client = self.routers_client def cidr_in_use(cidr, tenant_id): """Check cidr existence :returns: True if subnet with cidr already exist in tenant False else """ cidr_in_use = self.admin_manager.subnets_client.list_subnets( tenant_id=tenant_id, cidr=cidr)['subnets'] return len(cidr_in_use) != 0 ip_version = kwargs.pop('ip_version', 4) if ip_version == 6: tenant_cidr = netaddr.IPNetwork( CONF.network.project_network_v6_cidr) num_bits = CONF.network.project_network_v6_mask_bits else: tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr) num_bits = CONF.network.project_network_mask_bits result = None str_cidr = None # Repeatedly attempt subnet creation with sequential cidr # blocks until an unallocated block is found. 
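# For example (illustrative values only): with a tenant_cidr of
# 10.100.0.0/16 and num_bits of 24, netaddr's subnet() yields the candidate
# blocks 10.100.0.0/24, 10.100.1.0/24, and so on.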
for subnet_cidr in tenant_cidr.subnet(num_bits): str_cidr = str(subnet_cidr) if cidr_in_use(str_cidr, tenant_id=network['tenant_id']): continue subnet = dict( name=data_utils.rand_name(namestart), network_id=network['id'], tenant_id=network['tenant_id'], cidr=str_cidr, ip_version=ip_version, **kwargs ) try: result = subnets_client.create_subnet(**subnet) break except lib_exc.Conflict as e: is_overlapping_cidr = 'overlaps with another subnet' in str(e) if not is_overlapping_cidr: raise self.assertIsNotNone(result, 'Unable to allocate tenant network') subnet = result['subnet'] self.assertEqual(subnet['cidr'], str_cidr) self.addCleanup(test_utils.call_and_ignore_notfound_exc, subnets_client.delete_subnet, subnet['id']) return subnet def _get_server_port_id_and_ip4(self, server, ip_addr=None): ports = self.admin_manager.ports_client.list_ports( device_id=server['id'], fixed_ip=ip_addr)['ports'] # A port can have more than one IP address in some cases. # If the network is dual-stack (IPv4 + IPv6), this port is associated # with 2 subnets p_status = ['ACTIVE'] # NOTE(vsaienko) With Ironic, instances live on separate hardware # servers. Neutron does not bind ports for Ironic instances, as a # result the port remains in the DOWN state. # TODO(vsaienko) remove once bug: #1599836 is resolved. if getattr(CONF.service_available, 'ironic', False): p_status.append('DOWN') port_map = [(p["id"], fxip["ip_address"]) for p in ports for fxip in p["fixed_ips"] if netutils.is_valid_ipv4(fxip["ip_address"]) and p['status'] in p_status] inactive = [p for p in ports if p['status'] != 'ACTIVE'] if inactive: LOG.warning("Instance has ports that are not ACTIVE: %s", inactive) self.assertNotEqual(0, len(port_map), "No IPv4 addresses found in: %s" % ports) self.assertEqual(len(port_map), 1, "Found multiple IPv4 addresses: %s. " "Unable to determine which port to target." 
% port_map) return port_map[0] def _get_network_by_name(self, network_name): net = self.admin_manager.networks_client.list_networks( name=network_name)['networks'] self.assertNotEqual(len(net), 0, "Unable to get network by name: %s" % network_name) return net[0] def create_floating_ip(self, thing, external_network_id=None, port_id=None, client=None): """Create a floating IP and associates to a resource/port on Neutron""" if not external_network_id: external_network_id = CONF.network.public_network_id if not client: client = self.floating_ips_client if not port_id: port_id, ip4 = self._get_server_port_id_and_ip4(thing) else: ip4 = None result = client.create_floatingip( floating_network_id=external_network_id, port_id=port_id, tenant_id=thing['tenant_id'], fixed_ip_address=ip4 ) floating_ip = result['floatingip'] self.addCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_floatingip, floating_ip['id']) return floating_ip def _associate_floating_ip(self, floating_ip, server): port_id, _ = self._get_server_port_id_and_ip4(server) kwargs = dict(port_id=port_id) floating_ip = self.floating_ips_client.update_floatingip( floating_ip['id'], **kwargs)['floatingip'] self.assertEqual(port_id, floating_ip['port_id']) return floating_ip def _disassociate_floating_ip(self, floating_ip): """:param floating_ip: floating_ips_client.create_floatingip""" kwargs = dict(port_id=None) floating_ip = self.floating_ips_client.update_floatingip( floating_ip['id'], **kwargs)['floatingip'] self.assertIsNone(floating_ip['port_id']) return floating_ip def check_floating_ip_status(self, floating_ip, status): """Verifies floatingip reaches the given status :param dict floating_ip: floating IP dict to check status :param status: target status :raises: AssertionError if status doesn't match """ floatingip_id = floating_ip['id'] def refresh(): result = (self.floating_ips_client. show_floatingip(floatingip_id)['floatingip']) return status == result['status'] test_utils.call_until_true(refresh, CONF.network.build_timeout, CONF.network.build_interval) floating_ip = self.floating_ips_client.show_floatingip( floatingip_id)['floatingip'] self.assertEqual(status, floating_ip['status'], message="FloatingIP: {fp} is at status: {cst}. " "failed to reach status: {st}" .format(fp=floating_ip, cst=floating_ip['status'], st=status)) LOG.info("FloatingIP: {fp} is at status: {st}" .format(fp=floating_ip, st=status)) def _check_tenant_network_connectivity(self, server, username, private_key, should_connect=True, servers_for_debug=None): if not CONF.network.project_networks_reachable: msg = 'Tenant networks not configured to be reachable.' LOG.info(msg) return # The target login is assumed to have been configured for # key-based authentication by cloud-init. 
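# server['addresses'] maps each network name to a list of address dicts
# (each carrying an 'addr' key), so every address on every attached tenant
# network is checked below.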
try: for net_name, ip_addresses in server['addresses'].items(): for ip_address in ip_addresses: self.check_vm_connectivity(ip_address['addr'], username, private_key, should_connect=should_connect) except Exception as e: LOG.exception('Tenant network connectivity check failed') self._log_console_output(servers_for_debug) self._log_net_info(e) raise def _check_remote_connectivity(self, source, dest, should_succeed=True, nic=None): """check ping server via source ssh connection :param source: RemoteClient: an ssh connection from which to ping :param dest: and IP to ping against :param should_succeed: boolean should ping succeed or not :param nic: specific network interface to ping from :returns: boolean -- should_succeed == ping :returns: ping is false if ping failed """ def ping_remote(): try: source.ping_host(dest, nic=nic) except lib_exc.SSHExecCommandFailed: LOG.warning('Failed to ping IP: %s via a ssh connection ' 'from: %s.', dest, source.ssh_client.host) return not should_succeed return should_succeed return test_utils.call_until_true(ping_remote, CONF.validation.ping_timeout, 1) def _create_security_group(self, security_group_rules_client=None, tenant_id=None, namestart='secgroup-smoke', security_groups_client=None): if security_group_rules_client is None: security_group_rules_client = self.security_group_rules_client if security_groups_client is None: security_groups_client = self.security_groups_client if tenant_id is None: tenant_id = security_groups_client.tenant_id secgroup = self._create_empty_security_group( namestart=namestart, client=security_groups_client, tenant_id=tenant_id) # Add rules to the security group rules = self._create_loginable_secgroup_rule( security_group_rules_client=security_group_rules_client, secgroup=secgroup, security_groups_client=security_groups_client) for rule in rules: self.assertEqual(tenant_id, rule['tenant_id']) self.assertEqual(secgroup['id'], rule['security_group_id']) return secgroup def _create_empty_security_group(self, client=None, tenant_id=None, namestart='secgroup-smoke'): """Create a security group without rules. Default rules will be created: - IPv4 egress to any - IPv6 egress to any :param tenant_id: secgroup will be created in this tenant :returns: the created security group """ if client is None: client = self.security_groups_client if not tenant_id: tenant_id = client.tenant_id sg_name = data_utils.rand_name(namestart) sg_desc = sg_name + " description" sg_dict = dict(name=sg_name, description=sg_desc) sg_dict['tenant_id'] = tenant_id result = client.create_security_group(**sg_dict) secgroup = result['security_group'] self.assertEqual(secgroup['name'], sg_name) self.assertEqual(tenant_id, secgroup['tenant_id']) self.assertEqual(secgroup['description'], sg_desc) self.addCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_security_group, secgroup['id']) return secgroup def _default_security_group(self, client=None, tenant_id=None): """Get default secgroup for given tenant_id. :returns: default secgroup for given tenant """ if client is None: client = self.security_groups_client if not tenant_id: tenant_id = client.tenant_id sgs = [ sg for sg in list(client.list_security_groups().values())[0] if sg['tenant_id'] == tenant_id and sg['name'] == 'default' ] msg = "No default security group for tenant %s." 
% (tenant_id) self.assertGreater(len(sgs), 0, msg) return sgs[0] def _create_security_group_rule(self, secgroup=None, sec_group_rules_client=None, tenant_id=None, security_groups_client=None, **kwargs): """Create a rule from a dictionary of rule parameters. Create a rule in a secgroup. if secgroup not defined will search for default secgroup in tenant_id. :param secgroup: the security group. :param tenant_id: if secgroup not passed -- the tenant in which to search for default secgroup :param kwargs: a dictionary containing rule parameters: for example, to allow incoming ssh: rule = { direction: 'ingress' protocol:'tcp', port_range_min: 22, port_range_max: 22 } """ if sec_group_rules_client is None: sec_group_rules_client = self.security_group_rules_client if security_groups_client is None: security_groups_client = self.security_groups_client if not tenant_id: tenant_id = security_groups_client.tenant_id if secgroup is None: secgroup = self._default_security_group( client=security_groups_client, tenant_id=tenant_id) ruleset = dict(security_group_id=secgroup['id'], tenant_id=secgroup['tenant_id']) ruleset.update(kwargs) sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset) sg_rule = sg_rule['security_group_rule'] self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id']) self.assertEqual(secgroup['id'], sg_rule['security_group_id']) return sg_rule def _create_loginable_secgroup_rule(self, security_group_rules_client=None, secgroup=None, security_groups_client=None): """Create loginable security group rule This function will create: 1. egress and ingress tcp port 22 allow rule in order to allow ssh access for ipv4. 2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6. 3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4. """ if security_group_rules_client is None: security_group_rules_client = self.security_group_rules_client if security_groups_client is None: security_groups_client = self.security_groups_client rules = [] rulesets = [ dict( # ssh protocol='tcp', port_range_min=22, port_range_max=22, ), dict( # ping protocol='icmp', ), dict( # ipv6-icmp for ping6 protocol='icmp', ethertype='IPv6', ) ] sec_group_rules_client = security_group_rules_client for ruleset in rulesets: for r_direction in ['ingress', 'egress']: ruleset['direction'] = r_direction try: sg_rule = self._create_security_group_rule( sec_group_rules_client=sec_group_rules_client, secgroup=secgroup, security_groups_client=security_groups_client, **ruleset) except lib_exc.Conflict as ex: # if rule already exist - skip rule and continue msg = 'Security group rule already exists' if msg not in ex._error_string: raise ex else: self.assertEqual(r_direction, sg_rule['direction']) rules.append(sg_rule) return rules def _get_router(self, client=None, tenant_id=None): """Retrieve a router for the given tenant id. If a public router has been configured, it will be returned. If a public router has not been configured, but a public network has, a tenant router will be created and returned that routes traffic to the public network. 
""" if not client: client = self.routers_client if not tenant_id: tenant_id = client.tenant_id router_id = CONF.network.public_router_id network_id = CONF.network.public_network_id if router_id: body = client.show_router(router_id) return body['router'] elif network_id: router = self._create_router(client, tenant_id) kwargs = {'external_gateway_info': dict(network_id=network_id)} router = client.update_router(router['id'], **kwargs)['router'] return router else: raise Exception("Neither of 'public_router_id' or " "'public_network_id' has been defined.") def _create_router(self, client=None, tenant_id=None, namestart='router-smoke'): if not client: client = self.routers_client if not tenant_id: tenant_id = client.tenant_id name = data_utils.rand_name(namestart) result = client.create_router(name=name, admin_state_up=True, tenant_id=tenant_id) router = result['router'] self.assertEqual(router['name'], name) self.addCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_router, router['id']) return router def _update_router_admin_state(self, router, admin_state_up): kwargs = dict(admin_state_up=admin_state_up) router = self.routers_client.update_router( router['id'], **kwargs)['router'] self.assertEqual(admin_state_up, router['admin_state_up']) def create_networks(self, networks_client=None, routers_client=None, subnets_client=None, tenant_id=None, dns_nameservers=None, port_security_enabled=True): """Create a network with a subnet connected to a router. The baremetal driver is a special case since all nodes are on the same shared network. :param tenant_id: id of tenant to create resources in. :param dns_nameservers: list of dns servers to send to subnet. :returns: network, subnet, router """ if CONF.network.shared_physical_network: # NOTE(Shrews): This exception is for environments where tenant # credential isolation is available, but network separation is # not (the current baremetal case). 
Likely can be removed when # test account mgmt is reworked: # https://blueprints.launchpad.net/tempest/+spec/test-accounts if not CONF.compute.fixed_network_name: m = 'fixed_network_name must be specified in config' raise lib_exc.InvalidConfiguration(m) network = self._get_network_by_name( CONF.compute.fixed_network_name) router = None subnet = None else: network = self._create_network( networks_client=networks_client, tenant_id=tenant_id, port_security_enabled=port_security_enabled) router = self._get_router(client=routers_client, tenant_id=tenant_id) subnet_kwargs = dict(network=network, subnets_client=subnets_client, routers_client=routers_client) # use explicit check because empty list is a valid option if dns_nameservers is not None: subnet_kwargs['dns_nameservers'] = dns_nameservers subnet = self._create_subnet(**subnet_kwargs) if not routers_client: routers_client = self.routers_client router_id = router['id'] routers_client.add_router_interface(router_id, subnet_id=subnet['id']) # save a cleanup job to remove this association between # router and subnet self.addCleanup(test_utils.call_and_ignore_notfound_exc, routers_client.remove_router_interface, router_id, subnet_id=subnet['id']) return network, subnet, router class EncryptionScenarioTest(ScenarioTest): """Base class for encryption scenario tests""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(EncryptionScenarioTest, cls).setup_clients() if CONF.volume_feature_enabled.api_v2: cls.admin_volume_types_client = cls.os_adm.volume_types_v2_client cls.admin_encryption_types_client =\ cls.os_adm.encryption_types_v2_client else: cls.admin_volume_types_client = cls.os_adm.volume_types_client cls.admin_encryption_types_client =\ cls.os_adm.encryption_types_client def create_encryption_type(self, client=None, type_id=None, provider=None, key_size=None, cipher=None, control_location=None): if not client: client = self.admin_encryption_types_client if not type_id: volume_type = self.create_volume_type() type_id = volume_type['id'] LOG.debug("Creating an encryption type for volume type: %s", type_id) client.create_encryption_type( type_id, provider=provider, key_size=key_size, cipher=cipher, control_location=control_location)['encryption'] class ObjectStorageScenarioTest(ScenarioTest): """Provide harness to do Object Storage scenario tests. Subclasses implement the tests that use the methods provided by this class. 
""" @classmethod def skip_checks(cls): super(ObjectStorageScenarioTest, cls).skip_checks() if not CONF.service_available.swift: skip_msg = ("%s skipped as swift is not available" % cls.__name__) raise cls.skipException(skip_msg) @classmethod def setup_credentials(cls): cls.set_network_resources() super(ObjectStorageScenarioTest, cls).setup_credentials() operator_role = CONF.object_storage.operator_role cls.os_operator = cls.get_client_manager(roles=[operator_role]) @classmethod def setup_clients(cls): super(ObjectStorageScenarioTest, cls).setup_clients() # Clients for Swift cls.account_client = cls.os_operator.account_client cls.container_client = cls.os_operator.container_client cls.object_client = cls.os_operator.object_client def get_swift_stat(self): """get swift status for our user account.""" self.account_client.list_account_containers() LOG.debug('Swift status information obtained successfully') def create_container(self, container_name=None): name = container_name or data_utils.rand_name( 'swift-scenario-container') self.container_client.create_container(name) # look for the container to assure it is created self.list_and_check_container_objects(name) LOG.debug('Container %s created', name) self.addCleanup(test_utils.call_and_ignore_notfound_exc, self.container_client.delete_container, name) return name def delete_container(self, container_name): self.container_client.delete_container(container_name) LOG.debug('Container %s deleted', container_name) def upload_object_to_container(self, container_name, obj_name=None): obj_name = obj_name or data_utils.rand_name('swift-scenario-object') obj_data = data_utils.random_bytes() self.object_client.create_object(container_name, obj_name, obj_data) self.addCleanup(test_utils.call_and_ignore_notfound_exc, self.object_client.delete_object, container_name, obj_name) return obj_name, obj_data def delete_object(self, container_name, filename): self.object_client.delete_object(container_name, filename) self.list_and_check_container_objects(container_name, not_present_obj=[filename]) def list_and_check_container_objects(self, container_name, present_obj=None, not_present_obj=None): # List objects for a given container and assert which are present and # which are not. if present_obj is None: present_obj = [] if not_present_obj is None: not_present_obj = [] _, object_list = self.container_client.list_container_contents( container_name) if present_obj: for obj in present_obj: self.assertIn(obj, object_list) if not_present_obj: for obj in not_present_obj: self.assertNotIn(obj, object_list) def change_container_acl(self, container_name, acl): metadata_param = {'metadata_prefix': 'x-container-', 'metadata': {'read': acl}} self.container_client.update_container_metadata(container_name, **metadata_param) resp, _ = self.container_client.list_container_metadata(container_name) self.assertEqual(resp['x-container-read'], acl) def download_and_verify(self, container_name, obj_name, expected_data): _, obj = self.object_client.get_object(container_name, obj_name) self.assertEqual(obj, expected_data) tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/scenario/test_taas.py000066400000000000000000000023331314312670600307670ustar00rootroot00000000000000# Copyright (c) 2015 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib import decorators from tempest import test from neutron_taas.tests.tempest_plugin.tests.scenario import base CONF = config.CONF class TestTaaS(base.TaaSScenarioTest): @classmethod def resource_setup(cls): super(TestTaaS, cls).resource_setup() for ext in ['taas']: if not test.is_extension_enabled(ext, 'network'): msg = "%s Extension not enabled." % ext raise cls.skipException(msg) @decorators.idempotent_id('40903cbd-0e3c-464d-b311-dc77d3894e65') def test_dummy(self): pass tap-as-a-service-2.0.0/neutron_taas/tests/tempest_plugin/tests/taas_client.py000066400000000000000000000050611314312670600274640ustar00rootroot00000000000000# Copyright (c) 2015 Midokura SARL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest import config from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from neutron_taas.tests.tempest_plugin.services import client CONF = config.CONF class TaaSClientMixin(object): @classmethod def resource_setup(cls): super(TaaSClientMixin, cls).resource_setup() manager = cls.manager cls.tap_services_client = client.TapServicesClient( manager.auth_provider, CONF.network.catalog_type, CONF.network.region or CONF.identity.region, endpoint_type=CONF.network.endpoint_type, build_interval=CONF.network.build_interval, build_timeout=CONF.network.build_timeout, **manager.default_params) cls.tap_flows_client = client.TapFlowsClient( manager.auth_provider, CONF.network.catalog_type, CONF.network.region or CONF.identity.region, endpoint_type=CONF.network.endpoint_type, build_interval=CONF.network.build_interval, build_timeout=CONF.network.build_timeout, **manager.default_params) def create_tap_service(self, **kwargs): body = self.tap_services_client.create_tap_service( name=data_utils.rand_name("tap_service"), **kwargs) tap_service = body['tap_service'] self.addCleanup(test_utils.call_and_ignore_notfound_exc, self.tap_services_client.delete_tap_service, tap_service['id']) return tap_service def create_tap_flow(self, **kwargs): body = self.tap_flows_client.create_tap_flow( name=data_utils.rand_name("tap_service"), **kwargs) tap_flow = body['tap_flow'] self.addCleanup(test_utils.call_and_ignore_notfound_exc, self.tap_flows_client.delete_tap_flow, tap_flow['id']) return tap_flow 
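
# The snippet below is an illustrative (hypothetical) sketch of how this
# mixin is intended to be consumed; the test class, base class and port
# helpers named here are placeholders that do not exist in this package.
# A test mixes TaaSClientMixin into a tempest test class, lets
# resource_setup() build the tap_services/tap_flows clients, and then calls
# the helpers above so that cleanup is registered automatically:
#
#     class TapServiceLifecycleTest(TaaSClientMixin, SomeTempestBaseTest):
#
#         def test_tap_service_lifecycle(self):
#             monitored_port = self.create_port(self.network)
#             destination_port = self.create_port(self.network)
#             tap_service = self.create_tap_service(
#                 port_id=destination_port['id'])
#             self.create_tap_flow(tap_service_id=tap_service['id'],
#                                  source_port=monitored_port['id'],
#                                  direction='BOTH')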
tap-as-a-service-2.0.0/neutron_taas/tests/unit/000077500000000000000000000000001314312670600214005ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/__init__.py000066400000000000000000000000001314312670600234770ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/db/000077500000000000000000000000001314312670600217655ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/db/__init__.py000066400000000000000000000000001314312670600240640ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/db/test_migrations.py000066400000000000000000000044311314312670600255540ustar00rootroot00000000000000# Copyright 2016 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.functional.db import test_migrations from neutron.tests.unit import testlib_api from neutron_taas.db import head # EXTERNAL_TABLES should contain all names of tables that are not related to # current repo. EXTERNAL_TABLES = set(external.TABLES) VERSION_TABLE = 'taas_alembic_version' class _TestModelsMigrationsTAAS(test_migrations._TestModelsMigrations): def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name.startswith('alembic') or name == VERSION_TABLE or name in EXTERNAL_TABLES): return False if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): return False return True class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestModelsMigrationsTAAS, testlib_api.SqlTestCaseLight): pass class TestModelsMigrationsPostgresql(testlib_api.PostgreSQLTestCaseMixin, _TestModelsMigrationsTAAS, testlib_api.SqlTestCaseLight): pass tap-as-a-service-2.0.0/neutron_taas/tests/unit/db/test_taas_db.py000066400000000000000000000213661314312670600250030ustar00rootroot00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.tests.unit import testlib_api from neutron_lib import context from oslo_utils import importutils from oslo_utils import uuidutils from neutron_taas.db import taas_db from neutron_taas.extensions import taas DB_PLUGIN_KLAAS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' _uuid = uuidutils.generate_uuid class TaaSDbTestCase(testlib_api.SqlTestCase): """Unit test for TaaS DB support.""" def setUp(self): super(TaaSDbTestCase, self).setUp() self.ctx = context.get_admin_context() self.mixin = taas_db.Taas_db_Mixin() self.plugin = importutils.import_object(DB_PLUGIN_KLAAS) self.tenant_id = 'fake-tenant-id' def _get_tap_service_data(self, name='ts-1', port_id=None): port_id = port_id or _uuid() return {"tap_service": {"name": name, "tenant_id": self.tenant_id, "description": "test tap service", "port_id": port_id}} def _get_tap_flow_data(self, tap_service_id, name='tf-1', direction='BOTH', source_port=None): source_port = source_port or _uuid() return {"tap_flow": {"name": name, "tenant_id": self.tenant_id, "description": "test tap flow", "tap_service_id": tap_service_id, "source_port": source_port, "direction": direction}} def _get_tap_service(self, tap_service_id): """Helper method to retrieve tap service.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.get_tap_service(self.ctx, tap_service_id) def _get_tap_services(self): """Helper method to retrieve all tap services.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.get_tap_services(self.ctx) def _create_tap_service(self, tap_service): """Helper method to create tap service.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.create_tap_service(self.ctx, tap_service) def _update_tap_service(self, tap_service_id, tap_service): """Helper method to update tap service.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.update_tap_service(self.ctx, tap_service_id, tap_service) def _delete_tap_service(self, tap_service_id): """Helper method to delete tap service.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.delete_tap_service(self.ctx, tap_service_id) def _get_tap_flow(self, tap_flow_id): """Helper method to retrieve tap flow.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.get_tap_flow(self.ctx, tap_flow_id) def _get_tap_flows(self): """Helper method to retrieve all tap flows.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.get_tap_flows(self.ctx) def _create_tap_flow(self, tap_flow): """Helper method to create tap flow.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.create_tap_flow(self.ctx, tap_flow) def _update_tap_flow(self, tap_flow_id, tap_flow): """Helper method to update tap flow.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.update_tap_flow(self.ctx, tap_flow_id, tap_flow) def _delete_tap_flow(self, tap_flow_id): """Helper method to delete tap flow.""" with self.ctx.session.begin(subtransactions=True): return self.mixin.delete_tap_flow(self.ctx, tap_flow_id) def test_tap_service_get(self): """Test to retrieve a tap service from the database.""" name = 'test-tap-service' data = self._get_tap_service_data(name=name) result = self._create_tap_service(data) get_result = self._get_tap_service(result['id']) self.assertEqual(name, get_result['name']) def test_tap_service_create(self): """Test to create a tap service in the database.""" name = 'test-tap-service' port_id = _uuid() data = 
self._get_tap_service_data(name=name, port_id=port_id) result = self._create_tap_service(data) self.assertEqual(name, result['name']) self.assertEqual(port_id, result['port_id']) def test_tap_service_list(self): """Test to retrieve all tap services from the database.""" name_1 = "ts-1" data_1 = self._get_tap_service_data(name=name_1) name_2 = "ts-2" data_2 = self._get_tap_service_data(name=name_2) self._create_tap_service(data_1) self._create_tap_service(data_2) tap_services = self._get_tap_services() self.assertEqual(2, len(tap_services)) def test_tap_service_update(self): """Test to update a tap service in the database.""" original_name = "ts-1" updated_name = "ts-1-got-updated" data = self._get_tap_service_data(name=original_name) ts = self._create_tap_service(data) updated_data = self._get_tap_service_data(name=updated_name) ts_updated = self._update_tap_service(ts['id'], updated_data) self.assertEqual(updated_name, ts_updated['name']) def test_tap_service_delete(self): """Test to delete a tap service from the database.""" data = self._get_tap_service_data() result = self._create_tap_service(data) self._delete_tap_service(result['id']) self.assertRaises(taas.TapServiceNotFound, self._get_tap_service, result['id']) def test_tap_flow_get(self): """Test to retrieve a tap flow from the database.""" ts_data = self._get_tap_service_data() ts = self._create_tap_service(ts_data) tf_name = 'test-tap-flow' tf_data = self._get_tap_flow_data(tap_service_id=ts['id'], name=tf_name) tf = self._create_tap_flow(tf_data) get_tf = self._get_tap_flow(tf['id']) self.assertEqual(tf_name, get_tf['name']) def test_tap_flow_create(self): """Test to create a tap flow in the database.""" ts_data = self._get_tap_service_data() ts = self._create_tap_service(ts_data) tf_name = 'test-tap-flow' tf_direction = 'IN' tf_source_port = _uuid() tf_data = self._get_tap_flow_data(tap_service_id=ts['id'], name=tf_name, source_port=tf_source_port, direction=tf_direction) tf = self._create_tap_flow(tf_data) self.assertEqual(tf_name, tf['name']) self.assertEqual(tf_direction, tf['direction']) self.assertEqual(tf_source_port, tf['source_port']) def test_tap_flow_list(self): """Test to retrieve all tap flows from the database.""" ts_data = self._get_tap_service_data() ts = self._create_tap_service(ts_data) tf_1_name = "tf-1" tf_1_data = self._get_tap_flow_data(tap_service_id=ts['id'], name=tf_1_name) tf_2_name = "tf-2" tf_2_data = self._get_tap_flow_data(tap_service_id=ts['id'], name=tf_2_name) self._create_tap_flow(tf_1_data) self._create_tap_flow(tf_2_data) tap_flows = self._get_tap_flows() self.assertEqual(2, len(tap_flows)) def test_tap_flow_delete(self): """Test to delete a tap flow from the database.""" ts_data = self._get_tap_service_data() ts = self._create_tap_service(ts_data) tf_name = "test-tap-flow" tf_data = self._get_tap_flow_data(tap_service_id=ts['id'], name=tf_name) tf = self._create_tap_flow(tf_data) self._delete_tap_flow(tf['id']) self.assertRaises(taas.TapFlowNotFound, self._get_tap_flow, tf['id']) tap-as-a-service-2.0.0/neutron_taas/tests/unit/extensions/000077500000000000000000000000001314312670600235775ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/extensions/__init__.py000066400000000000000000000000001314312670600256760ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/extensions/test_taas.py000066400000000000000000000072771314312670600261550ustar00rootroot00000000000000# Copyright 2017 FUJITSU LABORATORIES LTD. 
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from webob import exc from oslo_utils import uuidutils from neutron.tests.unit.api.v2 import test_base as test_api_v2 from neutron.tests.unit.extensions import base as test_api_v2_extension from neutron_taas.extensions import taas as taas_ext _uuid = uuidutils.generate_uuid _get_path = test_api_v2._get_path TAP_SERVICE_PATH = 'taas/tap_services' TAP_FLOW_PATH = 'taas/tap_flows' class TaasExtensionTestCase(test_api_v2_extension.ExtensionTestCase): fmt = 'json' def setUp(self): super(TaasExtensionTestCase, self).setUp() self._setUpExtension( 'neutron_taas.extensions.taas.TaasPluginBase', 'TAAS', taas_ext.RESOURCE_ATTRIBUTE_MAP, taas_ext.Taas, 'taas', plural_mappings={} ) def test_create_tap_service(self): tenant_id = _uuid() tap_service_data = { 'tenant_id': tenant_id, 'name': 'MyTap', 'description': 'This is my tap service', 'port_id': _uuid(), 'project_id': tenant_id, } data = {'tap_service': tap_service_data} expected_ret_val = copy.copy(data['tap_service']) expected_ret_val.update({'id': _uuid()}) instance = self.plugin.return_value instance.create_tap_service.return_value = expected_ret_val res = self.api.post(_get_path(TAP_SERVICE_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_tap_service.assert_called_with( mock.ANY, tap_service=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('tap_service', res) self.assertEqual(expected_ret_val, res['tap_service']) def test_delete_tap_service(self): self._test_entity_delete('tap_service') def test_create_tap_flow(self): tenant_id = _uuid() tap_flow_data = { 'tenant_id': tenant_id, 'name': 'MyTapFlow', 'description': 'This is my tap flow', 'direction': 'BOTH', 'tap_service_id': _uuid(), 'source_port': _uuid(), 'project_id': tenant_id, } data = {'tap_flow': tap_flow_data} expected_ret_val = copy.copy(data['tap_flow']) expected_ret_val.update({'id': _uuid()}) instance = self.plugin.return_value instance.create_tap_flow.return_value = expected_ret_val res = self.api.post(_get_path(TAP_FLOW_PATH, fmt=self.fmt), self.serialize(data), content_type='application/%s' % self.fmt) instance.create_tap_flow.assert_called_with( mock.ANY, tap_flow=data) self.assertEqual(exc.HTTPCreated.code, res.status_int) res = self.deserialize(res) self.assertIn('tap_flow', res) self.assertEqual(expected_ret_val, res['tap_flow']) def test_delete_tap_flow(self): self._test_entity_delete('tap_flow') 
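
# Possible follow-up coverage (illustrative only, not part of this test
# case): list and show requests can be exercised against the same mocked
# plugin using the helpers already imported above, roughly as follows:
#
#     def test_list_tap_services(self):
#         instance = self.plugin.return_value
#         instance.get_tap_services.return_value = []
#         res = self.api.get(_get_path(TAP_SERVICE_PATH, fmt=self.fmt))
#         self.assertEqual(exc.HTTPOk.code, res.status_int)
#         self.assertEqual([], self.deserialize(res)['tap_services'])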
tap-as-a-service-2.0.0/neutron_taas/tests/unit/services/000077500000000000000000000000001314312670600232235ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/services/__init__.py000066400000000000000000000000001314312670600253220ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/services/taas/000077500000000000000000000000001314312670600241535ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/services/taas/__init__.py000066400000000000000000000000001314312670600262520ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/services/taas/test_taas_plugin.py000066400000000000000000000303141314312670600300730ustar00rootroot00000000000000# Copyright (C) 2015 Midokura SARL. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock import testtools from neutron_lib import context from neutron_lib.utils import net as n_utils from oslo_config import cfg from oslo_utils import uuidutils import neutron.common.rpc as n_rpc from neutron.tests.unit import testlib_api import neutron_taas.db.taas_db # noqa import neutron_taas.extensions.taas as taas_ext from neutron_taas.services.taas.service_drivers import taas_agent_api from neutron_taas.services.taas import taas_plugin class DummyError(Exception): pass class TestTaasPlugin(testlib_api.SqlTestCase): def setUp(self): super(TestTaasPlugin, self).setUp() mock.patch.object(n_rpc, 'create_connection', auto_spec=True).start() mock.patch.object(taas_agent_api, 'TaasCallbacks', auto_spec=True).start() mock.patch.object(taas_agent_api, 'TaasAgentApi', auto_spec=True).start() self.driver = mock.MagicMock() mock.patch('neutron.services.service_base.load_drivers', return_value=({'dummy_provider': self.driver}, 'dummy_provider')).start() mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance', return_value=mock.MagicMock()).start() self._plugin = taas_plugin.TaasPlugin() self._context = context.get_admin_context() self._project_id = self._tenant_id = 'tenant-X' self._network_id = uuidutils.generate_uuid() self._host_id = 'host-A' self._port_id = uuidutils.generate_uuid() self._port_details = { 'tenant_id': self._tenant_id, 'binding:host_id': self._host_id, 'mac_address': n_utils.get_random_mac( 'fa:16:3e:00:00:00'.split(':')), } self._tap_service = { 'tenant_id': self._tenant_id, 'name': 'MyTap', 'description': 'This is my tap service', 'port_id': self._port_id, 'project_id': self._project_id, } self._tap_flow = { 'description': 'This is my tap flow', 'direction': 'BOTH', 'name': 'MyTapFlow', 'source_port': self._port_id, 'tenant_id': self._tenant_id, 'project_id': self._project_id, } @contextlib.contextmanager def tap_service(self): req = { 'tap_service': self._tap_service, } with mock.patch.object(self._plugin, '_get_port_details', return_value=self._port_details): yield self._plugin.create_tap_service(self._context, req) self._tap_service['id'] = mock.ANY self._tap_service['status'] = 'ACTIVE' self.driver.assert_has_calls([ 
mock.call.create_tap_service_precommit(mock.ANY), mock.call.create_tap_service_postcommit(mock.ANY), ]) pre_args = self.driver.create_tap_service_precommit.call_args[0][0] self.assertEqual(self._context, pre_args._plugin_context) self.assertEqual(self._tap_service, pre_args.tap_service) post_args = self.driver.create_tap_service_postcommit.call_args[0][0] self.assertEqual(self._context, post_args._plugin_context) self.assertEqual(self._tap_service, post_args.tap_service) @contextlib.contextmanager def tap_flow(self, tap_service, tenant_id=None): self._tap_flow['tap_service_id'] = tap_service if tenant_id is not None: self._tap_flow['tenant_id'] = tenant_id req = { 'tap_flow': self._tap_flow, } with mock.patch.object(self._plugin, '_get_port_details', return_value=self._port_details): yield self._plugin.create_tap_flow(self._context, req) self._tap_flow['id'] = mock.ANY self._tap_flow['status'] = 'ACTIVE' self._tap_service['id'] = mock.ANY self.driver.assert_has_calls([ mock.call.create_tap_flow_precommit(mock.ANY), mock.call.create_tap_flow_postcommit(mock.ANY), ]) pre_args = self.driver.create_tap_flow_precommit.call_args[0][0] self.assertEqual(self._context, pre_args._plugin_context) self.assertEqual(self._tap_flow, pre_args.tap_flow) post_args = self.driver.create_tap_flow_postcommit.call_args[0][0] self.assertEqual(self._context, post_args._plugin_context) self.assertEqual(self._tap_flow, post_args.tap_flow) def test_create_tap_service(self): with self.tap_service(): pass def test_verify_taas_id_reused(self): # make small range id cfg.CONF.set_override("vlan_range_start", 1, group="taas") cfg.CONF.set_override("vlan_range_end", 3, group="taas") with self.tap_service() as ts_1, self.tap_service() as ts_2, \ self.tap_service() as ts_3, self.tap_service() as ts_4: ts_id_1 = ts_1['id'] ts_id_2 = ts_2['id'] ts_id_3 = ts_3['id'] tap_id_assoc_1 = self._plugin.create_tap_id_association( self._context, ts_id_1) tap_id_assoc_2 = self._plugin.create_tap_id_association( self._context, ts_id_2) self.assertEqual(set([1, 2]), set([tap_id_assoc_1['taas_id'], tap_id_assoc_2['taas_id']])) with testtools.ExpectedException(taas_ext.TapServiceLimitReached): self._plugin.create_tap_id_association( self._context, ts_4['id'] ) # free an tap_id and verify could reallocate same taas id self._plugin.delete_tap_service(self._context, ts_id_1) tap_id_assoc_3 = self._plugin.create_tap_id_association( self._context, ts_id_3) self.assertEqual(set([1, 2]), set([tap_id_assoc_3['taas_id'], tap_id_assoc_2['taas_id']])) def test_create_tap_service_wrong_tenant_id(self): self._port_details['tenant_id'] = 'other-tenant' with testtools.ExpectedException(taas_ext.PortDoesNotBelongToTenant), \ self.tap_service(): pass self.assertEqual([], self.driver.mock_calls) def test_create_tap_service_reach_limit(self): # TODO(Yoichiro):Need to move this test to taas_rpc test pass def test_create_tap_service_failed_on_service_driver(self): attr = {'create_tap_service_postcommit.side_effect': DummyError} self.driver.configure_mock(**attr) with testtools.ExpectedException(DummyError): req = { 'tap_service': self._tap_service, } with mock.patch.object(self._plugin, '_get_port_details', return_value=self._port_details): self._plugin.create_tap_service(self._context, req) def test_delete_tap_service(self): with self.tap_service() as ts: self._plugin.delete_tap_service(self._context, ts['id']) self.driver.assert_has_calls([ mock.call.delete_tap_service_precommit(mock.ANY), mock.call.delete_tap_service_postcommit(mock.ANY), ]) pre_args = 
self.driver.delete_tap_service_precommit.call_args[0][0] self.assertEqual(self._context, pre_args._plugin_context) self.assertEqual(self._tap_service, pre_args.tap_service) post_args = self.driver.delete_tap_service_postcommit.call_args[0][0] self.assertEqual(self._context, post_args._plugin_context) self.assertEqual(self._tap_service, post_args.tap_service) def test_delete_tap_service_with_flow(self): with self.tap_service() as ts, \ self.tap_flow(tap_service=ts['id']): self._plugin.delete_tap_service(self._context, ts['id']) self.driver.assert_has_calls([ mock.call.delete_tap_flow_precommit(mock.ANY), mock.call.delete_tap_flow_postcommit(mock.ANY), mock.call.delete_tap_service_precommit(mock.ANY), mock.call.delete_tap_service_postcommit(mock.ANY), ]) pre_args = self.driver.delete_tap_flow_precommit.call_args[0][0] self.assertEqual(self._context, pre_args._plugin_context) self.assertEqual(self._tap_flow, pre_args.tap_flow) post_args = self.driver.delete_tap_flow_postcommit.call_args[0][0] self.assertEqual(self._context, post_args._plugin_context) self.assertEqual(self._tap_flow, post_args.tap_flow) pre_args = self.driver.delete_tap_service_precommit.call_args[0][0] self.assertEqual(self._context, pre_args._plugin_context) self.assertEqual(self._tap_service, pre_args.tap_service) post_args = self.driver.delete_tap_service_postcommit.call_args[0][0] self.assertEqual(self._context, post_args._plugin_context) self.assertEqual(self._tap_service, post_args.tap_service) def test_delete_tap_service_non_existent(self): with testtools.ExpectedException(taas_ext.TapServiceNotFound): self._plugin.delete_tap_service(self._context, 'non-existent') def test_delete_tap_service_failed_on_service_driver(self): attr = {'delete_tap_service_postcommit.side_effect': DummyError} self.driver.configure_mock(**attr) with self.tap_service() as ts: with testtools.ExpectedException(DummyError): self._plugin.delete_tap_service(self._context, ts['id']) def test_create_tap_flow(self): with self.tap_service() as ts, self.tap_flow(tap_service=ts['id']): pass def test_create_tap_flow_wrong_tenant_id(self): with self.tap_service() as ts, \ testtools.ExpectedException(taas_ext.TapServiceNotBelongToTenant), \ self.tap_flow(tap_service=ts['id'], tenant_id='other-tenant'): pass def test_create_tap_flow_failed_on_service_driver(self): with self.tap_service() as ts: attr = {'create_tap_flow_postcommit.side_effect': DummyError} self.driver.configure_mock(**attr) with testtools.ExpectedException(DummyError): self._tap_flow['tap_service_id'] = ts['id'] req = { 'tap_flow': self._tap_flow, } with mock.patch.object(self._plugin, '_get_port_details', return_value=self._port_details): self._plugin.create_tap_flow(self._context, req) def test_delete_tap_flow(self): with self.tap_service() as ts, \ self.tap_flow(tap_service=ts['id']) as tf: self._plugin.delete_tap_flow(self._context, tf['id']) self._tap_flow['id'] = tf['id'] self.driver.assert_has_calls([ mock.call.delete_tap_flow_precommit(mock.ANY), mock.call.delete_tap_flow_postcommit(mock.ANY), ]) pre_args = self.driver.delete_tap_flow_precommit.call_args[0][0] self.assertEqual(self._context, pre_args._plugin_context) self.assertEqual(self._tap_flow, pre_args.tap_flow) post_args = self.driver.delete_tap_flow_postcommit.call_args[0][0] self.assertEqual(self._context, post_args._plugin_context) self.assertEqual(self._tap_flow, post_args.tap_flow) def test_delete_tap_flow_failed_on_service_driver(self): with self.tap_service() as ts, \ self.tap_flow(tap_service=ts['id']) as tf: attr = 
{'delete_tap_flow_postcommit.side_effect': DummyError} self.driver.configure_mock(**attr) with testtools.ExpectedException(DummyError): self._plugin.delete_tap_flow(self._context, tf['id']) tap-as-a-service-2.0.0/neutron_taas/tests/unit/taas_client/000077500000000000000000000000001314312670600236665ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/taas_client/__init__.py000066400000000000000000000000001314312670600257650ustar00rootroot00000000000000tap-as-a-service-2.0.0/neutron_taas/tests/unit/taas_client/test_cli20_tapflow.py000066400000000000000000000111231314312670600277420ustar00rootroot00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock import sys from neutron_taas.taas_client import tapflow from neutronclient import shell from neutronclient.tests.unit import test_cli20 class CLITestV20TapFlowJSON(test_cli20.CLITestV20Base): resource = 'tap_flow' resource_plural = '%ss' % resource def setUp(self): self._mock_extension_loading() super(CLITestV20TapFlowJSON, self).setUp() self.resources = self.resource_plural self.register_non_admin_status_resource(self.resource) def _create_patch(self, name, func=None): patcher = mock.patch(name) thing = patcher.start() return thing def _mock_extension_loading(self): ext_pkg = 'neutronclient.common.extension' contrib = self._create_patch(ext_pkg + '._discover_via_entry_points') contrib.return_value = [("_tap_flow", tapflow)] return contrib def test_ext_cmd_loaded(self): shell.NeutronShell('2.0') extension_cmd = {'tap-flow-create': tapflow.CreateTapFlow, 'tap-flow-delete': tapflow.DeleteTapFlow, 'tap-flow-show': tapflow.ShowTapFlow, 'tap-flow-list': tapflow.ListTapFlow} self.assertDictContainsSubset(extension_cmd, shell.COMMANDS['2.0']) def _test_create_tap_flow(self, port_id="random_port", service_id="random_service", direction="BOTH", arg_attr=None, name_attr=None, val_attr=None, name=''): # Common definition for creating Tap flow arg_attr = arg_attr or [] name_attr = name_attr or [] val_attr = val_attr or [] cmd = tapflow.CreateTapFlow(test_cli20.MyApp(sys.stdout), None) tenant_id = 'my-tenant' my_id = 'my-id' args = ['--tenant-id', tenant_id, '--port', port_id, '--tap-service', service_id, '--direction', direction] + arg_attr pos_names = ['source_port', 'tap_service_id', 'direction'] + name_attr pos_values = [port_id, service_id, direction] + val_attr self._test_create_resource(self.resource, cmd, name, my_id, args, pos_names, pos_values, tenant_id=tenant_id) def test_create_tap_flow_mandatory_params(self): self._test_create_tap_flow() def test_create_tap_flow_all_params(self): name = 'dummyTapFlow' description = 'Create a dummy tap flow' self._test_create_tap_flow(name=name, arg_attr=[ '--name', name, '--description', description], name_attr=['name', 'description'], val_attr=[name, description]) def test_delete_tap_flow(self): # Delete tap_flow: myid. 
cmd = tapflow.DeleteTapFlow(test_cli20.MyApp(sys.stdout), None) myid = 'myid' args = [myid] self._test_delete_resource(self.resource, cmd, myid, args) def test_update_tap_flow(self): # Update tap_flow: myid --name myname. cmd = tapflow.UpdateTapFlow(test_cli20.MyApp(sys.stdout), None) self._test_update_resource(self.resource, cmd, 'myid', ['myid', '--name', 'myname'], {'name': 'myname'}) def test_list_tap_flows(self): # List tap_flows. cmd = tapflow.ListTapFlow(test_cli20.MyApp(sys.stdout), None) self._test_list_resources(self.resources, cmd, True) def test_show_tap_flow(self): # Show tap_flow: --fields id --fields name myid. cmd = tapflow.ShowTapFlow(test_cli20.MyApp(sys.stdout), None) args = ['--fields', 'id', '--fields', 'name', self.test_id] self._test_show_resource(self.resource, cmd, self.test_id, args, ['id', 'name']) tap-as-a-service-2.0.0/neutron_taas/tests/unit/taas_client/test_cli20_tapservice.py000066400000000000000000000113741314312670600304430ustar00rootroot00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock import sys from neutron_taas.taas_client import tapservice from neutronclient import shell from neutronclient.tests.unit import test_cli20 class CLITestV20TapServiceJSON(test_cli20.CLITestV20Base): resource = 'tap_service' resource_plural = '%ss' % resource def setUp(self): self._mock_extension_loading() super(CLITestV20TapServiceJSON, self).setUp() self.resources = self.resource_plural self.register_non_admin_status_resource(self.resource) def _create_patch(self, name, func=None): patcher = mock.patch(name) thing = patcher.start() return thing def _mock_extension_loading(self): ext_pkg = 'neutronclient.common.extension' contrib = self._create_patch(ext_pkg + '._discover_via_entry_points') contrib.return_value = [("_tap_service", tapservice)] return contrib def test_ext_cmd_loaded(self): shell.NeutronShell('2.0') extension_cmd = {'tap-service-create': tapservice.CreateTapService, 'tap-service-delete': tapservice.DeleteTapService, 'tap-service-show': tapservice.ShowTapService, 'tap-service-list': tapservice.ListTapService} self.assertDictContainsSubset(extension_cmd, shell.COMMANDS['2.0']) def _test_create_tap_service(self, port_id="random_port", name='', args_attr=None, position_names_attr=None, position_values_attr=None): cmd = tapservice.CreateTapService(test_cli20.MyApp(sys.stdout), None) args_attr = args_attr or [] position_names_attr = position_names_attr or [] position_values_attr = position_values_attr or [] name = name tenant_id = 'my-tenant' my_id = 'my-id' args = ['--tenant-id', tenant_id, '--port', port_id] + args_attr position_names = ['port_id'] + position_names_attr position_values = [port_id] + position_values_attr self._test_create_resource(self.resource, cmd, name, my_id, args, position_names, position_values, tenant_id=tenant_id) def test_create_tap_service_mandatory_params(self): # Create tap_service: --port random_port self._test_create_tap_service() def 
test_create_tap_service_all_params(self): # Create tap_service with mandatory params, --name and --description name = 'new-tap-service' description = 'This defines a new tap-service' args_attr = ['--name', name, '--description', description] position_names_attr = ['name', 'description'] position_val_attr = [name, description] self._test_create_tap_service(name=name, args_attr=args_attr, position_names_attr=position_names_attr, position_values_attr=position_val_attr) def test_delete_tap_service(self): # Delete tap_service: myid. cmd = tapservice.DeleteTapService(test_cli20.MyApp(sys.stdout), None) myid = 'myid' args = [myid] self._test_delete_resource(self.resource, cmd, myid, args) def test_update_tap_service(self): # Update tap_service: myid --name myname. cmd = tapservice.UpdateTapService(test_cli20.MyApp(sys.stdout), None) self._test_update_resource(self.resource, cmd, 'myid', ['myid', '--name', 'myname'], {'name': 'myname'}) def test_list_tap_services(self): # List tap_services. cmd = tapservice.ListTapService(test_cli20.MyApp(sys.stdout), None) self._test_list_resources(self.resources, cmd, True) def test_show_tap_service(self): # Show tap_service: --fields id --fields name myid. cmd = tapservice.ShowTapService(test_cli20.MyApp(sys.stdout), None) args = ['--fields', 'id', '--fields', 'name', self.test_id] self._test_show_resource(self.resource, cmd, self.test_id, args, ['id', 'name']) tap-as-a-service-2.0.0/openstack-common.conf000066400000000000000000000002131314312670600206750ustar00rootroot00000000000000[DEFAULT] # The list of modules to copy from oslo-incubator.git # The base module to hold the copy of openstack.common base=neutron_taas tap-as-a-service-2.0.0/requirements.txt000066400000000000000000000004161314312670600200420ustar00rootroot00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr!=2.1.0,>=2.0.0 # Apache-2.0 Babel!=2.4.0,>=2.3.4 # BSD tap-as-a-service-2.0.0/setup.cfg000066400000000000000000000036711314312670600164050ustar00rootroot00000000000000[metadata] name = tap-as-a-service summary = Tap-as-a-Service (TaaS) is an extension to the OpenStack network service (Neutron), it provides remote port mirroring capability for tenant virtual networks. 
description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = neutron_taas [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = neutron_taas/locale domain = neutron_taas [update_catalog] domain = neutron_taas output_dir = neutron_taas/locale input_file = neutron_taas/locale/neutron_taas.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = neutron_taas/locale/neutron_taas.pot [entry_points] neutron.agent.l2.extensions = taas = neutron_taas.services.taas.agents.extensions.taas:TaasAgentExtension neutron_taas.taas.agent_drivers = ovs = neutron_taas.services.taas.drivers.linux.ovs_taas:OvsTaasDriver neutron.service_plugins = taas = neutron_taas.services.taas.taas_plugin:TaasPlugin neutron.db.alembic_migrations = tap-as-a-service = neutron_taas.db.migration:alembic_migration tempest.test_plugins = tap-as-a-service = neutron_taas.tests.tempest_plugin.plugin:NeutronTaaSPlugin neutronclient.extension = tap_service = neutron_taas.taas_client.tapservice tap_flow = neutron_taas.taas_client.tapflow [pbr] autodoc_index_modules = True warnerrors = True tap-as-a-service-2.0.0/setup.py000066400000000000000000000020061314312670600162650ustar00rootroot00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) tap-as-a-service-2.0.0/specs/000077500000000000000000000000001314312670600156725ustar00rootroot00000000000000tap-as-a-service-2.0.0/specs/index.rst000066400000000000000000000002541314312670600175340ustar00rootroot00000000000000.. tap-as-a-service specs documentation index ============== Specifications ============== Mitaka specs ============ .. toctree:: :glob: :maxdepth: 1 mitaka/* tap-as-a-service-2.0.0/specs/mitaka/000077500000000000000000000000001314312670600171405ustar00rootroot00000000000000tap-as-a-service-2.0.0/specs/mitaka/tap-as-a-service.rst000066400000000000000000000510371314312670600227410ustar00rootroot00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. 
http://creativecommons.org/licenses/by/3.0/legalcode ============================ Tap-as-a-Service for Neutron ============================ Launchpad blueprint: https://blueprints.launchpad.net/neutron/+spec/tap-as-a-service This spec explains an extension for the port mirroring functionality. Port mirroring involves sending a copy of packets ingressing and/or egressing one port (where ingress means entering a VM and egress means leaving a VM) to another port, (usually different from the packet's original destination). A port could be attached to a VM or a networking resource like router. While the blueprint describes the functionality of mirroring Neutron ports as an extension to the port object, the spec proposes to offer port mirroring as a service, which will enable more advanced use-cases (e.g. intrusion detection) to be deployed. The proposed port mirroring capability shall be introduced in Neutron as a service called "Tap-as-a-Service". Problem description =================== Neutron currently does not support the functionality of port mirroring for tenant networks. This feature will greatly benefit tenants and admins, who want to debug their virtual networks and gain visibility into their VMs by monitoring and analyzing the network traffic associated with them (e.g. IDS). This spec focuses on mirroring traffic from one Neutron port to another; future versions may address mirroring from a Neutron port to an arbitrary interface (not managed by Neutron) on a compute host or the network controller. Different usage scenarios for the service are listed below: 1. Tapping/mirroring network traffic ingressing and/or egressing a particular Neutron port. 2. Tapping/mirroring all network traffic on a tenant network. 3. Tenant or admin will be able to do tap/traffic mirroring based on a policy rule and set destination as a Neutron port, which can be linked to a virtual machine as normal Nova operations or to a physical machine via l2-gateway functionality. 4. Admin will be able to do packet level network debugging for the virtual network. 5. Provide a way for real time analytics based on different criteria, like tenants, ports, traffic types (policy) etc. Note that some of the above use-cases are not covered by this proposal, at least for the first step. Proposed change =============== The proposal is to introduce a new Neutron service plugin, called "Tap-as-a-Service", which provides tapping (port-mirroring) capability for Neutron networks; tenant or provider networks. This service will be modeled similar to other Neutron services such as the firewall, load-balancer, L3-router etc. The proposed service will allow the tenants to create a tap service instance to which they can add Neutron ports that need to be mirrored by creating tap flows. The tap service itself will be a Neutron port, which will be the destination port for the mirrored traffic. The destination Tap-as-a-Service Neutron port should be created beforehand on a network owned by the tenant who is requesting the service. The ports to be mirrored that are added to the service must be owned by the same tenant who created the tap service instance. Even on a shared network, a tenant will only be allowed to mirror the traffic from ports that they own on the shared network and not traffic from ports that they do not own on the shared network. The ports owned by the tenant that are mirrored can be on networks other than the network on which tap service port is created. 
This allows the tenant to mirror traffic from any port it owns on a network on to the same Tap-as-a-Service Neutron port. The tenant can launch a VM specifying the tap destination port for the VM interface (--nic port-id=tap_port_uuid), thus receiving mirrored traffic for further processing (dependent on use case) on that VM. The following would be the work flow for using this service from a tenant's point of view 0. Create a Neutron port which will be used as the destination port. This can be a part of ordinary VM launch. 1. Create a tap service instance, specifying the Neutron port. 2. If you haven't yet, launch a monitoring or traffic analysis VM and connect it to the destination port for the tap service instance. 3. Associate Neutron ports with a tap service instance if/when they need to be monitored. 4. Disassociate Neutron ports from a tap service instance if/when they no longer need to be monitored. 5. Destroy a tap-service instance when it is no longer needed. 6. Delete the destination port when it is no longer neeeded. Please note that the normal work flow of launching a VM is not affected while using TaaS. Alternatives ------------ As an alternative to introducing port mirroring functionality under Neutron services, it could be added as an extension to the existing Neutron v2 APIs. Data model impact ----------------- Tap-as-a-Service introduces the following data models into Neutron as database schemas. 1. tap_service +-------------+--------+----------+-----------+---------------+-------------------------+ | Attribute | Type | Access | Default | Validation/ | Description | | Name | | (CRUD) | Value | Conversion | | +=============+========+==========+===========+===============+=========================+ | id | UUID | R, all | generated | N/A | UUID of the tap | | | | | | | service instance. | +-------------+--------+----------+-----------+---------------+-------------------------+ | project_id | String | CR, all | Requester | N/A | ID of the | | | | | | | project creating | | | | | | | the service | +-------------+--------+----------+-----------+---------------+-------------------------+ | name | String | CRU, all | Empty | N/A | Name for the service | | | | | | | instance. | +-------------+--------+----------+-----------+---------------+-------------------------+ | description | String | CRU, all | Empty | N/A | Description of the | | | | | | | service instance. | +-------------+--------+----------+-----------+---------------+-------------------------+ | port_id | UUID | CR, all | N/A | UUID of a | An existing Neutron port| | | | | | valid Neutron | to which traffic will | | | | | | port | be mirrored | +-------------+--------+----------+-----------+---------------+-------------------------+ | status | String | R, all | N/A | N/A | The operation status of | | | | | | | the resource | | | | | | | (ACTIVE, PENDING_foo, | | | | | | | ERROR, ...) | +-------------+--------+----------+-----------+---------------+-------------------------+ 2. tap_flow +----------------+--------+----------+-----------+---------------+-------------------------+ | Attribute | Type | Access | Default | Validation/ | Description | | Name | | (CRUD) | Value | Conversion | | +================+========+==========+===========+===============+=========================+ | id | UUID | R, all | generated | N/A | UUID of the | | | | | | | tap flow instance. 
| +----------------+--------+----------+-----------+---------------+-------------------------+ | name | String | CRU, all | Empty | N/A | Name for the tap flow | | | | | | | instance. | +----------------+--------+----------+-----------+---------------+-------------------------+ | description | String | CRU, all | Empty | N/A | Description of the | | | | | | | tap flow instance. | +----------------+--------+----------+-----------+---------------+-------------------------+ | tap_service_id | UUID | CR, all | N/A | Valid tap | UUID of the tap | | | | | | service UUID | service instance. | +----------------+--------+----------+-----------+---------------+-------------------------+ | source_port | UUID | CR, all | N/A | UUID of a | UUID of the Neutron | | | | | | valid Neutron | port that needed to be | | | | | | port | mirrored | +----------------+--------+----------+-----------+---------------+-------------------------+ | direction | ENUM | CR, all | BOTH | | Whether to mirror the | | | (IN, | | | | traffic leaving or | | | OUT, | | | | arriving at the | | | BOTH) | | | | source port | | | | | | | IN: Network -> VM | | | | | | | OUT: VM -> Network | +----------------+--------+----------+-----------+---------------+-------------------------+ | status | String | R, all | N/A | N/A | The operation status of | | | | | | | the resource | | | | | | | (ACTIVE, PENDING_foo, | | | | | | | ERROR, ...) | +----------------+--------+----------+-----------+---------------+-------------------------+ REST API impact --------------- Tap-as-a-Service shall be offered over the RESTFull API interface under the following namespace: http://wiki.openstack.org/Neutron/TaaS/API_1.0 The resource attribute map for TaaS is provided below: .. code-block:: python direction_enum = ['IN', 'OUT', 'BOTH'] RESOURCE_ATTRIBUTE_MAP = { 'tap_service': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'project_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'required_by_policy': True, 'is_visible': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'port_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, 'tap_flow': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'tap_service_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'required_by_policy': True, 'is_visible': True}, 'source_port': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'required_by_policy': True, 'is_visible': True}, 'direction': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': direction_enum}, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, } } Security impact --------------- A TaaS instance comprises a collection of source Neutron ports (whose ingress and/or egress traffic are being mirrored) and a 
destination Neutron port (where the mirrored traffic is received). Security Groups will be handled differently for these two classes of ports, as described below: Destination Side: Ingress Security Group filters, including the filter that prevents MAC-address spoofing, will be disabled for the destination Neutron port. This will ensure that all of the mirrored packets received at this port are able to reach the monitoring VM attached to it. Source Side: Ideally it would be nice to mirror all packets entering and/or leaving the virtual NICs associated with the VMs that are being monitored. This means capturing ingress traffic after it passes the inbound Security Group filters and capturing egress traffic before it passes the outbound Security Group filters. However, due to the manner in which Security Groups are currently implemented in OpenStack (i.e. north of the Open vSwitch ports, using Linux IP Tables) this is not possible because port mirroring support resides inside Open vSwitch. Therefore, in the first version of TaaS, Security Groups will be ignored for the source Neutron ports; this effectively translates into capturing ingress traffic before it passes the inbound Security Group filters and capturing egress traffic after it passes the outbound Security Group filters. In other words, port mirroring will be implemented for all packets entering and/or leaving the Open vSwitch ports associated with the respective virtual NICs of the VMs that are being monitored. There is a separate effort that has been initiated to implement Security Groups within OpenvSwitch. A later version of TaaS may make use of this feature, if and when it is available, so that we can realize the ideal behavior described above. It should be noted that such an enhancement should not require a change to the TaaS data model. Keeping data privacy aspects in mind and preventing the data center admin from snooping on tenant's network traffic without their knowledge, the admin shall not be allowed to mirror traffic from any ports that belong to tenants. Hence creation of 'Tap_Flow' is only permitted on ports that are owned by the creating tenant. If an admin wants to monitor tenant's traffic, the admin will have to join that tenant as a member. This will ensure that the tenant is aware that the admin might be monitoring their traffic. Notifications impact -------------------- A set of new RPC calls for communication between the TaaS server and agents are required and will be put in place as part of the reference implementation. IPv6 impact -------------------- None Other end user impact --------------------- Users will be able to invoke and access the TaaS APIs through python-neutronclient. Performance Impact ------------------ The performance impact of mirroring traffic needs to be examined and quantified. The impact of a tenant potentially mirroring all traffic from all ports could be large and needs more examination. Some alternatives to reduce the amount of mirrored traffic are listed below. 1. Rate limiting on the ports being mirrored. 2. Filters to select certain flows ingressing/egressing a port to be mirrored. 3. Having a quota on the number of TaaS Flows that can be defined by the tenant. Other deployer impact --------------------- Configurations for the service plugin will be added later. A new bridge (br-tap) mentioned in Implementation section. Developer impact ---------------- This will be a new extension API, and will not affect the existing API. 
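To make the tenant-ownership rule described in the Security impact section
above more concrete, the check amounts to something like the following
sketch. This is an illustration only, not the plugin implementation; the
exception class here merely stands in for the one exposed by the TaaS
extension.

.. code-block:: python

    class PortDoesNotBelongToTenant(Exception):
        """Stand-in for the TaaS extension exception of the same name."""

    def validate_source_port_owner(port_details, tenant_id):
        # Tap flows (and tap services) may only reference ports that are
        # owned by the tenant issuing the request.
        if port_details['tenant_id'] != tenant_id:
            raise PortDoesNotBelongToTenant(
                "port %s is not owned by tenant %s" %
                (port_details['id'], tenant_id))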
Community impact ---------------- None Follow up work -------------- Going forward, TaaS would be incorporated with Service Insertion [2]_ similar to other existing services like FWaaS, LBaaS, and VPNaaS. While integrating Tap-as-a-Service with Service Insertion the key changes to the data model needed would be the removal of 'network_id' and 'port_id' from the 'Tap_Service' data model. Some policy based filtering rules would help alleviate the potential performance issues. We might want to ensure exclusive use of the destination port. We might want to create the destination port automatically on tap-service creation, rather than specifying an existing port. In that case, network_id should be taken as a parameter for tap-service creation, instead of port_id. We might want to allow the destination port be used for purposes other than just launching a VM on it, for example the port could be used as an 'external-port' [1]_ to get the mirrored data out from the tenant virtual network on a device or network not managed by openstack. We might want to introduce a way to tap a whole traffic for the specified network. We need a mechanism to coordinate usage of various resources with other agent extensions. E.g. OVS flows, tunnel IDs, VLAN IDs. Implementation ============== The reference implementation for TaaS will be based on Open vSwitch. In addition to the existing integration (br-int) and tunnel (br-tun) bridges, a separate tap bridge (br-tap) will be used. The tap bridge provides nice isolation for supporting more complex TaaS features (e.g. filtering mirrored packets) in the future. The tapping operation will be realized by adding higher priority flows in br-int, which duplicate the ingress and/or egress packets associated with specific ports (belonging to the VMs being monitored) and send the copies to br-tap. Packets sent to br-tap will also be tagged with an appropriate VLAN id corresponding to the associated TaaS instance (in the initial release these VLAN ids may be reserved from highest to lowest; in later releases it should be coordinated with the Neutron service). The original packets will continue to be processed normally, so as not to affect the traffic patterns of the VMs being monitored. Flows will be placed in br-tap to determine if the mirrored traffic should be sent to br-tun or not. If the destination port of a Tap-aaS instance happens to reside on the same host as a source port, packets from that source port will be returned to br-int; otherwise they will be forwarded to br-tun for delivery to a remote node. Packets arriving at br-tun from br-tap will get routed to the destination ports of appropriate TaaS instances using the same GRE or VXLAN tunnel network that is used to pass regular traffic between hosts. Separate tunnel IDs will be used to isolate different TaaS instances from one another and from the normal (non-mirrored) traffic passing through the bridge. This will ensure that proper action can be taken on the receiving end of a tunnel so that mirrored traffic is sent to br-tap instead of br-int. Special flows will be used in br-tun to automatically learn about the location of the destination ports of TaaS instances. Packets entering br-tap from br-tun will be forwarded to br-int only if the destination port of the corresponding TaaS instance resides on the same host. Finally, packets entering br-int from br-tap will be delivered to the appropriate destination port after the TaaS instance VLAN id is replaced with the VLAN id for the port. 
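As a concrete but purely illustrative sketch of the "reserved from highest
to lowest" VLAN id scheme mentioned above, per-tap-service identifiers could
be handed out along the following lines; the range boundaries used here are
placeholders, not values taken from the reference driver.

.. code-block:: python

    def allocate_taas_vlan(allocated, vlan_max=4094, vlan_min=3900):
        # Walk the reserved range from the top down and hand out the first
        # identifier not already in use; ids freed on tap-service deletion
        # become reusable.
        for vlan_id in range(vlan_max, vlan_min - 1, -1):
            if vlan_id not in allocated:
                allocated.add(vlan_id)
                return vlan_id
        raise ValueError("no free TaaS VLAN id in range %d-%d" %
                         (vlan_min, vlan_max))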
Assignee(s)
-----------

* Vinay Yadhav

Work Items
----------

* TaaS API and data model implementation.
* TaaS OVS driver.
* OVS agent changes for port mirroring.

Dependencies
============

None

Testing
=======

* Unit tests to be added.
* Functional tests in Tempest to be added.
* API tests in Tempest to be added.

Documentation Impact
====================

* User documentation needs to be updated.
* Developer documentation needs to be updated.

References
==========

.. [1] External port
   https://review.openstack.org/#/c/87825
.. [2] Service base and insertion
   https://review.openstack.org/#/c/93128
.. [3] NFV unaddressed interfaces
   https://review.openstack.org/#/c/97715/

tap-as-a-service-2.0.0/test-requirements.txt000066400000000000000000000011661314312670600210220ustar00rootroot00000000000000
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
python-subunit>=0.0.18 # Apache-2.0/BSD
sphinx>=1.6.2 # BSD
psycopg2>=2.5 # LGPL/ZPL
PyMySQL>=0.7.6 # MIT License
oslosphinx>=4.7.0 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0
os-testr>=0.8.0 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testresources>=0.2.4 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
tap-as-a-service-2.0.0/tools/000077500000000000000000000000001314312670600157155ustar00rootroot00000000000000
tap-as-a-service-2.0.0/tools/test-setup.sh000077500000000000000000000035031314312670600203720ustar00rootroot00000000000000
#!/bin/bash -xe

# This script will be run by OpenStack CI before unit tests are run;
# it sets up the test system as needed.
# Developers should set up their test systems in a similar way.

# This setup needs to be run as a user that can run sudo.

# The root password for the MySQL database; pass it in via
# MYSQL_ROOT_PW.
DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave}

# This user and its password are used by the tests; if you change them,
# your tests might fail.
DB_USER=openstack_citest
DB_PW=openstack_citest

sudo -H mysqladmin -u root password $DB_ROOT_PW

# It's best practice to remove anonymous users from the database. If
# an anonymous user exists, then it matches first for connections and
# other connections from that host will not work.
sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
    DELETE FROM mysql.user WHERE User='';
    FLUSH PRIVILEGES;
    GRANT ALL PRIVILEGES ON *.*
        TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;"

# Now create our database.
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
    SET default_storage_engine=MYISAM;
    DROP DATABASE IF EXISTS openstack_citest;
    CREATE DATABASE openstack_citest CHARACTER SET utf8;"

# Same for PostgreSQL

# Setup user
root_roles=$(sudo -H -u postgres psql -t -c "
   SELECT 'HERE' from pg_roles where rolname='$DB_USER'")
if [[ ${root_roles} == *HERE ]];then
    sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'"
else
    sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'"
fi

# Store password for tests
cat << EOF > $HOME/.pgpass
*:*:*:$DB_USER:$DB_PW
EOF
chmod 0600 $HOME/.pgpass

# Now create our database
psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest"
createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest
tap-as-a-service-2.0.0/tools/tox_install.sh000077500000000000000000000004111314312670600206100ustar00rootroot00000000000000
#! /bin/sh

set -e

DIR=$(dirname $0)
${DIR}/tox_install_project.sh neutron neutron $*

CONSTRAINTS_FILE=$1
shift

install_cmd="pip install"
if [ $CONSTRAINTS_FILE != "unconstrained" ]; then
    install_cmd="$install_cmd -c$CONSTRAINTS_FILE"
fi

$install_cmd -U $*
tap-as-a-service-2.0.0/tools/tox_install_project.sh000077500000000000000000000026531314312670600223470ustar00rootroot00000000000000
#!/bin/sh

# Many of neutron's repos suffer from the problem of depending on neutron,
# but neutron not existing on PyPI.

# This wrapper for tox's package installer will use the existing package
# if it exists, else use zuul-cloner if that program exists, else grab it
# from neutron master via a hard-coded URL. That last case should only
# happen with devs running unit tests locally.

# From the tox.ini config page:
# install_command=ARGV
# default:
# pip install {opts} {packages}

PROJ=$1
MOD=$2
shift 2

ZUUL_CLONER=/usr/zuul-env/bin/zuul-cloner
neutron_installed=$(echo "import ${MOD}" | python 2>/dev/null ; echo $?)
BRANCH_NAME=master

set -e

CONSTRAINTS_FILE=$1
shift

install_cmd="pip install"
if [ $CONSTRAINTS_FILE != "unconstrained" ]; then
    install_cmd="$install_cmd -c$CONSTRAINTS_FILE"
fi

if [ $neutron_installed -eq 0 ]; then
    echo "ALREADY INSTALLED" > /tmp/tox_install-${PROJ}.txt
    echo "${PROJ} already installed; using existing package"
elif [ -x "$ZUUL_CLONER" ]; then
    echo "ZUUL CLONER" > /tmp/tox_install-${PROJ}.txt
    cwd=$(/bin/pwd)
    cd /tmp
    $ZUUL_CLONER --cache-dir \
        /opt/git \
        --branch ${BRANCH_NAME} \
        git://git.openstack.org \
        openstack/${PROJ}
    cd openstack/${PROJ}
    $install_cmd -e .
    cd "$cwd"
else
    echo "PIP HARDCODE" > /tmp/tox_install-${PROJ}.txt
    $install_cmd -U -egit+https://git.openstack.org/openstack/${PROJ}@${BRANCH_NAME}#egg=${PROJ}
fi
tap-as-a-service-2.0.0/tox.ini000066400000000000000000000025241314312670600160730ustar00rootroot00000000000000
[tox]
envlist = docs,py35,py27,pep8
minversion = 1.8
skipsdist = True

[testenv]
setenv = VIRTUAL_ENV={envdir}
         PYTHONWARNINGS=default::DeprecationWarning
usedevelop = True
install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands = find . -type f -name "*.py[c|o]" -delete
           find . -type d -name "__pycache__" -delete
           ostestr --regex '{posargs}'
whitelist_externals = find

[tox:jenkins]
sitepackages = True

[testenv:py27]
setenv = OS_FAIL_ON_MISSING_DEPS=1

[testenv:pep8]
commands = flake8
           neutron-db-manage --subproject tap-as-a-service --database-connection sqlite:// check_migration

[testenv:venv]
commands = {posargs}

[testenv:cover]
commands = python setup.py testr --coverage --coverage-package-name=neutron_taas --testr-args='{posargs}'
           coverage report

[testenv:docs]
commands = python setup.py build_sphinx

[testenv:debug]
commands = oslo_debug_helper {posargs}

[flake8]
# E123, E125 skipped as they are invalid PEP-8.
show-source = True
ignore = E123,E125
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build

[hacking]
import_exceptions = neutron_taas._i18n